Merge "dwc3-msm: Fix panic issue during unbind of msm_dwc3"
diff --git a/Android.bp b/Android.bp
new file mode 100644
index 0000000..4341e3a
--- /dev/null
+++ b/Android.bp
@@ -0,0 +1,27 @@
+cc_binary_host {
+    name: "unifdef",
+    srcs: ["scripts/unifdef.c"],
+    sanitize: {
+        never: true,
+    }
+}
+
+gensrcs {
+    name: "qseecom-kernel-includes",
+
+    // move to out/ as root for header generation because of scripts/unifdef
+    // storage - at the expense of extra ../ references
+    cmd: "pushd out && mkdir -p scripts && rm -f scripts/unifdef && ln -s ../../$(location unifdef) scripts/unifdef && ../$(location scripts/headers_install.sh) `dirname ../$(out)` ../ $(in) && popd",
+
+    tools: ["unifdef"],
+    tool_files: ["scripts/headers_install.sh"],
+    export_include_dirs: ["include/uapi"],
+    srcs: ["include/uapi/linux/qseecom.h"],
+    output_extension: "h",
+}
+
+cc_library_headers {
+    name: "qseecom-kernel-headers",
+    generated_headers: ["qseecom-kernel-includes"],
+    export_generated_headers: ["qseecom-kernel-includes"],
+}
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index dfd56ec..6d75a9c 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -355,6 +355,7 @@
 		/sys/devices/system/cpu/vulnerabilities/meltdown
 		/sys/devices/system/cpu/vulnerabilities/spectre_v1
 		/sys/devices/system/cpu/vulnerabilities/spectre_v2
+		/sys/devices/system/cpu/vulnerabilities/spec_store_bypass
 Date:		January 2018
 Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
 Description:	Information about CPU vulnerabilities
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index db7aab1..f82da9b 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -101,6 +101,7 @@
 Contact:	"Jaegeuk Kim" <jaegeuk@kernel.org>
 Description:
 		 Controls the trimming rate in batch mode.
+		 <deprecated>
 
 What:		/sys/fs/f2fs/<disk>/cp_interval
 Date:		October 2015
@@ -192,3 +193,14 @@
 Contact:	"Sheng Yong" <shengyong1@huawei.com>
 Description:
 		 Controls readahead inode block in readdir.
+
+What:		/sys/fs/f2fs/<disk>/extension_list
+Date:		February 2018
+Contact:	"Chao Yu" <yuchao0@huawei.com>
+Description:
+		 Used to configure the extension list:
+		 - Query: cat /sys/fs/f2fs/<disk>/extension_list
+		 - Add: echo '[h/c]extension' > /sys/fs/f2fs/<disk>/extension_list
+		 - Del: echo '[h/c]!extension' > /sys/fs/f2fs/<disk>/extension_list
+		 - [h] means add/del hot file extension
+		 - [c] means add/del cold file extension
diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
index 1699a55..ef63996 100644
--- a/Documentation/device-mapper/thin-provisioning.txt
+++ b/Documentation/device-mapper/thin-provisioning.txt
@@ -112,9 +112,11 @@
 free space on the data device drops below this level then a dm event
 will be triggered which a userspace daemon should catch allowing it to
 extend the pool device.  Only one such event will be sent.
-Resuming a device with a new table itself triggers an event so the
-userspace daemon can use this to detect a situation where a new table
-already exceeds the threshold.
+
+No special event is triggered if a just-resumed device's free space is below
+the low water mark. However, resuming a device always triggers an
+event; a userspace daemon should verify that free space exceeds the low
+water mark when handling this event.
 
 A low water mark for the metadata device is maintained in the kernel and
 will trigger a dm event if free space on the metadata device drops below
diff --git a/Documentation/device-mapper/verity.txt b/Documentation/device-mapper/verity.txt
index 89fd8f9..b3d2e4a 100644
--- a/Documentation/device-mapper/verity.txt
+++ b/Documentation/device-mapper/verity.txt
@@ -109,6 +109,17 @@
     This is the offset, in <data_block_size> blocks, from the start of the
     FEC device to the beginning of the encoding data.
 
+check_at_most_once
+    Verify data blocks only the first time they are read from the data device,
+    rather than every time.  This reduces the overhead of dm-verity so that it
+    can be used on systems that are memory and/or CPU constrained.  However, it
+    provides a reduced level of security because only offline tampering of the
+    data device's content will be detected, not online tampering.
+
+    Hash blocks are still verified each time they are read from the hash device,
+    since verification of hash blocks is less performance critical than that
+    of data blocks, and a hash block is not verified again once all the data
+    blocks it covers have been verified anyway.
 
 Theory of operation
 ===================
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index bda03f0..ee9b465 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -1,222 +1,12 @@
 * CoreSight Components:
 
 CoreSight components are compliant with the ARM CoreSight architecture
-specification and can be connected in various topologies to suite a particular
-SoCs tracing needs. These trace components can generally be classified as sinks,
-links and sources. Trace data produced by one or more sources flows through the
-intermediate links connecting the source to the currently selected sink. Each
-CoreSight component device should use these properties to describe its hardware
-characteristcs.
-
-Required properties:
-
-- compatible : name of the component used for driver matching, should be one of
-	the following:
-	"arm,coresight-tmc" for coresight tmc-etr or tmc-etf device,
-	"arm,coresight-tpiu" for coresight tpiu device,
-	"qcom,coresight-replicator" for coresight replicator device,
-	"arm,coresight-funnel" for coresight funnel devices,
-	"qcom,coresight-tpda" for coresight tpda device,
-	"qcom,coresight-tpdm" for coresight tpdm device,
-	"qcom,coresight-dbgui" for coresight dbgui device
-	"arm,coresight-stm" for coresight stm trace device,
-	"arm,coresight-etm" for coresight etm trace devices,
-	"arm,coresight-etmv4" for coresight etmv4 trace devices,
-	"qcom,coresight-csr" for coresight csr device,
-	"arm,coresight-cti" for coresight cti devices,
-	"qcom,coresight-hwevent" for coresight hardware event devices
-	"arm,coresight-fuse" for coresight fuse v1 device,
-	"arm,coresight-fuse-v2" for coresight fuse v2 device,
-	"arm,coresight-fuse-v3" for coresight fuse v3 device,
-	"qcom,coresight-remote-etm" for coresight remote processor etm trace device,
-	"qcom,coresight-qpdi" for coresight qpdi device
-- reg : physical base address and length of the register set(s) of the component.
-	Not required for the following compatible string:
-	- "qcom,coresight-remote-etm"
-- reg-names : names corresponding to each reg property value.
-	Not required for the following compatible string:
-	- "qcom,coresight-remote-etm"
-	The reg-names that need to be used with corresponding compatible string
-	for a coresight device are:
-	- for coresight tmc-etr or tmc-etf device:
-		compatible : should be "arm,coresight-tmc"
-		reg-names  : should be:
-			"tmc-base" - physical base address of tmc configuration
-				registers
-			"bam-base" - physical base address of tmc-etr bam registers
-	- for coresight tpiu device:
-		compatible : should be "arm,coresight-tpiu"
-		reg-names  : should be:
-			"tpiu-base" - physical base address of tpiu registers
-	- for coresight replicator device
-		compatible : should be "qcom,coresight-replicator"
-		reg-names  : should be:
-			"replicator-base" - physical base address of replicator
-				registers
-	- for coresight funnel devices
-		compatible : should be "arm,coresight-funnel"
-		reg-names  : should be:
-			"funnel-base" - physical base address of funnel registers
-	- for coresight tpda trace device
-		compatible : should be "qcom,coresight-tpda"
-		reg-names  : should be:
-			"tpda-base" - physical base address of tpda registers
-	- for coresight tpdm trace device
-		compatible : should be "qcom,coresight-tpdm"
-		reg-names  : should be:
-			"tpdm-base" - physical base address of tpdm registers
-	- for coresight dbgui device:
-		compatible : should be "qcom,coresight-dbgui"
-		reg-names  : should be:
-			"dbgui-base" - physical base address of dbgui registers
-	- for coresight stm trace device
-		compatible : should be "arm,coresight-stm"
-		reg-names  : should be:
-			"stm-base" - physical base address of stm configuration
-				registers
-			"stm-data-base" - physical base address of stm data registers
-	- for coresight etm trace devices
-		compatible : should be "arm,coresight-etm"
-		reg-names  : should be:
-			"etm-base" - physical base address of etm registers
-	- for coresight etmv4 trace devices
-		compatible : should be "arm,coresight-etmv4"
-		reg-names  : should be:
-			"etm-base" - physical base address of etmv4 registers
-	- for coresight csr device:
-		compatible : should be "qcom,coresight-csr"
-		reg-names  : should be:
-			"csr-base" - physical base address of csr registers
-	- for coresight cti devices:
-		compatible : should be "arm,coresight-cti"
-		reg-names  : should be:
-			"cti<num>-base" - physical base address of cti registers
-	- for coresight hardware event devices:
-		compatible : should be "qcom,coresight-hwevent"
-		reg-names  : should be:
-			"<ss-mux>" - physical base address of hardware event mux
-				control registers where <ss-mux> is subsystem mux it
-				represents
-	- for coresight fuse device:
-		compatible : should be "arm,coresight-fuse"
-		reg-names  : should be:
-			"fuse-base" - physical base address of fuse registers
-			"nidnt-fuse-base" - physical base address of nidnt fuse registers
-			"qpdi-fuse-base" - physical base address of qpdi fuse registers
-	- for coresight qpdi device:
-		compatible : should be "qcom,coresight-qpdi"
-		reg-names  : should be:
-			"qpdi-base" - physical base address of qpdi registers
-- coresight-id : unique integer identifier for the component
-- coresight-name : unique descriptive name of the component
-- coresight-nr-inports : number of input ports on the component
-
-Optional properties:
-
-- coresight-outports : list of output port numbers of this component
-- coresight-child-list : list of phandles pointing to the children of this
-			 component
-- coresight-child-ports : list of input port numbers of the children
-- coresight-default-sink : represents the default compile time CoreSight sink
-- coresight-ctis : list of ctis that this component interacts with
-- qcom,cti-save : boolean, indicating cti context needs to be saved and restored
-- qcom,cti-hwclk : boolean, indicating support of hardware clock to access cti
-		   registers to be saved and restored
-- qcom,cti-gpio-trigin : cti trigger input driven by gpio
-- qcom,cti-gpio-trigout : cti trigger output sent to gpio
-- qcom,pc-save : program counter save implemented
-- qcom,blk-size : block size for tmc-etr to usb transfers
-- qcom,memory-size : size of coherent memory to be allocated for tmc-etr buffer
-- qcom,round-robin : indicates if per core etms are allowed round-robin access
-		     by the funnel
-- qcom,write-64bit : only 64bit data writes supported by stm
-- qcom,data-barrier : barrier required for every stm data write to channel space
-- <supply-name>-supply: phandle to the regulator device tree node. The required
-			<supply-name> is "vdd" for SD card and "vdd-io" for SD
-			I/O supply. Used for tpiu component
-- qcom,<supply>-voltage-level : specifies voltage level for vdd supply. Should
-				be specified in pairs (min, max) with units
-				being uV. Here <supply> can be "vdd" for SD card
-				vdd supply or "vdd-io" for SD I/O vdd supply.
-- qcom,<supply>-current-level : specifies current load levels for vdd supply.
-				Should be specified in pairs (lpm, hpm) with
-				units being uA. Here <supply> can be "vdd" for
-				SD card vdd supply or "vdd-io" for SD I/O vdd
-				supply.
-- qcom,hwevent-clks : list of clocks required by hardware event driver
-- qcom,hwevent-regs : list of regulators required by hardware event driver
-- qcom,byte-cntr-absent : specifies if the byte counter feature is absent on
-			  the device. Only relevant in case of tmc-etr device.
-- interrupts : <a b c> where a is 0 or 1 depending on if the interrupt is
-		spi/ppi, b is the interrupt number and c is the mask,
-- interrupt-names : a list of strings that map in order to the list of
-		    interrupts specified in the 'interrupts' property.
-- qcom,sg-enable : indicates whether scatter gather feature is supported for TMC
-		   ETR configuration.
-- qcom,force-reg-dump : boolean, indicate whether TMC register need to be dumped.
-			Used for TMC component
-- qcom,nidntsw : boolean, indicating NIDnT software debug or trace support
-		 present. Used for tpiu component
-- qcom,nidnthw : boolean, indicating NIDnT hardware sensing support present.
-		 Used for tpiu component
-  qcom,nidntsw and qcom,nidnthw are mutually exclusive properties, either of
-  these may specified for tpiu component
-- qcom,nidnt-swduart : boolean, indicating NIDnT swd uart support present. Used
-		       for tpiu component
-- qcom,nidnt-swdtrc : boolean, indicating NIDnT swd trace support present. Used
-		      for tpiu component
-- qcom,nidnt-jtag : boolean, indicating NIDnT jtag debug support present. Used
-		    for tpiu component
-- qcom,nidnt-spmi : boolean, indicating NIDnT spmi debug support present. Used
-		    for tpiu component
-- nidnt-gpio : specifies gpio for NIDnT hardware detection
-- nidnt-gpio-polarity : specifies gpio polarity for NIDnT hardware detection
-- pinctrl-names : names corresponding to the numbered pinctrl. The allowed
-		  names are subset of the following: cti-trigin-pctrl,
-		  cti-trigout-pctrl. Used for cti component
-- pinctrl-<n>: list of pinctrl phandles for the different pinctrl states. Refer
-	       to "Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt".
-- qcom,funnel-save-restore : boolean, indicating funnel port needs to be disabled
-			     for the ETM whose CPU is being powered down. The port
-			     state is restored when CPU is powered up. Used for
-			     funnel component.
-- qcom,tmc-flush-powerdown : boolean, indicating trace data needs to be flushed before
-			     powering down CPU. Used for TMC component.
-- qcom,bc-elem-size : specifies the BC element size supported by each monitor
-		      connected to the aggregator on each port. Should be specified
-		      in pairs (port, bc element size).
-- qcom,tc-elem-size : specifies the TC element size supported by each monitor
-		      connected to the aggregator on each port. Should be specified
-		      in pairs (port, tc element size).
-- qcom,dsb-elem-size : specifies the DSB element size supported by each monitor
-		       connected to the aggregator on each port. Should be specified
-		       in pairs (port, dsb element size).
-- qcom,cmb-elem-size : specifies the CMB element size supported by each monitor
-		       connected to the aggregator on each port. Should be specified
-		       in pairs (port, cmb element size).
-- qcom,clk-enable: specifies whether additional clock bit needs to be set for
-		   M4M TPDM.
-- qcom,tpda-atid : specifies the ATID for TPDA.
-- qcom,inst-id : QMI instance id for remote ETMs.
-- qcom,noovrflw-enable : boolean, indicating whether no overflow bit needs to be
-			 set in ETM stall control register.
-- coresight-cti-cpu : cpu phandle for cpu cti, required when qcom,cti-save is true
-- coresight-etm-cpu : specifies phandle for the cpu associated with the ETM device
-- qcom,dbgui-addr-offset : indicates the offset of dbgui address registers
-- qcom,dbgui-data-offset : indicates the offset of dbgui data registers
-- qcom,dbgui-size : indicates the size of dbgui address and data registers
-- qcom,pmic-carddetect-gpio : indicates the hotplug capabilities of the qpdi driver
-- qcom,cpuss-debug-cgc: debug clock gating phandle for etm
-	reg : the clock gating register for each cluster
-	cluster : indicate the cluster number
-
-coresight-outports, coresight-child-list and coresight-child-ports lists will
-be of the same length and will have a one to one correspondence among the
-elements at the same list index.
-
-coresight-default-sink must be specified for one of the sink devices that is
-intended to be made the default sink. Other sink devices must not have this
-specified. Not specifying this property on any of the sinks is invalid.
+specification and can be connected in various topologies to suit a particular
+SoC's tracing needs. These trace components can generally be classified as
+sinks, links and sources. Trace data produced by one or more sources flows
+through the intermediate links connecting the source to the currently selected
+sink. Each CoreSight component device should use these properties to describe
+its hardware characteristics.
 
 * Required properties for all components *except* non-configurable replicators:
 
@@ -320,6 +110,9 @@
 	* cpu: the cpu phandle this ETM/PTM is affined to. When omitted the
 	  source is considered to belong to CPU0.
 
+	* qcom,tupwr-disable: For ETM, don't keep trace unit powered across power
+	  collapse.
+
 * Optional property for TMC:
 
 	* arm,buffer-size: size of contiguous buffer space for TMC ETR
diff --git a/Documentation/devicetree/bindings/arm/msm/heap-sharing.txt b/Documentation/devicetree/bindings/arm/msm/heap-sharing.txt
index e63d09b..de2a963 100644
--- a/Documentation/devicetree/bindings/arm/msm/heap-sharing.txt
+++ b/Documentation/devicetree/bindings/arm/msm/heap-sharing.txt
@@ -29,6 +29,10 @@
 
 - qcom,allocate-boot-time:	Indicates whether clients needs boot time memory allocation.
 
+- qcom,allocate-on-request:	Indicates that memory allocation happens only when a client requests it.
+
+Note: "qcom,allocate-boot-time" and "qcom,allocate-on-request" are mutually exclusive properties.
+
 Example:
 
 qcom,memshare {
diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt
index 9c2d647..b2640da 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm.txt
@@ -95,6 +95,9 @@
 - QCS605
   compatible = "qcom,qcs605"
 
+- SXR1120
+  compatible = "qcom,sxr1120"
+
 - SDA670
   compatible = "qcom,sda670"
 
@@ -324,8 +327,11 @@
 compatible = "qcom,sdm670-qrd"
 compatible = "qcom,qcs605-cdp"
 compatible = "qcom,qcs605-mtp"
+compatible = "qcom,sxr1120-mtp"
+compatible = "qcom,sxr1120-cdp"
 compatible = "qcom,sda670-cdp"
 compatible = "qcom,sda670-mtp"
+compatible = "qcom,sda670-hdk"
 compatible = "qcom,msm8952-rumi"
 compatible = "qcom,msm8952-sim"
 compatible = "qcom,msm8952-qrd"
@@ -382,4 +388,5 @@
 compatible = "qcom,sdxpoorwills-atp"
 compatible = "qcom,sdxpoorwills-mtp"
 compatible = "qcom,sdxpoorwills-cdp"
+compatible = "qcom,sdxpoorwills-ttp"
 compatible = "qcom,mdm9607-ttp"
diff --git a/Documentation/devicetree/bindings/arm/msm/pm.txt b/Documentation/devicetree/bindings/arm/msm/pm.txt
new file mode 100644
index 0000000..b66d4a6
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/pm.txt
@@ -0,0 +1,49 @@
+* MSM PM
+
+PM is the low power management device for MSM (Snapdragon class) chipsets.
+This device sets up the different components needed for low power modes and
+registers with the kernel to be notified of idle and suspend states; when
+invoked, it follows the set of instructions required to put the application
+cores into the lowest power mode possible.
+The PC debug counter reserves 16 registers in the IMEM memory space which
+maintain a count of the power collapse state of each core. This count is
+useful for debugging power collapse on each core.
+
+The required properties for PM are:
+
+- compatible: "qcom,pm"
+
+The optional properties are:
+
+- reg: physical IMEM address reserved for PC counters and the size
+- qcom,use-sync-timer: Indicates whether the target uses the synchronized QTimer.
+- qcom,synced-clocks: Indicates that all cpus run off a single clock source and that
+	the necessary clock source should be instantiated.
+- qcom,pc-resets-timer: Indicates that the timer gets reset during power collapse.
+- qcom,tz-flushes-cache: Indicates that TZ flushes all of the cache during
+power collapse. MSM PM can decide to not perform cache flush operations to
+reduce latency associated with L2 PC.
+- qcom,saw-turns-off-pll: Indicates that the CPU's PLL can be managed from SAW
+	hardware. On such targets software management of PLL is not required. If
+	this property is specified then qcom,synced-clocks would be ignored.
+- qcom,no-pll-switch-for-retention: Boolean property, to indicate that the cpu
+	clock can be sourced from the HFPLL even when the cpu is in
+	retention, and need not be switched to an always-on pll. If this flag
+	is set then the cpu clock is not ramped down when entering retention or
+	ramped up on exiting retention.
+
+Example 1:
+
+qcom,pm@fe800664 {
+		compatible = "qcom,pm";
+		reg = <0xfe800664 0x40>;
+		qcom,use-sync-timer;
+	};
+
+Example 2:
+
+qcom,pm@fe800664 {
+		compatible = "qcom,pm";
+		reg = <0xfe800664 0x40>;
+		qcom,saw-turns-off-pll;
+	};
diff --git a/Documentation/devicetree/bindings/arm/msm/pm_snoc_client.txt b/Documentation/devicetree/bindings/arm/msm/pm_snoc_client.txt
new file mode 100644
index 0000000..4f7111f
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/pm_snoc_client.txt
@@ -0,0 +1,35 @@
+* MSM PM SNOC client
+
+MSM PM SNOC client device is used to set up a bus request for 100 MHz for the
+SNOC bus when the Apps cores are active. This bus request helps mitigate the
+exit latency from power collapse in cases where there aren't any active bus
+requests for SNOC.
+
+This device is dependent on the pm-8x60 device, which configures the low power
+mode of respective cores.
+
+The required properties of this device are:
+
+- compatible: qcom,pm-snoc-client
+- qcom,msm-bus,name:            String representing the client-name
+- qcom,msm-bus,num-cases:       Total number of usecases
+- qcom,msm-bus,active-only:     Boolean context flag for requests in active or
+                                dual (active & sleep) context
+- qcom,msm-bus,num-paths:       Total number of master-slave pairs
+- qcom,msm-bus,vectors-KBps:    Arrays of unsigned integers representing:
+                                master-id, slave-id, arbitrated bandwidth
+                                in KBps, instantaneous bandwidth in KBps
+
+
+Example:
+	qcom,pm-snoc-client {
+		compatible = "qcom,pm-snoc-client";
+		qcom,msm-bus,name = "ocimem_snoc";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,active-only;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<22 512 0 0>,
+			<22 512 320000 3200000>;
+	};
+
diff --git a/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt b/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt
index 2b7b3fa..606da38 100644
--- a/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt
+++ b/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt
@@ -1,11 +1,14 @@
-* Amlogic Meson8b Clock and Reset Unit
+* Amlogic Meson8, Meson8b and Meson8m2 Clock and Reset Unit
 
-The Amlogic Meson8b clock controller generates and supplies clock to various
-controllers within the SoC.
+The Amlogic Meson8 / Meson8b / Meson8m2 clock controller generates and
+supplies clock to various controllers within the SoC.
 
 Required Properties:
 
-- compatible: should be "amlogic,meson8b-clkc"
+- compatible: must be one of:
+	- "amlogic,meson8-clkc" for Meson8 (S802) SoCs
+	- "amlogic,meson8b-clkc" for Meson8b (S805) SoCs
+	- "amlogic,meson8m2-clkc" for Meson8m2 (S812) SoCs
 - reg: it must be composed by two tuples:
 	0) physical base address of the xtal register and length of memory
 	   mapped region.
diff --git a/Documentation/devicetree/bindings/cnss/cnss-wlan.txt b/Documentation/devicetree/bindings/cnss/cnss-wlan.txt
index ea3afc7..06a60e2 100644
--- a/Documentation/devicetree/bindings/cnss/cnss-wlan.txt
+++ b/Documentation/devicetree/bindings/cnss/cnss-wlan.txt
@@ -13,6 +13,7 @@
 Required properties:
   - compatible: "qcom,cnss" for QCA6174 device
                 "qcom,cnss-qca6290" for QCA6290 device
+                "qcom,cnss-qca6390" for QCA6390 device
   - wlan-en-gpio: WLAN_EN GPIO signal specified by the chip specifications
   - vdd-wlan-supply: phandle to the regulator device tree node
   - pinctrl-names: Names corresponding to the numbered pinctrl states
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index 0589165..15f1e93 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -103,6 +103,8 @@
 - qcom,sde-dsc-size:		A u32 value indicates the address range for each dsc.
 - qcom,sde-cdm-size:		A u32 value indicates the address range for each cdm.
 - qcom,sde-pp-size:		A u32 value indicates the address range for each pingpong.
+- qcom,sde-te-source:		Array of GPIO sources indicating which pingpong TE is
+				sourced to which panel TE gpio.
 - qcom,sde-wb-size:		A u32 value indicates the address range for each writeback.
 - qcom,sde-len:			A u32 entry for SDE address range.
 - qcom,sde-intf-max-prefetch-lines:	Array of u32 values for max prefetch lines on
@@ -502,6 +504,7 @@
     qcom,sde-pp-off = <0x00071000 0x00071800
 			  0x00072000 0x00072800>;
     qcom,sde-pp-slave = <0x0 0x0 0x0 0x0>;
+    qcom,sde-te-source = <0x0 0x1 0x0 0x0>;
     qcom,sde-cdm-off = <0x0007a200>;
     qcom,sde-dsc-off = <0x00081000 0x00081400>;
     qcom,sde-intf-max-prefetch-lines = <0x15 0x15 0x15 0x15>;
diff --git a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
index 4f7ae75..bda9d6f 100644
--- a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
+++ b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
@@ -47,10 +47,13 @@
   Documentation/devicetree/bindings/media/video-interfaces.txt. The
   first port should be the input endpoint, the second one the output
 
-  The output should have two endpoints. The first is the block
-  connected to the TCON channel 0 (usually a panel or a bridge), the
-  second the block connected to the TCON channel 1 (usually the TV
-  encoder)
+  The output may have multiple endpoints. The TCON has two channels,
+  usually with the first channel being used for the panel interfaces
+  (RGB, LVDS, etc.), and the second being used for the outputs that
+  require another controller (TV Encoder, HDMI, etc.). The endpoints
+  will take an extra property, allwinner,tcon-channel, to specify the
+  channel the endpoint is associated to. If that property is not
+  present, the endpoint number will be used as the channel number.
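+
+  For instance, a TCON output endpoint routed to the TV encoder over
+  channel 1 could be described as follows (the node labels and the
+  remote-endpoint phandle below are illustrative only):
+
+	tcon0_out_tve0: endpoint@1 {
+		reg = <1>;
+		allwinner,tcon-channel = <1>;
+		remote-endpoint = <&tve0_in_tcon0>;
+	};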
 
 On SoCs other than the A33, there is one more clock required:
    - 'tcon-ch1': The clock driving the TCON channel 1
diff --git a/Documentation/devicetree/bindings/dma/mv-xor-v2.txt b/Documentation/devicetree/bindings/dma/mv-xor-v2.txt
index 217a90e..9c38bbe 100644
--- a/Documentation/devicetree/bindings/dma/mv-xor-v2.txt
+++ b/Documentation/devicetree/bindings/dma/mv-xor-v2.txt
@@ -11,7 +11,11 @@
   interrupts.
 
 Optional properties:
-- clocks: Optional reference to the clock used by the XOR engine.
+- clocks: Optional reference to the clocks used by the XOR engine.
+- clock-names: mandatory if there is a second clock; in this case the
+   name must be "core" for the first clock and "reg" for the second
+   one.
+
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
index 24290c8..c17970c 100644
--- a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
@@ -521,6 +521,9 @@
 					value 0.
 - qcom,mdss-dsi-dma-schedule-line:	An integer value indicates the line number after vertical active
 					region, at which command DMA needs to be triggered.
+- qcom,mdss-dsi-panel-cmds-only-by-right: Boolean used to indicate whether the panel supports DSI1 or
+					DSI0 for sending commands. If this is set, the panel only supports
+					DSI1 for sending commands; otherwise DSI0 will send the commands.
 
 Required properties for sub-nodes:	None
 Optional properties:
diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
index 73a6dbc..7454863 100644
--- a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
@@ -285,6 +285,35 @@
 					60 = 60 frames per second (default)
 - qcom,mdss-dsi-panel-clockrate:	A 64 bit value specifies the panel clock speed in Hz.
 					0 = default value.
+- qcom,mdss-mdp-kickoff-threshold:	This property can be used to define a region
+					(in terms of scanlines) where the hardware is
+					allowed to trigger a data transfer from MDP to DSI.
+					If this property is used, the region must be
+					defined by setting two values, the low and the
+					high thresholds:
+					<low_threshold high_threshold>
+					Where the following condition must be met:
+					low_threshold < high_threshold
+					These values will be used by the driver in such a way that if
+					the driver receives a request to kick off a transfer (MDP to DSI),
+					the transfer will be triggered only if the following condition
+					is satisfied:
+					low_threshold < scanline < high_threshold
+					If the condition is not met, then the driver will delay the
+					transfer by the time defined in the following property:
+					"qcom,mdss-mdp-kickoff-delay".
+					So in order to use this property, the delay property must
+					be defined as well and greater than 0.
+- qcom,mdss-mdp-kickoff-delay:	This property defines the delay in microseconds that
+					the driver will wait before triggering an MDP transfer if the
+					thresholds defined by the following property are not met:
+					"qcom,mdss-mdp-kickoff-threshold".
+					So in order to use this property, the threshold property must
+					be defined as well. Note that this delay
+					cannot be zero and also should not be
+					greater than the fps window, i.e. for 60 fps
+					the value should not exceed 16666 us.
 - qcom,mdss-mdp-transfer-time-us:	Specifies the dsi transfer time for command mode
 					panels in microseconds. Driver uses this number to adjust
 					the clock rate according to the expected transfer time.
@@ -634,6 +663,8 @@
 		qcom,mdss-dsi-dma-trigger = <0>;
 		qcom,mdss-dsi-panel-framerate = <60>;
 		qcom,mdss-dsi-panel-clockrate = <424000000>;
+		qcom,mdss-mdp-kickoff-threshold = <11 2430>;
+		qcom,mdss-mdp-kickoff-delay = <1000>;
 		qcom,mdss-mdp-transfer-time-us = <12500>;
 		qcom,mdss-dsi-panel-timings = [7d 25 1d 00 37 33
 					22 27 1e 03 04 00];
diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi.txt b/Documentation/devicetree/bindings/fb/mdss-dsi.txt
index 1934bc5..2d689d2 100644
--- a/Documentation/devicetree/bindings/fb/mdss-dsi.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-dsi.txt
@@ -113,6 +113,7 @@
 - qcom,platform-bklight-en-gpio-invert:	Invert the gpio used to enable display back-light
 - qcom,panel-mode-gpio:			Specifies the GPIO to select video/command/single-port/dual-port
 					mode of panel through gpio when it supports these modes.
+- qcom,ext-vdd-gpio:			Specifies the GPIO to enable external VDD supply.
 - pinctrl-names:			List of names to assign mdss pin states defined in pinctrl device node
 					Refer to pinctrl-bindings.txt
 - pinctrl-<0..n>:			Lists phandles each pointing to the pin configuration node within a pin
diff --git a/Documentation/devicetree/bindings/fb/mdss-spi-client.txt b/Documentation/devicetree/bindings/fb/mdss-spi-client.txt
new file mode 100644
index 0000000..0d5fde8
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/mdss-spi-client.txt
@@ -0,0 +1,27 @@
+Qualcomm Technologies, Inc. mdss-spi-client
+
+mdss-spi-client allows an SPI display to send the FB data to the SPI master.
+
+Required properties:
+- compatible : should be "qcom,mdss-spi-client"
+- spi-max-frequency : Maximum SPI clocking speed of device in Hz
+
+Optional properties:
+- label: A string used to describe the controller used.
+- spi-cpol : Boolean property indicating device requires inverse
+  clock polarity (CPOL) mode
+- spi-cpha :  Empty property indicating device requires shifted
+  clock phase (CPHA) mode
+- spi-cs-high :  Empty property indicating device requires
+  chip select active high
+
+Example:
+spi@78b9000 { /* BLSP1 QUP5 */
+	qcom,mdss_spi_client {
+		reg = <0>;
+		compatible = "qcom,mdss-spi-client";
+		label = "MDSS SPI QUP5 CLIENT";
+		spi-max-frequency = <50000000>;
+	};
+};
+
diff --git a/Documentation/devicetree/bindings/fb/mdss-spi-panel.txt b/Documentation/devicetree/bindings/fb/mdss-spi-panel.txt
new file mode 100644
index 0000000..d46068f
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/mdss-spi-panel.txt
@@ -0,0 +1,203 @@
+Qualcomm Technologies, Inc. mdss-spi-panel
+
+mdss-spi-panel is an SPI panel device which supports panels that
+are compatible with display serial interface specification.
+
+Required properties:
+- qcom,mdss-spi-panel-controller:	Specifies the phandle for the SPI controller that
+					this panel will be mapped to.
+- qcom,mdss-spi-panel-width:		Specifies panel width in pixels.
+- qcom,mdss-spi-panel-height:		Specifies panel height in pixels.
+- qcom,mdss-spi-bpp:			Specifies the panel bits per pixel.
+					3  = for rgb111
+					8  = for rgb332
+					12 = for rgb444
+					16 = for rgb565
+					18 = for rgb666
+					24 = for rgb888
+- qcom,mdss-spi-panel-destination:	A string that specifies the destination display for the panel.
+					"display_1" = DISPLAY_1
+					"display_2" = DISPLAY_2
+- qcom,mdss-spi-on-command:		A byte stream formed by multiple packets
+					byte 0: wait the specified number of ms after the
+						 command is transmitted
+					byte 1: 8-bit length, in network byte order
+					byte 2 and beyond: payload bytes
+- qcom,mdss-spi-off-command:		A byte stream formed by multiple packets
+					byte 0: wait the specified number of ms after the
+						 command is transmitted
+					byte 1: 8-bit length, in network byte order
+					byte 2 and beyond: payload bytes
+Optional properties:
+- qcom,mdss-spi-panel-name:		A string used as a descriptive name of the panel
+- qcom,cont-splash-enabled:		Boolean used to enable continuous splash mode.
+					If this property is specified, it is required to
+					If this property is specified, it is required
+					to specify the memory reserved for the splash
+					for the framebuffer device attached to the panel.
+- qcom,mdss-spi-h-back-porch:		Horizontal back porch value in pixels.
+					6 = default value.
+- qcom,mdss-spi-h-front-porch:		Horizontal front porch value in pixels.
+					6 = default value.
+- qcom,mdss-spi-h-pulse-width:		Horizontal pulse width.
+					2 = default value.
+- qcom,mdss-spi-h-sync-skew:		Horizontal sync skew value.
+					0 = default value.
+- qcom,mdss-spi-v-back-porch:		Vertical back porch value in pixels.
+					6 = default value.
+- qcom,mdss-spi-v-front-porch:		Vertical front porch value in pixels.
+					6 = default value.
+- qcom,mdss-spi-v-pulse-width:		Vertical pulse width.
+					2 = default value.
+- qcom,mdss-spi-bl-pmic-control-type:	A string that specifies the implementation of backlight
+					control for this panel.
+					"bl_ctrl_pwm" = Backlight controlled by PWM gpio.
+					"bl_ctrl_wled" = Backlight controlled by WLED.
+					other: Unknown backlight control. (default)
+- qcom,mdss-spi-bl-min-level:		Specifies the min backlight level supported by the panel.
+					0 = default value.
+- qcom,mdss-spi-bl-max-level:		Specifies the max backlight level supported by the panel.
+					255 = default value.
+- qcom,mdss-spi-panel-framerate:	Specifies the frame rate for the panel.
+- qcom,esd-check-enabled:		Boolean used to enable ESD recovery feature.
+- qcom,mdss-spi-panel-status-check-mode: Specifies the panel status check method for ESD recovery.
+					"send_init_command" = send init code to recover panel status.
+					"reg_read" = Read register value to check the panel status.
+- qcom,mdss-spi-panel-status-reg:	Unsigned 8-bit integer value that specifies the
+					panel status register address.
+- qcom,mdss-spi-panel-status-read-length: Unsigned 8-bit integer value that specifies
+					the expected read-back length of the panel register.
+- qcom,mdss-spi-panel-status-value:	An unsigned 8-bit integer array that specifies the
+					values of the panel status register which are used to
+					check the panel status.
+					The size of this array is specified by
+					qcom,mdss-spi-panel-status-read-length.
+
+Example:
+&mdss_mdp {
+	spi_gc9305_qvga_cmd: qcom,mdss_spi_gc9305_qvga_cmd {
+		qcom,mdss-spi-panel-name = "gc9305 qvga command mode spi panel";
+		qcom,mdss-spi-panel-destination = "display_1";
+		qcom,mdss-spi-panel-controller = <&mdss_spi>;
+		qcom,mdss-spi-panel-framerate = <30>;
+		qcom,mdss-spi-panel-width = <240>;
+		qcom,mdss-spi-panel-height = <320>;
+		qcom,mdss-spi-h-front-porch = <79>;
+		qcom,mdss-spi-h-back-porch = <59>;
+		qcom,mdss-spi-h-pulse-width = <60>;
+		qcom,mdss-spi-v-back-porch = <10>;
+		qcom,mdss-spi-v-front-porch = <7>;
+		qcom,mdss-spi-v-pulse-width = <2>;
+		qcom,mdss-spi-h-left-border = <0>;
+		qcom,mdss-spi-h-right-border = <0>;
+		qcom,mdss-spi-v-top-border = <0>;
+		qcom,mdss-spi-v-bottom-border = <0>;
+		qcom,mdss-spi-bpp = <16>;
+		qcom,mdss-spi-on-command = [00 01 FE
+			00 01 EF
+			00 02 36 48
+			00 02 3A 05
+			00 02 35 00
+			00 03 A4 44 44
+			00 03 A5 42 42
+			00 03 AA 88 88
+			00 03 E8 12 40
+			00 03 E3 01 10
+			00 02 FF 61
+			00 02 AC 00
+			00 03 A6 2A 2A
+			00 03 A7 2B 2B
+			00 03 A8 18 18
+			00 03 A9 2A 2A
+			00 02 AD 33
+			00 02 AF 55
+			00 02 AE 2B
+			00 05 2A 00 00 00 EF
+			00 05 2B 00 00 01 3F
+			00 01 2C
+			00 07 F0 02 02 00 08 0C 10
+			00 07 F1 01 00 00 14 1D 0E
+			00 07 F2 10 09 37 04 04 48
+			00 07 F3 10 0B 3F 05 05 4E
+			00 07 F4 0D 19 17 1D 1E 0F
+			00 07 F5 06 12 13 1A 1B 0F
+			78 01 11
+			00 01 29
+			00 01 2C];
+		qcom,mdss-spi-off-command = [20 01 28
+			20 01 10];
+		qcom,mdss-spi-bl-min-level = <1>;
+		qcom,mdss-spi-bl-max-level = <4095>;
+		qcom,esd-check-enabled;
+		qcom,mdss-spi-panel-status-check-mode = "reg_read";
+		qcom,mdss-spi-panel-status-reg = /bits/ 8 <0x09>;
+		qcom,mdss-spi-panel-status-read-length = <4>;
+		qcom,mdss-spi-panel-status-value = /bits/ 8 <0x52 0x29 0x83 0x00>;
+	};
+};
+
+mdss-spi-display is an SPI interface display which supports sending frame
+data and commands to the panel, compatible with the SPI interface specification.
+
+Required properties:
+- compatible:	This property applies to SPI panels only.
+			compatible = "qcom,mdss-spi-display".
+- vdd-supply:	Phandle for vdd regulator device node.
+- vddio-supply:	Phandle for vdd-io regulator device node.
+- qcom,mdss-fb-map:	pHandle that specifies the framebuffer to which the interface is mapped.
+- qcom,mdss-mdp:	pHandle that specifies the mdss-mdp device.
+- qcom,panel-supply-entries:	A node that lists the elements of the supply used to
+				power the DSI panel. There can be more than one instance
+				of this binding, in which case the entry would be appended
+				with the supply entry index. For a detailed description
+				of the fields in the supply entry, refer to the qcom,ctrl-supply-entries
+				binding above.
+- qcom,platform-spi-dc-gpio: 	Pull down this gpio indicate current package is command,
+				Pull up this gpio indicate current package is parameter or pixels.
+
+Optional properties:
+- label: A string used to describe the controller used.
+  Each qcom,panel-supply-entry sub-node within qcom,panel-supply-entries
+  supports the following fields:
+	-- qcom,supply-name: name of the supply (vdd/vdda/vddio)
+	-- qcom,supply-min-voltage: minimum voltage level (uV)
+	-- qcom,supply-max-voltage: maximum voltage level (uV)
+	-- qcom,supply-enable-load: load drawn (uA) from enabled supply
+	-- qcom,supply-disable-load: load drawn (uA) from disabled supply
+	-- qcom,supply-pre-on-sleep: time to sleep (ms) before turning on
+	-- qcom,supply-post-on-sleep: time to sleep (ms) after turning on
+	-- qcom,supply-pre-off-sleep: time to sleep (ms) before turning off
+	-- qcom,supply-post-off-sleep: time to sleep (ms) after turning off
+
+Example:
+	mdss_spi: qcom,mdss_spi {
+		compatible = "qcom,mdss-spi-display";
+		label = "mdss spi panel";
+
+		qcom,mdss-fb-map = <&mdss_fb0>;
+		qcom,mdss-mdp = <&mdss_mdp>;
+		vdd-supply = <&pm8909_l17>;
+		vddio-supply = <&pm8909_l6>;
+		qcom,platform-spi-dc-gpio = <&msm_gpio 110 0>;
+
+		qcom,panel-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,panel-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "vdd";
+				qcom,supply-min-voltage = <2850000>;
+				qcom,supply-max-voltage = <2850000>;
+				qcom,supply-enable-load = <100000>;
+				qcom,supply-disable-load = <100>;
+			};
+
+			qcom,panel-supply-entry@1 {
+				reg = <1>;
+				qcom,supply-name = "vddio";
+				qcom,supply-min-voltage = <1800000>;
+				qcom,supply-max-voltage = <1800000>;
+				qcom,supply-enable-load = <100000>;
+				qcom,supply-disable-load = <100>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/iio/imu/invn-icm20602.txt b/Documentation/devicetree/bindings/iio/imu/invn-icm20602.txt
new file mode 100644
index 0000000..27c304e
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/imu/invn-icm20602.txt
@@ -0,0 +1,28 @@
+The ICM20602 sensor is a 6-axis gyroscope+accelerometer combo
+device made by InvenSense Inc.
+
+Required properties:
+
+ - compatible	: Should be "invensense,icm20602".
+ - reg	: the I2C address which depends on the AD0 pin.
+ - interrupt-parent	: should be the phandle for the interrupt controller
+ - interrupts	: interrupt mapping for GPIO IRQ
+
+Optional properties:
+ - invensense,icm20602-gpio:  the irq gpio. This is needed only when the
+ device IRQ is used to trigger; it is not used when triggering
+ via SMD. Only SMD triggering can support the 8K sample
+ rate.
+
+Example:
+	icm20602-i2c@068 {
+		compatible = "invensense,icm20602";
+		reg = <0x68>;
+		interrupt-parent = <&msm_gpio>;
+		interrupts = <13 0>;
+		invensense,icm20602-gpio = <&msm_gpio 13 0x0>;
+		pinctrl-names = "imu_active","imu_suspend";
+		pinctrl-0 = <&imu_int_active>;
+		pinctrl-1 = <&imu_int_suspend>;
+		status = "okay";
+	};
diff --git a/Documentation/devicetree/bindings/input/misc/stmvl53l0x.txt b/Documentation/devicetree/bindings/input/misc/stmvl53l0x.txt
new file mode 100644
index 0000000..bf10122
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/misc/stmvl53l0x.txt
@@ -0,0 +1,36 @@
+STM VL53L0X Time-of-Flight ranging and gesture detection sensor driver
+
+Description:
+
+The VL53L0X is a new generation Time-of-Flight
+(ToF) laser-ranging module housed in the
+smallest package on the market today, providing
+accurate distance measurement whatever the
+target reflectance, unlike conventional
+technologies. It can measure absolute distances
+up to 2m, setting a new benchmark in ranging
+performance levels, opening the door to various
+new applications.
+The VL53L0X integrates a leading-edge SPAD
+array (Single Photon Avalanche Diodes) and
+embeds ST’s second generation FlightSenseTM
+patented technology.
+The VL53L0X’s 940 nm VCSEL emitter (Vertical
+Cavity Surface-Emitting Laser) is totally invisible
+to the human eye. Coupled with internal physical
+infrared filters, it enables longer ranging
+distances, higher immunity to ambient light, and
+better robustness to cover glass optical crosstalk.
+
+Required properties:
+
+ - compatible		: Should be "st,stmvl53l0".
+ - reg			: i2c slave address of the device.
+
+Example:
+	i2c@f9925000 {
+		vl53l0x@52 {
+			compatible = "st,stmvl53l0";
+			reg = <0x52>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/himax.txt b/Documentation/devicetree/bindings/input/touchscreen/himax.txt
index 258ffec..b54c859 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/himax.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/himax.txt
@@ -3,3 +3,20 @@
 Required properties:
 
  - compatible			: Should be "himax,hxcommon"
+ - reg					: i2c slave address of the device.
+ - interrupt-parent		: parent of interrupt.
+ - himax,irq-gpio       : irq gpio.
+ - himax,reset-gpio		: reset gpio.
+ - vdd-supply			: Power supply needed to power up the device.
+ - avdd-supply			: Power source required to power up i2c bus.
+ - himax,panel-coords 	: panel coordinates for the chip in pixels.
+						  It is a four tuple consisting of min x,
+						  min y, max x and max y values.
+ - himax,display-coords : display coordinates in pixels. It is a four
+						  tuple consisting of min x, min y, max x and
+						  max y values
+
+Optional properties:
+ - himax,3v3-gpio		: gpio acting as 3.3 v supply.
+ - report_type			: Multi-touch protocol type. Default 0.
+						  0 for protocol A, 1 for protocol B.
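+
+Example (a sketch only; the i2c parent node, addresses, phandles, gpio
+numbers and coordinate values below are illustrative):
+
+	i2c@78b7000 {
+		himax-ts@48 {
+			compatible = "himax,hxcommon";
+			reg = <0x48>;
+			interrupt-parent = <&tlmm>;
+			interrupts = <65 0x2008>;
+			himax,irq-gpio = <&tlmm 65 0x00>;
+			himax,reset-gpio = <&tlmm 64 0x00>;
+			vdd-supply = <&pm8953_l10>;
+			avdd-supply = <&pm8953_l6>;
+			himax,panel-coords = <0 720 0 1280>;
+			himax,display-coords = <0 720 0 1280>;
+			report_type = <1>;
+		};
+	};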
diff --git a/Documentation/devicetree/bindings/input/touchscreen/synaptics_dsxv26_i2c.txt b/Documentation/devicetree/bindings/input/touchscreen/synaptics_dsxv26_i2c.txt
index 87a551ba..885be72 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/synaptics_dsxv26_i2c.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/synaptics_dsxv26_i2c.txt
@@ -24,6 +24,7 @@
  - synaptics,reset-on-state      : reset gpio active state.
  - synaptics,power-on-state      : power switch active state.
  - synaptics,ub-i2c-addr	 : microbootloader mode I2C slave address.
+ - synaptics,do-not-disable-regulators	   : If specified, regulators cannot be disabled/enabled during suspend/resume.
  - synaptics,cap-button-codes  : virtual key code mappings to be used.
  - synaptics,vir-button-codes  : virtual key code and the response region on panel.
  - synaptics,wakeup-gestures-en: enable wakeup gestures.
@@ -33,6 +34,7 @@
  - synaptics,reset-active-ms	   : reset active duration for controller (ms), default 100.
  - synaptics,power-delay-ms	   : power delay for controller (ms), default 100.
  - synaptics,max-y-for-2d	   : maximal y value of the panel.
+ - synaptics,bus-lpm-cur-uA	   : I2C bus idle mode current setting.
  - synaptics,swap-axes		   : specify whether to swap axes.
  - synaptics,resume-in-workqueue	: specify whether to defer the resume to workqueue.
  - clock-names			: Clock names used for secure touch. They are: "iface_clk", "core_clk"
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
index 4c29cda..fdc3418 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
@@ -57,6 +57,10 @@
   occupied by the redistributors. Required if more than one such
   region is present.
 
+- ignored-save-restore-irqs: Array of u32 elements, specifying the interrupts
+  which are ignored while doing gicd save/restore. A maximum of 10 elements
+  is supported at present.
+
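+  For example, to ignore two interrupts during gicd save/restore (the
+  interrupt IDs below are illustrative only):
+
+	ignored-save-restore-irqs = <38 420>;
+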
 Sub-nodes:
 
 PPI affinity can be expressed as a single "ppi-partitions" node,
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index 4839df4..23e5f20 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -87,11 +87,6 @@
 		When qcom,enable-static-cb is selected, indicates which
 		iommu context banks may be used by HLOS.
 
-- qcom,hibernation-support:
-		A boolean, indicates that hibernation should be supported and
-		all secure usecases should be disabled, since they cannot be
-		restored properly.
-
 - qcom,skip-init : Disable resetting configuration for all context banks
                   during device reset.  This is useful for targets where
                   some context banks are dedicated to other execution
diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp-haptics.txt b/Documentation/devicetree/bindings/leds/leds-qpnp-haptics.txt
index 1a76d5d..258504e 100644
--- a/Documentation/devicetree/bindings/leds/leds-qpnp-haptics.txt
+++ b/Documentation/devicetree/bindings/leds/leds-qpnp-haptics.txt
@@ -116,6 +116,11 @@
   Definition: Short circuit debounce cycles for internal PWM.
 		Allowed values: 0, 8, 16 or 32.
 
+- vcc_pon-supply
+  Usage:      optional
+  Value type: <phandle>
+  Definition: PON driver regulator required to force MBG_ON
+
 Following properties are specific only to LRA vibrators.
 
 - qcom,lra-auto-mode
diff --git a/Documentation/devicetree/bindings/mcd/mcd.txt b/Documentation/devicetree/bindings/mcd/mcd.txt
deleted file mode 100644
index 4077ee2..0000000
--- a/Documentation/devicetree/bindings/mcd/mcd.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-* MCD (MobiCore Driver)
-
-t-base is an operating system running in the secure world (TrustZone).
-The t-base implementation consists of several components in the
-secure world and the non-secure world (kernel and user space). The
-MobiCore driver communicates with the t-base operating system that
-exists in TrustZone.
-
-Required properties:
-  - compatible: Should be "qcom,mcd"
-  - qcom,ce-hw-instance: should contain crypto HW instance
-  - qcom,ce-device: Device number
-  - clocks: Array of <clock_controller_phandle clock_reference> listing
-            all the clocks that are accesed by this subsystem.
-  - qcom,ce-opp-freq: indicates the CE operating frequency in Hz, changes from target to target.
-
-Example:
-	mcd {
-		compatible = "qcom,mcd";
-		qcom,ce-hw-instance = <0>;
-		qcom,ce-device = <0>;
-		clocks = <&clock_gcc clk_crypto_clk_src>,
-			 <&clock_gcc clk_gcc_crypto_clk>,
-			 <&clock_gcc clk_gcc_crypto_ahb_clk>,
-			 <&clock_gcc clk_gcc_crypto_axi_clk>;
-		clock-names = "core_clk_src", "core_clk",
-				"iface_clk", "bus_clk";
-		qcom,ce-opp-freq = <100000000>;
-	};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt b/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
index 66eaae1..e1e486f 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
@@ -71,6 +71,11 @@
   Value type: <u32>
   Definition: CAM HW Version information.
 
+- camnoc-axi-min-ib-bw
+  Usage: optional
+  Value type: <u64>
+  Definition: Min camnoc axi bw for the given target.
+
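+  For example (the bandwidth value below is a placeholder; a u64 is
+  written as two u32 cells, most significant word first):
+
+	camnoc-axi-min-ib-bw = <0x0 0x2faf0800>;
+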
 - regulator-names
   Usage: required
   Value type: <string>
diff --git a/Documentation/devicetree/bindings/misc/fpc,fpc1028.txt b/Documentation/devicetree/bindings/misc/fpc,fpc1028.txt
new file mode 100644
index 0000000..233c7cc
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/fpc,fpc1028.txt
@@ -0,0 +1,85 @@
+Fingerprint Cards AB. Fpc1028 driver
+
+The fpc1028 fingerprint sensor is connected to the host processor via SPI.
+The sensor generates interrupts when the user touches it.
+The host controller is expected to read data over SPI and pass the data to
+the rest of the system.
+
+This binding document describes the properties for this module.
+
+Properties:
+
+- compatible
+  Usage:      required
+  Value type: <string>
+  Definition: It must be "fpc,fpc1020"
+
+- interrupts
+  Usage:      required
+  Value type: <prop-encoded-array>
+  Definition: Peripheral interrupt specifier.
+
+- interrupt-parent
+  Usage:      required
+  Value type: <phandle>
+  Definition: phandle of the interrupt controller which services the
+  summary interrupt.
+
+- fpc,gpio_rst
+  Usage:      required
+  Value type: <prop-encoded-array>
+  Definition: GPIO which connects to the reset pin of fpc1028
+
+- fpc,gpio_irq
+  Usage:      required
+  Value type: <prop-encoded-array>
+  Definition: Specifies the GPIO which connects to the irq pin of fpc1028.
+
+- vcc_spi-supply
+  Usage:      required
+  Value type: <phandle>
+  Definition: The phandle of the regulator which supplies fpc1028 spi bus core.
+
+- vcc_io-supply
+  Usage:      required
+  Value type: <phandle>
+  Definition: The phandle of the regulator which supplies fpc1028 io pins.
+
+- vcc_ana-supply
+  Usage:      required
+  Value type: <phandle>
+  Definition: The phandle of the regulator which supplies fpc1028 analog circuit.
+
+- pinctrl-names:
+  Usage:      required
+  Value type: <string>
+  Definition: Pinctrl state names for each pin group configuration.
+  eg:"fpc1020_reset_reset", "fpc1020_reset_active", "fpc1020_irq_active".
+  refer to "Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt"
+
+- pinctrl-n:
+  Usage:      required
+  Value type: <string>
+  Definition: pinctrl state for each pin group
+  refer to "Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt"
+
+
+Example:
+
+	fpc1020 {
+			compatible = "fpc,fpc1020";
+			interrupt-parent = <&tlmm>;
+			interrupts = <48 0>;
+			fpc,gpio_rst = <&tlmm 124 0x0>;
+			fpc,gpio_irq = <&tlmm 48 0>;
+			vcc_spi-supply = <&pm8953_l5>;
+			vdd_io-supply  = <&pm8953_l5>;
+			vdd_ana-supply = <&pm8953_l5>;
+			fpc,enable-on-boot;
+			pinctrl-names = "fpc1020_reset_reset",
+					"fpc1020_reset_active",
+					"fpc1020_irq_active";
+			pinctrl-0 = <&msm_gpio_124>;
+			pinctrl-1 = <&msm_gpio_124_output_high>;
+			pinctrl-2 = <&msm_gpio_48>;
+		};
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-palmas.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-palmas.txt
index caf297b..c28d4eb8 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-palmas.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-palmas.txt
@@ -35,6 +35,15 @@
 - ti,palmas-enable-dvfs2: Enable DVFS2. Configure pins for DVFS2 mode.
 	Selection primary or secondary function associated to GPADC_START
 	and SYSEN2 pin/pad for DVFS2 interface
+- ti,palmas-override-powerhold: This is applicable for PMICs for which
+	GPIO7 is configured in POWERHOLD mode which has higher priority
+	over the DEV_ON bit and keeps the PMIC supplies on even after the
+	DEV_ON bit is turned off. This property enables the driver to
+	override the POWERHOLD value on GPIO7 so as to turn off the PMIC in
+	power-off scenarios. So if ti,palmas-override-powerhold is set, the
+	GPIO_7 field should never be muxed to anything else; it should be
+	left at POWERHOLD by default, and only in power-off scenarios will
+	the driver override the mux value.
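+	A minimal sketch of how this flag could appear in the Palmas pin
+	controller node (the node name is illustrative and other required
+	properties are omitted):
+
+		palmas_pinmux: pinmux {
+			ti,palmas-override-powerhold;
+		};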
 
 This binding uses the following generic properties as defined in
 pinctrl-bindings.txt:
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt
index 8b63075..7652aa4 100644
--- a/Documentation/devicetree/bindings/platform/msm/ipa.txt
+++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt
@@ -87,6 +87,8 @@
                                 need to be unlocked by TZ.
 - qcom,ipa-uc-monitor-holb:   	Boolean context flag to indicate whether
                                 monitoring of holb via IPA uc is required.
+- qcom,wlan-ce-db-over-pcie: Boolean context flag to indicate whether the WLAN
+				CE DB is over the pcie bus or not.
 
 IPA pipe sub nodes (A2 static pipes configurations):
 
@@ -118,6 +120,10 @@
     controller phandle and "clk_ipa_clk" as macro for "iface_clk"
 - clock-names: This property shall contain the clock input names used
     by driver in same order as the clocks property.This should be "iface_clk"
+- emulator-bar0-offset: Specifies the offset, within PCIe BAR0, where
+    IPA/GSI programmable registers reside.  This property is used only
+    with the IPA/GSI emulation system, which is connected to and
+    communicated with via PCIe.
 
 IPA SMMU sub nodes
 
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg.txt
index f6a7a1b..1e44686 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg.txt
@@ -109,6 +109,10 @@
 					this. If this property is not specified,
 					low battery voltage threshold will be
 					configured to 4200 mV.
+- qcom,fg-rconn-mohm:			Battery connector resistance (Rconn) in
+					milliohms. If Rconn is specified, then
+					Rslow values will be updated to account
+					for it, giving an accurate ESR.
 - qcom,cycle-counter-en:		Boolean property which enables the cycle
 					counter feature. If this property is
 					present, then the following properties
@@ -143,6 +147,14 @@
 					battery voltage shadow and the current
 					predicted voltage in uV to initiate
 					capacity learning.
+- qcom,cl-max-limit-deciperc:		The upper limit, in percent, above which
+					the capacity cannot go during any
+					capacity learning cycle. This property is
+					in units of 0.1%.
+- qcom,cl-min-limit-deciperc:		The lower limit, in percent, below which
+					the capacity cannot go during any
+					capacity learning cycle. This property is
+					in units of 0.1%.
 - qcom,capacity-estimation-on:		A boolean property to have the fuel
 					gauge driver attempt to estimate the
 					battery capacity using battery
@@ -178,6 +190,97 @@
 					settings will be different from default.
 					Once SOC crosses 5%, ESR pulse timings
 					will be restored back to default.
+- qcom,fg-control-slope-limiter:	A boolean property to specify if the SOC
+					slope limiter coefficients need to
+					be modified based on charging status
+					and battery temperature threshold.
+- qcom,fg-slope-limit-temp-threshold:	Temperature threshold in decidegC used
+					for applying the slope coefficient based
+					on charging status and battery
+					temperature. If this property is not
+					specified, a default value of 100 (10C)
+					will be applied.
+- qcom,fg-slope-limit-low-temp-chg:	When the temperature goes below the
+					specified temperature threshold and
+					battery is charging, slope coefficient
+					specified with this property will be
+					applied. If this property is not
+					specified, a default value of 45 will be
+					applied.
+- qcom,fg-slope-limit-low-temp-dischg:	Same as "qcom,fg-slope-limit-low-temp-chg"
+					except this is when the battery is
+					discharging.
+- qcom,fg-slope-limit-high-temp-chg:	When the temperature goes above the
+					specified temperature threshold and
+					battery is charging, slope coefficient
+					specified with this property will be
+					applied. If this property is not
+					specified, a default value of 2 will be
+					applied.
+- qcom,fg-slope-limit-high-temp-dischg:	Same as "qcom,fg-slope-limit-high-temp-chg"
+					except this is when the battery is
+					discharging.
+- qcom,fg-dischg-voltage-gain-ctrl:	A boolean property to specify if the
+					voltage gain needs to be modified
+					during discharging based on monotonic
+					soc.
+- qcom,fg-dischg-voltage-gain-soc:	Array of monotonic SOC threshold values
+					to change the voltage gain settings
+					during discharge. This should be defined
+					in ascending order and in the range
+					of 0-100. Array limit is set to 3.
+					If qcom,fg-dischg-voltage-gain-ctrl is
+					set, then this property should be
+					specified to apply the gain settings.
+- qcom,fg-dischg-med-voltage-gain:	Array of voltage gain values that need
+					to be applied to medC voltage gain when
+					the monotonic SOC goes below the SOC
+					threshold specified under
+					qcom,fg-dischg-voltage-gain-soc. Array
+					limit is set to 3.
+					If qcom,fg-dischg-voltage-gain-ctrl is
+					set, then this property should be
+					specified to apply the gain setting.
+- qcom,fg-dischg-high-voltage-gain:	Array of voltage gain values that need
+					to be applied to highC voltage gain when
+					the monotonic SOC goes below the SOC
+					threshold specified under
+					qcom,fg-dischg-voltage-gain-soc. Array
+					limit is set to 3.
+					If qcom,fg-dischg-voltage-gain-ctrl is
+					set, then this property should be
+					specified to apply the gain setting.
+- qcom,fg-use-vbat-low-empty-soc:	A boolean property to specify whether
+					vbatt-low interrupt is used to handle
+					empty battery condition. If this is
+					not specified, empty battery condition
+					is detected by empty-soc interrupt.
+- qcom,fg-batt-temp-low-limit:		Battery temperature (in decidegC) low
+					limit which will be used to validate
+					the battery temperature reading from FG.
+					If the battery temperature goes below
+					this limit, last read good temperature
+					will be notified to userspace. If this
+					limit is not specified, then the
+					default limit would be -60C.
+- qcom,fg-batt-temp-high-limit:		Battery temperature (in decidegC) high
+					limit which will be used to validate
+					the battery temperature reading from FG.
+					If the battery temperature goes above
+					this limit, last read good temperature
+					will be notified to userspace. If this
+					limit is not specified, then the
+					default limit would be 150C.
+- qcom,fg-cc-soc-limit-pct:		Percentage of CC_SOC before resetting
+					FG and restoring the full CC_SOC value.
+- qcom,fg-restore-batt-info:		A boolean property to specify whether
+					battery parameters need to be
+					restored. If this feature is enabled,
+					the battery parameters are validated
+					by OCV/battery SOC, and the validation
+					range in percentage should be specified
+					via appropriate module parameters for
+					it to work properly.
 
 qcom,fg-soc node required properties:
 - reg : offset and length of the PMIC peripheral register map.
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt
index afeb65d..0da71a3 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt
@@ -262,6 +262,45 @@
 		    capacity learning cycle. If this is not specified, then
 		    the default value is 0. Unit is in decipercentage.
 
+- qcom,esr-disable
+	Usage:      optional
+	Value type: <bool>
+	Definition: Boolean property to disable ESR estimation. If not defined,
+		    ESR estimation stays enabled for charge cycles.
+
+- qcom,esr-discharge-enable
+	Usage:      optional
+	Value type: <bool>
+	Definition: Boolean property to enable ESR estimation during discharge.
+		    Only valid if 'qcom,esr-disable' is not defined.
+
+- qcom,esr-qual-current-ua
+	Usage:      optional
+	Value type: <u32>
+	Definition: Minimum current differential in uA to qualify an ESR
+		    reading as valid. If not defined the value defaults
+		    to 130mA.
+
+- qcom,esr-qual-vbatt-uv
+	Usage:      optional
+	Value type: <u32>
+	Definition: Minimum vbatt differential in uV to qualify an ESR
+		    reading as valid. If not defined the value defaults
+		    to 7mV.
+
+- qcom,esr-disable-soc
+	Usage:      optional
+	Value type: <u32>
+	Definition: Minimum battery SOC below which ESR will not be
+		    attempted by QG. If not defined the value defaults
+		    to 10%.
+
+- qcom,qg-ext-sns
+	Usage:      optional
+	Value type: <bool>
+	Definition: Boolean property to support external-rsense based
+		    configuration.
+
 ==========================================================
 Second Level Nodes - Peripherals managed by QGAUGE driver
 ==========================================================
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
index afa8009..9de24c3 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
@@ -193,6 +193,12 @@
 		to be get from these properties defined in battery profile:
 		qcom,step-chg-ranges, qcom,jeita-fcc-ranges, qcom,jeita-fv-ranges.
 
+- qcom,disable-stat-sw-override
+  Usage:      optional
+  Value type: bool
+  Definition: Boolean flag which, when present, disables the STAT pin default
+		software override configuration.
+
 =============================================
 Second Level Nodes - SMB2 Charger Peripherals
 =============================================
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt
index 8ee2749..65dcf59 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt
@@ -201,6 +201,25 @@
 	Definition: Phandle for the VADC node, it is used to obtain USBIN_V
 		    and USBIN_I readings on PMIC632 based platform.
 
+- qcom,hw-die-temp-mitigation
+	Usage:      optional
+	Value type: bool
+	Definition: Boolean flag which, when present, enables h/w based thermal
+		    mitigation.
+
+- qcom,hw-connector-mitigation
+	Usage:      optional
+	Value type: bool
+	Definition: Boolean flag which, when present, enables h/w based
+		    connector temperature mitigation.
+
+- qcom,connector-internal-pull-kohm
+	Usage:      optional
+	Value type: <u32>
+	Definition: Specifies the internal pull-up configuration to be applied to
+		    connector THERM; the only valid values are (0/30/100/400).
+		    If not specified, 100K is used as the default pull-up.
+
 =============================================
 Second Level Nodes - SMB5 Charger Peripherals
 =============================================
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/smb1355-charger.txt b/Documentation/devicetree/bindings/power/supply/qcom/smb1355-charger.txt
index 4f12ec0..528c285 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/smb1355-charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/smb1355-charger.txt
@@ -51,6 +51,18 @@
 	      stacked one after the other and thus all the charge current
 	      (FCC) flows through main. In a non-stacked configuration each
 	      charger controls the charge current (FCC) separately.
+
+- qcom,die-temp-threshold-degc
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies the DIE temp threshold beyond which h/w starts mitigation.
+	      If not specified, 90 degrees centigrade is used.
+
+- qcom,hw-die-temp-mitigation
+  Usage:      optional
+  Value type: bool
+  Definition: Boolean property to enable h/w controlled die temp mitigation.
+
 ================================================
 Second Level Nodes - SMB1355 Charger Peripherals
 ================================================
diff --git a/Documentation/devicetree/bindings/pwm/pwm-qti-lpg.txt b/Documentation/devicetree/bindings/pwm/pwm-qti-lpg.txt
index ddd90e1..2a84dd6 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-qti-lpg.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-qti-lpg.txt
@@ -20,7 +20,8 @@
 	Value type: <string>
 	Definition: The name of the register defined in the reg property.
 		      It must have "lpg-base", "lut-base" is optional but
-		      it's required if any LPG channels support LUT mode.
+		      it's required if any LPG channels support LUT mode
+		      with a LUT module.
 
 - #pwm-cells:
 	Usage: required
@@ -30,14 +31,47 @@
 		      the PWM channel ID indexed from 0, and the second
 		      cell is the PWM default period in nanoseconds.
 
+- nvmem-names:
+	Usage: optional
+	Value type: <string>
+	Definition: The nvmem device name for the SDAM module where the LUT
+		      pattern is stored. It must be "ppg_sdam". This property
+		      is required only when LUT mode is supported with a SDAM
+		      module instead of a LUT module.
+
+- nvmem:
+	Usage: optional
+	Value type: <phandle>
+	Definition: Phandle of the nvmem device to access the LUT stored
+		      in the SDAM module. This property is required only when
+		      LUT mode is supported and the LUT pattern is stored in a
+		      SDAM module instead of a LUT module.
+
+- qcom,pbs-client
+	Usage: optional
+	Value type: <phandle>
+	Definition: Phandle of the PBS client used for sending the PBS
+		      trigger. This property is required when LUT mode is
+		      supported and the LUT pattern is stored in a SDAM
+		      module instead of a LUT module.
+
+- qcom,lut-sdam-base:
+	Usage: optional
+	Value type: <u32>
+	Definition: The register base of the LUT entries stored in SDAM. This
+		      property is required only when LUT mode is supported and
+		      the LUT pattern is stored in a SDAM module instead of a
+		      LUT module.
+
 - qcom,lut-patterns:
 	Usage: optional
 	Value type: <prop-encoded-array>
 	Definition: Duty ratios in percentages for LPG working at LUT mode.
 		      These duty ratios will be translated into PWM values
-		      and stored in LUT module. The LUT module has resource
-		      to store 47 PWM values at max and shared for all LPG
-		      channels. This property is required if any LPG channels
+		      and stored in LUT or SDAM module shared for all LPG
+		      channels. The LUT module has resource to store 47 PWM
+		      values at max while SDAM module can store up to 64 PWM
+		      values. This property is required if any LPG channels
 		      support LUT mode.
 
 Subnode is optional if LUT mode is not required, it's required if any LPG
@@ -54,31 +88,37 @@
 		      range is 1 - 8. Maximum value depends on the number of
 		      channels supported on PMIC.
 
+- qcom,lpg-sdam-base:
+	Usage: optional
+	Value type: <u32>
+	Definition: Register base address for LPG configuration in SDAM for
+		      the LPG channel specified under "qcom,lpg-chan-id".
+		      This property is required if LUT mode is supported with
+		      a SDAM module.
+
 - qcom,ramp-step-ms:
 	Usage: required
 	Value type: <u32>
 	Definition: The step duration in milliseconds for LPG staying at each
-		      duty specified in the LUT pattern. Allowed range is
-		      1 - 511.
+		      duty specified in the LUT pattern. Allowed range:
+		      1 - 511 when LUT module is used, and 8 - 2000 when SDAM
+		      is used.
 
 - qcom,ramp-high-index:
 	Usage: required
 	Value type: <u32>
 	Definition: The high index of the LUT pattern where LPG ends up
-		      ramping to. Allowed range is 1 - 47.
+		      ramping to. Allowed range: 1 - 47 when LUT module
+		      is used, and 1 - 64 when SDAM module is used.
 
 - qcom,ramp-low-index:
 	Usage: required
 	Value type: <u32>
 	Definition: The low index of the LUT pattern from where LPG begins
-		      ramping from. Allowed range is 0 - 46.
-
-- qcom,ramp-from-low-to-high:
-	Usage: optional
-	Value type: <empty>
-	Definition: The flag to specify the LPG ramping direction. The ramping
-		      direction is from low index to high index of the LUT
-		      pattern if it's specified.
+		      ramping from. The ramp-low-index should always be less
+		      than ramp-high-index when SDAM module is used. Allowed
+		      range: 0 - 46 when LUT module is used, and 0 - 63 when
+		      SDAM module is used.
 
 - qcom,ramp-pattern-repeat:
 	Usage: optional
@@ -86,34 +126,66 @@
 	Definition: The flag to specify if LPG would be ramping with the LUT
 		      pattern repeatedly.
 
+- qcom,ramp-from-low-to-high:
+	Usage: optional
+	Value type: <empty>
+	Definition: The flag to specify the LPG ramping direction. The ramping
+		      direction is from low index to high index of the LUT
+		      pattern if it's specified. This property is not required
+		      when SDAM module is used.
+
 - qcom,ramp-toggle:
 	Usage: optional
 	Value type: <empty>
 	Definition: The flag to specify if LPG would toggle the LUT pattern
 		      in ramping. If toggling enabled, LPG would return to the
 		      low index when high index is reached, or return to the high
-		      index when low index is reached.
+		      index when low index is reached. This property is not
+		      required when SDAM module is used.
 
 - qcom,ramp-pause-hi-count:
 	Usage: optional
 	Value type: <u32>
 	Definition: The step count that LPG stop the output when it ramped up
-		      to the high index of the LUT.
+		      to the high index of the LUT. This property is not
+		      required when SDAM module is used.
 
 - qcom,ramp-pause-lo-count:
 	Usage: optional
 	Value type: <u32>
 	Definition: The step count that LPG stop the output when it ramped up
-		      to the low index of the LUT.
-Example:
+		      to the low index of the LUT. This property is not
+		      required when SDAM module is used.
 
-	pmi8998_lpg: lpg@b100 {
+Example when LUT pattern is stored in a LUT module:
+
+	pm8150l_lpg: lpg@b100 {
 		compatible = "qcom,pwm-lpg";
 		reg = <0xb100 0x600>, <0xb000 0x100>;
 		reg-names = "lpg-base", "lut-base";
 		#pwm-cells = <2>;
 		qcom,lut-patterns = <0 14 28 42 56 70 84 100
 					100 84 70 56 42 28 14 0>;
+		lpg@1 {
+			qcom,lpg-chan-id = <1>;
+			qcom,ramp-step-ms = <200>;
+			qcom,ramp-pause-hi-count = <10>;
+			qcom,ramp-pause-lo-count = <10>;
+			qcom,ramp-low-index = <0>;
+			qcom,ramp-high-index = <15>;
+			qcom,ramp-from-low-to-high;
+			qcom,ramp-pattern-repeat;
+		};
+		lpg@2 {
+			qcom,lpg-chan-id = <2>;
+			qcom,ramp-step-ms = <200>;
+			qcom,ramp-pause-hi-count = <10>;
+			qcom,ramp-pause-lo-count = <10>;
+			qcom,ramp-low-index = <0>;
+			qcom,ramp-high-index = <15>;
+			qcom,ramp-from-low-to-high;
+			qcom,ramp-pattern-repeat;
+		};
 		lpg@3 {
 			qcom,lpg-chan-id = <3>;
 			qcom,ramp-step-ms = <200>;
@@ -124,24 +196,43 @@
 			qcom,ramp-from-low-to-high;
 			qcom,ramp-pattern-repeat;
 		};
-		lpg@4 {
-			qcom,lpg-chan-id = <4>;
+	};
+
+Example when LUT pattern is stored in a SDAM module:
+
+	pmi632_lpg: lpg@b100 {
+		compatible = "qcom,pwm-lpg";
+		reg = <0xb100 0x600>;
+		reg-names = "lpg-base";
+		#pwm-cells = <2>;
+		nvmem-names = "ppg_sdam";
+		nvmem = <&sdam7>;
+		qcom,pbs-client = <&pbs_client_3>;
+		qcom,lut-sdam-base = <0x80>;
+		qcom,lut-patterns = <0 14 28 42 56 70 84 100
+					100 84 70 56 42 28 14 0>;
+		lpg@1 {
+			qcom,lpg-chan-id = <1>;
 			qcom,ramp-step-ms = <200>;
-			qcom,ramp-pause-hi-count = <10>;
-			qcom,ramp-pause-lo-count = <10>;
 			qcom,ramp-low-index = <0>;
 			qcom,ramp-high-index = <15>;
-			qcom,ramp-from-low-to-high;
 			qcom,ramp-pattern-repeat;
+			qcom,lpg-sdam-base = <0x48>;
 		};
-		lpg@5 {
-			qcom,lpg-chan-id = <5>;
+		lpg@2 {
+			qcom,lpg-chan-id = <2>;
 			qcom,ramp-step-ms = <200>;
-			qcom,ramp-pause-hi-count = <10>;
-			qcom,ramp-pause-lo-count = <10>;
 			qcom,ramp-low-index = <0>;
 			qcom,ramp-high-index = <15>;
-			qcom,ramp-from-low-to-high;
 			qcom,ramp-pattern-repeat;
+			qcom,lpg-sdam-base = <0x56>;
+		};
+		lpg@3 {
+			qcom,lpg-chan-id = <3>;
+			qcom,ramp-step-ms = <200>;
+			qcom,ramp-low-index = <0>;
+			qcom,ramp-high-index = <15>;
+			qcom,ramp-pattern-repeat;
+			qcom,lpg-sdam-base = <0x64>;
 		};
 	};
diff --git a/Documentation/devicetree/bindings/regulator/cpr3-regulator.txt b/Documentation/devicetree/bindings/regulator/cpr3-regulator.txt
index 846bd22..b02d759 100644
--- a/Documentation/devicetree/bindings/regulator/cpr3-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/cpr3-regulator.txt
@@ -241,6 +241,15 @@
 		    time in order to allow hardware closed-loop CPR voltage
 		    change requests to reach the PMIC regulator.
 
+- qcom,cpr-thread-has-always-vote-en
+	Usage:      optional; only meaningful for CPR4 controller
+	Value type: <empty>
+	Definition: Boolean value which indicates that the CPR controller should
+		    be configured to keep the thread vote always enabled. This
+		    configuration allows the CPR controller to not consider
+		    MID/DN recommendations from other threads when all sensors
+		    mapped to a thread have collapsed.
+
 =================================================
 Second Level Nodes - CPR Threads for a Controller
 =================================================
diff --git a/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt b/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt
index 803df6f..6fae8b0 100644
--- a/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt
@@ -61,6 +61,12 @@
 			in conjunction with "hw-ctrl-addr".
  - qcom,toggle-sw-collapse-in-disable: If set, SW_COLLAPSE bit is toggled
 			in disable call.
+ - qcom,en-few-wait-val: Input value for EN_FEW_WAIT, which controls the state
+			 transition delay after receiving the ack signal
+			 (gds_enf_ack) from the longest en_few power switch chain.
+ - qcom,en-rest-wait-val: Input value for EN_REST_WAIT, which controls the state
+			  transition delay after receiving the ack signal
+			  (gds_enr_ack) from the longest en_rest power switch chain.
 
 Example:
 	gdsc_oxili_gx: qcom,gdsc@fd8c4024 {
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index 7b8ae6f..20cb25b 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -300,6 +300,11 @@
  - compatible : "qcom,msm-audio-apr"
 		This device is added to represent APR module.
 
+ - qcom,subsys-name: This value provides the subsystem name where the codec
+		is present. It can be "apr_modem" or "apr_adsp". This
+		property enables the apr driver to receive subsystem up/down
+		notifications from the modem/adsp.
+
 Optional properties:
 
  - compatible : "qcom,msm-audio-apr-dummy"
@@ -434,6 +439,9 @@
 Required properties:
 
  - compatible :                            "qcom,wcd-dsp-glink"
+ - qcom,msm-codec-glink-edge:              Name of the glink edge which is used
+                                           for IPC.
+                                           If no name is set, it defaults to "wdsp"
 
 * msm_ext_disp_audio_codec_rx
 
@@ -661,6 +669,8 @@
 		msm_audio_apr_dummy {
 			compatible = "qcom,msm-audio-apr-dummy";
 		};
+
+		qcom,subsys-name = "apr_adsp";
 	};
 
 	qcom,msm-ocmem-audio {
@@ -722,6 +732,7 @@
 
 	wcd_dsp_glink {
 		compatible = "qcom,wcd-dsp-glink";
+		qcom,msm-codec-glink-edge = "bg";
 	};
 
 	msm_ext_disp_audio_codec_rx {
@@ -1180,6 +1191,18 @@
 				When clock rate is set to zero,
 				then external clock is assumed.
 
+ - qcom,msm-cpudai-tdm-afe-ebit-unsupported: Notify if ebit
+				setting is needed. When this is
+				set, along with a clock rate of
+				zero, the afe is not configured
+				for a clock.
+
+ - qcom,msm-cpudai-tdm-sec-port-enable: For chipsets with the
+				limitation where we need to enable
+				both RX and TX AFE ports, this flag
+				is used to enable TX/RX port for
+				RX/TX streams.
+
  - qcom,msm-cpudai-tdm-clk-internal: Clock Source.
 				0 - EBIT clock from clk tree
 				1 - IBIT clock from clk tree
diff --git a/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt b/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt
index 866d004..8b6d5a2 100644
--- a/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt
+++ b/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt
@@ -28,6 +28,7 @@
 Optional properties:
 - qcom,rt:	Specifies if the framework worker thread for this
 		controller device should have "real-time" priority.
+- qcom,disable-autosuspend: Specifies that runtime PM autosuspend should be disabled.
 
 SPI slave nodes must be children of the SPI master node and can contain
 properties described in Documentation/devicetree/bindings/spi/spi-bus.txt
diff --git a/Documentation/devicetree/bindings/thermal/tsens.txt b/Documentation/devicetree/bindings/thermal/tsens.txt
index 6ff6e9b..f25691a 100644
--- a/Documentation/devicetree/bindings/thermal/tsens.txt
+++ b/Documentation/devicetree/bindings/thermal/tsens.txt
@@ -20,6 +20,7 @@
 	       should be "qcom,sdm845-tsens" for SDM845 TSENS driver.
 	       should be "qcom,tsens24xx" for 2.4 TSENS controller.
 	       should be "qcom,msm8937-tsens" for 8937 TSENS driver.
+	       should be "qcom,msm8909-tsens" for 8909 TSENS driver.
 	       The compatible property is used to identify the respective controller to use
 	       for the corresponding SoC.
 - reg : offset and length of the TSENS registers with associated property in reg-names
diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
index 7f79f40..f075a2a 100644
--- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
@@ -74,6 +74,7 @@
 			  Defaults to 26 MHz if not specified.
 - extcon:       phandle to external connector (Refer Documentation/devicetree/bindings/extcon/extcon-gpio.txt for more details).
 - non-removable		: defines if the connected ufs device is not removable
+- force-ufshc-probe	: Force probing of the UFS device (non-removable) even if it is not the boot device.
 
 
 Note: If above properties are not defined it can be assumed that the supply
diff --git a/Documentation/devicetree/bindings/usb/msm-hsusb.txt b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
index 5256edd..431c32a 100644
--- a/Documentation/devicetree/bindings/usb/msm-hsusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
@@ -189,6 +189,8 @@
 	point to external connector device, which provide "USB-HOST" cable events.
 	A single phandle may be specified if a single connector device provides
 	both "USB" and "USB-HOST" events.
+- qcom,phy-id-high-as-peripheral: If present, the device switches to device mode
+	when the PHY ID state is high, and to host mode when the PHY ID state is low.
 
 Example HSUSB OTG controller device node :
 	usb@f9690000 {
diff --git a/Documentation/devicetree/bindings/usb/msm-phy.txt b/Documentation/devicetree/bindings/usb/msm-phy.txt
index b880890..c28b05b 100644
--- a/Documentation/devicetree/bindings/usb/msm-phy.txt
+++ b/Documentation/devicetree/bindings/usb/msm-phy.txt
@@ -191,6 +191,8 @@
  - qcom,tune2-efuse-correction: The value to be adjusted from fused value for
    improved rise/fall times.
  - qcom,host-chirp-erratum: Indicates host chirp fix is required.
+ - qcom,override-bias-ctrl2: Indicates that the BIAS_CTRL2 register is
+   overridden by the driver.
  - nvmem-cells: specifies the handle to represent the SoC revision.
    usually it is defined by qfprom device node.
  - nvmem-cell-names: specifies the given nvmem cell name as defined in
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index e1be5fd..a22edb5 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -103,6 +103,7 @@
 fcs	Fairchild Semiconductor
 firefly	Firefly
 focaltech	FocalTech Systems Co.,Ltd
+fpc	Fingerprint Cards AB.
 friendlyarm	Guangzhou FriendlyARM Computer Tech Co., Ltd
 fsl	Freescale Semiconductor
 ge	General Electric Company
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
index e7fb61e..193a034 100644
--- a/Documentation/filesystems/f2fs.txt
+++ b/Documentation/filesystems/f2fs.txt
@@ -171,6 +171,25 @@
 offprjjquota           Turn off project journelled quota.
 quota                  Enable plain user disk quota accounting.
 noquota                Disable all plain disk quota option.
+whint_mode=%s          Control which write hints are passed down to block
+                       layer. This supports "off", "user-based", and
+                       "fs-based".  In "off" mode (default), f2fs does not pass
+                       down hints. In "user-based" mode, f2fs tries to pass
+                       down hints given by users. And in "fs-based" mode, f2fs
+                       passes down hints with its policy.
+alloc_mode=%s          Adjust block allocation policy, which supports "reuse"
+                       and "default".
+fsync_mode=%s          Control the policy of fsync. Currently supports "posix",
+                       "strict", and "nobarrier". In "posix" mode, which is the
+                       default, fsync will follow POSIX semantics and do a
+                       light operation to improve the filesystem performance.
+                       In "strict" mode, fsync will be heavy and behaves in line
+                       with xfs, ext4 and btrfs, where xfstest generic/342 will
+                       pass, but the performance will regress. "nobarrier" is
+                       based on "posix", but doesn't issue flush command for
+                       non-atomic files, like the "nobarrier" mount option.
+test_dummy_encryption  Enable dummy encryption, which provides a fake fscrypt
+                       context. The fake fscrypt context is used by xfstests.
 
 ================================================================================
 DEBUGFS ENTRIES
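
As a quick illustration of the f2fs mount options documented above, they can be
passed like any other mount data string from C. This is only a sketch; the
device node and mount point below are assumptions, not part of the patch:

	#include <stdio.h>
	#include <sys/mount.h>

	int main(void)
	{
		/* Hypothetical device and mount point; adjust for your setup. */
		const char *dev = "/dev/sdb1";
		const char *dir = "/mnt/f2fs";

		/* Pass the newly documented options as the mount data string. */
		if (mount(dev, dir, "f2fs", 0,
			  "whint_mode=user-based,fsync_mode=strict") != 0) {
			perror("mount");
			return 1;
		}
		printf("mounted %s on %s\n", dev, dir);
		return 0;
	}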
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 0b8f21f..435a509 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2662,6 +2662,9 @@
 
 	noalign		[KNL,ARM]
 
+	noaltinstr	[S390] Disables alternative instructions patching
+			(CPU alternatives feature).
+
 	noapic		[SMP,APIC] Tells the kernel to not make use of any
 			IOAPICs that may be present in the system.
 
@@ -2718,6 +2721,9 @@
 			allow data leaks with this option, which is equivalent
 			to spectre_v2=off.
 
+	nospec_store_bypass_disable
+			[HW] Disable all mitigations for the Speculative Store Bypass vulnerability
+
 	noxsave		[BUGS=X86] Disables x86 extended register state save
 			and restore using xsave. The kernel will fallback to
 			enabling legacy floating-point and sse state.
@@ -3992,6 +3998,48 @@
 			Not specifying this option is equivalent to
 			spectre_v2=auto.
 
+	spec_store_bypass_disable=
+			[HW] Control Speculative Store Bypass (SSB) Disable mitigation
+			(Speculative Store Bypass vulnerability)
+
+			Certain CPUs are vulnerable to an exploit against
+			a common industry-wide performance optimization known
+			as "Speculative Store Bypass" in which recent stores
+			to the same memory location may not be observed by
+			later loads during speculative execution. The idea
+			is that such stores are unlikely and that they can
+			be detected prior to instruction retirement at the
+			end of a particular speculation execution window.
+
+			In vulnerable processors, the speculatively forwarded
+			store can be used in a cache side channel attack, for
+			example to read memory to which the attacker does not
+			directly have access (e.g. inside sandboxed code).
+
+			This parameter controls whether the Speculative Store
+			Bypass optimization is used.
+
+			on      - Unconditionally disable Speculative Store Bypass
+			off     - Unconditionally enable Speculative Store Bypass
+			auto    - Kernel detects whether the CPU model contains an
+				  implementation of Speculative Store Bypass and
+				  picks the most appropriate mitigation. If the
+				  CPU is not vulnerable, "off" is selected. If the
+				  CPU is vulnerable the default mitigation is
+				  architecture and Kconfig dependent. See below.
+			prctl   - Control Speculative Store Bypass per thread
+				  via prctl. Speculative Store Bypass is enabled
+				  for a process by default. The state of the control
+				  is inherited on fork.
+			seccomp - Same as "prctl" above, but all seccomp threads
+				  will disable SSB unless they explicitly opt out.
+
+			Not specifying this option is equivalent to
+			spec_store_bypass_disable=auto.
+
+			Default mitigations:
+			X86:	If CONFIG_SECCOMP=y "seccomp", otherwise "prctl"
+
 	spia_io_base=	[HW,MTD]
 	spia_fio_base=
 	spia_pedr=
diff --git a/Documentation/spec_ctrl.txt b/Documentation/spec_ctrl.txt
new file mode 100644
index 0000000..32f3d55
--- /dev/null
+++ b/Documentation/spec_ctrl.txt
@@ -0,0 +1,94 @@
+===================
+Speculation Control
+===================
+
+Quite a few CPUs have speculation-related misfeatures which are in
+fact vulnerabilities causing data leaks in various forms even across
+privilege domains.
+
+The kernel provides mitigation for such vulnerabilities in various
+forms. Some of these mitigations are compile-time configurable and some
+can be supplied on the kernel command line.
+
+There is also a class of mitigations which are very expensive, but they can
+be restricted to a certain set of processes or tasks in controlled
+environments. The mechanism to control these mitigations is via
+:manpage:`prctl(2)`.
+
+There are two prctl options which are related to this:
+
+ * PR_GET_SPECULATION_CTRL
+
+ * PR_SET_SPECULATION_CTRL
+
+PR_GET_SPECULATION_CTRL
+-----------------------
+
+PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature
+which is selected with arg2 of prctl(2). The return value uses bits 0-3 with
+the following meaning:
+
+==== ===================== ===================================================
+Bit  Define                Description
+==== ===================== ===================================================
+0    PR_SPEC_PRCTL         Mitigation can be controlled per task by
+                           PR_SET_SPECULATION_CTRL.
+1    PR_SPEC_ENABLE        The speculation feature is enabled, mitigation is
+                           disabled.
+2    PR_SPEC_DISABLE       The speculation feature is disabled, mitigation is
+                           enabled.
+3    PR_SPEC_FORCE_DISABLE Same as PR_SPEC_DISABLE, but cannot be undone. A
+                           subsequent prctl(..., PR_SPEC_ENABLE) will fail.
+==== ===================== ===================================================
+
+If all bits are 0 the CPU is not affected by the speculation misfeature.
+
+If PR_SPEC_PRCTL is set, then the per-task control of the mitigation is
+available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation
+misfeature will fail.
+
+PR_SET_SPECULATION_CTRL
+-----------------------
+
+PR_SET_SPECULATION_CTRL allows controlling the speculation misfeature, which
+is selected by arg2 of :manpage:`prctl(2)`, per task. arg3 is used to hand
+in the control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE or
+PR_SPEC_FORCE_DISABLE.
+
+Common error codes
+------------------
+======= =================================================================
+Value   Meaning
+======= =================================================================
+EINVAL  The prctl is not implemented by the architecture or unused
+        prctl(2) arguments are not 0.
+
+ENODEV  arg2 is selecting an unsupported speculation misfeature.
+======= =================================================================
+
+PR_SET_SPECULATION_CTRL error codes
+-----------------------------------
+======= =================================================================
+Value   Meaning
+======= =================================================================
+0       Success
+
+ERANGE  arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor
+        PR_SPEC_DISABLE nor PR_SPEC_FORCE_DISABLE.
+
+ENXIO   Control of the selected speculation misfeature is not possible.
+        See PR_GET_SPECULATION_CTRL.
+
+EPERM   Speculation was disabled with PR_SPEC_FORCE_DISABLE and caller
+        tried to enable it again.
+======= =================================================================
+
+Speculation misfeature controls
+-------------------------------
+- PR_SPEC_STORE_BYPASS: Speculative Store Bypass
+
+  Invocations:
+   * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
+   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0);
+   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
+   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0);
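
A minimal user-space sketch of the invocations listed above; it assumes a libc
whose <sys/prctl.h> pulls in the PR_{GET,SET}_SPECULATION_CTRL and PR_SPEC_*
definitions from the kernel uapi headers:

	#include <stdio.h>
	#include <sys/prctl.h>

	int main(void)
	{
		/* Query the current state of the Speculative Store Bypass control. */
		int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

		if (state < 0) {
			perror("PR_GET_SPECULATION_CTRL");
			return 1;
		}
		if (!(state & PR_SPEC_PRCTL)) {
			fprintf(stderr, "per-task control is not available\n");
			return 1;
		}

		/*
		 * Disable the speculation misfeature (i.e. enable the mitigation)
		 * for this task; the setting is inherited across fork().
		 */
		if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
			  PR_SPEC_DISABLE, 0, 0) != 0) {
			perror("PR_SET_SPECULATION_CTRL");
			return 1;
		}
		return 0;
	}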
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 206c9b0..3cab335 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -64,6 +64,7 @@
 - vfs_cache_pressure
 - watermark_scale_factor
 - zone_reclaim_mode
+- want_old_faultaround_pte
 
 ==============================================================
 
@@ -891,4 +892,25 @@
 node unless explicitly overridden by memory policies or cpuset
 configurations.
 
+=============================================================
+
+want_old_faultaround_pte:
+
+By default the faultaround code produces young ptes. When want_old_faultaround_pte
+is set to 1, faultaround produces old ptes.
+
+During sparse file access, faultaround gets more pages mapped, and when all of
+them are young (the default), under memory pressure this makes vmscan swap out
+anon pages instead, or drop other page cache pages which would otherwise stay
+resident. Setting want_old_faultaround_pte to 1 avoids this.
+
+Making the faultaround ptes old can result in a performance regression on some
+architectures. This is due to cycles spent in micro-faults which take a page
+walk to set the young bit in the pte. One known test that shows a regression on
+x86 is unixbench shell8. Set want_old_faultaround_pte to 1 on architectures
+which do not show this regression, or if the workload shows an overall
+performance benefit with old faultaround ptes.
+
+The default value is 0.
+
 ============ End of Document =================================
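
For illustration, the new want_old_faultaround_pte knob can be toggled from
user space like any other vm sysctl; a minimal sketch, assuming procfs is
mounted at /proc:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Write "1" so faultaround produces old ptes, as described above. */
		int fd = open("/proc/sys/vm/want_old_faultaround_pte", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, "1\n", 2) != 2) {
			perror("write");
			close(fd);
			return 1;
		}
		close(fd);
		return 0;
	}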
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 1f5eab4..e46c14f 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -2118,6 +2118,9 @@
 ARM 64-bit FP registers have the following id bit patterns:
   0x4030 0000 0012 0 <regno:12>
 
+ARM firmware pseudo-registers have the following bit pattern:
+  0x4030 0000 0014 <regno:16>
+
 
 arm64 registers are mapped using the lower 32 bits. The upper 16 of
 that is the register group type, or coprocessor number:
@@ -2134,6 +2137,9 @@
 arm64 system registers have the following id bit patterns:
   0x6030 0000 0013 <op0:2> <op1:3> <crn:4> <crm:4> <op2:3>
 
+arm64 firmware pseudo-registers have the following bit pattern:
+  0x6030 0000 0014 <regno:16>
+
 
 MIPS registers are mapped using the lower 32 bits.  The upper 16 of that is
 the register group type:
@@ -2656,7 +2662,8 @@
 	  and execute guest code when KVM_RUN is called.
 	- KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode.
 	  Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
-	- KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 for the CPU.
+	- KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 (or a future revision
+          backward compatible with v0.2) for the CPU.
 	  Depends on KVM_CAP_ARM_PSCI_0_2.
 	- KVM_ARM_VCPU_PMU_V3: Emulate PMUv3 for the CPU.
 	  Depends on KVM_CAP_ARM_PMU_V3.
diff --git a/Documentation/virtual/kvm/arm/psci.txt b/Documentation/virtual/kvm/arm/psci.txt
new file mode 100644
index 0000000..aafdab8
--- /dev/null
+++ b/Documentation/virtual/kvm/arm/psci.txt
@@ -0,0 +1,30 @@
+KVM implements the PSCI (Power State Coordination Interface)
+specification in order to provide services such as CPU on/off, reset
+and power-off to the guest.
+
+The PSCI specification is regularly updated to provide new features,
+and KVM implements these updates if they make sense from a virtualization
+point of view.
+
+This means that a guest booted on two different versions of KVM can
+observe two different "firmware" revisions. This could cause issues if
+a given guest is tied to a particular PSCI revision (unlikely), or if
+a migration causes a different PSCI version to be exposed out of the
+blue to an unsuspecting guest.
+
+In order to remedy this situation, KVM exposes a set of "firmware
+pseudo-registers" that can be manipulated using the GET/SET_ONE_REG
+interface. These registers can be saved/restored by userspace, and set
+to a convenient value if required.
+
+The following register is defined:
+
+* KVM_REG_ARM_PSCI_VERSION:
+
+  - Only valid if the vcpu has the KVM_ARM_VCPU_PSCI_0_2 feature set
+    (and thus has already been initialized)
+  - Returns the current PSCI version on GET_ONE_REG (defaulting to the
+    highest PSCI version implemented by KVM and compatible with v0.2)
+  - Allows any PSCI version implemented by KVM and compatible with
+    v0.2 to be set with SET_ONE_REG
+  - Affects the whole VM (even if the register view is per-vcpu)
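
For illustration, a hedged sketch of reading this pseudo-register through the
GET_ONE_REG interface; it assumes an already-created and initialized arm64 vcpu
file descriptor and that KVM_REG_ARM_PSCI_VERSION is exported by the uapi
headers in this tree:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Read KVM_REG_ARM_PSCI_VERSION from an existing, initialized vcpu fd. */
	static int read_psci_version(int vcpu_fd, uint64_t *version)
	{
		struct kvm_one_reg reg = {
			.id   = KVM_REG_ARM_PSCI_VERSION,
			.addr = (uint64_t)(uintptr_t)version,
		};

		return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	}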
diff --git a/Makefile b/Makefile
index 41cc032..6fac39b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 92
+SUBLEVEL = 106
 EXTRAVERSION =
 NAME = Roaring Lionus
 
diff --git a/arch/alpha/include/asm/futex.h b/arch/alpha/include/asm/futex.h
index f939794..5647469 100644
--- a/arch/alpha/include/asm/futex.h
+++ b/arch/alpha/include/asm/futex.h
@@ -29,18 +29,10 @@
 	:	"r" (uaddr), "r"(oparg)				\
 	:	"memory")
 
-static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+		u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, ret;
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
-		return -EFAULT;
 
 	pagefault_disable();
 
@@ -66,17 +58,9 @@
 
 	pagefault_enable();
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
-		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
-		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
-		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
-		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
-		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
-		default: ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
+
 	return ret;
 }
 
diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h
index 0ca9724..7081e52 100644
--- a/arch/alpha/include/asm/xchg.h
+++ b/arch/alpha/include/asm/xchg.h
@@ -11,6 +11,10 @@
  * Atomic exchange.
  * Since it can be used to implement critical sections
  * it must clobber "memory" (also for interrupts in UP).
+ *
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
  */
 
 static inline unsigned long
@@ -18,6 +22,7 @@
 {
 	unsigned long ret, tmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%4,7,%3\n"
 	"	insbl	%1,%4,%1\n"
@@ -42,6 +47,7 @@
 {
 	unsigned long ret, tmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%4,7,%3\n"
 	"	inswl	%1,%4,%1\n"
@@ -66,6 +72,7 @@
 {
 	unsigned long dummy;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%4\n"
 	"	bis $31,%3,%1\n"
@@ -86,6 +93,7 @@
 {
 	unsigned long dummy;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%4\n"
 	"	bis $31,%3,%1\n"
@@ -127,10 +135,12 @@
  * store NEW in MEM.  Return the initial value in MEM.  Success is
  * indicated by comparing RETURN with OLD.
  *
- * The memory barrier should be placed in SMP only when we actually
- * make the change. If we don't change anything (so if the returned
- * prev is equal to old) then we aren't acquiring anything new and
- * we don't need any memory barrier as far I can tell.
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
+ * The trailing memory barrier is placed in SMP unconditionally, in
+ * order to guarantee that dependency ordering is preserved when a
+ * dependency is headed by an unsuccessful operation.
  */
 
 static inline unsigned long
@@ -138,6 +148,7 @@
 {
 	unsigned long prev, tmp, cmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%5,7,%4\n"
 	"	insbl	%1,%5,%1\n"
@@ -149,8 +160,8 @@
 	"	or	%1,%2,%2\n"
 	"	stq_c	%2,0(%4)\n"
 	"	beq	%2,3f\n"
-		__ASM__MB
 	"2:\n"
+		__ASM__MB
 	".subsection 2\n"
 	"3:	br	1b\n"
 	".previous"
@@ -165,6 +176,7 @@
 {
 	unsigned long prev, tmp, cmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%5,7,%4\n"
 	"	inswl	%1,%5,%1\n"
@@ -176,8 +188,8 @@
 	"	or	%1,%2,%2\n"
 	"	stq_c	%2,0(%4)\n"
 	"	beq	%2,3f\n"
-		__ASM__MB
 	"2:\n"
+		__ASM__MB
 	".subsection 2\n"
 	"3:	br	1b\n"
 	".previous"
@@ -192,6 +204,7 @@
 {
 	unsigned long prev, cmp;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%5\n"
 	"	cmpeq %0,%3,%1\n"
@@ -199,8 +212,8 @@
 	"	mov %4,%1\n"
 	"	stl_c %1,%2\n"
 	"	beq %1,3f\n"
-		__ASM__MB
 	"2:\n"
+		__ASM__MB
 	".subsection 2\n"
 	"3:	br 1b\n"
 	".previous"
@@ -215,6 +228,7 @@
 {
 	unsigned long prev, cmp;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%5\n"
 	"	cmpeq %0,%3,%1\n"
@@ -222,8 +236,8 @@
 	"	mov %4,%1\n"
 	"	stq_c %1,%2\n"
 	"	beq %1,3f\n"
-		__ASM__MB
 	"2:\n"
+		__ASM__MB
 	".subsection 2\n"
 	"3:	br 1b\n"
 	".previous"
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 249e101..b7b78cb 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -495,7 +495,6 @@
 
 config ARC_EMUL_UNALIGNED
 	bool "Emulate unaligned memory access (userspace only)"
-	default N
 	select SYSCTL_ARCH_UNALIGN_NO_WARN
 	select SYSCTL_ARCH_UNALIGN_ALLOW
 	depends on ISA_ARCOMPACT
diff --git a/arch/arc/include/asm/futex.h b/arch/arc/include/asm/futex.h
index 11e1b1f..eb887dd 100644
--- a/arch/arc/include/asm/futex.h
+++ b/arch/arc/include/asm/futex.h
@@ -73,20 +73,11 @@
 
 #endif
 
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+		u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, ret;
 
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
-		return -EFAULT;
-
 #ifndef CONFIG_ARC_HAS_LLSC
 	preempt_disable();	/* to guarantee atomic r-m-w of futex op */
 #endif
@@ -118,30 +109,9 @@
 	preempt_enable();
 #endif
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ:
-			ret = (oldval == cmparg);
-			break;
-		case FUTEX_OP_CMP_NE:
-			ret = (oldval != cmparg);
-			break;
-		case FUTEX_OP_CMP_LT:
-			ret = (oldval < cmparg);
-			break;
-		case FUTEX_OP_CMP_GE:
-			ret = (oldval >= cmparg);
-			break;
-		case FUTEX_OP_CMP_LE:
-			ret = (oldval <= cmparg);
-			break;
-		case FUTEX_OP_CMP_GT:
-			ret = (oldval > cmparg);
-			break;
-		default:
-			ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
+
 	return ret;
 }
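
For reference, the decode and compare steps deleted from the alpha and arc
helpers above are expected to move into the common futex code; a sketch of that
caller, mirroring the removed lines (the in-tree kernel/futex.c version may
differ in detail):

	static int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
	{
		int op = (encoded_op >> 28) & 7;
		int cmp = (encoded_op >> 24) & 15;
		int oparg = (encoded_op << 8) >> 20;	/* sign-extended 12-bit field */
		int cmparg = (encoded_op << 20) >> 20;	/* sign-extended 12-bit field */
		int oldval, ret;

		if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
			oparg = 1 << oparg;

		if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
			return -EFAULT;

		/* The arch hook now only does the atomic op and reports oldval. */
		ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
		if (ret)
			return ret;

		/* The comparison against cmparg is done once, in common code. */
		switch (cmp) {
		case FUTEX_OP_CMP_EQ: return oldval == cmparg;
		case FUTEX_OP_CMP_NE: return oldval != cmparg;
		case FUTEX_OP_CMP_LT: return oldval <  cmparg;
		case FUTEX_OP_CMP_GE: return oldval >= cmparg;
		case FUTEX_OP_CMP_LE: return oldval <= cmparg;
		case FUTEX_OP_CMP_GT: return oldval >  cmparg;
		default:              return -ENOSYS;
		}
	}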
 
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 04b1c06..54e93a0 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -574,6 +574,7 @@
        select PINCTRL
        select ARCH_WANT_KMAP_ATOMIC_FLUSH
        select SND_SOC_COMPRESS
+       select SND_HWDEP
        help
          Support for Qualcomm MSM/QSD based systems.  This runs on the
          apps processor of the MSM/QSD and depends on a shared memory
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index a6d5794..f66f377 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -13,6 +13,10 @@
 # Ensure linker flags are correct
 LDFLAGS		:=
 
+ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y)
+export DTC_FLAGS := -@
+endif
+
 LDFLAGS_vmlinux	:=-p --no-undefined -X --pic-veneer
 ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
 LDFLAGS_vmlinux	+= --be8
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 2fa123e..52e8c50 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -34,10 +34,10 @@
 DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE_NAMES))
 ifneq ($(DTB_NAMES),)
 DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES))
-else
-DTB_LIST := $(dtb-y)
-endif
 DTB_OBJS := $(addprefix $(obj)/dts/,$(DTB_LIST))
+else
+DTB_OBJS := $(shell find $(obj)/dts/ -name \*.dtb)
+endif
 
 ifeq ($(CONFIG_XIP_KERNEL),y)
 
diff --git a/arch/arm/boot/dts/am335x-pepper.dts b/arch/arm/boot/dts/am335x-pepper.dts
index 42b62f5..30e2f87 100644
--- a/arch/arm/boot/dts/am335x-pepper.dts
+++ b/arch/arm/boot/dts/am335x-pepper.dts
@@ -139,7 +139,7 @@
 &audio_codec {
 	status = "okay";
 
-	reset-gpios = <&gpio1 16 GPIO_ACTIVE_LOW>;
+	gpio-reset = <&gpio1 16 GPIO_ACTIVE_LOW>;
 	AVDD-supply = <&ldo3_reg>;
 	IOVDD-supply = <&ldo3_reg>;
 	DRVDD-supply = <&ldo3_reg>;
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
index 6df7829..78bee26 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
@@ -204,6 +204,7 @@
 		interrupt-controller;
 
 		ti,system-power-controller;
+		ti,palmas-override-powerhold;
 
 		tps659038_pmic {
 			compatible = "ti,tps659038-pmic";
diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi
index db858ff..1cc6272 100644
--- a/arch/arm/boot/dts/am57xx-idk-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi
@@ -57,6 +57,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		ti,system-power-controller;
+		ti,palmas-override-powerhold;
 
 		tps659038_pmic {
 			compatible = "ti,tps659038-pmic";
diff --git a/arch/arm/boot/dts/at91sam9g25.dtsi b/arch/arm/boot/dts/at91sam9g25.dtsi
index a7da0dd..0898213 100644
--- a/arch/arm/boot/dts/at91sam9g25.dtsi
+++ b/arch/arm/boot/dts/at91sam9g25.dtsi
@@ -21,7 +21,7 @@
 				atmel,mux-mask = <
 				      /*    A         B          C     */
 				       0xffffffff 0xffe0399f 0xc000001c  /* pioA */
-				       0x0007ffff 0x8000fe3f 0x00000000  /* pioB */
+				       0x0007ffff 0x00047e3f 0x00000000  /* pioB */
 				       0x80000000 0x07c0ffff 0xb83fffff  /* pioC */
 				       0x003fffff 0x003f8000 0x00000000  /* pioD */
 				      >;
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index 74dd21b..c51b88e 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -146,8 +146,8 @@
 
 		i2s: i2s@7e203000 {
 			compatible = "brcm,bcm2835-i2s";
-			reg = <0x7e203000 0x20>,
-			      <0x7e101098 0x02>;
+			reg = <0x7e203000 0x24>;
+			clocks = <&clocks BCM2835_CLOCK_PCM>;
 
 			dmas = <&dma 2>,
 			       <&dma 3>;
diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts
index a1658d0..cf0de77 100644
--- a/arch/arm/boot/dts/bcm958625hr.dts
+++ b/arch/arm/boot/dts/bcm958625hr.dts
@@ -49,7 +49,7 @@
 
 	memory {
 		device_type = "memory";
-		reg = <0x60000000 0x80000000>;
+		reg = <0x60000000 0x20000000>;
 	};
 
 	gpio-restart {
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index 132f2be..56311fd 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -398,6 +398,8 @@
 	tps659038: tps659038@58 {
 		compatible = "ti,tps659038";
 		reg = <0x58>;
+		ti,palmas-override-powerhold;
+		ti,system-power-controller;
 
 		tps659038_pmic {
 			compatible = "ti,tps659038-pmic";
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index f7357d9..64de33d 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -640,7 +640,7 @@
 			power-domains = <&pd_gsc>;
 			clocks = <&clock CLK_GSCL0>;
 			clock-names = "gscl";
-			iommu = <&sysmmu_gsc0>;
+			iommus = <&sysmmu_gsc0>;
 		};
 
 		gsc_1:  gsc@13e10000 {
@@ -650,7 +650,7 @@
 			power-domains = <&pd_gsc>;
 			clocks = <&clock CLK_GSCL1>;
 			clock-names = "gscl";
-			iommu = <&sysmmu_gsc1>;
+			iommus = <&sysmmu_gsc1>;
 		};
 
 		gsc_2:  gsc@13e20000 {
@@ -660,7 +660,7 @@
 			power-domains = <&pd_gsc>;
 			clocks = <&clock CLK_GSCL2>;
 			clock-names = "gscl";
-			iommu = <&sysmmu_gsc2>;
+			iommus = <&sysmmu_gsc2>;
 		};
 
 		gsc_3:  gsc@13e30000 {
@@ -670,7 +670,7 @@
 			power-domains = <&pd_gsc>;
 			clocks = <&clock CLK_GSCL3>;
 			clock-names = "gscl";
-			iommu = <&sysmmu_gsc3>;
+			iommus = <&sysmmu_gsc3>;
 		};
 
 		hdmi: hdmi@14530000 {
diff --git a/arch/arm/boot/dts/imx53-qsrb.dts b/arch/arm/boot/dts/imx53-qsrb.dts
index 96d7eed..036c9bd 100644
--- a/arch/arm/boot/dts/imx53-qsrb.dts
+++ b/arch/arm/boot/dts/imx53-qsrb.dts
@@ -23,7 +23,7 @@
 	imx53-qsrb {
 		pinctrl_pmic: pmicgrp {
 			fsl,pins = <
-				MX53_PAD_CSI0_DAT5__GPIO5_23	0x1e4 /* IRQ */
+				MX53_PAD_CSI0_DAT5__GPIO5_23	0x1c4 /* IRQ */
 			>;
 		};
 	};
diff --git a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
index 58b09bf..2051306 100644
--- a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
+++ b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
@@ -213,37 +213,37 @@
 &iomuxc {
 	pinctrl_enet1: enet1grp {
 		fsl,pins = <
-			MX7D_PAD_SD2_CD_B__ENET1_MDIO			0x3
-			MX7D_PAD_SD2_WP__ENET1_MDC			0x3
-			MX7D_PAD_ENET1_RGMII_TXC__ENET1_RGMII_TXC	0x1
-			MX7D_PAD_ENET1_RGMII_TD0__ENET1_RGMII_TD0	0x1
-			MX7D_PAD_ENET1_RGMII_TD1__ENET1_RGMII_TD1	0x1
-			MX7D_PAD_ENET1_RGMII_TD2__ENET1_RGMII_TD2	0x1
-			MX7D_PAD_ENET1_RGMII_TD3__ENET1_RGMII_TD3	0x1
-			MX7D_PAD_ENET1_RGMII_TX_CTL__ENET1_RGMII_TX_CTL	0x1
-			MX7D_PAD_ENET1_RGMII_RXC__ENET1_RGMII_RXC	0x1
-			MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0	0x1
-			MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1	0x1
-			MX7D_PAD_ENET1_RGMII_RD2__ENET1_RGMII_RD2	0x1
-			MX7D_PAD_ENET1_RGMII_RD3__ENET1_RGMII_RD3	0x1
-			MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL	0x1
+			MX7D_PAD_SD2_CD_B__ENET1_MDIO			0x30
+			MX7D_PAD_SD2_WP__ENET1_MDC			0x30
+			MX7D_PAD_ENET1_RGMII_TXC__ENET1_RGMII_TXC	0x11
+			MX7D_PAD_ENET1_RGMII_TD0__ENET1_RGMII_TD0	0x11
+			MX7D_PAD_ENET1_RGMII_TD1__ENET1_RGMII_TD1	0x11
+			MX7D_PAD_ENET1_RGMII_TD2__ENET1_RGMII_TD2	0x11
+			MX7D_PAD_ENET1_RGMII_TD3__ENET1_RGMII_TD3	0x11
+			MX7D_PAD_ENET1_RGMII_TX_CTL__ENET1_RGMII_TX_CTL	0x11
+			MX7D_PAD_ENET1_RGMII_RXC__ENET1_RGMII_RXC	0x11
+			MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0	0x11
+			MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1	0x11
+			MX7D_PAD_ENET1_RGMII_RD2__ENET1_RGMII_RD2	0x11
+			MX7D_PAD_ENET1_RGMII_RD3__ENET1_RGMII_RD3	0x11
+			MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL	0x11
 		>;
 	};
 
 	pinctrl_enet2: enet2grp {
 		fsl,pins = <
-			MX7D_PAD_EPDC_GDSP__ENET2_RGMII_TXC		0x1
-			MX7D_PAD_EPDC_SDCE2__ENET2_RGMII_TD0		0x1
-			MX7D_PAD_EPDC_SDCE3__ENET2_RGMII_TD1		0x1
-			MX7D_PAD_EPDC_GDCLK__ENET2_RGMII_TD2		0x1
-			MX7D_PAD_EPDC_GDOE__ENET2_RGMII_TD3		0x1
-			MX7D_PAD_EPDC_GDRL__ENET2_RGMII_TX_CTL		0x1
-			MX7D_PAD_EPDC_SDCE1__ENET2_RGMII_RXC		0x1
-			MX7D_PAD_EPDC_SDCLK__ENET2_RGMII_RD0		0x1
-			MX7D_PAD_EPDC_SDLE__ENET2_RGMII_RD1		0x1
-			MX7D_PAD_EPDC_SDOE__ENET2_RGMII_RD2		0x1
-			MX7D_PAD_EPDC_SDSHR__ENET2_RGMII_RD3		0x1
-			MX7D_PAD_EPDC_SDCE0__ENET2_RGMII_RX_CTL		0x1
+			MX7D_PAD_EPDC_GDSP__ENET2_RGMII_TXC		0x11
+			MX7D_PAD_EPDC_SDCE2__ENET2_RGMII_TD0		0x11
+			MX7D_PAD_EPDC_SDCE3__ENET2_RGMII_TD1		0x11
+			MX7D_PAD_EPDC_GDCLK__ENET2_RGMII_TD2		0x11
+			MX7D_PAD_EPDC_GDOE__ENET2_RGMII_TD3		0x11
+			MX7D_PAD_EPDC_GDRL__ENET2_RGMII_TX_CTL		0x11
+			MX7D_PAD_EPDC_SDCE1__ENET2_RGMII_RXC		0x11
+			MX7D_PAD_EPDC_SDCLK__ENET2_RGMII_RD0		0x11
+			MX7D_PAD_EPDC_SDLE__ENET2_RGMII_RD1		0x11
+			MX7D_PAD_EPDC_SDOE__ENET2_RGMII_RD2		0x11
+			MX7D_PAD_EPDC_SDSHR__ENET2_RGMII_RD3		0x11
+			MX7D_PAD_EPDC_SDCE0__ENET2_RGMII_RX_CTL		0x11
 		>;
 	};
 
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index 368e219..825f6ea 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -146,7 +146,7 @@
 		};
 
 		esdhc: esdhc@1560000 {
-			compatible = "fsl,esdhc";
+			compatible = "fsl,ls1021a-esdhc", "fsl,esdhc";
 			reg = <0x0 0x1560000 0x0 0x10000>;
 			interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
 			clock-frequency = <0>;
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 6003b29..4d448f1 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -510,7 +510,7 @@
 	tlv320aic3x: tlv320aic3x@18 {
 		compatible = "ti,tlv320aic3x";
 		reg = <0x18>;
-		reset-gpios = <&gpio2 28 GPIO_ACTIVE_LOW>; /* 60 */
+		gpio-reset = <&gpio2 28 GPIO_ACTIVE_HIGH>; /* 60 */
 		ai3x-gpio-func = <
 			0 /* AIC3X_GPIO1_FUNC_DISABLED */
 			5 /* AIC3X_GPIO2_FUNC_DIGITAL_MIC_INPUT */
@@ -527,7 +527,7 @@
 	tlv320aic3x_aux: tlv320aic3x@19 {
 		compatible = "ti,tlv320aic3x";
 		reg = <0x19>;
-		reset-gpios = <&gpio2 28 GPIO_ACTIVE_LOW>; /* 60 */
+		gpio-reset = <&gpio2 28 GPIO_ACTIVE_HIGH>; /* 60 */
 
 		AVDD-supply = <&vmmc2>;
 		DRVDD-supply = <&vmmc2>;
diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi
index b7a24af..4b7d972 100644
--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
@@ -154,10 +154,10 @@
 
 		i2c_0: i2c@78b7000 {
 			compatible = "qcom,i2c-qup-v2.2.1";
-			reg = <0x78b7000 0x6000>;
+			reg = <0x78b7000 0x600>;
 			interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&gcc GCC_BLSP1_AHB_CLK>,
-				 <&gcc GCC_BLSP1_QUP2_I2C_APPS_CLK>;
+				 <&gcc GCC_BLSP1_QUP1_I2C_APPS_CLK>;
 			clock-names = "iface", "core";
 			#address-cells = <1>;
 			#size-cells = <0>;
diff --git a/arch/arm/boot/dts/qcom/Makefile b/arch/arm/boot/dts/qcom/Makefile
index 4642d0d..824eefa 100644
--- a/arch/arm/boot/dts/qcom/Makefile
+++ b/arch/arm/boot/dts/qcom/Makefile
@@ -3,6 +3,7 @@
 	sdxpoorwills-cdp.dtb \
 	sdxpoorwills-mtp.dtb \
 	sdxpoorwills-atp.dtb \
+	sdxpoorwills-ttp.dtb \
 	sdxpoorwills-cdp-256.dtb \
 	sdxpoorwills-mtp-256.dtb \
 	sdxpoorwills-dualwifi-cdp.dtb \
@@ -21,14 +22,18 @@
 	mdm9607-ttp.dtb
 
 targets += dtbs
-targets += $(addprefix ../, $(dtb-y))
-
-$(obj)/../%.dtb: $(src)/%.dts FORCE
-	$(call if_changed_dep,dtc)
 
 include $(srctree)/arch/arm64/boot/dts/qcom/Makefile
-$(obj)/../%.dtb: $(src)/../../../../arm64/boot/dts/qcom/%.dts FORCE
+$(obj)/%.dtb: $(src)/../../../../arm64/boot/dts/qcom/%.dts FORCE
 	$(call if_changed_dep,dtc)
 
-dtbs: $(addprefix $(obj)/../,$(dtb-y))
+ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y)
+$(obj)/%.dtbo:$(src)/../../../../arm64/boot/dts/qcom/%.dts FORCE
+	$(call if_changed_dep,dtc)
+	$(call if_changed,dtbo_verify)
+
+dtbs: $(addprefix $(obj)/,$(dtb-y)) $(addprefix $(obj)/,$(dtbo-y))
+else
+dtbs: $(addprefix $(obj)/,$(dtb-y))
+endif
 clean-files := *.dtb
diff --git a/arch/arm/boot/dts/qcom/msm-arm-smmu-sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/msm-arm-smmu-sdxpoorwills.dtsi
index 580df55..ce8ad83 100644
--- a/arch/arm/boot/dts/qcom/msm-arm-smmu-sdxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-arm-smmu-sdxpoorwills.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -100,5 +100,6 @@
 		 * the apps_smmu device.
 		 */
 		iommus = <&apps_smmu 0x1a0 0x0>;
+		status = "disabled";
 	};
 };
diff --git a/arch/arm/boot/dts/qcom/sdx-audio-lpass.dtsi b/arch/arm/boot/dts/qcom/sdx-audio-lpass.dtsi
index 6a3210c..d891a4b 100644
--- a/arch/arm/boot/dts/qcom/sdx-audio-lpass.dtsi
+++ b/arch/arm/boot/dts/qcom/sdx-audio-lpass.dtsi
@@ -71,6 +71,7 @@
 
 	audio_apr: qcom,msm-audio-apr {
 		compatible = "qcom,msm-audio-apr";
+		qcom,subsys-name = "apr_modem";
 	};
 
 	host_pcm: qcom,msm-voice-host-pcm {
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-cdp.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-cdp.dts
index 6909ef5..6c6e640 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-cdp.dts
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-cdp.dts
@@ -28,3 +28,7 @@
 &cnss_sdio {
 	status = "okay";
 };
+
+&blsp1_uart2b_hs {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-cdp.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-cdp.dtsi
index d447724..16f933f 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-cdp.dtsi
@@ -36,3 +36,11 @@
 	/delete-property/ qcom,devfreq,freq-table;
 	/delete-property/ cd-gpios;
 };
+
+&soc {
+	bluetooth: bt_qca6174 {
+		compatible = "qca,qca6174";
+		qca,bt-reset-gpio = <&pmxpoorwills_gpios 4 0>; /* BT_EN */
+		qca,bt-vdd-pa-supply = <&vreg_wlan>;
+	};
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-mtp.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-mtp.dts
index 5fd7042..243fdaf 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-mtp.dts
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-mtp.dts
@@ -28,3 +28,7 @@
 &cnss_sdio {
 	status = "okay";
 };
+
+&blsp1_uart2b_hs {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-mtp.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-mtp.dtsi
index b60225a..252fb04 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-mtp.dtsi
@@ -36,3 +36,11 @@
 	/delete-property/ qcom,devfreq,freq-table;
 	/delete-property/ cd-gpios;
 };
+
+&soc {
+	bluetooth: bt_qca6174 {
+		compatible = "qca,qca6174";
+		qca,bt-reset-gpio = <&pmxpoorwills_gpios 4 0>; /* BT_EN */
+		qca,bt-vdd-pa-supply = <&vreg_wlan>;
+	};
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-ion.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-ion.dtsi
index a09b149..6957063 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-ion.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-ion.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -26,5 +26,17 @@
 			memory-region = <&audio_mem>;
 			qcom,ion-heap-type = "DMA";
 		};
+
+		qcom,ion-heap@27 { /* QSEECOM HEAP */
+			reg = <27>;
+			memory-region = <&qseecom_mem>;
+			qcom,ion-heap-type = "DMA";
+		};
+
+		qcom,ion-heap@19 { /* QSEECOM TA HEAP */
+			reg = <19>;
+			memory-region = <&qseecom_ta_mem>;
+			qcom,ion-heap-type = "DMA";
+		};
 	};
 };
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp-256.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp-256.dtsi
index dafd0b8..1428f37 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp-256.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp-256.dtsi
@@ -18,7 +18,8 @@
 
 &usb {
 	status = "okay";
-	extcon = <&vbus_detect>;
+	qcom,connector-type-uAB;
+	extcon = <0>, <0>, <0>, <&vbus_detect>;
 };
 
 &pcie_ep {
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp.dtsi
index 1428f37..81877b7 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp.dtsi
@@ -16,12 +16,6 @@
 	status = "okay";
 };
 
-&usb {
-	status = "okay";
-	qcom,connector-type-uAB;
-	extcon = <0>, <0>, <0>, <&vbus_detect>;
-};
-
 &pcie_ep {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp-256.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp-256.dtsi
index 6c5f3c3..d7c0d13 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp-256.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp-256.dtsi
@@ -18,7 +18,8 @@
 
 &usb {
 	status = "okay";
-	extcon = <&vbus_detect>;
+	qcom,connector-type-uAB;
+	extcon = <0>, <0>, <0>, <&vbus_detect>;
 };
 
 &pcie_ep {
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp.dtsi
index d7c0d13..6ceac6e 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp.dtsi
@@ -16,12 +16,6 @@
 	status = "okay";
 };
 
-&usb {
-	status = "okay";
-	qcom,connector-type-uAB;
-	extcon = <0>, <0>, <0>, <&vbus_detect>;
-};
-
 &pcie_ep {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pm.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pm.dtsi
index 77fc533..505f242 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pm.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pm.dtsi
@@ -98,4 +98,9 @@
 		reg = <0xC300000 0x1000>, <0xC370004 0x4>;
 		reg-names = "phys_addr_base", "offset_addr";
 	};
+
+	qcom,rpmh-master-stats@b211200 {
+		compatible = "qcom,rpmh-master-stats-v1";
+		reg = <0xb211200 0x60>;
+	};
 };
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-ttp.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-ttp.dts
new file mode 100644
index 0000000..775be96
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-ttp.dts
@@ -0,0 +1,22 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sdxpoorwills-ttp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDXPOORWILLS TTP";
+	compatible = "qcom,sdxpoorwills-ttp",
+		"qcom,sdxpoorwills", "qcom,ttp";
+	qcom,board-id = <30 0x100>;
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-ttp.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-ttp.dtsi
new file mode 100644
index 0000000..fa8f3a4
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-ttp.dtsi
@@ -0,0 +1,23 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdxpoorwills-mtp.dtsi"
+
+&vbus_detect {
+	status = "okay";
+};
+
+&usb {
+	status = "okay";
+	qcom,connector-type-uAB;
+	extcon = <0>, <0>, <0>, <&vbus_detect>;
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi
index 3bccd8a..9f74227 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi
@@ -76,6 +76,7 @@
 			snps,disable-clk-gating;
 			snps,has-lpm-erratum;
 			snps,hird-threshold = /bits/ 8 <0x10>;
+			snps,xhci-imod-value = <4000>;
 		};
 
 		qcom,usbbam@a704000 {
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
index 68a13ee..9584c15 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
@@ -18,6 +18,9 @@
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/regulator/qcom,rpmh-regulator.h>
 #include <dt-bindings/clock/qcom,aop-qmp.h>
+#include <dt-bindings/msm/msm-bus-ids.h>
+
+#define MHZ_TO_MBPS(mhz, w) ((mhz * 1000000 * w) / (1024 * 1024))
 
 / {
 	model = "Qualcomm Technologies, Inc. SDX POORWILLS";
@@ -75,7 +78,21 @@
 		dump_mem: mem_dump_region {
 			compatible = "shared-dma-pool";
 			reusable;
-			size = <0 0x2400000>;
+			size = <0x400000>;
+		};
+
+		qseecom_mem: qseecom_region@0 {
+			compatible = "shared-dma-pool";
+			reusable;
+			alignment = <0x400000>;
+			size = <0x1400000>;
+		};
+
+		qseecom_ta_mem: qseecom_ta_region@0 {
+			compatible = "shared-dma-pool";
+			reusable;
+			alignment = <0x400000>;
+			size = <0x1000000>;
 		};
 	};
 
@@ -218,6 +235,31 @@
 				< 1497600 >;
 	};
 
+	cpubw: qcom,cpubw {
+		compatible = "qcom,devbw";
+		governor = "cpufreq";
+		qcom,src-dst-ports = <1 512>;
+		qcom,active-only;
+		qcom,bw-tbl =
+			< MHZ_TO_MBPS(200, 2) >, /*   381 MB/s */
+			< MHZ_TO_MBPS(470, 2) >, /*   896 MB/s */
+			< MHZ_TO_MBPS(547, 2) >, /*  1043 MB/s */
+			< MHZ_TO_MBPS(691, 2) >, /*  1317 MB/s */
+			< MHZ_TO_MBPS(806, 2) >, /*  1537 MB/s */
+			< MHZ_TO_MBPS(940, 2) >, /*  1792 MB/s */
+			< MHZ_TO_MBPS(1383, 2) >; /* 2637 MB/s */
+	};
+
+	devfreq_compute: qcom,devfreq-compute {
+		compatible = "qcom,arm-cpu-mon";
+		qcom,cpulist = <&CPU0>;
+		qcom,target-dev = <&cpubw>;
+		qcom,core-dev-table =
+				<  153600 MHZ_TO_MBPS(200, 2) >,
+				<  576000 MHZ_TO_MBPS(691, 2) >,
+				< 1497600 MHZ_TO_MBPS(1383, 2)>;
+	};
+
 	clock_gcc: qcom,gcc@100000 {
 		compatible = "qcom,gcc-sdxpoorwills", "syscon";
 		reg = <0x100000 0x1f0000>;
@@ -893,38 +935,49 @@
 		qcom,mhi-event-ring-id-limits = <9 10>; /* start and end */
 		qcom,modem-cfg-emb-pipe-flt;
 		qcom,use-ipa-pm;
+		qcom,wlan-ce-db-over-pcie;
 		qcom,arm-smmu;
 		qcom,smmu-fast-map;
 		qcom,bandwidth-vote-for-ipa;
 		qcom,msm-bus,name = "ipa";
 		qcom,msm-bus,num-cases = <5>;
-		qcom,msm-bus,num-paths = <4>;
+		qcom,msm-bus,num-paths = <5>;
 		qcom,msm-bus,vectors-KBps =
 		/* No vote */
-			<90 512 0 0>,
-			<90 585 0 0>,
-			<1 676 0 0>,
-			<143 777 0 0>,
+		<MSM_BUS_MASTER_IPA MSM_BUS_SLAVE_SNOC_MEM_NOC_GC 0 0>,
+		<MSM_BUS_MASTER_SNOC_GC_MEM_NOC MSM_BUS_SLAVE_EBI_CH0 0 0>,
+		<MSM_BUS_MASTER_IPA MSM_BUS_SLAVE_OCIMEM 0 0>,
+		<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_IPA_CFG 0 0>,
+		<MSM_BUS_MASTER_IPA_CORE MSM_BUS_SLAVE_IPA_CORE 0 0>,
+
 		/* SVS2 */
-			<90 512 3616000 7232000>,
-			<90 585 300000 600000>,
-			<1 676 90000 180000>, /*gcc_config_noc_clk_src */
-			<143 777 0 120>, /* IB defined for IPA2X_clk in MHz*/
+		<MSM_BUS_MASTER_IPA MSM_BUS_SLAVE_SNOC_MEM_NOC_GC 240000 480000>,
+		<MSM_BUS_MASTER_SNOC_GC_MEM_NOC MSM_BUS_SLAVE_EBI_CH0 900000 1800000>,
+		<MSM_BUS_MASTER_IPA MSM_BUS_SLAVE_OCIMEM 300000 600000>,
+		<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_IPA_CFG 90000 179000>,
+		<MSM_BUS_MASTER_IPA_CORE MSM_BUS_SLAVE_IPA_CORE 0 120>,
+
 		/* SVS */
-			<90 512 6640000 13280000>,
-			<90 585 400000 800000>,
-			<1 676 100000 200000>,
-			<143 777 0 250>, /* IB defined for IPA2X_clk in MHz*/
+		<MSM_BUS_MASTER_IPA MSM_BUS_SLAVE_SNOC_MEM_NOC_GC 360000 720000>,
+		<MSM_BUS_MASTER_SNOC_GC_MEM_NOC MSM_BUS_SLAVE_EBI_CH0 1530000 3060000>,
+		<MSM_BUS_MASTER_IPA MSM_BUS_SLAVE_OCIMEM 400000 800000>,
+		<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_IPA_CFG 100000 199000>,
+		<MSM_BUS_MASTER_IPA_CORE MSM_BUS_SLAVE_IPA_CORE 0 250>,
+
 		/* NOMINAL */
-			<90 512 10400000 20800000>,
-			<90 585 800000 1600000>,
-			<1 676 200000 400000>,
-			<143 777 0 440>, /* IB defined for IPA2X_clk in MHz*/
+		<MSM_BUS_MASTER_IPA MSM_BUS_SLAVE_SNOC_MEM_NOC_GC 780000 1560000>,
+		<MSM_BUS_MASTER_SNOC_GC_MEM_NOC MSM_BUS_SLAVE_EBI_CH0 2592000 5184000>,
+		<MSM_BUS_MASTER_IPA MSM_BUS_SLAVE_OCIMEM 800000 1600000>,
+		<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_IPA_CFG 200000 399000>,
+		<MSM_BUS_MASTER_IPA_CORE MSM_BUS_SLAVE_IPA_CORE 0 440>,
+
 		/* TURBO */
-			<90 512 10400000 20800000>,
-			<90 585 960000 1920000>,
-			<1 676 266000 532000>,
-			<143 777 0 500>; /* IB defined for IPA clk in MHz*/
+		<MSM_BUS_MASTER_IPA MSM_BUS_SLAVE_SNOC_MEM_NOC_GC 960000 1920000>,
+		<MSM_BUS_MASTER_SNOC_GC_MEM_NOC MSM_BUS_SLAVE_EBI_CH0 2592000 5184000>,
+		<MSM_BUS_MASTER_IPA MSM_BUS_SLAVE_OCIMEM 960000 1920000>,
+		<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_IPA_CFG 266000 531000>,
+		<MSM_BUS_MASTER_IPA_CORE MSM_BUS_SLAVE_IPA_CORE 0 500>;
+
 		qcom,bus-vector-names = "MIN", "SVS2", "SVS", "NOMINAL",
 		"TURBO";
 		qcom,throughput-threshold = <310 600 1000>;
@@ -1113,6 +1166,33 @@
 		clock-names = "iface_clk";
 	};
 
+	qcom_seecom: qseecom@90000000{
+		compatible = "qcom,qseecom";
+		reg = <0x90000000 0x600000>;
+		reg-names = "secapp-region";
+		qcom,hlos-num-ce-hw-instances = <1>;
+		qcom,hlos-ce-hw-instance = <0>;
+		qcom,qsee-ce-hw-instance = <0>;
+		qcom,no-clock-support;
+		qcom,msm-bus,name = "qseecom-noc";
+		qcom,msm-bus,num-cases = <4>;
+		qcom,msm-bus,num-paths = <1>;
+		clocks = <&clock_gcc GCC_CE1_CLK>,
+			<&clock_gcc GCC_CE1_CLK>,
+			<&clock_gcc GCC_CE1_AHB_CLK>,
+			<&clock_gcc GCC_CE1_AXI_CLK>;
+		qcom,msm-bus,vectors-KBps =
+			<125 512 0 0>,
+			<125 512 20000 40000>,
+			<125 512 30000 80000>,
+			<125 512 40000 100000>;
+		clock-names = "core_clk_src", "core_clk",
+			"iface_clk", "bus_clk";
+		qcom,ce-opp-freq = <171430000>;
+		qcom,qsee-reentrancy-support = <2>;
+		status = "disabled";
+	};
+
 	qcom_cedev: qcedev@1de0000 {
 		compatible = "qcom,qcedev";
 		reg = <0x1de0000 0x20000>,
@@ -1258,13 +1338,14 @@
 			"rx-ch0-intr", "rx-ch1-intr",
 			"rx-ch2-intr", "rx-ch3-intr";
 		qcom,msm-bus,name = "emac";
-		qcom,msm-bus,num-cases = <3>;
+		qcom,msm-bus,num-cases = <4>;
 		qcom,msm-bus,num-paths = <2>;
 		qcom,msm-bus,vectors-KBps =
+			<98 512 0 0>, <1 781 0 0>,  /* No vote */
 			<98 512 1250 0>, <1 781 0 40000>,  /* 10Mbps vote */
 			<98 512 12500 0>, <1 781 0 40000>,  /* 100Mbps vote */
 			<98 512 125000 0>, <1 781 0 40000>; /* 1000Mbps vote */
-		qcom,bus-vector-names = "10", "100", "1000";
+		qcom,bus-vector-names = "0", "10", "100", "1000";
 		clocks = <&clock_gcc GCC_ETH_AXI_CLK>,
 			<&clock_gcc GCC_ETH_PTP_CLK>,
 			<&clock_gcc GCC_ETH_RGMII_CLK>,
diff --git a/arch/arm/boot/dts/r8a7740-armadillo800eva.dts b/arch/arm/boot/dts/r8a7740-armadillo800eva.dts
index 7885075..1788e18 100644
--- a/arch/arm/boot/dts/r8a7740-armadillo800eva.dts
+++ b/arch/arm/boot/dts/r8a7740-armadillo800eva.dts
@@ -266,7 +266,9 @@
 	lcd0_pins: lcd0 {
 		groups = "lcd0_data24_0", "lcd0_lclk_1", "lcd0_sync";
 		function = "lcd0";
+	};
 
+	lcd0_mux {
 		/* DBGMD/LCDC0/FSIA MUX */
 		gpio-hog;
 		gpios = <176 0>;
diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts
index 6761d11..db0239c 100644
--- a/arch/arm/boot/dts/r8a7791-porter.dts
+++ b/arch/arm/boot/dts/r8a7791-porter.dts
@@ -428,7 +428,7 @@
 		      "dclkin.0", "dclkin.1";
 
 	ports {
-		port@1 {
+		port@0 {
 			endpoint {
 				remote-endpoint = <&adv7511_in>;
 			};
diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi
index 9e6bf0e..2679dc8 100644
--- a/arch/arm/boot/dts/rk322x.dtsi
+++ b/arch/arm/boot/dts/rk322x.dtsi
@@ -617,9 +617,9 @@
 						<0 12 RK_FUNC_1 &pcfg_pull_none>,
 						<0 13 RK_FUNC_1 &pcfg_pull_none>,
 						<0 14 RK_FUNC_1 &pcfg_pull_none>,
-						<1 2 RK_FUNC_1 &pcfg_pull_none>,
-						<1 4 RK_FUNC_1 &pcfg_pull_none>,
-						<1 5 RK_FUNC_1 &pcfg_pull_none>;
+						<1 2 RK_FUNC_2 &pcfg_pull_none>,
+						<1 4 RK_FUNC_2 &pcfg_pull_none>,
+						<1 5 RK_FUNC_2 &pcfg_pull_none>;
 			};
 		};
 
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index 65e725f..de0e189 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -1362,7 +1362,7 @@
 			pinctrl@fc06a000 {
 				#address-cells = <1>;
 				#size-cells = <1>;
-				compatible = "atmel,at91sam9x5-pinctrl", "atmel,at91rm9200-pinctrl", "simple-bus";
+				compatible = "atmel,sama5d3-pinctrl", "atmel,at91sam9x5-pinctrl", "simple-bus";
 				ranges = <0xfc068000 0xfc068000 0x100
 					  0xfc06a000 0xfc06a000 0x4000>;
 				/* WARNING: revisit as pin spec has changed */
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index 9f48141..f0702d8 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -759,7 +759,7 @@
 		timer@fffec600 {
 			compatible = "arm,cortex-a9-twd-timer";
 			reg = <0xfffec600 0x100>;
-			interrupts = <1 13 0xf04>;
+			interrupts = <1 13 0xf01>;
 			clocks = <&mpu_periph_clk>;
 		};
 
diff --git a/arch/arm/configs/msm8909-perf_defconfig b/arch/arm/configs/msm8909-perf_defconfig
index 9f5001f..7467eb7 100644
--- a/arch/arm/configs/msm8909-perf_defconfig
+++ b/arch/arm/configs/msm8909-perf_defconfig
@@ -9,8 +9,6 @@
 CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_RCU_EXPERT=y
 CONFIG_RCU_FAST_NO_HZ=y
-CONFIG_RCU_NOCB_CPU=y
-CONFIG_RCU_NOCB_CPU_ALL=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
@@ -26,18 +24,18 @@
 CONFIG_SCHED_TUNE=y
 CONFIG_DEFAULT_USE_ENERGY_AWARE=y
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_RD_BZIP2 is not set
-# CONFIG_RD_LZMA is not set
 # CONFIG_RD_XZ is not set
 # CONFIG_RD_LZO is not set
 # CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_BPF_SYSCALL=y
 # CONFIG_MEMBARRIER is not set
 CONFIG_EMBEDDED=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
-CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_OPROFILE=m
+CONFIG_CC_STACKPROTECTOR_REGULAR=y
 CONFIG_ARCH_MMAP_RND_BITS=16
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -53,17 +51,17 @@
 CONFIG_SCHED_MC=y
 CONFIG_PREEMPT=y
 CONFIG_AEABI=y
-CONFIG_HIGHMEM=y
+CONFIG_ARM_MODULE_PLTS=y
 CONFIG_CMA=y
 CONFIG_ZSMALLOC=y
 CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_PROCESS_RECLAIM=y
 CONFIG_SECCOMP=y
 CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
 CONFIG_CPU_IDLE=y
 CONFIG_VFP=y
 CONFIG_NEON=y
-CONFIG_KERNEL_MODE_NEON=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_PM_AUTOSLEEP=y
 CONFIG_PM_WAKELOCKS=y
@@ -84,7 +82,6 @@
 CONFIG_IP_PNP_DHCP=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
-CONFIG_INET_IPCOMP=y
 CONFIG_INET_DIAG_DESTROY=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
@@ -114,14 +111,12 @@
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
 CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_CT=y
 CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
 CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
-CONFIG_NETFILTER_XT_TARGET_LOG=y
 CONFIG_NETFILTER_XT_TARGET_MARK=y
 CONFIG_NETFILTER_XT_TARGET_NFLOG=y
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
-CONFIG_NETFILTER_XT_TARGET_TEE=y
 CONFIG_NETFILTER_XT_TARGET_TPROXY=y
 CONFIG_NETFILTER_XT_TARGET_TRACE=y
 CONFIG_NETFILTER_XT_TARGET_SECMARK=y
@@ -145,6 +140,7 @@
 CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
 CONFIG_NETFILTER_XT_MATCH_SOCKET=y
 CONFIG_NETFILTER_XT_MATCH_STATE=y
 CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
@@ -152,6 +148,8 @@
 CONFIG_NETFILTER_XT_MATCH_TIME=y
 CONFIG_NETFILTER_XT_MATCH_U32=y
 CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_NF_DUP_IPV4=y
+CONFIG_NF_LOG_IPV4=y
 CONFIG_IP_NF_IPTABLES=y
 CONFIG_IP_NF_MATCH_AH=y
 CONFIG_IP_NF_MATCH_ECN=y
@@ -170,6 +168,8 @@
 CONFIG_IP_NF_ARPFILTER=y
 CONFIG_IP_NF_ARP_MANGLE=y
 CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_NF_DUP_IPV6=y
+CONFIG_NF_LOG_IPV6=y
 CONFIG_IP6_NF_IPTABLES=y
 CONFIG_IP6_NF_MATCH_RPFILTER=y
 CONFIG_IP6_NF_FILTER=y
@@ -186,11 +186,8 @@
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_HTB=y
 CONFIG_NET_SCH_PRIO=y
-CONFIG_NET_SCH_MULTIQ=y
-CONFIG_NET_SCH_INGRESS=y
 CONFIG_NET_CLS_FW=y
 CONFIG_NET_CLS_U32=y
-CONFIG_CLS_U32_MARK=y
 CONFIG_NET_CLS_FLOW=y
 CONFIG_NET_EMATCH=y
 CONFIG_NET_EMATCH_CMP=y
@@ -211,7 +208,6 @@
 CONFIG_CFG80211=y
 CONFIG_CFG80211_INTERNAL_REGDB=y
 CONFIG_RFKILL=y
-CONFIG_NFC_NQ=y
 CONFIG_IPC_ROUTER=y
 CONFIG_IPC_ROUTER_SECURITY=y
 CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
@@ -222,27 +218,17 @@
 CONFIG_BLK_DEV_RAM_SIZE=8192
 CONFIG_HDCP_QSEECOM=y
 CONFIG_QSEECOM=y
-CONFIG_UID_SYS_STATS=y
 CONFIG_MEMORY_STATE_TIME=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_SG=y
 CONFIG_CHR_DEV_SCH=y
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_LOGGING=y
 CONFIG_SCSI_SCAN_ASYNC=y
-CONFIG_SCSI_UFSHCD=y
-CONFIG_SCSI_UFSHCD_PLATFORM=y
-CONFIG_SCSI_UFS_QCOM=y
-CONFIG_SCSI_UFS_QCOM_ICE=y
-CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
-CONFIG_DM_DEBUG=y
 CONFIG_DM_CRYPT=y
 CONFIG_DM_UEVENT=y
 CONFIG_DM_VERITY=y
-CONFIG_DM_VERITY_FEC=y
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=y
 CONFIG_TUN=y
@@ -263,20 +249,19 @@
 CONFIG_CNSS_SDIO=y
 CONFIG_CLD_HL_SDIO_CORE=y
 CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_EVBUG=m
 CONFIG_KEYBOARD_GPIO=y
 CONFIG_INPUT_JOYSTICK=y
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_QPNP_POWER_ON=y
 CONFIG_INPUT_UINPUT=y
-CONFIG_SERIAL_MSM=y
-CONFIG_SERIAL_MSM_CONSOLE=y
 CONFIG_SERIAL_MSM_SMD=y
+CONFIG_DIAG_CHAR=y
+CONFIG_DIAG_USES_SMD=y
 CONFIG_HW_RANDOM=y
-CONFIG_HW_RANDOM_MSM_LEGACY=y
 CONFIG_MSM_SMD_PKT=y
 CONFIG_MSM_ADSPRPC=y
-CONFIG_MSM_RDBG=y
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_MSM_V2=y
 CONFIG_SPI=y
@@ -297,11 +282,17 @@
 CONFIG_QPNP_LINEAR_CHARGER=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
 CONFIG_THERMAL_QPNP=y
 CONFIG_THERMAL_QPNP_ADC_TM=y
 CONFIG_THERMAL_TSENS=y
 CONFIG_MSM_BCL_PERIPHERAL_CTL=y
-CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
 CONFIG_MFD_QCOM_RPM=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR=y
@@ -321,11 +312,19 @@
 CONFIG_MEDIA_CONTROLLER=y
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
 CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_SOC_CAMERA=y
+CONFIG_MSMB_CAMERA=y
+CONFIG_MSM_CAMERA_SENSOR=y
+CONFIG_MSM_CSI30_HEADER=y
+CONFIG_MSM_CSIPHY=y
+CONFIG_MSM_CSID=y
+CONFIG_MSM_EEPROM=y
+CONFIG_MSM_ISP_V1=y
+CONFIG_MSM_ISPIF=y
+CONFIG_QCOM_KGSL=y
 CONFIG_FB=y
-CONFIG_FB_VIRTUAL=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
-CONFIG_LOGO=y
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_DYNAMIC_MINORS=y
@@ -345,11 +344,30 @@
 CONFIG_HID_MICROSOFT=y
 CONFIG_HID_MONTEREY=y
 CONFIG_HID_MULTITOUCH=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_MSM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_STORAGE_REALTEK=y
+CONFIG_USB_STORAGE_DATAFAB=y
+CONFIG_USB_STORAGE_FREECOM=y
+CONFIG_USB_STORAGE_ISD200=y
+CONFIG_USB_STORAGE_USBAT=y
+CONFIG_USB_STORAGE_SDDR09=y
+CONFIG_USB_STORAGE_SDDR55=y
+CONFIG_USB_STORAGE_JUMPSHOT=y
+CONFIG_USB_STORAGE_ALAUDA=y
+CONFIG_USB_STORAGE_ONETOUCH=y
+CONFIG_USB_STORAGE_KARMA=y
+CONFIG_USB_STORAGE_CYPRESS_ATACB=y
+CONFIG_USB_STORAGE_ENE_UB6250=y
+CONFIG_USB_UAS=y
 CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_GADGET=y
 CONFIG_NOP_USB_XCEIV=y
 CONFIG_DUAL_ROLE_USB_INTF=y
-CONFIG_USB_MSM_SSPHY_QMP=y
-CONFIG_MSM_QUSB_PHY=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_GADGET_DEBUG_FILES=y
 CONFIG_USB_GADGET_DEBUG_FS=y
@@ -357,6 +375,7 @@
 CONFIG_USB_CI13XXX_MSM=y
 CONFIG_USB_CONFIGFS=y
 CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
 CONFIG_USB_CONFIGFS_F_FS=y
 CONFIG_USB_CONFIGFS_UEVENT=y
 CONFIG_USB_CONFIGFS_F_DIAG=y
@@ -368,12 +387,10 @@
 CONFIG_MMC_CLKGATE=y
 CONFIG_MMC_BLOCK_MINORS=32
 CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
-CONFIG_MMC_TEST=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_MSM=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_CLASS_FLASH=y
 CONFIG_LEDS_QPNP=y
 CONFIG_LEDS_QPNP_VIBRATOR=y
 CONFIG_LEDS_TRIGGERS=y
@@ -396,7 +413,7 @@
 CONFIG_REMOTE_SPINLOCK_MSM=y
 CONFIG_MAILBOX=y
 CONFIG_ARM_SMMU=y
-CONFIG_QCOM_PM=y
+CONFIG_QCOM_LAZY_MAPPING=y
 CONFIG_MSM_SPM=y
 CONFIG_MSM_L2_SPM=y
 CONFIG_MSM_BOOT_STATS=y
@@ -410,73 +427,48 @@
 CONFIG_MSM_SMEM=y
 CONFIG_MSM_SMD=y
 CONFIG_MSM_SMD_DEBUG=y
-CONFIG_MSM_GLINK=y
-CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
-CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
-CONFIG_MSM_GLINK_SPI_XPRT=y
+CONFIG_MSM_TZ_SMMU=y
 CONFIG_TRACER_PKT=y
 CONFIG_MSM_SMP2P=y
 CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
-CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
 CONFIG_MSM_QMI_INTERFACE=y
-CONFIG_MSM_GLINK_PKT=y
 CONFIG_MSM_SUBSYSTEM_RESTART=y
 CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
 CONFIG_MSM_EVENT_TIMER=y
-CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_MSM_BAM_DMUX=y
 CONFIG_CNSS_CRYPTO=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
 CONFIG_QTI_MPM=y
 CONFIG_ANDROID=y
 CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_STM=y
 CONFIG_SENSORS_SSC=y
 CONFIG_MSM_TZ_LOG=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_FUSE_FS=y
-CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
-CONFIG_ECRYPT_FS=y
-CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
 CONFIG_FRAME_WARN=2048
-CONFIG_PAGE_OWNER=y
-CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_LOCKUP_DETECTOR=y
-# CONFIG_DETECT_HUNG_TASK is not set
+CONFIG_DEBUG_FS=y
 CONFIG_WQ_WATCHDOG=y
 CONFIG_PANIC_TIMEOUT=5
 CONFIG_PANIC_ON_SCHED_BUG=y
 CONFIG_PANIC_ON_RT_THROTTLING=y
-CONFIG_SCHEDSTATS=y
-CONFIG_SCHED_STACK_END_CHECK=y
 # CONFIG_DEBUG_PREEMPT is not set
-# CONFIG_FTRACE is not set
-CONFIG_LKDTM=y
-CONFIG_PANIC_ON_DATA_CORRUPTION=y
-# CONFIG_ARM_UNWIND is not set
-CONFIG_PID_IN_CONTEXTIDR=y
+CONFIG_STACKTRACE=y
 CONFIG_DEBUG_SET_MODULE_RONX=y
-CONFIG_CORESIGHT=y
-CONFIG_CORESIGHT_REMOTE_ETM=y
-CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
-CONFIG_CORESIGHT_STM=y
-CONFIG_CORESIGHT_TPDA=y
-CONFIG_CORESIGHT_TPDM=y
-CONFIG_CORESIGHT_CTI=y
-CONFIG_CORESIGHT_EVENT=y
-CONFIG_CORESIGHT_HWEVENT=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_LSM_MMAP_MIN_ADDR=4096
@@ -485,19 +477,14 @@
 CONFIG_SECURITY_SMACK=y
 CONFIG_SECURITY_APPARMOR=y
 CONFIG_DEFAULT_SECURITY_DAC=y
-CONFIG_CRYPTO_CTR=y
-CONFIG_CRYPTO_XTS=y
 CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_CRC32=y
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_TWOFISH=y
 CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCE=y
 CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
 CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
 CONFIG_CRYPTO_DEV_QCOM_ICE=y
-CONFIG_ARM_CRYPTO=y
-CONFIG_CRYPTO_SHA1_ARM_NEON=y
-CONFIG_CRYPTO_SHA2_ARM_CE=y
-CONFIG_CRYPTO_AES_ARM_BS=y
-CONFIG_CRYPTO_AES_ARM_CE=y
-CONFIG_XZ_DEC=y
 CONFIG_QMI_ENCDEC=y
diff --git a/arch/arm/configs/msm8909_defconfig b/arch/arm/configs/msm8909_defconfig
index c8087ad..d4b5cca 100644
--- a/arch/arm/configs/msm8909_defconfig
+++ b/arch/arm/configs/msm8909_defconfig
@@ -27,11 +27,14 @@
 CONFIG_SCHED_TUNE=y
 CONFIG_DEFAULT_USE_ENERGY_AWARE=y
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_EMBEDDED=y
 CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
+CONFIG_OPROFILE=m
+CONFIG_KPROBES=y
+CONFIG_CC_STACKPROTECTOR_REGULAR=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
@@ -47,6 +50,7 @@
 CONFIG_PREEMPT=y
 CONFIG_AEABI=y
 CONFIG_HIGHMEM=y
+CONFIG_ARM_MODULE_PLTS=y
 CONFIG_CMA=y
 CONFIG_CMA_DEBUGFS=y
 CONFIG_ZSMALLOC=y
@@ -138,6 +142,7 @@
 CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
 CONFIG_NETFILTER_XT_MATCH_SOCKET=y
 CONFIG_NETFILTER_XT_MATCH_STATE=y
 CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
@@ -257,16 +262,26 @@
 CONFIG_CNSS_SDIO=y
 CONFIG_CLD_HL_SDIO_CORE=y
 CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_EVBUG=y
 CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
 CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=y
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_STMVL53L0X=y
 CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_MSM=y
 CONFIG_SERIAL_MSM_CONSOLE=y
 CONFIG_SERIAL_MSM_HS=y
 CONFIG_SERIAL_MSM_SMD=y
+CONFIG_DIAG_CHAR=y
+CONFIG_DIAG_USES_SMD=y
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_MSM_LEGACY=y
 CONFIG_MSM_SMD_PKT=y
@@ -299,6 +314,7 @@
 CONFIG_QTI_THERMAL_LIMITS_DCVS=y
 CONFIG_MFD_QCOM_RPM=y
 CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_SYSCON=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_PROXY_CONSUMER=y
@@ -316,6 +332,16 @@
 CONFIG_MEDIA_CONTROLLER=y
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
 CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_SOC_CAMERA=y
+CONFIG_MSMB_CAMERA=y
+CONFIG_MSM_CAMERA_SENSOR=y
+CONFIG_MSM_CSI30_HEADER=y
+CONFIG_MSM_CSIPHY=y
+CONFIG_MSM_CSID=y
+CONFIG_MSM_EEPROM=y
+CONFIG_MSM_ISP_V1=y
+CONFIG_MSM_ISPIF=y
+CONFIG_QCOM_KGSL=y
 CONFIG_FB=y
 CONFIG_FB_VIRTUAL=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
@@ -340,11 +366,33 @@
 CONFIG_HID_MICROSOFT=y
 CONFIG_HID_MONTEREY=y
 CONFIG_HID_MULTITOUCH=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_MSM=y
+CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_STORAGE_DEBUG=y
+CONFIG_USB_STORAGE_REALTEK=y
+CONFIG_USB_STORAGE_DATAFAB=y
+CONFIG_USB_STORAGE_FREECOM=y
+CONFIG_USB_STORAGE_ISD200=y
+CONFIG_USB_STORAGE_USBAT=y
+CONFIG_USB_STORAGE_SDDR09=y
+CONFIG_USB_STORAGE_SDDR55=y
+CONFIG_USB_STORAGE_JUMPSHOT=y
+CONFIG_USB_STORAGE_ALAUDA=y
+CONFIG_USB_STORAGE_ONETOUCH=y
+CONFIG_USB_STORAGE_KARMA=y
+CONFIG_USB_STORAGE_CYPRESS_ATACB=y
+CONFIG_USB_STORAGE_ENE_UB6250=y
+CONFIG_USB_UAS=y
 CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_GADGET=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
 CONFIG_NOP_USB_XCEIV=y
 CONFIG_DUAL_ROLE_USB_INTF=y
-CONFIG_USB_MSM_SSPHY_QMP=y
-CONFIG_MSM_QUSB_PHY=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_GADGET_DEBUG_FILES=y
 CONFIG_USB_GADGET_DEBUG_FS=y
@@ -352,7 +400,12 @@
 CONFIG_USB_CI13XXX_MSM=y
 CONFIG_USB_CONFIGFS=y
 CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_RMNET_BAM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
 CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
 CONFIG_USB_CONFIGFS_UEVENT=y
 CONFIG_USB_CONFIGFS_F_DIAG=y
 CONFIG_USB_CONFIGFS_F_CDEV=y
@@ -367,8 +420,7 @@
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_MSM=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_CLASS_FLASH=y
 CONFIG_LEDS_QPNP=y
 CONFIG_LEDS_QPNP_VIBRATOR=y
 CONFIG_LEDS_TRIGGERS=y
@@ -384,6 +436,8 @@
 CONFIG_ANDROID_LOW_MEMORY_KILLER=y
 CONFIG_ION=y
 CONFIG_ION_MSM=y
+CONFIG_IPA=y
+CONFIG_RMNET_IPA=y
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_NDP_BAM=y
 CONFIG_QPNP_REVID=y
@@ -391,7 +445,7 @@
 CONFIG_REMOTE_SPINLOCK_MSM=y
 CONFIG_MAILBOX=y
 CONFIG_ARM_SMMU=y
-CONFIG_QCOM_PM=y
+CONFIG_QCOM_LAZY_MAPPING=y
 CONFIG_MSM_SPM=y
 CONFIG_MSM_L2_SPM=y
 CONFIG_MSM_BOOT_STATS=y
@@ -406,6 +460,7 @@
 CONFIG_MSM_SMD=y
 CONFIG_MSM_SMD_DEBUG=y
 CONFIG_MSM_GLINK=y
+CONFIG_MSM_TZ_SMMU=y
 CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
 CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
 CONFIG_MSM_GLINK_SPI_XPRT=y
@@ -417,10 +472,16 @@
 CONFIG_MSM_GLINK_PKT=y
 CONFIG_MSM_SUBSYSTEM_RESTART=y
 CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_MSM_BAM_DMUX=y
 CONFIG_CNSS_CRYPTO=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_IIO=y
+CONFIG_INV_ICM20602_IIO=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
 CONFIG_QTI_MPM=y
@@ -434,13 +495,16 @@
 CONFIG_EXT4_FS_SECURITY=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
 CONFIG_FUSE_FS=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
 CONFIG_ECRYPT_FS=y
 CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
 CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
@@ -465,6 +529,7 @@
 CONFIG_DEBUG_STACK_USAGE=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_LOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
 # CONFIG_DETECT_HUNG_TASK is not set
 CONFIG_WQ_WATCHDOG=y
 CONFIG_PANIC_TIMEOUT=5
@@ -495,8 +560,13 @@
 CONFIG_PID_IN_CONTEXTIDR=y
 CONFIG_DEBUG_SET_MODULE_RONX=y
 CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SINK_TPIU=y
+CONFIG_CORESIGHT_SOURCE_ETM3X=y
 CONFIG_CORESIGHT_REMOTE_ETM=y
 CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_DBGUI=y
 CONFIG_CORESIGHT_STM=y
 CONFIG_CORESIGHT_TPDA=y
 CONFIG_CORESIGHT_TPDM=y
@@ -514,10 +584,13 @@
 CONFIG_CRYPTO_CTR=y
 CONFIG_CRYPTO_XTS=y
 CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_CRC32=y
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_TWOFISH=y
 CONFIG_CRYPTO_ANSI_CPRNG=y
 CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
 CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
 CONFIG_CRYPTO_DEV_QCOM_ICE=y
 CONFIG_ARM_CRYPTO=y
diff --git a/arch/arm/configs/msm8909w-perf_defconfig b/arch/arm/configs/msm8909w-perf_defconfig
index fb711f6..b2e4be0 100644
--- a/arch/arm/configs/msm8909w-perf_defconfig
+++ b/arch/arm/configs/msm8909w-perf_defconfig
@@ -1,9 +1,12 @@
+# CONFIG_FHANDLE is not set
 CONFIG_AUDIT=y
-# CONFIG_AUDITSYSCALL is not set
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_IRQ_TIME_ACCOUNTING=y
 CONFIG_SCHED_WALT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_RCU_EXPERT=y
 CONFIG_RCU_FAST_NO_HZ=y
 CONFIG_RCU_NOCB_CPU=y
@@ -24,12 +27,17 @@
 CONFIG_DEFAULT_USE_ENERGY_AWARE=y
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_EMBEDDED=y
 CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
+CONFIG_OPROFILE=m
 CONFIG_CC_STACKPROTECTOR_REGULAR=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -178,11 +186,8 @@
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_HTB=y
 CONFIG_NET_SCH_PRIO=y
-CONFIG_NET_SCH_MULTIQ=y
-CONFIG_NET_SCH_INGRESS=y
 CONFIG_NET_CLS_FW=y
 CONFIG_NET_CLS_U32=y
-CONFIG_CLS_U32_MARK=y
 CONFIG_NET_CLS_FLOW=y
 CONFIG_NET_EMATCH=y
 CONFIG_NET_EMATCH_CMP=y
@@ -214,25 +219,22 @@
 CONFIG_BLK_DEV_RAM_SIZE=8192
 CONFIG_HDCP_QSEECOM=y
 CONFIG_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
 CONFIG_MEMORY_STATE_TIME=y
 CONFIG_QPNP_MISC=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_SG=y
 CONFIG_CHR_DEV_SCH=y
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_LOGGING=y
 CONFIG_SCSI_SCAN_ASYNC=y
 CONFIG_SCSI_UFSHCD=y
 CONFIG_SCSI_UFSHCD_PLATFORM=y
 CONFIG_SCSI_UFS_QCOM=y
-CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
 CONFIG_DM_CRYPT=y
 CONFIG_DM_UEVENT=y
 CONFIG_DM_VERITY=y
-CONFIG_DM_VERITY_FEC=y
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=y
 CONFIG_TUN=y
@@ -260,6 +262,8 @@
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_QPNP_POWER_ON=y
 CONFIG_INPUT_UINPUT=y
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_MSM_HS=y
 CONFIG_SERIAL_MSM_SMD=y
 CONFIG_DIAG_CHAR=y
@@ -288,11 +292,17 @@
 CONFIG_QPNP_SMB2=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
 CONFIG_THERMAL_QPNP=y
 CONFIG_THERMAL_QPNP_ADC_TM=y
 CONFIG_THERMAL_TSENS=y
 CONFIG_MSM_BCL_PERIPHERAL_CTL=y
-CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
 CONFIG_MFD_QCOM_RPM=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR=y
@@ -311,36 +321,23 @@
 CONFIG_MEDIA_CONTROLLER=y
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
 CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_MSM_VIDC_3X_V4L2=y
+CONFIG_MSM_VIDC_3X_GOVERNORS=y
 CONFIG_QCOM_KGSL=y
 CONFIG_FB=y
 CONFIG_FB_VIRTUAL=y
 CONFIG_FB_MSM=y
 CONFIG_FB_MSM_MDSS=y
 CONFIG_FB_MSM_MDSS_WRITEBACK=y
+CONFIG_FB_MSM_MDSS_SPI_PANEL=y
 CONFIG_FB_MSM_MDSS_MDP3=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
-CONFIG_LOGO=y
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_DYNAMIC_MINORS=y
 CONFIG_SND_SOC=y
 CONFIG_UHID=y
-CONFIG_HID_A4TECH=y
-CONFIG_HID_APPLE=y
-CONFIG_HID_BELKIN=y
-CONFIG_HID_CHERRY=y
-CONFIG_HID_CHICONY=y
-CONFIG_HID_CYPRESS=y
-CONFIG_HID_ELECOM=y
-CONFIG_HID_EZKEY=y
-CONFIG_HID_KENSINGTON=y
-CONFIG_HID_LOGITECH=y
-CONFIG_HID_MAGICMOUSE=y
-CONFIG_HID_MICROSOFT=y
-CONFIG_HID_MONTEREY=y
-CONFIG_HID_MULTITOUCH=y
-CONFIG_DUAL_ROLE_USB_INTF=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_GADGET_DEBUG_FILES=y
 CONFIG_USB_GADGET_DEBUG_FS=y
@@ -348,16 +345,16 @@
 CONFIG_USB_CI13XXX_MSM=y
 CONFIG_USB_CONFIGFS=y
 CONFIG_USB_CONFIGFS_SERIAL=y
-CONFIG_USB_CONFIGFS_ACM=y
-CONFIG_USB_CONFIGFS_NCM=y
-CONFIG_USB_CONFIGFS_ECM=y
 CONFIG_USB_CONFIGFS_RMNET_BAM=y
 CONFIG_USB_CONFIGFS_EEM=y
 CONFIG_USB_CONFIGFS_MASS_STORAGE=y
 CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
 CONFIG_USB_CONFIGFS_F_ACC=y
 CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
 CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
 CONFIG_USB_CONFIGFS_F_HID=y
 CONFIG_USB_CONFIGFS_F_DIAG=y
 CONFIG_USB_CONFIGFS_F_CDEV=y
@@ -399,7 +396,6 @@
 CONFIG_MAILBOX=y
 CONFIG_ARM_SMMU=y
 CONFIG_QCOM_LAZY_MAPPING=y
-CONFIG_QCOM_PM=y
 CONFIG_MSM_SPM=y
 CONFIG_MSM_L2_SPM=y
 CONFIG_MSM_BOOT_STATS=y
@@ -426,10 +422,12 @@
 CONFIG_MSM_QMI_INTERFACE=y
 CONFIG_MSM_GLINK_PKT=y
 CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_SYSMON_COMM=y
 CONFIG_MSM_PIL=y
 CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_MSM_PIL_MSS_QDSP6V5=y
 CONFIG_MSM_EVENT_TIMER=y
+CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_MSM_BAM_DMUX=y
 CONFIG_MSM_GLINK_BGCOM_XPRT=y
@@ -462,28 +460,21 @@
 CONFIG_TMPFS=y
 CONFIG_ECRYPT_FS=y
 CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
 CONFIG_FRAME_WARN=2048
-CONFIG_PAGE_OWNER=y
-CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_LOCKUP_DETECTOR=y
-# CONFIG_DETECT_HUNG_TASK is not set
+CONFIG_PAGE_EXTENSION=y
+CONFIG_PANIC_ON_RECURSIVE_FAULT=y
 CONFIG_WQ_WATCHDOG=y
 CONFIG_PANIC_TIMEOUT=5
 CONFIG_PANIC_ON_SCHED_BUG=y
 CONFIG_PANIC_ON_RT_THROTTLING=y
-CONFIG_SCHEDSTATS=y
-CONFIG_SCHED_STACK_END_CHECK=y
 # CONFIG_DEBUG_PREEMPT is not set
 CONFIG_IPC_LOGGING=y
-CONFIG_LKDTM=y
-CONFIG_PANIC_ON_DATA_CORRUPTION=y
-# CONFIG_ARM_UNWIND is not set
-CONFIG_PID_IN_CONTEXTIDR=y
+CONFIG_DEBUG_USER=y
 CONFIG_DEBUG_SET_MODULE_RONX=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
diff --git a/arch/arm/configs/msm8909w_defconfig b/arch/arm/configs/msm8909w_defconfig
index de6fb6b..8108661 100644
--- a/arch/arm/configs/msm8909w_defconfig
+++ b/arch/arm/configs/msm8909w_defconfig
@@ -1,3 +1,4 @@
+# CONFIG_FHANDLE is not set
 CONFIG_AUDIT=y
 # CONFIG_AUDITSYSCALL is not set
 CONFIG_NO_HZ=y
@@ -5,7 +6,6 @@
 CONFIG_IRQ_TIME_ACCOUNTING=y
 CONFIG_SCHED_WALT=y
 CONFIG_TASKSTATS=y
-CONFIG_TASK_DELAY_ACCT=y
 CONFIG_TASK_XACCT=y
 CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_RCU_EXPERT=y
@@ -17,7 +17,6 @@
 CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
 CONFIG_CGROUP_DEBUG=y
 CONFIG_CGROUP_FREEZER=y
-CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_SCHEDTUNE=y
 CONFIG_RT_GROUP_SCHED=y
@@ -28,12 +27,17 @@
 CONFIG_SCHED_TUNE=y
 CONFIG_DEFAULT_USE_ENERGY_AWARE=y
 CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_EMBEDDED=y
 CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
+CONFIG_OPROFILE=m
 CONFIG_CC_STACKPROTECTOR_REGULAR=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -49,11 +53,11 @@
 CONFIG_SCHED_MC=y
 CONFIG_PREEMPT=y
 CONFIG_AEABI=y
-CONFIG_HIGHMEM=y
 CONFIG_CMA=y
 CONFIG_CMA_DEBUGFS=y
 CONFIG_ZSMALLOC=y
 CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_PROCESS_RECLAIM=y
 CONFIG_SECCOMP=y
 CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
@@ -64,12 +68,10 @@
 CONFIG_PM_AUTOSLEEP=y
 CONFIG_PM_WAKELOCKS=y
 CONFIG_PM_WAKELOCKS_LIMIT=0
-CONFIG_PM_DEBUG=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=y
-CONFIG_XFRM_STATISTICS=y
 CONFIG_NET_KEY=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
@@ -110,14 +112,13 @@
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
 CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_CT=y
 CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
 CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
 CONFIG_NETFILTER_XT_TARGET_LOG=y
 CONFIG_NETFILTER_XT_TARGET_MARK=y
 CONFIG_NETFILTER_XT_TARGET_NFLOG=y
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
-CONFIG_NETFILTER_XT_TARGET_TEE=y
 CONFIG_NETFILTER_XT_TARGET_TPROXY=y
 CONFIG_NETFILTER_XT_TARGET_TRACE=y
 CONFIG_NETFILTER_XT_TARGET_SECMARK=y
@@ -127,7 +128,6 @@
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
 CONFIG_NETFILTER_XT_MATCH_DSCP=y
-CONFIG_NETFILTER_XT_MATCH_ESP=y
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
 CONFIG_NETFILTER_XT_MATCH_HELPER=y
 CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
@@ -149,6 +149,7 @@
 CONFIG_NETFILTER_XT_MATCH_TIME=y
 CONFIG_NETFILTER_XT_MATCH_U32=y
 CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_NF_DUP_IPV4=y
 CONFIG_IP_NF_IPTABLES=y
 CONFIG_IP_NF_MATCH_AH=y
 CONFIG_IP_NF_MATCH_ECN=y
@@ -167,14 +168,13 @@
 CONFIG_IP_NF_ARPFILTER=y
 CONFIG_IP_NF_ARP_MANGLE=y
 CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_NF_DUP_IPV6=y
 CONFIG_IP6_NF_IPTABLES=y
 CONFIG_IP6_NF_MATCH_RPFILTER=y
 CONFIG_IP6_NF_FILTER=y
 CONFIG_IP6_NF_TARGET_REJECT=y
 CONFIG_IP6_NF_MANGLE=y
 CONFIG_IP6_NF_RAW=y
-CONFIG_BRIDGE_NF_EBTABLES=y
-CONFIG_BRIDGE_EBT_BROUTE=y
 CONFIG_L2TP=y
 CONFIG_L2TP_DEBUGFS=y
 CONFIG_L2TP_V3=y
@@ -184,8 +184,6 @@
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_HTB=y
 CONFIG_NET_SCH_PRIO=y
-CONFIG_NET_SCH_MULTIQ=y
-CONFIG_NET_SCH_INGRESS=y
 CONFIG_NET_CLS_FW=y
 CONFIG_NET_CLS_U32=y
 CONFIG_CLS_U32_MARK=y
@@ -198,8 +196,6 @@
 CONFIG_NET_EMATCH_TEXT=y
 CONFIG_NET_CLS_ACT=y
 CONFIG_NET_ACT_GACT=y
-CONFIG_NET_ACT_MIRRED=y
-CONFIG_NET_ACT_SKBEDIT=y
 CONFIG_DNS_RESOLVER=y
 CONFIG_RMNET_DATA=y
 CONFIG_RMNET_DATA_FC=y
@@ -218,26 +214,11 @@
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_HDCP_QSEECOM=y
 CONFIG_QSEECOM=y
 CONFIG_UID_SYS_STATS=y
-CONFIG_MEMORY_STATE_TIME=y
 CONFIG_QPNP_MISC=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_CHR_DEV_SCH=y
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_LOGGING=y
-CONFIG_SCSI_SCAN_ASYNC=y
-CONFIG_SCSI_UFSHCD=y
-CONFIG_SCSI_UFSHCD_PLATFORM=y
-CONFIG_SCSI_UFS_QCOM=y
-CONFIG_SCSI_UFS_QCOM_ICE=y
-CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
-CONFIG_DM_DEBUG=y
 CONFIG_DM_CRYPT=y
 CONFIG_DM_UEVENT=y
 CONFIG_DM_VERITY=y
@@ -261,7 +242,6 @@
 CONFIG_CLD_LL_CORE=y
 CONFIG_INPUT_EVDEV=y
 CONFIG_KEYBOARD_GPIO=y
-CONFIG_INPUT_JOYSTICK=y
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y
 CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y
@@ -270,6 +250,8 @@
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_QPNP_POWER_ON=y
 CONFIG_INPUT_UINPUT=y
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_MSM=y
 CONFIG_SERIAL_MSM_CONSOLE=y
 CONFIG_SERIAL_MSM_HS=y
@@ -300,11 +282,17 @@
 CONFIG_QPNP_SMB2=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
 CONFIG_THERMAL_QPNP=y
 CONFIG_THERMAL_QPNP_ADC_TM=y
 CONFIG_THERMAL_TSENS=y
 CONFIG_MSM_BCL_PERIPHERAL_CTL=y
-CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
 CONFIG_MFD_QCOM_RPM=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR=y
@@ -323,40 +311,27 @@
 CONFIG_MEDIA_CONTROLLER=y
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
 CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_MSM_VIDC_3X_V4L2=y
+CONFIG_MSM_VIDC_3X_GOVERNORS=y
 CONFIG_QCOM_KGSL=y
 CONFIG_FB=y
 CONFIG_FB_VIRTUAL=y
 CONFIG_FB_MSM=y
 CONFIG_FB_MSM_MDSS=y
 CONFIG_FB_MSM_MDSS_WRITEBACK=y
+CONFIG_FB_MSM_MDSS_SPI_PANEL=y
 CONFIG_FB_MSM_MDSS_MDP3=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
 CONFIG_LOGO=y
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_DYNAMIC_MINORS=y
 CONFIG_SND_SOC=y
 CONFIG_UHID=y
-CONFIG_HID_A4TECH=y
-CONFIG_HID_APPLE=y
-CONFIG_HID_BELKIN=y
-CONFIG_HID_CHERRY=y
-CONFIG_HID_CHICONY=y
-CONFIG_HID_CYPRESS=y
-CONFIG_HID_ELECOM=y
-CONFIG_HID_EZKEY=y
-CONFIG_HID_KENSINGTON=y
-CONFIG_HID_LOGITECH=y
-CONFIG_HID_MAGICMOUSE=y
-CONFIG_HID_MICROSOFT=y
-CONFIG_HID_MONTEREY=y
-CONFIG_HID_MULTITOUCH=y
-CONFIG_USB_DWC3=y
-CONFIG_NOP_USB_XCEIV=y
 CONFIG_DUAL_ROLE_USB_INTF=y
-CONFIG_USB_MSM_SSPHY_QMP=y
-CONFIG_MSM_QUSB_PHY=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_GADGET_DEBUG_FILES=y
 CONFIG_USB_GADGET_DEBUG_FS=y
@@ -364,23 +339,18 @@
 CONFIG_USB_CI13XXX_MSM=y
 CONFIG_USB_CONFIGFS=y
 CONFIG_USB_CONFIGFS_SERIAL=y
-CONFIG_USB_CONFIGFS_ACM=y
 CONFIG_USB_CONFIGFS_NCM=y
-CONFIG_USB_CONFIGFS_ECM=y
 CONFIG_USB_CONFIGFS_RMNET_BAM=y
-CONFIG_USB_CONFIGFS_EEM=y
 CONFIG_USB_CONFIGFS_MASS_STORAGE=y
 CONFIG_USB_CONFIGFS_F_FS=y
-CONFIG_USB_CONFIGFS_F_MTP=y
-CONFIG_USB_CONFIGFS_F_PTP=y
 CONFIG_USB_CONFIGFS_F_ACC=y
 CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
 CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
 CONFIG_USB_CONFIGFS_F_HID=y
 CONFIG_USB_CONFIGFS_F_DIAG=y
 CONFIG_USB_CONFIGFS_F_CDEV=y
 CONFIG_USB_CONFIGFS_F_CCID=y
-CONFIG_USB_CONFIGFS_F_GSI=y
 CONFIG_MMC=y
 CONFIG_MMC_PERF_PROFILING=y
 CONFIG_MMC_RING_BUFFER=y
@@ -419,7 +389,6 @@
 CONFIG_MAILBOX=y
 CONFIG_ARM_SMMU=y
 CONFIG_QCOM_LAZY_MAPPING=y
-CONFIG_QCOM_PM=y
 CONFIG_MSM_SPM=y
 CONFIG_MSM_L2_SPM=y
 CONFIG_MSM_BOOT_STATS=y
@@ -446,6 +415,7 @@
 CONFIG_MSM_QMI_INTERFACE=y
 CONFIG_MSM_GLINK_PKT=y
 CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_SYSMON_COMM=y
 CONFIG_MSM_PIL=y
 CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_MSM_PIL_MSS_QDSP6V5=y
@@ -472,37 +442,29 @@
 CONFIG_ANDROID_BINDER_IPC=y
 CONFIG_SENSORS_SSC=y
 CONFIG_MSM_TZ_LOG=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
 CONFIG_FUSE_FS=y
-CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
 CONFIG_ECRYPT_FS=y
 CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_PRINTK_TIME=y
+CONFIG_MESSAGE_LOGLEVEL_DEFAULT=6
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_INFO=y
 CONFIG_FRAME_WARN=2048
 CONFIG_PAGE_OWNER=y
 CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
-CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_PAGEALLOC=y
 CONFIG_SLUB_DEBUG_PANIC_ON=y
 CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
-CONFIG_DEBUG_OBJECTS=y
-CONFIG_DEBUG_OBJECTS_FREE=y
-CONFIG_DEBUG_OBJECTS_TIMERS=y
-CONFIG_DEBUG_OBJECTS_WORK=y
-CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
-CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
 CONFIG_SLUB_DEBUG_ON=y
 CONFIG_DEBUG_KMEMLEAK=y
 CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000
@@ -512,7 +474,8 @@
 CONFIG_LOCKUP_DETECTOR=y
 # CONFIG_DETECT_HUNG_TASK is not set
 CONFIG_WQ_WATCHDOG=y
-CONFIG_PANIC_TIMEOUT=5
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PANIC_TIMEOUT=-1
 CONFIG_PANIC_ON_SCHED_BUG=y
 CONFIG_PANIC_ON_RT_THROTTLING=y
 CONFIG_SCHEDSTATS=y
@@ -540,8 +503,13 @@
 CONFIG_PID_IN_CONTEXTIDR=y
 CONFIG_DEBUG_SET_MODULE_RONX=y
 CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SINK_TPIU=y
+CONFIG_CORESIGHT_SOURCE_ETM3X=y
 CONFIG_CORESIGHT_REMOTE_ETM=y
 CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_DBGUI=y
 CONFIG_CORESIGHT_STM=y
 CONFIG_CORESIGHT_TPDA=y
 CONFIG_CORESIGHT_TPDM=y
diff --git a/arch/arm/configs/msm8937-perf_defconfig b/arch/arm/configs/msm8937-perf_defconfig
index 09b6f05..34c9daa 100644
--- a/arch/arm/configs/msm8937-perf_defconfig
+++ b/arch/arm/configs/msm8937-perf_defconfig
@@ -20,6 +20,8 @@
 CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_CGROUP_BPF=y
 CONFIG_SCHED_CORE_CTL=y
@@ -65,6 +67,7 @@
 CONFIG_PREEMPT=y
 CONFIG_AEABI=y
 CONFIG_HIGHMEM=y
+CONFIG_ARM_MODULE_PLTS=y
 CONFIG_CMA=y
 CONFIG_CMA_DEBUGFS=y
 CONFIG_ZSMALLOC=y
@@ -75,7 +78,6 @@
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
-CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
 CONFIG_CPU_BOOST=y
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
@@ -146,6 +148,7 @@
 CONFIG_NETFILTER_XT_TARGET_TRACE=y
 CONFIG_NETFILTER_XT_TARGET_SECMARK=y
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
 CONFIG_NETFILTER_XT_MATCH_COMMENT=y
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
@@ -223,6 +226,8 @@
 CONFIG_RMNET_DATA_FC=y
 CONFIG_RMNET_DATA_DEBUG_PKT=y
 CONFIG_BT=y
+# CONFIG_BT_BREDR is not set
+# CONFIG_BT_LE is not set
 CONFIG_MSM_BT_POWER=y
 CONFIG_CFG80211=y
 CONFIG_CFG80211_INTERNAL_REGDB=y
@@ -241,7 +246,6 @@
 CONFIG_HDCP_QSEECOM=y
 CONFIG_QSEECOM=y
 CONFIG_UID_SYS_STATS=y
-CONFIG_MEMORY_STATE_TIME=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_SG=y
@@ -253,10 +257,8 @@
 CONFIG_SCSI_UFSHCD_PLATFORM=y
 CONFIG_SCSI_UFS_QCOM=y
 CONFIG_SCSI_UFS_QCOM_ICE=y
-CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
-CONFIG_DM_DEBUG=y
 CONFIG_DM_CRYPT=y
 CONFIG_DM_REQ_CRYPT=y
 CONFIG_DM_UEVENT=y
@@ -308,6 +310,10 @@
 # CONFIG_INPUT_MOUSE is not set
 CONFIG_INPUT_JOYSTICK=y
 CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v26=y
+CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26=y
 CONFIG_TOUCHSCREEN_FT5X06=y
 CONFIG_TOUCHSCREEN_GEN_VKEYS=y
 CONFIG_INPUT_MISC=y
@@ -335,20 +341,18 @@
 CONFIG_SPI_SPIDEV=y
 CONFIG_SLIMBUS_MSM_NGD=y
 CONFIG_SPMI=y
-CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
 CONFIG_PINCTRL_MSM8937=y
 CONFIG_PINCTRL_MSM8917=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_GPIO_QPNP_PIN=y
-CONFIG_GPIO_QPNP_PIN_DEBUG=y
 CONFIG_POWER_RESET=y
 CONFIG_POWER_RESET_QCOM=y
 CONFIG_QCOM_DLOAD_MODE=y
-CONFIG_POWER_RESET_SYSCON=y
 CONFIG_POWER_SUPPLY=y
 CONFIG_QPNP_FG=y
 CONFIG_SMB135X_CHARGER=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
 CONFIG_SMB1351_USB_CHARGER=y
 CONFIG_QPNP_SMB5=y
 CONFIG_QPNP_SMBCHARGER=y
@@ -357,20 +361,27 @@
 CONFIG_MSM_APM=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
 CONFIG_THERMAL_QPNP=y
 CONFIG_THERMAL_QPNP_ADC_TM=y
 CONFIG_THERMAL_TSENS=y
-CONFIG_MSM_BCL_PERIPHERAL_CTL=y
-CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
+CONFIG_QTI_BCL_PMIC5=y
+CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_SYSCON=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_PROXY_CONSUMER=y
 CONFIG_REGULATOR_CPR=y
-CONFIG_REGULATOR_CPR4_APSS=y
-CONFIG_REGULATOR_CPRH_KBSS=y
 CONFIG_REGULATOR_MEM_ACC=y
-CONFIG_REGULATOR_MSM_GFX_LDO=y
 CONFIG_REGULATOR_QPNP_LABIBB=y
 CONFIG_REGULATOR_QPNP_LCDB=y
 CONFIG_REGULATOR_QPNP=y
@@ -385,9 +396,7 @@
 CONFIG_USB_VIDEO_CLASS=y
 CONFIG_V4L_PLATFORM_DRIVERS=y
 CONFIG_MSM_CAMERA=y
-CONFIG_MSM_CAMERA_DEBUG=y
 CONFIG_MSMB_CAMERA=y
-CONFIG_MSMB_CAMERA_DEBUG=y
 CONFIG_MSM_CAMERA_SENSOR=y
 CONFIG_MSM_CPP=y
 CONFIG_MSM_CCI=y
@@ -411,7 +420,6 @@
 CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
 CONFIG_MSMB_JPEG=y
 CONFIG_MSM_FD=y
-CONFIG_MSM_JPEGDMA=y
 CONFIG_MSM_VIDC_3X_V4L2=y
 CONFIG_MSM_VIDC_3X_GOVERNORS=y
 CONFIG_RADIO_IRIS=y
@@ -424,7 +432,7 @@
 CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y
 CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_DYNAMIC_MINORS=y
@@ -485,11 +493,12 @@
 CONFIG_USB_CONFIGFS_F_QDSS=y
 CONFIG_MMC=y
 CONFIG_MMC_PERF_PROFILING=y
+# CONFIG_PWRSEQ_EMMC is not set
+# CONFIG_PWRSEQ_SIMPLE is not set
 CONFIG_MMC_PARANOID_SD_INIT=y
 CONFIG_MMC_CLKGATE=y
 CONFIG_MMC_BLOCK_MINORS=32
 CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
-CONFIG_MMC_TEST=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_MSM=y
@@ -553,7 +562,6 @@
 CONFIG_MSM_PIL=y
 CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_MSM_PIL_MSS_QDSP6V5=y
-CONFIG_ICNSS=y
 CONFIG_MSM_PERFORMANCE=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_AVTIMER=y
@@ -562,17 +570,18 @@
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_MEM_SHARE_QMI_SERVICE=y
-# CONFIG_MSM_JTAGV8 is not set
 CONFIG_MSM_BAM_DMUX=y
 CONFIG_WCNSS_CORE=y
 CONFIG_WCNSS_CORE_PRONTO=y
 CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
+CONFIG_BIG_CLUSTER_MIN_FREQ_ADJUST=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
 CONFIG_DEVFREQ_SIMPLE_DEV=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_SPDM_SCM=y
 CONFIG_DEVFREQ_SPDM=y
+CONFIG_IIO=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
 CONFIG_PWM_QTI_LPG=y
@@ -583,6 +592,9 @@
 CONFIG_MSM_TZ_LOG=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_FS_ENCRYPTION=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
@@ -590,8 +602,6 @@
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
-CONFIG_ECRYPT_FS=y
-CONFIG_ECRYPT_FS_MESSAGING=y
 CONFIG_SDCARD_FS=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_NLS_CODEPAGE_437=y
@@ -608,7 +618,6 @@
 CONFIG_CPU_FREQ_SWITCH_PROFILER=y
 CONFIG_CORESIGHT=y
 CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
-CONFIG_CORESIGHT_SOURCE_ETM4X=y
 CONFIG_CORESIGHT_REMOTE_ETM=y
 CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
 CONFIG_CORESIGHT_QCOM_REPLICATOR=y
@@ -619,16 +628,17 @@
 CONFIG_CORESIGHT_CTI=y
 CONFIG_CORESIGHT_EVENT=y
 CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_PFK=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_LSM_MMAP_MIN_ADDR=4096
 CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SMACK=y
-CONFIG_CRYPTO_CTR=y
 CONFIG_CRYPTO_XCBC=y
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_LZ4=y
 CONFIG_CRYPTO_ANSI_CPRNG=y
 CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
 CONFIG_CRYPTO_DEV_QCRYPTO=y
diff --git a/arch/arm/configs/msm8937_defconfig b/arch/arm/configs/msm8937_defconfig
index 1fefcc616..e4c9eb2 100644
--- a/arch/arm/configs/msm8937_defconfig
+++ b/arch/arm/configs/msm8937_defconfig
@@ -21,6 +21,8 @@
 CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_CGROUP_BPF=y
 CONFIG_SCHED_CORE_CTL=y
@@ -68,6 +70,7 @@
 CONFIG_PREEMPT=y
 CONFIG_AEABI=y
 CONFIG_HIGHMEM=y
+CONFIG_ARM_MODULE_PLTS=y
 CONFIG_CMA=y
 CONFIG_CMA_DEBUGFS=y
 CONFIG_ZSMALLOC=y
@@ -78,7 +81,6 @@
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
-CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
 CONFIG_CPU_BOOST=y
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
@@ -150,6 +152,7 @@
 CONFIG_NETFILTER_XT_TARGET_TRACE=y
 CONFIG_NETFILTER_XT_TARGET_SECMARK=y
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
 CONFIG_NETFILTER_XT_MATCH_COMMENT=y
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
@@ -228,6 +231,8 @@
 CONFIG_RMNET_DATA_FC=y
 CONFIG_RMNET_DATA_DEBUG_PKT=y
 CONFIG_BT=y
+# CONFIG_BT_BREDR is not set
+# CONFIG_BT_LE is not set
 CONFIG_MSM_BT_POWER=y
 CONFIG_CFG80211=y
 CONFIG_CFG80211_INTERNAL_REGDB=y
@@ -246,7 +251,6 @@
 CONFIG_HDCP_QSEECOM=y
 CONFIG_QSEECOM=y
 CONFIG_UID_SYS_STATS=y
-CONFIG_MEMORY_STATE_TIME=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_SG=y
@@ -258,10 +262,8 @@
 CONFIG_SCSI_UFSHCD_PLATFORM=y
 CONFIG_SCSI_UFS_QCOM=y
 CONFIG_SCSI_UFS_QCOM_ICE=y
-CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
-CONFIG_DM_DEBUG=y
 CONFIG_DM_CRYPT=y
 CONFIG_DM_REQ_CRYPT=y
 CONFIG_DM_UEVENT=y
@@ -313,6 +315,10 @@
 # CONFIG_INPUT_MOUSE is not set
 CONFIG_INPUT_JOYSTICK=y
 CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v26=y
+CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26=y
 CONFIG_TOUCHSCREEN_FT5X06=y
 CONFIG_TOUCHSCREEN_GEN_VKEYS=y
 CONFIG_INPUT_MISC=y
@@ -342,20 +348,18 @@
 CONFIG_SPI_SPIDEV=y
 CONFIG_SLIMBUS_MSM_NGD=y
 CONFIG_SPMI=y
-CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
 CONFIG_PINCTRL_MSM8937=y
 CONFIG_PINCTRL_MSM8917=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_GPIO_QPNP_PIN=y
-CONFIG_GPIO_QPNP_PIN_DEBUG=y
 CONFIG_POWER_RESET=y
 CONFIG_POWER_RESET_QCOM=y
 CONFIG_QCOM_DLOAD_MODE=y
-CONFIG_POWER_RESET_SYSCON=y
 CONFIG_POWER_SUPPLY=y
 CONFIG_QPNP_FG=y
 CONFIG_SMB135X_CHARGER=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
 CONFIG_SMB1351_USB_CHARGER=y
 CONFIG_QPNP_SMB5=y
 CONFIG_QPNP_SMBCHARGER=y
@@ -364,20 +368,27 @@
 CONFIG_MSM_APM=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
 CONFIG_THERMAL_QPNP=y
 CONFIG_THERMAL_QPNP_ADC_TM=y
 CONFIG_THERMAL_TSENS=y
-CONFIG_MSM_BCL_PERIPHERAL_CTL=y
-CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
+CONFIG_QTI_BCL_PMIC5=y
+CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_SYSCON=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_PROXY_CONSUMER=y
 CONFIG_REGULATOR_CPR=y
-CONFIG_REGULATOR_CPR4_APSS=y
-CONFIG_REGULATOR_CPRH_KBSS=y
 CONFIG_REGULATOR_MEM_ACC=y
-CONFIG_REGULATOR_MSM_GFX_LDO=y
 CONFIG_REGULATOR_QPNP_LABIBB=y
 CONFIG_REGULATOR_QPNP_LCDB=y
 CONFIG_REGULATOR_QPNP=y
@@ -418,7 +429,6 @@
 CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
 CONFIG_MSMB_JPEG=y
 CONFIG_MSM_FD=y
-CONFIG_MSM_JPEGDMA=y
 CONFIG_MSM_VIDC_3X_V4L2=y
 CONFIG_MSM_VIDC_3X_GOVERNORS=y
 CONFIG_RADIO_IRIS=y
@@ -432,7 +442,7 @@
 CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y
 CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_DYNAMIC_MINORS=y
@@ -493,12 +503,13 @@
 CONFIG_USB_CONFIGFS_F_QDSS=y
 CONFIG_MMC=y
 CONFIG_MMC_PERF_PROFILING=y
+# CONFIG_PWRSEQ_EMMC is not set
+# CONFIG_PWRSEQ_SIMPLE is not set
 CONFIG_MMC_RING_BUFFER=y
 CONFIG_MMC_PARANOID_SD_INIT=y
 CONFIG_MMC_CLKGATE=y
 CONFIG_MMC_BLOCK_MINORS=32
 CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
-CONFIG_MMC_TEST=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_MSM=y
@@ -544,14 +555,15 @@
 CONFIG_IOMMU_DEBUG=y
 CONFIG_IOMMU_DEBUG_TRACKING=y
 CONFIG_IOMMU_TESTS=y
+CONFIG_QCOM_CPUSS_DUMP=y
 CONFIG_QCOM_RUN_QUEUE_STATS=y
 CONFIG_MSM_SPM=y
 CONFIG_MSM_L2_SPM=y
 CONFIG_MSM_BOOT_STATS=y
 CONFIG_MSM_CORE_HANG_DETECT=y
-CONFIG_MSM_GLADIATOR_HANG_DETECT=y
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_MSM_DEBUG_LAR_UNLOCK=y
 CONFIG_MSM_RPM_SMD=y
 CONFIG_QCOM_BUS_SCALING=y
 CONFIG_QCOM_SECURE_BUFFER=y
@@ -569,7 +581,6 @@
 CONFIG_MSM_PIL=y
 CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_MSM_PIL_MSS_QDSP6V5=y
-CONFIG_ICNSS=y
 CONFIG_MSM_PERFORMANCE=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_AVTIMER=y
@@ -583,15 +594,18 @@
 CONFIG_WCNSS_CORE=y
 CONFIG_WCNSS_CORE_PRONTO=y
 CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
+CONFIG_BIG_CLUSTER_MIN_FREQ_ADJUST=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
 CONFIG_DEVFREQ_SIMPLE_DEV=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_SPDM_SCM=y
 CONFIG_DEVFREQ_SPDM=y
+CONFIG_IIO=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
 CONFIG_PWM_QTI_LPG=y
+CONFIG_QCOM_SHOW_RESUME_IRQ=y
 CONFIG_QTI_MPM=y
 CONFIG_ANDROID=y
 CONFIG_ANDROID_BINDER_IPC=y
@@ -599,6 +613,9 @@
 CONFIG_MSM_TZ_LOG=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_FS_ENCRYPTION=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
@@ -606,8 +623,6 @@
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
-CONFIG_ECRYPT_FS=y
-CONFIG_ECRYPT_FS_MESSAGING=y
 CONFIG_SDCARD_FS=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_NLS_CODEPAGE_437=y
@@ -622,13 +637,14 @@
 CONFIG_DEBUG_PAGEALLOC=y
 CONFIG_SLUB_DEBUG_PANIC_ON=y
 CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
+CONFIG_PAGE_POISONING=y
+CONFIG_PAGE_POISONING_ENABLE_DEFAULT=y
 CONFIG_DEBUG_OBJECTS=y
 CONFIG_DEBUG_OBJECTS_FREE=y
 CONFIG_DEBUG_OBJECTS_TIMERS=y
 CONFIG_DEBUG_OBJECTS_WORK=y
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
 CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
-CONFIG_SLUB_DEBUG_ON=y
 CONFIG_DEBUG_KMEMLEAK=y
 CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000
 CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
@@ -663,6 +679,7 @@
 CONFIG_MEMTEST=y
 CONFIG_PANIC_ON_DATA_CORRUPTION=y
 CONFIG_DEBUG_USER=y
+CONFIG_FORCE_PAGES=y
 CONFIG_PID_IN_CONTEXTIDR=y
 CONFIG_DEBUG_SET_MODULE_RONX=y
 CONFIG_CORESIGHT=y
@@ -678,16 +695,17 @@
 CONFIG_CORESIGHT_CTI=y
 CONFIG_CORESIGHT_EVENT=y
 CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_PFK=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_LSM_MMAP_MIN_ADDR=4096
 CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SMACK=y
-CONFIG_CRYPTO_CTR=y
 CONFIG_CRYPTO_XCBC=y
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_LZ4=y
 CONFIG_CRYPTO_ANSI_CPRNG=y
 CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
 CONFIG_CRYPTO_DEV_QCRYPTO=y
diff --git a/arch/arm/configs/msm8953-batcam-perf_defconfig b/arch/arm/configs/msm8953-batcam-perf_defconfig
new file mode 100644
index 0000000..a6fe9b0
--- /dev/null
+++ b/arch/arm/configs/msm8953-batcam-perf_defconfig
@@ -0,0 +1,307 @@
+CONFIG_LOCALVERSION="-perf"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_SCHED_WALT=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_ALL=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_BPF=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_DEFAULT_USE_ENERGY_AWARE=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_BPF_SYSCALL=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_MSM8953=y
+CONFIG_ARCH_MSM8953_BOOT_ORDERING=y
+# CONFIG_VDSO is not set
+CONFIG_SMP=y
+CONFIG_SCHED_MC=y
+CONFIG_VMSPLIT_3G_OPT=y
+CONFIG_NR_CPUS=8
+CONFIG_ARM_PSCI=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_SECCOMP=y
+CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
+CONFIG_IMG_DTB=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_BOOST=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPU_FREQ_MSM=y
+CONFIG_CPU_IDLE=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+CONFIG_KERNEL_MODE_NEON=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HIBERNATION=y
+CONFIG_HIBERNATION_IMAGE_REUSE=y
+CONFIG_HIBERNATION_SKIP_CRC=y
+CONFIG_PM_STD_PARTITION="/dev/mmcblk0p49"
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_NET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_DMA_CMA=y
+# CONFIG_OF_KOBJ is not set
+CONFIG_QSEECOM=y
+CONFIG_SCSI=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_NETDEVICES=y
+CONFIG_USB_USBNET=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_MSM_SMD=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+CONFIG_MSM_SMD_PKT=y
+CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
+CONFIG_PINCTRL_MSM8953=y
+CONFIG_PINCTRL_MSM8917=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_QPNP_PIN=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_QPNP_FG=y
+CONFIG_SMB135X_CHARGER=y
+CONFIG_QPNP_SMB5=y
+CONFIG_QPNP_SMBCHARGER=y
+CONFIG_QPNP_TYPEC=y
+CONFIG_QPNP_QG=y
+CONFIG_MSM_APM=y
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_QPNP=y
+CONFIG_THERMAL_QPNP_ADC_TM=y
+CONFIG_THERMAL_TSENS=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_PROXY_CONSUMER=y
+CONFIG_REGULATOR_CPR=y
+CONFIG_REGULATOR_CPR4_APSS=y
+CONFIG_REGULATOR_CPRH_KBSS=y
+CONFIG_REGULATOR_MEM_ACC=y
+CONFIG_REGULATOR_MSM_GFX_LDO=y
+CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP_LCDB=y
+CONFIG_REGULATOR_QPNP=y
+CONFIG_REGULATOR_RPM_SMD=y
+CONFIG_REGULATOR_SPM=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_FB=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_SOC=y
+CONFIG_UHID=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_STORAGE_DATAFAB=y
+CONFIG_USB_STORAGE_FREECOM=y
+CONFIG_USB_STORAGE_ISD200=y
+CONFIG_USB_STORAGE_USBAT=y
+CONFIG_USB_STORAGE_SDDR09=y
+CONFIG_USB_STORAGE_SDDR55=y
+CONFIG_USB_STORAGE_JUMPSHOT=y
+CONFIG_USB_STORAGE_ALAUDA=y
+CONFIG_USB_STORAGE_ONETOUCH=y
+CONFIG_USB_STORAGE_KARMA=y
+CONFIG_USB_STORAGE_CYPRESS_ATACB=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_DUAL_ROLE_USB_INTF=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_DEBUG_FILES=y
+CONFIG_USB_GADGET_DEBUG_FS=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_MMC=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_CQ_HCI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_QTI_TRI_LED=y
+CONFIG_LEDS_QPNP=y
+CONFIG_LEDS_QPNP_FLASH=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_WLED=y
+CONFIG_LEDS_QPNP_HAPTICS=y
+CONFIG_LEDS_QPNP_VIBRATOR_LDO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_EDAC=y
+CONFIG_EDAC_MM_EDAC=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_QPNP=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_SPS_DMA=y
+CONFIG_SYNC_FILE=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
+CONFIG_IPA=y
+CONFIG_RNDIS_IPA=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_QPNP_COINCELL=y
+CONFIG_QPNP_REVID=y
+CONFIG_USB_BAM=y
+CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_MAILBOX=y
+CONFIG_ARM_SMMU=y
+CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_QCOM_RUN_QUEUE_STATS=y
+CONFIG_MSM_SPM=y
+CONFIG_MSM_L2_SPM=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_MSM_RPM_SMD=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_MSM_SMEM=y
+CONFIG_MSM_SMD=y
+CONFIG_MSM_SMD_DEBUG=y
+CONFIG_MSM_TZ_SMMU=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_ICNSS=y
+CONFIG_MSM_PERFORMANCE=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_PM=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_MSM_BAM_DMUX=y
+CONFIG_WCNSS_CORE=y
+CONFIG_WCNSS_CORE_PRONTO=y
+CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_SPDM_SCM=y
+CONFIG_DEVFREQ_SPDM=y
+CONFIG_EXTCON_USB_GPIO=y
+CONFIG_PWM=y
+CONFIG_PWM_QPNP=y
+CONFIG_PWM_QTI_LPG=y
+CONFIG_QTI_MPM=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_SENSORS_SSC=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_FRAME_WARN=2048
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_STACKTRACE=y
+# CONFIG_FTRACE is not set
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_ARM_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM_NEON=y
+CONFIG_CRYPTO_SHA2_ARM_CE=y
+CONFIG_CRYPTO_AES_ARM_BS=y
+CONFIG_CRYPTO_AES_ARM_CE=y
+CONFIG_QMI_ENCDEC=y
diff --git a/arch/arm/configs/msm8953-batcam_defconfig b/arch/arm/configs/msm8953-batcam_defconfig
new file mode 100644
index 0000000..cd86b01
--- /dev/null
+++ b/arch/arm/configs/msm8953-batcam_defconfig
@@ -0,0 +1,308 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_SCHED_WALT=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_ALL=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_BPF=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_DEFAULT_USE_ENERGY_AWARE=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_BPF_SYSCALL=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_MSM8953=y
+CONFIG_ARCH_MSM8953_BOOT_ORDERING=y
+# CONFIG_VDSO is not set
+CONFIG_SMP=y
+CONFIG_SCHED_MC=y
+CONFIG_VMSPLIT_3G_OPT=y
+CONFIG_NR_CPUS=8
+CONFIG_ARM_PSCI=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_SECCOMP=y
+CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
+CONFIG_IMG_DTB=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_BOOST=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPU_FREQ_MSM=y
+CONFIG_CPU_IDLE=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+CONFIG_KERNEL_MODE_NEON=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HIBERNATION=y
+CONFIG_HIBERNATION_IMAGE_REUSE=y
+CONFIG_HIBERNATION_SKIP_CRC=y
+CONFIG_PM_STD_PARTITION="/dev/mmcblk0p49"
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_NET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_DMA_CMA=y
+# CONFIG_OF_KOBJ is not set
+CONFIG_QSEECOM=y
+CONFIG_SCSI=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_NETDEVICES=y
+CONFIG_USB_USBNET=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_MSM_SMD=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+CONFIG_MSM_SMD_PKT=y
+CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
+CONFIG_PINCTRL_MSM8953=y
+CONFIG_PINCTRL_MSM8917=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_QPNP_PIN=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_QPNP_FG=y
+CONFIG_SMB135X_CHARGER=y
+CONFIG_QPNP_SMB5=y
+CONFIG_QPNP_SMBCHARGER=y
+CONFIG_QPNP_TYPEC=y
+CONFIG_QPNP_QG=y
+CONFIG_MSM_APM=y
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_QPNP=y
+CONFIG_THERMAL_QPNP_ADC_TM=y
+CONFIG_THERMAL_TSENS=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_PROXY_CONSUMER=y
+CONFIG_REGULATOR_CPR=y
+CONFIG_REGULATOR_CPR4_APSS=y
+CONFIG_REGULATOR_CPRH_KBSS=y
+CONFIG_REGULATOR_MEM_ACC=y
+CONFIG_REGULATOR_MSM_GFX_LDO=y
+CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP_LCDB=y
+CONFIG_REGULATOR_QPNP=y
+CONFIG_REGULATOR_RPM_SMD=y
+CONFIG_REGULATOR_SPM=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_FB=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_SOC=y
+CONFIG_UHID=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_STORAGE_DATAFAB=y
+CONFIG_USB_STORAGE_FREECOM=y
+CONFIG_USB_STORAGE_ISD200=y
+CONFIG_USB_STORAGE_USBAT=y
+CONFIG_USB_STORAGE_SDDR09=y
+CONFIG_USB_STORAGE_SDDR55=y
+CONFIG_USB_STORAGE_JUMPSHOT=y
+CONFIG_USB_STORAGE_ALAUDA=y
+CONFIG_USB_STORAGE_ONETOUCH=y
+CONFIG_USB_STORAGE_KARMA=y
+CONFIG_USB_STORAGE_CYPRESS_ATACB=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_DUAL_ROLE_USB_INTF=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_DEBUG_FILES=y
+CONFIG_USB_GADGET_DEBUG_FS=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_MMC=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_CQ_HCI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_QTI_TRI_LED=y
+CONFIG_LEDS_QPNP=y
+CONFIG_LEDS_QPNP_FLASH=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_WLED=y
+CONFIG_LEDS_QPNP_HAPTICS=y
+CONFIG_LEDS_QPNP_VIBRATOR_LDO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_EDAC=y
+CONFIG_EDAC_MM_EDAC=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_QPNP=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_SPS_DMA=y
+CONFIG_SYNC_FILE=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
+CONFIG_IPA=y
+CONFIG_RNDIS_IPA=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_QPNP_COINCELL=y
+CONFIG_QPNP_REVID=y
+CONFIG_USB_BAM=y
+CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_MAILBOX=y
+CONFIG_ARM_SMMU=y
+CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_QCOM_RUN_QUEUE_STATS=y
+CONFIG_MSM_SPM=y
+CONFIG_MSM_L2_SPM=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_MSM_RPM_SMD=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_MSM_SMEM=y
+CONFIG_MSM_SMD=y
+CONFIG_MSM_SMD_DEBUG=y
+CONFIG_MSM_TZ_SMMU=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_ICNSS=y
+CONFIG_MSM_PERFORMANCE=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_PM=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_MSM_BAM_DMUX=y
+CONFIG_WCNSS_CORE=y
+CONFIG_WCNSS_CORE_PRONTO=y
+CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_SPDM_SCM=y
+CONFIG_DEVFREQ_SPDM=y
+CONFIG_EXTCON_USB_GPIO=y
+CONFIG_PWM=y
+CONFIG_PWM_QPNP=y
+CONFIG_PWM_QTI_LPG=y
+CONFIG_QTI_MPM=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_SENSORS_SSC=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_FRAME_WARN=2048
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_STACKTRACE=y
+# CONFIG_FTRACE is not set
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_ARM_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM_NEON=y
+CONFIG_CRYPTO_SHA2_ARM_CE=y
+CONFIG_CRYPTO_AES_ARM_BS=y
+CONFIG_CRYPTO_AES_ARM_CE=y
+CONFIG_QMI_ENCDEC=y
diff --git a/arch/arm/configs/msm8953-perf_defconfig b/arch/arm/configs/msm8953-perf_defconfig
index 2ba24d9..fca07a10 100644
--- a/arch/arm/configs/msm8953-perf_defconfig
+++ b/arch/arm/configs/msm8953-perf_defconfig
@@ -17,6 +17,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
 CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_SCHEDTUNE=y
 CONFIG_RT_GROUP_SCHED=y
@@ -63,6 +64,7 @@
 CONFIG_PREEMPT=y
 CONFIG_AEABI=y
 CONFIG_HIGHMEM=y
+CONFIG_ARM_MODULE_PLTS=y
 CONFIG_CMA=y
 CONFIG_CMA_DEBUGFS=y
 CONFIG_ZSMALLOC=y
@@ -144,6 +146,7 @@
 CONFIG_NETFILTER_XT_TARGET_TRACE=y
 CONFIG_NETFILTER_XT_TARGET_SECMARK=y
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
 CONFIG_NETFILTER_XT_MATCH_COMMENT=y
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
@@ -307,6 +310,10 @@
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_TOUCHSCREEN_FT5X06=y
 CONFIG_TOUCHSCREEN_GEN_VKEYS=y
+CONFIG_TOUCHSCREEN_HIMAX_CHIPSET=y
+CONFIG_TOUCHSCREEN_HIMAX_I2C=y
+CONFIG_TOUCHSCREEN_HIMAX_DEBUG=y
+CONFIG_HMX_DB=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_HBTP_INPUT=y
 CONFIG_INPUT_QPNP_POWER_ON=y
@@ -340,10 +347,10 @@
 CONFIG_POWER_RESET=y
 CONFIG_POWER_RESET_QCOM=y
 CONFIG_QCOM_DLOAD_MODE=y
-CONFIG_POWER_RESET_SYSCON=y
 CONFIG_POWER_SUPPLY=y
 CONFIG_QPNP_FG=y
 CONFIG_SMB135X_CHARGER=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
 CONFIG_SMB1351_USB_CHARGER=y
 CONFIG_QPNP_SMB5=y
 CONFIG_QPNP_SMBCHARGER=y
@@ -352,12 +359,22 @@
 CONFIG_MSM_APM=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
 CONFIG_THERMAL_QPNP=y
 CONFIG_THERMAL_QPNP_ADC_TM=y
 CONFIG_THERMAL_TSENS=y
-CONFIG_MSM_BCL_PERIPHERAL_CTL=y
-CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
+CONFIG_QTI_BCL_PMIC5=y
+CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_SYSCON=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_PROXY_CONSUMER=y
@@ -559,6 +576,7 @@
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_AVTIMER=y
 CONFIG_MSM_PM=y
+CONFIG_QCOM_DCC=y
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_MEM_SHARE_QMI_SERVICE=y
@@ -566,12 +584,14 @@
 CONFIG_WCNSS_CORE=y
 CONFIG_WCNSS_CORE_PRONTO=y
 CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
+CONFIG_BIG_CLUSTER_MIN_FREQ_ADJUST=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
 CONFIG_DEVFREQ_SIMPLE_DEV=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_SPDM_SCM=y
 CONFIG_DEVFREQ_SPDM=y
+CONFIG_IIO=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
 CONFIG_PWM_QTI_LPG=y
@@ -582,6 +602,11 @@
 CONFIG_MSM_TZ_LOG=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
@@ -604,21 +629,24 @@
 CONFIG_IPC_LOGGING=y
 CONFIG_CPU_FREQ_SWITCH_PROFILER=y
 CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
 CONFIG_CORESIGHT_REMOTE_ETM=y
 CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_DBGUI=y
 CONFIG_CORESIGHT_STM=y
 CONFIG_CORESIGHT_TPDA=y
 CONFIG_CORESIGHT_TPDM=y
 CONFIG_CORESIGHT_CTI=y
 CONFIG_CORESIGHT_EVENT=y
 CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_PFK=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_LSM_MMAP_MIN_ADDR=4096
 CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SMACK=y
-CONFIG_CRYPTO_CTR=y
 CONFIG_CRYPTO_XCBC=y
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_TWOFISH=y
diff --git a/arch/arm/configs/msm8953_defconfig b/arch/arm/configs/msm8953_defconfig
index 8299018..690faf2 100644
--- a/arch/arm/configs/msm8953_defconfig
+++ b/arch/arm/configs/msm8953_defconfig
@@ -18,6 +18,7 @@
 CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
 CONFIG_CGROUP_DEBUG=y
 CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_SCHEDTUNE=y
 CONFIG_RT_GROUP_SCHED=y
@@ -66,6 +67,7 @@
 CONFIG_PREEMPT=y
 CONFIG_AEABI=y
 CONFIG_HIGHMEM=y
+CONFIG_ARM_MODULE_PLTS=y
 CONFIG_CMA=y
 CONFIG_CMA_DEBUGFS=y
 CONFIG_ZSMALLOC=y
@@ -148,6 +150,7 @@
 CONFIG_NETFILTER_XT_TARGET_TRACE=y
 CONFIG_NETFILTER_XT_TARGET_SECMARK=y
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
 CONFIG_NETFILTER_XT_MATCH_COMMENT=y
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
@@ -312,6 +315,10 @@
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_TOUCHSCREEN_FT5X06=y
 CONFIG_TOUCHSCREEN_GEN_VKEYS=y
+CONFIG_TOUCHSCREEN_HIMAX_CHIPSET=y
+CONFIG_TOUCHSCREEN_HIMAX_I2C=y
+CONFIG_TOUCHSCREEN_HIMAX_DEBUG=y
+CONFIG_HMX_DB=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_HBTP_INPUT=y
 CONFIG_INPUT_QPNP_POWER_ON=y
@@ -347,10 +354,10 @@
 CONFIG_POWER_RESET=y
 CONFIG_POWER_RESET_QCOM=y
 CONFIG_QCOM_DLOAD_MODE=y
-CONFIG_POWER_RESET_SYSCON=y
 CONFIG_POWER_SUPPLY=y
 CONFIG_QPNP_FG=y
 CONFIG_SMB135X_CHARGER=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
 CONFIG_SMB1351_USB_CHARGER=y
 CONFIG_QPNP_SMB5=y
 CONFIG_QPNP_SMBCHARGER=y
@@ -359,12 +366,22 @@
 CONFIG_MSM_APM=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
 CONFIG_THERMAL_QPNP=y
 CONFIG_THERMAL_QPNP_ADC_TM=y
 CONFIG_THERMAL_TSENS=y
-CONFIG_MSM_BCL_PERIPHERAL_CTL=y
-CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
+CONFIG_QTI_BCL_PMIC5=y
+CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_SYSCON=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_PROXY_CONSUMER=y
@@ -545,14 +562,15 @@
 CONFIG_IOMMU_DEBUG=y
 CONFIG_IOMMU_DEBUG_TRACKING=y
 CONFIG_IOMMU_TESTS=y
+CONFIG_QCOM_CPUSS_DUMP=y
 CONFIG_QCOM_RUN_QUEUE_STATS=y
 CONFIG_MSM_SPM=y
 CONFIG_MSM_L2_SPM=y
 CONFIG_MSM_BOOT_STATS=y
 CONFIG_MSM_CORE_HANG_DETECT=y
-CONFIG_MSM_GLADIATOR_HANG_DETECT=y
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_MSM_DEBUG_LAR_UNLOCK=y
 CONFIG_MSM_RPM_SMD=y
 CONFIG_QCOM_BUS_SCALING=y
 CONFIG_QCOM_SECURE_BUFFER=y
@@ -575,6 +593,7 @@
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_AVTIMER=y
 CONFIG_MSM_PM=y
+CONFIG_QCOM_DCC=y
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_MEM_SHARE_QMI_SERVICE=y
@@ -582,12 +601,14 @@
 CONFIG_WCNSS_CORE=y
 CONFIG_WCNSS_CORE_PRONTO=y
 CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
+CONFIG_BIG_CLUSTER_MIN_FREQ_ADJUST=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
 CONFIG_DEVFREQ_SIMPLE_DEV=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_SPDM_SCM=y
 CONFIG_DEVFREQ_SPDM=y
+CONFIG_IIO=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
 CONFIG_PWM_QTI_LPG=y
@@ -598,6 +619,11 @@
 CONFIG_MSM_TZ_LOG=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
@@ -619,6 +645,8 @@
 CONFIG_DEBUG_PAGEALLOC=y
 CONFIG_SLUB_DEBUG_PANIC_ON=y
 CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
+CONFIG_PAGE_POISONING=y
+CONFIG_PAGE_POISONING_ENABLE_DEFAULT=y
 CONFIG_DEBUG_OBJECTS=y
 CONFIG_DEBUG_OBJECTS_FREE=y
 CONFIG_DEBUG_OBJECTS_TIMERS=y
@@ -660,24 +688,29 @@
 CONFIG_MEMTEST=y
 CONFIG_PANIC_ON_DATA_CORRUPTION=y
 CONFIG_DEBUG_USER=y
+CONFIG_FORCE_PAGES=y
 CONFIG_PID_IN_CONTEXTIDR=y
 CONFIG_DEBUG_SET_MODULE_RONX=y
 CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SOURCE_ETM4X=y
 CONFIG_CORESIGHT_REMOTE_ETM=y
 CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_DBGUI=y
 CONFIG_CORESIGHT_STM=y
 CONFIG_CORESIGHT_TPDA=y
 CONFIG_CORESIGHT_TPDM=y
 CONFIG_CORESIGHT_CTI=y
 CONFIG_CORESIGHT_EVENT=y
 CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_PFK=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_LSM_MMAP_MIN_ADDR=4096
 CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SMACK=y
-CONFIG_CRYPTO_CTR=y
 CONFIG_CRYPTO_XCBC=y
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_TWOFISH=y
diff --git a/arch/arm/configs/sdm670_defconfig b/arch/arm/configs/sdm670_defconfig
new file mode 100644
index 0000000..f7a8300
--- /dev/null
+++ b/arch/arm/configs/sdm670_defconfig
@@ -0,0 +1,609 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_FHANDLE is not set
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_SCHED_WALT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_ALL=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_BLK_CGROUP=y
+CONFIG_DEBUG_BLK_CGROUP=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_BPF=y
+CONFIG_SCHED_CORE_CTL=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_DEFAULT_USE_ENERGY_AWARE=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
+# CONFIG_AIO is not set
+# CONFIG_MEMBARRIER is not set
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_CC_STACKPROTECTOR_REGULAR=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_SDM670=y
+CONFIG_PCI_MSM=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=8
+CONFIG_ARM_PSCI=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+CONFIG_CLEANCACHE=y
+CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_SECCOMP=y
+CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPU_IDLE=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+CONFIG_KERNEL_MODE_NEON=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_DEBUG=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_L2TP=y
+CONFIG_L2TP_DEBUGFS=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_RMNET_DATA=y
+CONFIG_RMNET_DATA_FC=y
+CONFIG_RMNET_DATA_DEBUG_PKT=y
+CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+# CONFIG_CFG80211_CRDA_SUPPORT is not set
+CONFIG_RFKILL=y
+CONFIG_IPC_ROUTER=y
+CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
+CONFIG_DMA_CMA=y
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_MEMORY_STATE_TIME=y
+CONFIG_QPNP_MISC=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_REQ_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPPOL2TP=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_USB_USBNET=y
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_CLD_LL_CORE=y
+CONFIG_CNSS_GENL=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_MSM_GENI=y
+CONFIG_SERIAL_MSM_GENI_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+CONFIG_MSM_ADSPRPC=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_QCOM_GENI=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_QCOM_GENI=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SLIMBUS_MSM_NGD=y
+CONFIG_SPMI=y
+CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
+CONFIG_PINCTRL_SDM670=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_QPNP_FG_GEN3=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
+CONFIG_QPNP_SMB2=y
+CONFIG_QPNP_QNOVO=y
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_STEP_WISE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_QCOM_SPMI_TEMP_ALARM=y
+CONFIG_THERMAL_QPNP=y
+CONFIG_THERMAL_QPNP_ADC_TM=y
+CONFIG_THERMAL_TSENS=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_MFD_I2C_PMIC=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_SYSCON=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP_LCDB=y
+CONFIG_REGULATOR_QPNP_OLEDB=y
+CONFIG_REGULATOR_QPNP=y
+CONFIG_REGULATOR_RPMH=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_MSM_SDE_ROTATOR=y
+CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
+CONFIG_FB=y
+CONFIG_FB_VIRTUAL=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
+CONFIG_SND_SOC=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_PD_POLICY=y
+CONFIG_QPNP_USB_PDPHY=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_DUAL_ROLE_USB_INTF=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_MMC=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SDHCI_MSM_ICE=y
+CONFIG_MMC_CQ_HCI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_QPNP=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_WLED=y
+CONFIG_LEDS_QPNP_HAPTICS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_EDAC=y
+CONFIG_EDAC_MM_EDAC=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_QPNP=y
+CONFIG_DMADEVICES=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
+CONFIG_GSI=y
+CONFIG_IPA3=y
+CONFIG_RMNET_IPA3=y
+CONFIG_RNDIS_IPA=y
+CONFIG_IPA_UT=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_QPNP_COINCELL=y
+CONFIG_QPNP_REVID=y
+CONFIG_USB_BAM=y
+CONFIG_QCOM_GENI_SE=y
+CONFIG_MSM_GCC_SDM845=y
+CONFIG_MSM_VIDEOCC_SDM845=y
+CONFIG_MSM_CAMCC_SDM845=y
+CONFIG_MSM_DISPCC_SDM845=y
+CONFIG_CLOCK_QPNP_DIV=y
+CONFIG_MSM_CLK_RPMH=y
+CONFIG_CLOCK_CPU_OSM=y
+CONFIG_MSM_GPUCC_SDM845=y
+CONFIG_MSM_CLK_AOP_QMP=y
+CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_MSM_QMP=y
+CONFIG_ARM_SMMU=y
+CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_QCOM_CPUSS_DUMP=y
+CONFIG_QCOM_RUN_QUEUE_STATS=y
+CONFIG_QCOM_LLCC=y
+CONFIG_QCOM_SDM670_LLCC=y
+CONFIG_QCOM_LLCC_PERFMON=m
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_QCOM_SCM=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_MSM_CORE_HANG_DETECT=y
+CONFIG_MSM_GLADIATOR_HANG_DETECT=y
+CONFIG_MSM_GLADIATOR_ERP=y
+CONFIG_QCOM_EUD=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QPNP_PBS=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_QCOM_BUS_CONFIG_RPMH=y
+CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_MSM_SMEM=y
+CONFIG_MSM_GLINK=y
+CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
+CONFIG_MSM_GLINK_SPI_XPRT=y
+CONFIG_MSM_SPCOM=y
+CONFIG_MSM_SPSS_UTILS=y
+CONFIG_TRACER_PKT=y
+CONFIG_QTI_RPMH_API=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_GLINK_PKT=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_GLINK_COMM=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
+CONFIG_ICNSS=y
+CONFIG_ICNSS_DEBUG=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_MSM_PERFORMANCE=y
+CONFIG_MSM_CDSP_LOADER=y
+CONFIG_QCOM_SMCINVOKE=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_QBT1000=y
+CONFIG_QCOM_DCC_V2=y
+CONFIG_QTI_RPM_STATS_LOG=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_MSM_REMOTEQDSS=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_EXTCON_USB_GPIO=y
+CONFIG_IIO=y
+CONFIG_QCOM_RRADC=y
+CONFIG_PWM=y
+CONFIG_PWM_QPNP=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_SENSORS_SSC=y
+CONFIG_MSM_TZ_LOG=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_FUSE_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_PAGE_OWNER=y
+CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_SLUB_DEBUG_PANIC_ON=y
+CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_FREE=y
+CONFIG_DEBUG_OBJECTS_TIMERS=y
+CONFIG_DEBUG_OBJECTS_WORK=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
+CONFIG_SLUB_DEBUG_ON=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000
+CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_WQ_WATCHDOG=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_PANIC_ON_SCHED_BUG=y
+CONFIG_PANIC_ON_RT_THROTTLING=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_STACK_END_CHECK=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_LIST=y
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAIL_PAGE_ALLOC=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
+CONFIG_IPC_LOGGING=y
+CONFIG_QCOM_RTB=y
+CONFIG_QCOM_RTB_SEPARATE_CPUS=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_CPU_FREQ_SWITCH_PROFILER=y
+CONFIG_LKDTM=y
+CONFIG_MEMTEST=y
+CONFIG_PANIC_ON_DATA_CORRUPTION=y
+CONFIG_PID_IN_CONTEXTIDR=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_REMOTE_ETM=y
+CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_DUMMY=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_XZ_DEC=y
diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig
index 0e971d2..cab3796 100644
--- a/arch/arm/configs/sdxpoorwills-perf_defconfig
+++ b/arch/arm/configs/sdxpoorwills-perf_defconfig
@@ -32,7 +32,9 @@
 CONFIG_CMA=y
 CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
 CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_BOOST=y
 CONFIG_CPU_FREQ_MSM=y
 CONFIG_CPU_IDLE=y
 CONFIG_VFP=y
@@ -222,6 +224,7 @@
 CONFIG_INPUT_GPIO=m
 CONFIG_SERIO_LIBPS2=y
 # CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_MSM=y
 CONFIG_SERIAL_MSM_HS=y
 CONFIG_DIAG_CHAR=y
 CONFIG_HW_RANDOM=y
@@ -381,6 +384,9 @@
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_EXTCON_QCOM_SPMI_MISC=y
 CONFIG_IIO=y
 CONFIG_PWM=y
@@ -403,9 +409,11 @@
 CONFIG_PANIC_TIMEOUT=5
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_STACK_END_CHECK=y
 # CONFIG_DEBUG_PREEMPT is not set
 CONFIG_IPC_LOGGING=y
 CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
 CONFIG_CORESIGHT=y
 CONFIG_CORESIGHT_REMOTE_ETM=y
 CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
@@ -417,6 +425,7 @@
 CONFIG_CORESIGHT_HWEVENT=y
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 # CONFIG_SECURITY_SELINUX_AVC_STATS is not set
 CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig
index 52f3fda..bd429ee 100644
--- a/arch/arm/configs/sdxpoorwills_defconfig
+++ b/arch/arm/configs/sdxpoorwills_defconfig
@@ -34,7 +34,9 @@
 CONFIG_CMA=y
 CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
 CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_BOOST=y
 CONFIG_CPU_FREQ_MSM=y
 CONFIG_CPU_IDLE=y
 CONFIG_VFP=y
@@ -155,6 +157,8 @@
 CONFIG_RMNET_DATA=y
 CONFIG_RMNET_DATA_FC=y
 CONFIG_RMNET_DATA_DEBUG_PKT=y
+CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
 CONFIG_CFG80211=y
 CONFIG_CFG80211_DEBUGFS=y
 CONFIG_CFG80211_INTERNAL_REGDB=y
@@ -226,7 +230,6 @@
 CONFIG_SPI=y
 CONFIG_SPI_QUP=y
 CONFIG_SPI_SPIDEV=m
-CONFIG_SLIMBUS=y
 CONFIG_SPMI=y
 CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
 CONFIG_PTP_1588_CLOCK=y
@@ -295,6 +298,7 @@
 CONFIG_USB_STORAGE_CYPRESS_ATACB=y
 CONFIG_USB_DWC3=y
 CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
 CONFIG_NOP_USB_XCEIV=y
 CONFIG_USB_MSM_SSPHY_QMP=y
 CONFIG_MSM_HSUSB_PHY=y
@@ -386,6 +390,8 @@
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_EXTCON_QCOM_SPMI_MISC=y
 CONFIG_IIO=y
@@ -414,6 +420,7 @@
 CONFIG_PANIC_ON_RECURSIVE_FAULT=y
 CONFIG_PANIC_TIMEOUT=5
 CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_STACK_END_CHECK=y
 CONFIG_DEBUG_SPINLOCK=y
 CONFIG_DEBUG_MUTEXES=y
 CONFIG_DEBUG_ATOMIC_SLEEP=y
@@ -428,6 +435,7 @@
 CONFIG_PREEMPT_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_DEBUG_USER=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
 CONFIG_CORESIGHT=y
 CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
 CONFIG_CORESIGHT_SOURCE_ETM3X=y
@@ -444,10 +452,9 @@
 CONFIG_CORESIGHT_DUMMY=y
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 # CONFIG_SECURITY_SELINUX_AVC_STATS is not set
-CONFIG_CRYPTO_CMAC=y
-CONFIG_CRYPTO_SHA256=y
 CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
 CONFIG_CRYPTO_DEV_QCRYPTO=y
 CONFIG_CRYPTO_DEV_QCEDEV=y
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index a808829..e65712e 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -25,6 +25,7 @@
 #include <asm/cp15.h>
 
 #define ICC_EOIR1			__ACCESS_CP15(c12, 0, c12, 1)
+#define ICC_HPPIR1			__ACCESS_CP15(c12, 0, c12, 2)
 #define ICC_DIR				__ACCESS_CP15(c12, 0, c11, 1)
 #define ICC_IAR1			__ACCESS_CP15(c12, 0, c12, 0)
 #define ICC_SGI1R			__ACCESS_CP15_64(0, c12)
@@ -140,6 +141,7 @@
 CPUIF_MAP(ICH_AP1R0, ICH_AP1R0_EL2)
 CPUIF_MAP(ICC_HSRE, ICC_SRE_EL2)
 CPUIF_MAP(ICC_SRE, ICC_SRE_EL1)
+CPUIF_MAP(ICC_HPPIR1, ICC_HPPIR1_EL1)
 
 CPUIF_MAP_LO_HI(ICH_LR15, ICH_LRC15, ICH_LR15_EL2)
 CPUIF_MAP_LO_HI(ICH_LR14, ICH_LRC14, ICH_LR14_EL2)
@@ -184,6 +186,15 @@
 	return irqstat;
 }
 
+static inline u32 gic_read_hppir(void)
+{
+	u32 irqstat = read_sysreg(ICC_HPPIR1);
+
+	dsb(sy);
+
+	return irqstat;
+}
+
 static inline void gic_write_pmr(u32 val)
 {
 	write_sysreg(val, ICC_PMR);
@@ -242,6 +253,15 @@
 	writel_relaxed((u32)(val >> 32), addr + 4);
 }
 
+static inline u64 gic_read_irouter(const volatile void __iomem *addr)
+{
+	u64 val;
+
+	val = readl_relaxed(addr);
+	val |= (u64)readl_relaxed(addr + 4) << 32;
+	return val;
+}
+
 static inline u64 gic_read_typer(const volatile void __iomem *addr)
 {
 	u64 val;
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 12f99fd..3aed449 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -534,4 +534,14 @@
 #endif
 	.endm
 
+#ifdef CONFIG_KPROBES
+#define _ASM_NOKPROBE(entry)				\
+	.pushsection "_kprobe_blacklist", "aw" ;	\
+	.balign 4 ;					\
+	.long entry;					\
+	.popsection
+#else
+#define _ASM_NOKPROBE(entry)
+#endif
+
 #endif /* __ASM_ASSEMBLER_H__ */
diff --git a/arch/arm/include/asm/dma-contiguous.h b/arch/arm/include/asm/dma-contiguous.h
index 4f8e9e5..2dbb8c6 100644
--- a/arch/arm/include/asm/dma-contiguous.h
+++ b/arch/arm/include/asm/dma-contiguous.h
@@ -1,14 +1,25 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
 #ifndef ASMARM_DMA_CONTIGUOUS_H
 #define ASMARM_DMA_CONTIGUOUS_H
 
 #ifdef __KERNEL__
-#ifdef CONFIG_DMA_CMA
 
 #include <linux/types.h>
 
 void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
 
 #endif
-#endif
 
 #endif
diff --git a/arch/arm/include/asm/etmv4x.h b/arch/arm/include/asm/etmv4x.h
new file mode 100644
index 0000000..7ad0a92
--- /dev/null
+++ b/arch/arm/include/asm/etmv4x.h
@@ -0,0 +1,387 @@
+/* Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ETMV4X_H
+#define __ASM_ETMV4X_H
+
+#include <linux/types.h>
+
+
+/* 32 bit register read for AArch32 */
+#define trc_readl(reg)			RSYSL_##reg()
+#define trc_readq(reg)			RSYSL_##reg()
+
+/* 32 bit register write for AArch32 */
+#define trc_write(val, reg)		WSYS_##reg(val)
+
+#define MRC(op0, op1, crn, crm, op2)					    \
+({									    \
+uint32_t val;								    \
+asm volatile("mrc p"#op0", "#op1", %0, "#crn", "#crm", "#op2 : "=r" (val)); \
+val;									    \
+})
+
+#define MCR(val, op0, op1, crn, crm, op2)				    \
+({									    \
+asm volatile("mcr p"#op0", "#op1", %0, "#crn", "#crm", "#op2 : : "r" (val));\
+})
+
+/* Clock and Power Management Register */
+#define RSYSL_CPMR_EL1()		MRC(15, 7, c15, c0, 5)
+#define WSYS_CPMR_EL1(val)		MCR(val, 15, 7, c15, c0, 5)
+
+/*
+ * ETMv4 Registers
+ *
+ * Read only
+ * ETMAUTHSTATUS, ETMDEVARCH, ETMDEVID, ETMIDRn[0-13], ETMOSLSR, ETMSTATR
+ *
+ * Write only
+ * ETMOSLAR
+ */
+/* 32 bit registers */
+#define RSYSL_ETMAUTHSTATUS()		MRC(14, 1, c7, c14, 6)
+#define RSYSL_ETMAUXCTLR()		MRC(14, 1, c0, c6, 0)
+#define RSYSL_ETMCCCTLR()		MRC(14, 1, c0, c14, 0)
+#define RSYSL_ETMCIDCCTLR0()		MRC(14, 1, c3, c0, 2)
+#define RSYSL_ETMCNTCTLR0()		MRC(14, 1, c0, c4, 5)
+#define RSYSL_ETMCNTCTLR1()		MRC(14, 1, c0, c5, 5)
+#define RSYSL_ETMCNTCTLR2()		MRC(14, 1, c0, c6, 5)
+#define RSYSL_ETMCNTCTLR3()		MRC(14, 1, c0, c7, 5)
+#define RSYSL_ETMCNTRLDVR0()		MRC(14, 1, c0, c0, 5)
+#define RSYSL_ETMCNTRLDVR1()		MRC(14, 1, c0, c1, 5)
+#define RSYSL_ETMCNTRLDVR2()		MRC(14, 1, c0, c2, 5)
+#define RSYSL_ETMCNTRLDVR3()		MRC(14, 1, c0, c3, 5)
+#define RSYSL_ETMCNTVR0()		MRC(14, 1, c0, c8, 5)
+#define RSYSL_ETMCNTVR1()		MRC(14, 1, c0, c9, 5)
+#define RSYSL_ETMCNTVR2()		MRC(14, 1, c0, c10, 5)
+#define RSYSL_ETMCNTVR3()		MRC(14, 1, c0, c11, 5)
+#define RSYSL_ETMCONFIGR()		MRC(14, 1, c0, c4, 0)
+#define RSYSL_ETMDEVARCH()		MRC(14, 1, c7, c15, 6)
+#define RSYSL_ETMDEVID()		MRC(14, 1, c7, c2, 7)
+#define RSYSL_ETMEVENTCTL0R()		MRC(14, 1, c0, c8, 0)
+#define RSYSL_ETMEVENTCTL1R()		MRC(14, 1, c0, c9, 0)
+#define RSYSL_ETMEXTINSELR()		MRC(14, 1, c0, c8, 4)
+#define RSYSL_ETMIDR0()			MRC(14, 1, c0, c8, 7)
+#define RSYSL_ETMIDR1()			MRC(14, 1, c0, c9, 7)
+#define RSYSL_ETMIDR10()		MRC(14, 1, c0, c2, 6)
+#define RSYSL_ETMIDR11()		MRC(14, 1, c0, c3, 6)
+#define RSYSL_ETMIDR12()		MRC(14, 1, c0, c4, 6)
+#define RSYSL_ETMIDR13()		MRC(14, 1, c0, c5, 6)
+#define RSYSL_ETMIDR2()			MRC(14, 1, c0, c10, 7)
+#define RSYSL_ETMIDR3()			MRC(14, 1, c0, c11, 7)
+#define RSYSL_ETMIDR4()			MRC(14, 1, c0, c12, 7)
+#define RSYSL_ETMIDR5()			MRC(14, 1, c0, c13, 7)
+#define RSYSL_ETMIDR6()			MRC(14, 1, c0, c14, 7)
+#define RSYSL_ETMIDR7()			MRC(14, 1, c0, c15, 7)
+#define RSYSL_ETMIDR8()			MRC(14, 1, c0, c0, 6)
+#define RSYSL_ETMIDR9()			MRC(14, 1, c0, c1, 6)
+#define RSYSL_ETMIMSPEC0()		MRC(14, 1, c0, c0, 7)
+#define RSYSL_ETMOSLSR()		MRC(14, 1, c1, c1, 4)
+#define RSYSL_ETMPRGCTLR()		MRC(14, 1, c0, c1, 0)
+#define RSYSL_ETMRSCTLR10()		MRC(14, 1, c1, c10, 0)
+#define RSYSL_ETMRSCTLR11()		MRC(14, 1, c1, c11, 0)
+#define RSYSL_ETMRSCTLR12()		MRC(14, 1, c1, c12, 0)
+#define RSYSL_ETMRSCTLR13()		MRC(14, 1, c1, c13, 0)
+#define RSYSL_ETMRSCTLR14()		MRC(14, 1, c1, c14, 0)
+#define RSYSL_ETMRSCTLR15()		MRC(14, 1, c1, c15, 0)
+#define RSYSL_ETMRSCTLR2()		MRC(14, 1, c1, c2, 0)
+#define RSYSL_ETMRSCTLR3()		MRC(14, 1, c1, c3, 0)
+#define RSYSL_ETMRSCTLR4()		MRC(14, 1, c1, c4, 0)
+#define RSYSL_ETMRSCTLR5()		MRC(14, 1, c1, c5, 0)
+#define RSYSL_ETMRSCTLR6()		MRC(14, 1, c1, c6, 0)
+#define RSYSL_ETMRSCTLR7()		MRC(14, 1, c1, c7, 0)
+#define RSYSL_ETMRSCTLR8()		MRC(14, 1, c1, c8, 0)
+#define RSYSL_ETMRSCTLR9()		MRC(14, 1, c1, c9, 0)
+#define RSYSL_ETMRSCTLR16()		MRC(14, 1, c1, c0, 1)
+#define RSYSL_ETMRSCTLR17()		MRC(14, 1, c1, c1, 1)
+#define RSYSL_ETMRSCTLR18()		MRC(14, 1, c1, c2, 1)
+#define RSYSL_ETMRSCTLR19()		MRC(14, 1, c1, c3, 1)
+#define RSYSL_ETMRSCTLR20()		MRC(14, 1, c1, c4, 1)
+#define RSYSL_ETMRSCTLR21()		MRC(14, 1, c1, c5, 1)
+#define RSYSL_ETMRSCTLR22()		MRC(14, 1, c1, c6, 1)
+#define RSYSL_ETMRSCTLR23()		MRC(14, 1, c1, c7, 1)
+#define RSYSL_ETMRSCTLR24()		MRC(14, 1, c1, c8, 1)
+#define RSYSL_ETMRSCTLR25()		MRC(14, 1, c1, c9, 1)
+#define RSYSL_ETMRSCTLR26()		MRC(14, 1, c1, c10, 1)
+#define RSYSL_ETMRSCTLR27()		MRC(14, 1, c1, c11, 1)
+#define RSYSL_ETMRSCTLR28()		MRC(14, 1, c1, c12, 1)
+#define RSYSL_ETMRSCTLR29()		MRC(14, 1, c1, c13, 1)
+#define RSYSL_ETMRSCTLR30()		MRC(14, 1, c1, c14, 1)
+#define RSYSL_ETMRSCTLR31()		MRC(14, 1, c1, c15, 1)
+#define RSYSL_ETMSEQEVR0()		MRC(14, 1, c0, c0, 4)
+#define RSYSL_ETMSEQEVR1()		MRC(14, 1, c0, c1, 4)
+#define RSYSL_ETMSEQEVR2()		MRC(14, 1, c0, c2, 4)
+#define RSYSL_ETMSEQRSTEVR()		MRC(14, 1, c0, c6, 4)
+#define RSYSL_ETMSEQSTR()		MRC(14, 1, c0, c7, 4)
+#define RSYSL_ETMSTALLCTLR()		MRC(14, 1, c0, c11, 0)
+#define RSYSL_ETMSTATR()		MRC(14, 1, c0, c3, 0)
+#define RSYSL_ETMSYNCPR()		MRC(14, 1, c0, c13, 0)
+#define RSYSL_ETMTRACEIDR()		MRC(14, 1, c0, c0, 1)
+#define RSYSL_ETMTSCTLR()		MRC(14, 1, c0, c12, 0)
+#define RSYSL_ETMVICTLR()		MRC(14, 1, c0, c0, 2)
+#define RSYSL_ETMVIIECTLR()		MRC(14, 1, c0, c1, 2)
+#define RSYSL_ETMVISSCTLR()		MRC(14, 1, c0, c2, 2)
+#define RSYSL_ETMSSCCR0()		MRC(14, 1, c1, c0, 2)
+#define RSYSL_ETMSSCCR1()		MRC(14, 1, c1, c1, 2)
+#define RSYSL_ETMSSCCR2()		MRC(14, 1, c1, c2, 2)
+#define RSYSL_ETMSSCCR3()		MRC(14, 1, c1, c3, 2)
+#define RSYSL_ETMSSCCR4()		MRC(14, 1, c1, c4, 2)
+#define RSYSL_ETMSSCCR5()		MRC(14, 1, c1, c5, 2)
+#define RSYSL_ETMSSCCR6()		MRC(14, 1, c1, c6, 2)
+#define RSYSL_ETMSSCCR7()		MRC(14, 1, c1, c7, 2)
+#define RSYSL_ETMSSCSR0()		MRC(14, 1, c1, c8, 2)
+#define RSYSL_ETMSSCSR1()		MRC(14, 1, c1, c9, 2)
+#define RSYSL_ETMSSCSR2()		MRC(14, 1, c1, c10, 2)
+#define RSYSL_ETMSSCSR3()		MRC(14, 1, c1, c11, 2)
+#define RSYSL_ETMSSCSR4()		MRC(14, 1, c1, c12, 2)
+#define RSYSL_ETMSSCSR5()		MRC(14, 1, c1, c13, 2)
+#define RSYSL_ETMSSCSR6()		MRC(14, 1, c1, c14, 2)
+#define RSYSL_ETMSSCSR7()		MRC(14, 1, c1, c15, 2)
+#define RSYSL_ETMSSPCICR0()		MRC(14, 1, c1, c0, 3)
+#define RSYSL_ETMSSPCICR1()		MRC(14, 1, c1, c1, 3)
+#define RSYSL_ETMSSPCICR2()		MRC(14, 1, c1, c2, 3)
+#define RSYSL_ETMSSPCICR3()		MRC(14, 1, c1, c3, 3)
+#define RSYSL_ETMSSPCICR4()		MRC(14, 1, c1, c4, 3)
+#define RSYSL_ETMSSPCICR5()		MRC(14, 1, c1, c5, 3)
+#define RSYSL_ETMSSPCICR6()		MRC(14, 1, c1, c6, 3)
+#define RSYSL_ETMSSPCICR7()		MRC(14, 1, c1, c7, 3)
+
+/*
+ * 64-bit registers; ignore the upper 32 bits.
+ * A read from a 32-bit register location using a 64-bit access results
+ * in the upper 32 bits being returned as RES0.
+ */
+#define RSYSL_ETMACATR0()		MRC(14, 1, c2, c0, 2)
+#define RSYSL_ETMACATR1()		MRC(14, 1, c2, c2, 2)
+#define RSYSL_ETMACATR2()		MRC(14, 1, c2, c4, 2)
+#define RSYSL_ETMACATR3()		MRC(14, 1, c2, c6, 2)
+#define RSYSL_ETMACATR4()		MRC(14, 1, c2, c8, 2)
+#define RSYSL_ETMACATR5()		MRC(14, 1, c2, c10, 2)
+#define RSYSL_ETMACATR6()		MRC(14, 1, c2, c12, 2)
+#define RSYSL_ETMACATR7()		MRC(14, 1, c2, c14, 2)
+#define RSYSL_ETMACATR8()		MRC(14, 1, c2, c0, 3)
+#define RSYSL_ETMACATR9()		MRC(14, 1, c2, c2, 3)
+#define RSYSL_ETMACATR10()		MRC(14, 1, c2, c4, 3)
+#define RSYSL_ETMACATR11()		MRC(14, 1, c2, c6, 3)
+#define RSYSL_ETMACATR12()		MRC(14, 1, c2, c8, 3)
+#define RSYSL_ETMACATR13()		MRC(14, 1, c2, c10, 3)
+#define RSYSL_ETMACATR14()		MRC(14, 1, c2, c12, 3)
+#define RSYSL_ETMACATR15()		MRC(14, 1, c2, c14, 3)
+#define RSYSL_ETMCIDCVR0()		MRC(14, 1, c3, c0, 0)
+#define RSYSL_ETMCIDCVR1()		MRC(14, 1, c3, c2, 0)
+#define RSYSL_ETMCIDCVR2()		MRC(14, 1, c3, c4, 0)
+#define RSYSL_ETMCIDCVR3()		MRC(14, 1, c3, c6, 0)
+#define RSYSL_ETMCIDCVR4()		MRC(14, 1, c3, c8, 0)
+#define RSYSL_ETMCIDCVR5()		MRC(14, 1, c3, c10, 0)
+#define RSYSL_ETMCIDCVR6()		MRC(14, 1, c3, c12, 0)
+#define RSYSL_ETMCIDCVR7()		MRC(14, 1, c3, c14, 0)
+#define RSYSL_ETMACVR0()		MRC(14, 1, c2, c0, 0)
+#define RSYSL_ETMACVR1()		MRC(14, 1, c2, c2, 0)
+#define RSYSL_ETMACVR2()		MRC(14, 1, c2, c4, 0)
+#define RSYSL_ETMACVR3()		MRC(14, 1, c2, c6, 0)
+#define RSYSL_ETMACVR4()		MRC(14, 1, c2, c8, 0)
+#define RSYSL_ETMACVR5()		MRC(14, 1, c2, c10, 0)
+#define RSYSL_ETMACVR6()		MRC(14, 1, c2, c12, 0)
+#define RSYSL_ETMACVR7()		MRC(14, 1, c2, c14, 0)
+#define RSYSL_ETMACVR8()		MRC(14, 1, c2, c0, 1)
+#define RSYSL_ETMACVR9()		MRC(14, 1, c2, c2, 1)
+#define RSYSL_ETMACVR10()		MRC(14, 1, c2, c4, 1)
+#define RSYSL_ETMACVR11()		MRC(14, 1, c2, c6, 1)
+#define RSYSL_ETMACVR12()		MRC(14, 1, c2, c8, 1)
+#define RSYSL_ETMACVR13()		MRC(14, 1, c2, c10, 1)
+#define RSYSL_ETMACVR14()		MRC(14, 1, c2, c12, 1)
+#define RSYSL_ETMACVR15()		MRC(14, 1, c2, c14, 1)
+#define RSYSL_ETMVMIDCVR0()		MRC(14, 1, c3, c0, 1)
+#define RSYSL_ETMVMIDCVR1()		MRC(14, 1, c3, c2, 1)
+#define RSYSL_ETMVMIDCVR2()		MRC(14, 1, c3, c4, 1)
+#define RSYSL_ETMVMIDCVR3()		MRC(14, 1, c3, c6, 1)
+#define RSYSL_ETMVMIDCVR4()		MRC(14, 1, c3, c8, 1)
+#define RSYSL_ETMVMIDCVR5()		MRC(14, 1, c3, c10, 1)
+#define RSYSL_ETMVMIDCVR6()		MRC(14, 1, c3, c12, 1)
+#define RSYSL_ETMVMIDCVR7()		MRC(14, 1, c3, c14, 1)
+#define RSYSL_ETMDVCVR0()		MRC(14, 1, c2, c0, 4)
+#define RSYSL_ETMDVCVR1()		MRC(14, 1, c2, c4, 4)
+#define RSYSL_ETMDVCVR2()		MRC(14, 1, c2, c8, 4)
+#define RSYSL_ETMDVCVR3()		MRC(14, 1, c2, c12, 4)
+#define RSYSL_ETMDVCVR4()		MRC(14, 1, c2, c0, 5)
+#define RSYSL_ETMDVCVR5()		MRC(14, 1, c2, c4, 5)
+#define RSYSL_ETMDVCVR6()		MRC(14, 1, c2, c8, 5)
+#define RSYSL_ETMDVCVR7()		MRC(14, 1, c2, c12, 5)
+#define RSYSL_ETMDVCMR0()		MRC(14, 1, c2, c0, 6)
+#define RSYSL_ETMDVCMR1()		MRC(14, 1, c2, c4, 6)
+#define RSYSL_ETMDVCMR2()		MRC(14, 1, c2, c8, 6)
+#define RSYSL_ETMDVCMR3()		MRC(14, 1, c2, c12, 6)
+#define RSYSL_ETMDVCMR4()		MRC(14, 1, c2, c0, 7)
+#define RSYSL_ETMDVCMR5()		MRC(14, 1, c2, c4, 7)
+#define RSYSL_ETMDVCMR6()		MRC(14, 1, c2, c8, 7)
+#define RSYSL_ETMDVCMR7()		MRC(14, 1, c2, c12, 7)
+
+/*
+ * 32-bit and 64-bit registers.
+ * A write to a 32-bit register location using a 64-bit access results
+ * in the upper 32 bits of the access being ignored.
+ */
+#define WSYS_ETMAUXCTLR(val)		MCR(val, 14, 1, c0, c6, 0)
+#define WSYS_ETMACATR0(val)		MCR(val, 14, 1, c2, c0, 2)
+#define WSYS_ETMACATR1(val)		MCR(val, 14, 1, c2, c2, 2)
+#define WSYS_ETMACATR2(val)		MCR(val, 14, 1, c2, c4, 2)
+#define WSYS_ETMACATR3(val)		MCR(val, 14, 1, c2, c6, 2)
+#define WSYS_ETMACATR4(val)		MCR(val, 14, 1, c2, c8, 2)
+#define WSYS_ETMACATR5(val)		MCR(val, 14, 1, c2, c10, 2)
+#define WSYS_ETMACATR6(val)		MCR(val, 14, 1, c2, c12, 2)
+#define WSYS_ETMACATR7(val)		MCR(val, 14, 1, c2, c14, 2)
+#define WSYS_ETMACATR8(val)		MCR(val, 14, 1, c2, c0, 3)
+#define WSYS_ETMACATR9(val)		MCR(val, 14, 1, c2, c2, 3)
+#define WSYS_ETMACATR10(val)		MCR(val, 14, 1, c2, c4, 3)
+#define WSYS_ETMACATR11(val)		MCR(val, 14, 1, c2, c6, 3)
+#define WSYS_ETMACATR12(val)		MCR(val, 14, 1, c2, c8, 3)
+#define WSYS_ETMACATR13(val)		MCR(val, 14, 1, c2, c10, 3)
+#define WSYS_ETMACATR14(val)		MCR(val, 14, 1, c2, c12, 3)
+#define WSYS_ETMACATR15(val)		MCR(val, 14, 1, c2, c14, 3)
+#define WSYS_ETMACVR0(val)		MCR(val, 14, 1, c2, c0, 0)
+#define WSYS_ETMACVR1(val)		MCR(val, 14, 1, c2, c2, 0)
+#define WSYS_ETMACVR2(val)		MCR(val, 14, 1, c2, c4, 0)
+#define WSYS_ETMACVR3(val)		MCR(val, 14, 1, c2, c6, 0)
+#define WSYS_ETMACVR4(val)		MCR(val, 14, 1, c2, c8, 0)
+#define WSYS_ETMACVR5(val)		MCR(val, 14, 1, c2, c10, 0)
+#define WSYS_ETMACVR6(val)		MCR(val, 14, 1, c2, c12, 0)
+#define WSYS_ETMACVR7(val)		MCR(val, 14, 1, c2, c14, 0)
+#define WSYS_ETMACVR8(val)		MCR(val, 14, 1, c2, c0, 1)
+#define WSYS_ETMACVR9(val)		MCR(val, 14, 1, c2, c2, 1)
+#define WSYS_ETMACVR10(val)		MCR(val, 14, 1, c2, c4, 1)
+#define WSYS_ETMACVR11(val)		MCR(val, 14, 1, c2, c6, 1)
+#define WSYS_ETMACVR12(val)		MCR(val, 14, 1, c2, c8, 1)
+#define WSYS_ETMACVR13(val)		MCR(val, 14, 1, c2, c10, 1)
+#define WSYS_ETMACVR14(val)		MCR(val, 14, 1, c2, c12, 1)
+#define WSYS_ETMACVR15(val)		MCR(val, 14, 1, c2, c14, 1)
+#define WSYS_ETMCCCTLR(val)		MCR(val, 14, 1, c0, c14, 0)
+#define WSYS_ETMCIDCCTLR0(val)		MCR(val, 14, 1, c3, c0, 2)
+#define WSYS_ETMCIDCVR0(val)		MCR(val, 14, 1, c3, c0, 0)
+#define WSYS_ETMCIDCVR1(val)		MCR(val, 14, 1, c3, c2, 0)
+#define WSYS_ETMCIDCVR2(val)		MCR(val, 14, 1, c3, c4, 0)
+#define WSYS_ETMCIDCVR3(val)		MCR(val, 14, 1, c3, c6, 0)
+#define WSYS_ETMCIDCVR4(val)		MCR(val, 14, 1, c3, c8, 0)
+#define WSYS_ETMCIDCVR5(val)		MCR(val, 14, 1, c3, c10, 0)
+#define WSYS_ETMCIDCVR6(val)		MCR(val, 14, 1, c3, c12, 0)
+#define WSYS_ETMCIDCVR7(val)		MCR(val, 14, 1, c3, c14, 0)
+#define WSYS_ETMCNTCTLR0(val)		MCR(val, 14, 1, c0, c4, 5)
+#define WSYS_ETMCNTCTLR1(val)		MCR(val, 14, 1, c0, c5, 5)
+#define WSYS_ETMCNTCTLR2(val)		MCR(val, 14, 1, c0, c6, 5)
+#define WSYS_ETMCNTCTLR3(val)		MCR(val, 14, 1, c0, c7, 5)
+#define WSYS_ETMCNTRLDVR0(val)		MCR(val, 14, 1, c0, c0, 5)
+#define WSYS_ETMCNTRLDVR1(val)		MCR(val, 14, 1, c0, c1, 5)
+#define WSYS_ETMCNTRLDVR2(val)		MCR(val, 14, 1, c0, c2, 5)
+#define WSYS_ETMCNTRLDVR3(val)		MCR(val, 14, 1, c0, c3, 5)
+#define WSYS_ETMCNTVR0(val)		MCR(val, 14, 1, c0, c8, 5)
+#define WSYS_ETMCNTVR1(val)		MCR(val, 14, 1, c0, c9, 5)
+#define WSYS_ETMCNTVR2(val)		MCR(val, 14, 1, c0, c10, 5)
+#define WSYS_ETMCNTVR3(val)		MCR(val, 14, 1, c0, c11, 5)
+#define WSYS_ETMCONFIGR(val)		MCR(val, 14, 1, c0, c4, 0)
+#define WSYS_ETMEVENTCTL0R(val)		MCR(val, 14, 1, c0, c8, 0)
+#define WSYS_ETMEVENTCTL1R(val)		MCR(val, 14, 1, c0, c9, 0)
+#define WSYS_ETMEXTINSELR(val)		MCR(val, 14, 1, c0, c8, 4)
+#define WSYS_ETMIMSPEC0(val)		MCR(val, 14, 1, c0, c0, 7)
+#define WSYS_ETMOSLAR(val)		MCR(val, 14, 1, c1, c0, 4)
+#define WSYS_ETMPRGCTLR(val)		MCR(val, 14, 1, c0, c1, 0)
+#define WSYS_ETMRSCTLR10(val)		MCR(val, 14, 1, c1, c10, 0)
+#define WSYS_ETMRSCTLR11(val)		MCR(val, 14, 1, c1, c11, 0)
+#define WSYS_ETMRSCTLR12(val)		MCR(val, 14, 1, c1, c12, 0)
+#define WSYS_ETMRSCTLR13(val)		MCR(val, 14, 1, c1, c13, 0)
+#define WSYS_ETMRSCTLR14(val)		MCR(val, 14, 1, c1, c14, 0)
+#define WSYS_ETMRSCTLR15(val)		MCR(val, 14, 1, c1, c15, 0)
+#define WSYS_ETMRSCTLR2(val)		MCR(val, 14, 1, c1, c2, 0)
+#define WSYS_ETMRSCTLR3(val)		MCR(val, 14, 1, c1, c3, 0)
+#define WSYS_ETMRSCTLR4(val)		MCR(val, 14, 1, c1, c4, 0)
+#define WSYS_ETMRSCTLR5(val)		MCR(val, 14, 1, c1, c5, 0)
+#define WSYS_ETMRSCTLR6(val)		MCR(val, 14, 1, c1, c6, 0)
+#define WSYS_ETMRSCTLR7(val)		MCR(val, 14, 1, c1, c7, 0)
+#define WSYS_ETMRSCTLR8(val)		MCR(val, 14, 1, c1, c8, 0)
+#define WSYS_ETMRSCTLR9(val)		MCR(val, 14, 1, c1, c9, 0)
+#define WSYS_ETMRSCTLR16(val)		MCR(val, 14, 1, c1, c0, 1)
+#define WSYS_ETMRSCTLR17(val)		MCR(val, 14, 1, c1, c1, 1)
+#define WSYS_ETMRSCTLR18(val)		MCR(val, 14, 1, c1, c2, 1)
+#define WSYS_ETMRSCTLR19(val)		MCR(val, 14, 1, c1, c3, 1)
+#define WSYS_ETMRSCTLR20(val)		MCR(val, 14, 1, c1, c4, 1)
+#define WSYS_ETMRSCTLR21(val)		MCR(val, 14, 1, c1, c5, 1)
+#define WSYS_ETMRSCTLR22(val)		MCR(val, 14, 1, c1, c6, 1)
+#define WSYS_ETMRSCTLR23(val)		MCR(val, 14, 1, c1, c7, 1)
+#define WSYS_ETMRSCTLR24(val)		MCR(val, 14, 1, c1, c8, 1)
+#define WSYS_ETMRSCTLR25(val)		MCR(val, 14, 1, c1, c9, 1)
+#define WSYS_ETMRSCTLR26(val)		MCR(val, 14, 1, c1, c10, 1)
+#define WSYS_ETMRSCTLR27(val)		MCR(val, 14, 1, c1, c11, 1)
+#define WSYS_ETMRSCTLR28(val)		MCR(val, 14, 1, c1, c12, 1)
+#define WSYS_ETMRSCTLR29(val)		MCR(val, 14, 1, c1, c13, 1)
+#define WSYS_ETMRSCTLR30(val)		MCR(val, 14, 1, c1, c14, 1)
+#define WSYS_ETMRSCTLR31(val)		MCR(val, 14, 1, c1, c15, 1)
+#define WSYS_ETMSEQEVR0(val)		MCR(val, 14, 1, c0, c0, 4)
+#define WSYS_ETMSEQEVR1(val)		MCR(val, 14, 1, c0, c1, 4)
+#define WSYS_ETMSEQEVR2(val)		MCR(val, 14, 1, c0, c2, 4)
+#define WSYS_ETMSEQRSTEVR(val)		MCR(val, 14, 1, c0, c6, 4)
+#define WSYS_ETMSEQSTR(val)		MCR(val, 14, 1, c0, c7, 4)
+#define WSYS_ETMSTALLCTLR(val)		MCR(val, 14, 1, c0, c11, 0)
+#define WSYS_ETMSYNCPR(val)		MCR(val, 14, 1, c0, c13, 0)
+#define WSYS_ETMTRACEIDR(val)		MCR(val, 14, 1, c0, c0, 1)
+#define WSYS_ETMTSCTLR(val)		MCR(val, 14, 1, c0, c12, 0)
+#define WSYS_ETMVICTLR(val)		MCR(val, 14, 1, c0, c0, 2)
+#define WSYS_ETMVIIECTLR(val)		MCR(val, 14, 1, c0, c1, 2)
+#define WSYS_ETMVISSCTLR(val)		MCR(val, 14, 1, c0, c2, 2)
+#define WSYS_ETMVMIDCVR0(val)		MCR(val, 14, 1, c3, c0, 1)
+#define WSYS_ETMVMIDCVR1(val)		MCR(val, 14, 1, c3, c2, 1)
+#define WSYS_ETMVMIDCVR2(val)		MCR(val, 14, 1, c3, c4, 1)
+#define WSYS_ETMVMIDCVR3(val)		MCR(val, 14, 1, c3, c6, 1)
+#define WSYS_ETMVMIDCVR4(val)		MCR(val, 14, 1, c3, c8, 1)
+#define WSYS_ETMVMIDCVR5(val)		MCR(val, 14, 1, c3, c10, 1)
+#define WSYS_ETMVMIDCVR6(val)		MCR(val, 14, 1, c3, c12, 1)
+#define WSYS_ETMVMIDCVR7(val)		MCR(val, 14, 1, c3, c14, 1)
+#define WSYS_ETMDVCVR0(val)		MCR(val, 14, 1, c2, c0, 4)
+#define WSYS_ETMDVCVR1(val)		MCR(val, 14, 1, c2, c4, 4)
+#define WSYS_ETMDVCVR2(val)		MCR(val, 14, 1, c2, c8, 4)
+#define WSYS_ETMDVCVR3(val)		MCR(val, 14, 1, c2, c12, 4)
+#define WSYS_ETMDVCVR4(val)		MCR(val, 14, 1, c2, c0, 5)
+#define WSYS_ETMDVCVR5(val)		MCR(val, 14, 1, c2, c4, 5)
+#define WSYS_ETMDVCVR6(val)		MCR(val, 14, 1, c2, c8, 5)
+#define WSYS_ETMDVCVR7(val)		MCR(val, 14, 1, c2, c12, 5)
+#define WSYS_ETMDVCMR0(val)		MCR(val, 14, 1, c2, c0, 6)
+#define WSYS_ETMDVCMR1(val)		MCR(val, 14, 1, c2, c4, 6)
+#define WSYS_ETMDVCMR2(val)		MCR(val, 14, 1, c2, c8, 6)
+#define WSYS_ETMDVCMR3(val)		MCR(val, 14, 1, c2, c12, 6)
+#define WSYS_ETMDVCMR4(val)		MCR(val, 14, 1, c2, c0, 7)
+#define WSYS_ETMDVCMR5(val)		MCR(val, 14, 1, c2, c4, 7)
+#define WSYS_ETMDVCMR6(val)		MCR(val, 14, 1, c2, c8, 7)
+#define WSYS_ETMDVCMR7(val)		MCR(val, 14, 1, c2, c12, 7)
+#define WSYS_ETMSSCCR0(val)		MCR(val, 14, 1, c1, c0, 2)
+#define WSYS_ETMSSCCR1(val)		MCR(val, 14, 1, c1, c1, 2)
+#define WSYS_ETMSSCCR2(val)		MCR(val, 14, 1, c1, c2, 2)
+#define WSYS_ETMSSCCR3(val)		MCR(val, 14, 1, c1, c3, 2)
+#define WSYS_ETMSSCCR4(val)		MCR(val, 14, 1, c1, c4, 2)
+#define WSYS_ETMSSCCR5(val)		MCR(val, 14, 1, c1, c5, 2)
+#define WSYS_ETMSSCCR6(val)		MCR(val, 14, 1, c1, c6, 2)
+#define WSYS_ETMSSCCR7(val)		MCR(val, 14, 1, c1, c7, 2)
+#define WSYS_ETMSSCSR0(val)		MCR(val, 14, 1, c1, c8, 2)
+#define WSYS_ETMSSCSR1(val)		MCR(val, 14, 1, c1, c9, 2)
+#define WSYS_ETMSSCSR2(val)		MCR(val, 14, 1, c1, c10, 2)
+#define WSYS_ETMSSCSR3(val)		MCR(val, 14, 1, c1, c11, 2)
+#define WSYS_ETMSSCSR4(val)		MCR(val, 14, 1, c1, c12, 2)
+#define WSYS_ETMSSCSR5(val)		MCR(val, 14, 1, c1, c13, 2)
+#define WSYS_ETMSSCSR6(val)		MCR(val, 14, 1, c1, c14, 2)
+#define WSYS_ETMSSCSR7(val)		MCR(val, 14, 1, c1, c15, 2)
+#define WSYS_ETMSSPCICR0(val)		MCR(val, 14, 1, c1, c0, 3)
+#define WSYS_ETMSSPCICR1(val)		MCR(val, 14, 1, c1, c1, 3)
+#define WSYS_ETMSSPCICR2(val)		MCR(val, 14, 1, c1, c2, 3)
+#define WSYS_ETMSSPCICR3(val)		MCR(val, 14, 1, c1, c3, 3)
+#define WSYS_ETMSSPCICR4(val)		MCR(val, 14, 1, c1, c4, 3)
+#define WSYS_ETMSSPCICR5(val)		MCR(val, 14, 1, c1, c5, 3)
+#define WSYS_ETMSSPCICR6(val)		MCR(val, 14, 1, c1, c6, 3)
+#define WSYS_ETMSSPCICR7(val)		MCR(val, 14, 1, c1, c7, 3)
+
+#endif
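
(Illustrative sketch only, not part of the patch: one way the accessors above might be combined. The helper name and the programming sequence are assumptions for the example; the MRC()/MCR() wrappers these macros expand to are defined earlier in this header, and real code would also poll RSYSL_ETMSTATR() for the idle bit before reprogramming.)

static inline void etm4x_cp14_set_trace_id(u32 traceid)
{
	u32 prgctlr = RSYSL_ETMPRGCTLR();	/* save current programming control */

	WSYS_ETMPRGCTLR(0x0);			/* disable the trace unit before reprogramming */
	WSYS_ETMTRACEIDR(traceid);		/* set the trace stream ID */
	WSYS_ETMPRGCTLR(prgctlr);		/* restore the previous enable state */
}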
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 6795368..cc41438 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -128,20 +128,10 @@
 #endif /* !SMP */
 
 static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, ret, tmp;
 
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
-		return -EFAULT;
-
 #ifndef CONFIG_SMP
 	preempt_disable();
 #endif
@@ -172,17 +162,9 @@
 	preempt_enable();
 #endif
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
-		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
-		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
-		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
-		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
-		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
-		default: ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
+
 	return ret;
 }
 
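For context (not part of the patch): the operand decode and comparison removed from the arch helper above now live in a generic wrapper in kernel/futex.c, so the architecture code only performs the atomic operation and reports the old value. Roughly, that wrapper looks like the sketch below; treat it as an outline rather than the exact upstream code.

static int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;
	int oldval, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

	/* Arch helper does the atomic op and returns the old value. */
	ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
	if (ret)
		return ret;

	/* Comparison against cmparg is now done once, in generic code. */
	switch (cmp) {
	case FUTEX_OP_CMP_EQ: return oldval == cmparg;
	case FUTEX_OP_CMP_NE: return oldval != cmparg;
	case FUTEX_OP_CMP_LT: return oldval < cmparg;
	case FUTEX_OP_CMP_GE: return oldval >= cmparg;
	case FUTEX_OP_CMP_LE: return oldval <= cmparg;
	case FUTEX_OP_CMP_GT: return oldval > cmparg;
	default: return -ENOSYS;
	}
}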
diff --git a/arch/arm/include/asm/hardware/debugv8.h b/arch/arm/include/asm/hardware/debugv8.h
new file mode 100644
index 0000000..a8249cd
--- /dev/null
+++ b/arch/arm/include/asm/hardware/debugv8.h
@@ -0,0 +1,247 @@
+/* Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_HARDWARE_DEBUGV8_H
+#define __ASM_HARDWARE_DEBUGV8_H
+
+#include <linux/types.h>
+
+/* Accessors for CP14 registers */
+#define dbg_read(reg)			RCP14_##reg()
+#define dbg_write(val, reg)		WCP14_##reg(val)
+
+/* MRC14 registers */
+#define MRC14(op1, crn, crm, op2)					\
+({									\
+uint32_t val;								\
+asm volatile("mrc p14, "#op1", %0, "#crn", "#crm", "#op2 : "=r" (val));	\
+val;									\
+})
+
+/* MCR14 registers */
+#define MCR14(val, op1, crn, crm, op2)					\
+({									\
+asm volatile("mcr p14, "#op1", %0, "#crn", "#crm", "#op2 : : "r" (val));\
+})
+
+/*
+ * Debug Registers
+ *
+ * Read only
+ * DBGDIDR, DBGDSCRint, DBGDTRRXint, DBGDRAR, DBGOSLSR, DBGOSSRR, DBGDSAR,
+ * DBGAUTHSTATUS, DBGDEVID2, DBGDEVID1, DBGDEVID
+ *
+ * Write only
+ * DBGDTRTXint, DBGOSLAR
+ */
+#define RCP14_DBGDIDR()			MRC14(0, c0, c0, 0)
+#define RCP14_DBGDSCRint()		MRC14(0, c0, c1, 0)
+#define RCP14_DBGDCCINT()		MRC14(0, c0, c2, 0)
+#define RCP14_DBGDTRRXint()		MRC14(0, c0, c5, 0)
+#define RCP14_DBGWFAR()			MRC14(0, c0, c6, 0)
+#define RCP14_DBGVCR()			MRC14(0, c0, c7, 0)
+#define RCP14_DBGDTRRXext()		MRC14(0, c0, c0, 2)
+#define RCP14_DBGDSCRext()		MRC14(0, c0, c2, 2)
+#define RCP14_DBGDTRTXext()		MRC14(0, c0, c3, 2)
+#define RCP14_DBGOSECCR()		MRC14(0, c0, c6, 2)
+#define RCP14_DBGBVR0()			MRC14(0, c0, c0, 4)
+#define RCP14_DBGBVR1()			MRC14(0, c0, c1, 4)
+#define RCP14_DBGBVR2()			MRC14(0, c0, c2, 4)
+#define RCP14_DBGBVR3()			MRC14(0, c0, c3, 4)
+#define RCP14_DBGBVR4()			MRC14(0, c0, c4, 4)
+#define RCP14_DBGBVR5()			MRC14(0, c0, c5, 4)
+#define RCP14_DBGBVR6()			MRC14(0, c0, c6, 4)
+#define RCP14_DBGBVR7()			MRC14(0, c0, c7, 4)
+#define RCP14_DBGBVR8()			MRC14(0, c0, c8, 4)
+#define RCP14_DBGBVR9()			MRC14(0, c0, c9, 4)
+#define RCP14_DBGBVR10()		MRC14(0, c0, c10, 4)
+#define RCP14_DBGBVR11()		MRC14(0, c0, c11, 4)
+#define RCP14_DBGBVR12()		MRC14(0, c0, c12, 4)
+#define RCP14_DBGBVR13()		MRC14(0, c0, c13, 4)
+#define RCP14_DBGBVR14()		MRC14(0, c0, c14, 4)
+#define RCP14_DBGBVR15()		MRC14(0, c0, c15, 4)
+#define RCP14_DBGBCR0()			MRC14(0, c0, c0, 5)
+#define RCP14_DBGBCR1()			MRC14(0, c0, c1, 5)
+#define RCP14_DBGBCR2()			MRC14(0, c0, c2, 5)
+#define RCP14_DBGBCR3()			MRC14(0, c0, c3, 5)
+#define RCP14_DBGBCR4()			MRC14(0, c0, c4, 5)
+#define RCP14_DBGBCR5()			MRC14(0, c0, c5, 5)
+#define RCP14_DBGBCR6()			MRC14(0, c0, c6, 5)
+#define RCP14_DBGBCR7()			MRC14(0, c0, c7, 5)
+#define RCP14_DBGBCR8()			MRC14(0, c0, c8, 5)
+#define RCP14_DBGBCR9()			MRC14(0, c0, c9, 5)
+#define RCP14_DBGBCR10()		MRC14(0, c0, c10, 5)
+#define RCP14_DBGBCR11()		MRC14(0, c0, c11, 5)
+#define RCP14_DBGBCR12()		MRC14(0, c0, c12, 5)
+#define RCP14_DBGBCR13()		MRC14(0, c0, c13, 5)
+#define RCP14_DBGBCR14()		MRC14(0, c0, c14, 5)
+#define RCP14_DBGBCR15()		MRC14(0, c0, c15, 5)
+#define RCP14_DBGWVR0()			MRC14(0, c0, c0, 6)
+#define RCP14_DBGWVR1()			MRC14(0, c0, c1, 6)
+#define RCP14_DBGWVR2()			MRC14(0, c0, c2, 6)
+#define RCP14_DBGWVR3()			MRC14(0, c0, c3, 6)
+#define RCP14_DBGWVR4()			MRC14(0, c0, c4, 6)
+#define RCP14_DBGWVR5()			MRC14(0, c0, c5, 6)
+#define RCP14_DBGWVR6()			MRC14(0, c0, c6, 6)
+#define RCP14_DBGWVR7()			MRC14(0, c0, c7, 6)
+#define RCP14_DBGWVR8()			MRC14(0, c0, c8, 6)
+#define RCP14_DBGWVR9()			MRC14(0, c0, c9, 6)
+#define RCP14_DBGWVR10()		MRC14(0, c0, c10, 6)
+#define RCP14_DBGWVR11()		MRC14(0, c0, c11, 6)
+#define RCP14_DBGWVR12()		MRC14(0, c0, c12, 6)
+#define RCP14_DBGWVR13()		MRC14(0, c0, c13, 6)
+#define RCP14_DBGWVR14()		MRC14(0, c0, c14, 6)
+#define RCP14_DBGWVR15()		MRC14(0, c0, c15, 6)
+#define RCP14_DBGWCR0()			MRC14(0, c0, c0, 7)
+#define RCP14_DBGWCR1()			MRC14(0, c0, c1, 7)
+#define RCP14_DBGWCR2()			MRC14(0, c0, c2, 7)
+#define RCP14_DBGWCR3()			MRC14(0, c0, c3, 7)
+#define RCP14_DBGWCR4()			MRC14(0, c0, c4, 7)
+#define RCP14_DBGWCR5()			MRC14(0, c0, c5, 7)
+#define RCP14_DBGWCR6()			MRC14(0, c0, c6, 7)
+#define RCP14_DBGWCR7()			MRC14(0, c0, c7, 7)
+#define RCP14_DBGWCR8()			MRC14(0, c0, c8, 7)
+#define RCP14_DBGWCR9()			MRC14(0, c0, c9, 7)
+#define RCP14_DBGWCR10()		MRC14(0, c0, c10, 7)
+#define RCP14_DBGWCR11()		MRC14(0, c0, c11, 7)
+#define RCP14_DBGWCR12()		MRC14(0, c0, c12, 7)
+#define RCP14_DBGWCR13()		MRC14(0, c0, c13, 7)
+#define RCP14_DBGWCR14()		MRC14(0, c0, c14, 7)
+#define RCP14_DBGWCR15()		MRC14(0, c0, c15, 7)
+#define RCP14_DBGDRAR()			MRC14(0, c1, c0, 0)
+#define RCP14_DBGBXVR0()		MRC14(0, c1, c0, 1)
+#define RCP14_DBGBXVR1()		MRC14(0, c1, c1, 1)
+#define RCP14_DBGBXVR2()		MRC14(0, c1, c2, 1)
+#define RCP14_DBGBXVR3()		MRC14(0, c1, c3, 1)
+#define RCP14_DBGBXVR4()		MRC14(0, c1, c4, 1)
+#define RCP14_DBGBXVR5()		MRC14(0, c1, c5, 1)
+#define RCP14_DBGBXVR6()		MRC14(0, c1, c6, 1)
+#define RCP14_DBGBXVR7()		MRC14(0, c1, c7, 1)
+#define RCP14_DBGBXVR8()		MRC14(0, c1, c8, 1)
+#define RCP14_DBGBXVR9()		MRC14(0, c1, c9, 1)
+#define RCP14_DBGBXVR10()		MRC14(0, c1, c10, 1)
+#define RCP14_DBGBXVR11()		MRC14(0, c1, c11, 1)
+#define RCP14_DBGBXVR12()		MRC14(0, c1, c12, 1)
+#define RCP14_DBGBXVR13()		MRC14(0, c1, c13, 1)
+#define RCP14_DBGBXVR14()		MRC14(0, c1, c14, 1)
+#define RCP14_DBGBXVR15()		MRC14(0, c1, c15, 1)
+#define RCP14_DBGOSLSR()		MRC14(0, c1, c1, 4)
+#define RCP14_DBGOSSRR()		MRC14(0, c1, c2, 4)
+#define RCP14_DBGOSDLR()		MRC14(0, c1, c3, 4)
+#define RCP14_DBGPRCR()			MRC14(0, c1, c4, 4)
+#define RCP14_DBGPRSR()			MRC14(0, c1, c5, 4)
+#define RCP14_DBGDSAR()			MRC14(0, c2, c0, 0)
+#define RCP14_DBGITCTRL()		MRC14(0, c7, c0, 4)
+#define RCP14_DBGCLAIMSET()		MRC14(0, c7, c8, 6)
+#define RCP14_DBGCLAIMCLR()		MRC14(0, c7, c9, 6)
+#define RCP14_DBGAUTHSTATUS()		MRC14(0, c7, c14, 6)
+#define RCP14_DBGDEVID2()		MRC14(0, c7, c0, 7)
+#define RCP14_DBGDEVID1()		MRC14(0, c7, c1, 7)
+#define RCP14_DBGDEVID()		MRC14(0, c7, c2, 7)
+
+#define WCP14_DBGDCCINT(val)		MCR14(val, 0, c0, c2, 0)
+#define WCP14_DBGDTRTXint(val)		MCR14(val, 0, c0, c5, 0)
+#define WCP14_DBGWFAR(val)		MCR14(val, 0, c0, c6, 0)
+#define WCP14_DBGVCR(val)		MCR14(val, 0, c0, c7, 0)
+#define WCP14_DBGDTRRXext(val)		MCR14(val, 0, c0, c0, 2)
+#define WCP14_DBGDSCRext(val)		MCR14(val, 0, c0, c2, 2)
+#define WCP14_DBGDTRTXext(val)		MCR14(val, 0, c0, c3, 2)
+#define WCP14_DBGOSECCR(val)		MCR14(val, 0, c0, c6, 2)
+#define WCP14_DBGBVR0(val)		MCR14(val, 0, c0, c0, 4)
+#define WCP14_DBGBVR1(val)		MCR14(val, 0, c0, c1, 4)
+#define WCP14_DBGBVR2(val)		MCR14(val, 0, c0, c2, 4)
+#define WCP14_DBGBVR3(val)		MCR14(val, 0, c0, c3, 4)
+#define WCP14_DBGBVR4(val)		MCR14(val, 0, c0, c4, 4)
+#define WCP14_DBGBVR5(val)		MCR14(val, 0, c0, c5, 4)
+#define WCP14_DBGBVR6(val)		MCR14(val, 0, c0, c6, 4)
+#define WCP14_DBGBVR7(val)		MCR14(val, 0, c0, c7, 4)
+#define WCP14_DBGBVR8(val)		MCR14(val, 0, c0, c8, 4)
+#define WCP14_DBGBVR9(val)		MCR14(val, 0, c0, c9, 4)
+#define WCP14_DBGBVR10(val)		MCR14(val, 0, c0, c10, 4)
+#define WCP14_DBGBVR11(val)		MCR14(val, 0, c0, c11, 4)
+#define WCP14_DBGBVR12(val)		MCR14(val, 0, c0, c12, 4)
+#define WCP14_DBGBVR13(val)		MCR14(val, 0, c0, c13, 4)
+#define WCP14_DBGBVR14(val)		MCR14(val, 0, c0, c14, 4)
+#define WCP14_DBGBVR15(val)		MCR14(val, 0, c0, c15, 4)
+#define WCP14_DBGBCR0(val)		MCR14(val, 0, c0, c0, 5)
+#define WCP14_DBGBCR1(val)		MCR14(val, 0, c0, c1, 5)
+#define WCP14_DBGBCR2(val)		MCR14(val, 0, c0, c2, 5)
+#define WCP14_DBGBCR3(val)		MCR14(val, 0, c0, c3, 5)
+#define WCP14_DBGBCR4(val)		MCR14(val, 0, c0, c4, 5)
+#define WCP14_DBGBCR5(val)		MCR14(val, 0, c0, c5, 5)
+#define WCP14_DBGBCR6(val)		MCR14(val, 0, c0, c6, 5)
+#define WCP14_DBGBCR7(val)		MCR14(val, 0, c0, c7, 5)
+#define WCP14_DBGBCR8(val)		MCR14(val, 0, c0, c8, 5)
+#define WCP14_DBGBCR9(val)		MCR14(val, 0, c0, c9, 5)
+#define WCP14_DBGBCR10(val)		MCR14(val, 0, c0, c10, 5)
+#define WCP14_DBGBCR11(val)		MCR14(val, 0, c0, c11, 5)
+#define WCP14_DBGBCR12(val)		MCR14(val, 0, c0, c12, 5)
+#define WCP14_DBGBCR13(val)		MCR14(val, 0, c0, c13, 5)
+#define WCP14_DBGBCR14(val)		MCR14(val, 0, c0, c14, 5)
+#define WCP14_DBGBCR15(val)		MCR14(val, 0, c0, c15, 5)
+#define WCP14_DBGWVR0(val)		MCR14(val, 0, c0, c0, 6)
+#define WCP14_DBGWVR1(val)		MCR14(val, 0, c0, c1, 6)
+#define WCP14_DBGWVR2(val)		MCR14(val, 0, c0, c2, 6)
+#define WCP14_DBGWVR3(val)		MCR14(val, 0, c0, c3, 6)
+#define WCP14_DBGWVR4(val)		MCR14(val, 0, c0, c4, 6)
+#define WCP14_DBGWVR5(val)		MCR14(val, 0, c0, c5, 6)
+#define WCP14_DBGWVR6(val)		MCR14(val, 0, c0, c6, 6)
+#define WCP14_DBGWVR7(val)		MCR14(val, 0, c0, c7, 6)
+#define WCP14_DBGWVR8(val)		MCR14(val, 0, c0, c8, 6)
+#define WCP14_DBGWVR9(val)		MCR14(val, 0, c0, c9, 6)
+#define WCP14_DBGWVR10(val)		MCR14(val, 0, c0, c10, 6)
+#define WCP14_DBGWVR11(val)		MCR14(val, 0, c0, c11, 6)
+#define WCP14_DBGWVR12(val)		MCR14(val, 0, c0, c12, 6)
+#define WCP14_DBGWVR13(val)		MCR14(val, 0, c0, c13, 6)
+#define WCP14_DBGWVR14(val)		MCR14(val, 0, c0, c14, 6)
+#define WCP14_DBGWVR15(val)		MCR14(val, 0, c0, c15, 6)
+#define WCP14_DBGWCR0(val)		MCR14(val, 0, c0, c0, 7)
+#define WCP14_DBGWCR1(val)		MCR14(val, 0, c0, c1, 7)
+#define WCP14_DBGWCR2(val)		MCR14(val, 0, c0, c2, 7)
+#define WCP14_DBGWCR3(val)		MCR14(val, 0, c0, c3, 7)
+#define WCP14_DBGWCR4(val)		MCR14(val, 0, c0, c4, 7)
+#define WCP14_DBGWCR5(val)		MCR14(val, 0, c0, c5, 7)
+#define WCP14_DBGWCR6(val)		MCR14(val, 0, c0, c6, 7)
+#define WCP14_DBGWCR7(val)		MCR14(val, 0, c0, c7, 7)
+#define WCP14_DBGWCR8(val)		MCR14(val, 0, c0, c8, 7)
+#define WCP14_DBGWCR9(val)		MCR14(val, 0, c0, c9, 7)
+#define WCP14_DBGWCR10(val)		MCR14(val, 0, c0, c10, 7)
+#define WCP14_DBGWCR11(val)		MCR14(val, 0, c0, c11, 7)
+#define WCP14_DBGWCR12(val)		MCR14(val, 0, c0, c12, 7)
+#define WCP14_DBGWCR13(val)		MCR14(val, 0, c0, c13, 7)
+#define WCP14_DBGWCR14(val)		MCR14(val, 0, c0, c14, 7)
+#define WCP14_DBGWCR15(val)		MCR14(val, 0, c0, c15, 7)
+#define WCP14_DBGBXVR0(val)		MCR14(val, 0, c1, c0, 1)
+#define WCP14_DBGBXVR1(val)		MCR14(val, 0, c1, c1, 1)
+#define WCP14_DBGBXVR2(val)		MCR14(val, 0, c1, c2, 1)
+#define WCP14_DBGBXVR3(val)		MCR14(val, 0, c1, c3, 1)
+#define WCP14_DBGBXVR4(val)		MCR14(val, 0, c1, c4, 1)
+#define WCP14_DBGBXVR5(val)		MCR14(val, 0, c1, c5, 1)
+#define WCP14_DBGBXVR6(val)		MCR14(val, 0, c1, c6, 1)
+#define WCP14_DBGBXVR7(val)		MCR14(val, 0, c1, c7, 1)
+#define WCP14_DBGBXVR8(val)		MCR14(val, 0, c1, c8, 1)
+#define WCP14_DBGBXVR9(val)		MCR14(val, 0, c1, c9, 1)
+#define WCP14_DBGBXVR10(val)		MCR14(val, 0, c1, c10, 1)
+#define WCP14_DBGBXVR11(val)		MCR14(val, 0, c1, c11, 1)
+#define WCP14_DBGBXVR12(val)		MCR14(val, 0, c1, c12, 1)
+#define WCP14_DBGBXVR13(val)		MCR14(val, 0, c1, c13, 1)
+#define WCP14_DBGBXVR14(val)		MCR14(val, 0, c1, c14, 1)
+#define WCP14_DBGBXVR15(val)		MCR14(val, 0, c1, c15, 1)
+#define WCP14_DBGOSLAR(val)		MCR14(val, 0, c1, c0, 4)
+#define WCP14_DBGOSSRR(val)		MCR14(val, 0, c1, c2, 4)
+#define WCP14_DBGOSDLR(val)		MCR14(val, 0, c1, c3, 4)
+#define WCP14_DBGPRCR(val)		MCR14(val, 0, c1, c4, 4)
+#define WCP14_DBGITCTRL(val)		MCR14(val, 0, c7, c0, 4)
+#define WCP14_DBGCLAIMSET(val)		MCR14(val, 0, c7, c8, 6)
+#define WCP14_DBGCLAIMCLR(val)		MCR14(val, 0, c7, c9, 6)
+
+#endif
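
(Illustrative sketch only, not part of the patch: the dbg_read()/dbg_write() accessors defined at the top of this header expand to the RCP14_ and WCP14_ macros, for example when saving and restoring one breakpoint register pair. The helper names below are assumed for illustration.)

static inline void debugv8_save_bp0(u32 *bvr, u32 *bcr)
{
	*bvr = dbg_read(DBGBVR0);	/* expands to RCP14_DBGBVR0() */
	*bcr = dbg_read(DBGBCR0);	/* expands to RCP14_DBGBCR0() */
}

static inline void debugv8_restore_bp0(u32 bvr, u32 bcr)
{
	dbg_write(bvr, DBGBVR0);	/* expands to WCP14_DBGBVR0(bvr) */
	dbg_write(bcr, DBGBCR0);	/* expands to WCP14_DBGBCR0(bcr) */
}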
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index afcaf8b..e40bbc5 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -52,6 +52,7 @@
 #define ARM_DEBUG_ARCH_V7_MM	4
 #define ARM_DEBUG_ARCH_V7_1	5
 #define ARM_DEBUG_ARCH_V8	6
+#define ARM_DEBUG_ARCH_V8_8	8
 
 /* Breakpoint */
 #define ARM_BREAKPOINT_EXECUTE	0
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index d5423ab..f4dab20 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -78,6 +78,9 @@
 	/* Interrupt controller */
 	struct vgic_dist	vgic;
 	int max_vcpus;
+
+	/* Mandated version of PSCI */
+	u32 psci_version;
 };
 
 #define KVM_NR_MEM_OBJS     40
@@ -318,4 +321,10 @@
 	return -ENXIO;
 }
 
+static inline bool kvm_arm_harden_branch_predictor(void)
+{
+	/* No way to detect it yet, pretend it is not there. */
+	return false;
+}
+
 #endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index d10e362..7f66b1b 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -223,6 +223,22 @@
 	return 8;
 }
 
+/*
+ * We are not in the kvm->srcu critical section most of the time, so we take
+ * the SRCU read lock here. Since we copy the data from the user page, we
+ * can immediately drop the lock again.
+ */
+static inline int kvm_read_guest_lock(struct kvm *kvm,
+				      gpa_t gpa, void *data, unsigned long len)
+{
+	int srcu_idx = srcu_read_lock(&kvm->srcu);
+	int ret = kvm_read_guest(kvm, gpa, data, len);
+
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+	return ret;
+}
+
 static inline void *kvm_get_hyp_vector(void)
 {
 	return kvm_ksym_ref(__kvm_hyp_vector);
diff --git a/arch/arm/include/asm/kvm_psci.h b/arch/arm/include/asm/kvm_psci.h
deleted file mode 100644
index 6bda945..0000000
--- a/arch/arm/include/asm/kvm_psci.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (C) 2012 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __ARM_KVM_PSCI_H__
-#define __ARM_KVM_PSCI_H__
-
-#define KVM_ARM_PSCI_0_1	1
-#define KVM_ARM_PSCI_0_2	2
-
-int kvm_psci_version(struct kvm_vcpu *vcpu);
-int kvm_psci_call(struct kvm_vcpu *vcpu);
-
-#endif /* __ARM_KVM_PSCI_H__ */
diff --git a/arch/arm/include/asm/vdso.h b/arch/arm/include/asm/vdso.h
index d0295f1..ff65b6d 100644
--- a/arch/arm/include/asm/vdso.h
+++ b/arch/arm/include/asm/vdso.h
@@ -11,8 +11,6 @@
 
 void arm_install_vdso(struct mm_struct *mm, unsigned long addr);
 
-extern char vdso_start, vdso_end;
-
 extern unsigned int vdso_total_pages;
 
 #else /* CONFIG_VDSO */
diff --git a/arch/arm/include/asm/xen/events.h b/arch/arm/include/asm/xen/events.h
index 71e473d..620dc75 100644
--- a/arch/arm/include/asm/xen/events.h
+++ b/arch/arm/include/asm/xen/events.h
@@ -16,7 +16,7 @@
 	return raw_irqs_disabled_flags(regs->ARM_cpsr);
 }
 
-#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((ptr),	\
+#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((long long*)(ptr),\
 							    atomic64_t,	\
 							    counter), (val))
 
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index b38c10c..0b8cf31 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -173,6 +173,12 @@
 #define KVM_REG_ARM_VFP_FPINST		0x1009
 #define KVM_REG_ARM_VFP_FPINST2		0x100A
 
+/* KVM-as-firmware specific pseudo-registers */
+#define KVM_REG_ARM_FW			(0x0014 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_FW_REG(r)		(KVM_REG_ARM | KVM_REG_SIZE_U64 | \
+					 KVM_REG_ARM_FW | ((r) & 0xffff))
+#define KVM_REG_ARM_PSCI_VERSION	KVM_REG_ARM_FW_REG(0)
+
 /* Device Control API: ARM VGIC */
 #define KVM_DEV_ARM_VGIC_GRP_ADDR	0
 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS	1
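
(Illustrative sketch only, not part of the patch: how userspace might read the new pseudo-register through the standard KVM_GET_ONE_REG ioctl. The function name and error handling are assumptions for the example.)

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static uint64_t get_guest_psci_version(int vcpu_fd)
{
	uint64_t version = 0;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM_PSCI_VERSION,
		.addr = (uintptr_t)&version,
	};

	/* Returns e.g. KVM_ARM_PSCI_0_2 or KVM_ARM_PSCI_1_0 on success. */
	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return 0;	/* register not exposed by older kernels */

	return version;
}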
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 2d1d821..42d3974 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -150,10 +150,15 @@
 	}
 
 	c = irq_data_get_irq_chip(d);
-	if (!c->irq_set_affinity)
+	if (!c->irq_set_affinity) {
 		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
-		cpumask_copy(irq_data_get_affinity_mask(d), affinity);
+	} else {
+		int r = irq_set_affinity_locked(d, affinity, false);
+
+		if (r)
+			pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
+					    d->irq, r);
+	}
 
 	return ret;
 }
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 4f9e2b5..f85665d 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1066,8 +1066,6 @@
 {
 	unsigned long config_base = 0;
 
-	if (attr->exclude_idle)
-		return -EPERM;
 	if (attr->exclude_user)
 		config_base |= ARMV7_EXCLUDE_USER;
 	if (attr->exclude_kernel)
@@ -1184,7 +1182,48 @@
 	*nb_cnt += 1;
 }
 
-static int armv7_probe_num_events(struct arm_pmu *arm_pmu)
+static void armv7_pmu_idle_update(struct arm_pmu *cpu_pmu)
+{
+	struct pmu_hw_events *hw_events;
+	struct perf_event *event;
+	int idx;
+
+	if (!cpu_pmu)
+		return;
+
+	hw_events = this_cpu_ptr(cpu_pmu->hw_events);
+	if (!hw_events)
+		return;
+
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
+		event = hw_events->events[idx];
+
+		if (!event || !event->attr.exclude_idle ||
+			event->state != PERF_EVENT_STATE_ACTIVE)
+			continue;
+
+		cpu_pmu->pmu.read(event);
+	}
+}
+
+struct armv7_pmu_idle_nb {
+	struct arm_pmu *cpu_pmu;
+	struct notifier_block perf_cpu_idle_nb;
+};
+
+static int armv7_pmu_idle_notifier(struct notifier_block *nb,
+				unsigned long action, void *data)
+{
+	struct armv7_pmu_idle_nb *pmu_idle_nb = container_of(nb,
+				struct armv7_pmu_idle_nb, perf_cpu_idle_nb);
+
+	if (action == IDLE_START)
+		armv7_pmu_idle_update(pmu_idle_nb->cpu_pmu);
+
+	return NOTIFY_OK;
+}
+
+static int armv7_probe_pmu(struct arm_pmu *arm_pmu)
 {
 	return smp_call_function_any(&arm_pmu->supported_cpus,
 				     armv7_read_num_pmnc_events,
@@ -1200,7 +1239,7 @@
 		&armv7_pmuv1_events_attr_group;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
 		&armv7_pmu_format_attr_group;
-	return armv7_probe_num_events(cpu_pmu);
+	return armv7_probe_pmu(cpu_pmu);
 }
 
 static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
@@ -1212,7 +1251,7 @@
 		&armv7_pmuv1_events_attr_group;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
 		&armv7_pmu_format_attr_group;
-	return armv7_probe_num_events(cpu_pmu);
+	return armv7_probe_pmu(cpu_pmu);
 }
 
 static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
@@ -1224,7 +1263,7 @@
 		&armv7_pmuv1_events_attr_group;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
 		&armv7_pmu_format_attr_group;
-	return armv7_probe_num_events(cpu_pmu);
+	return armv7_probe_pmu(cpu_pmu);
 }
 
 static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
@@ -1237,7 +1276,17 @@
 		&armv7_pmuv2_events_attr_group;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
 		&armv7_pmu_format_attr_group;
-	return armv7_probe_num_events(cpu_pmu);
+	return armv7_probe_pmu(cpu_pmu);
+}
+
+static int armv8_pmuv3_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	armv7pmu_init(cpu_pmu);
+	cpu_pmu->name		= "ARMv8 Cortex-A53";
+	cpu_pmu->map_event	= armv7_a7_map_event;
+	armv7_read_num_pmnc_events(&cpu_pmu->num_events);
+	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
+	return 0;
 }
 
 static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
@@ -1250,7 +1299,7 @@
 		&armv7_pmuv2_events_attr_group;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
 		&armv7_pmu_format_attr_group;
-	return armv7_probe_num_events(cpu_pmu);
+	return armv7_probe_pmu(cpu_pmu);
 }
 
 static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
@@ -1263,7 +1312,7 @@
 		&armv7_pmuv2_events_attr_group;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
 		&armv7_pmu_format_attr_group;
-	return armv7_probe_num_events(cpu_pmu);
+	return armv7_probe_pmu(cpu_pmu);
 }
 
 static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
@@ -1660,7 +1709,7 @@
 	cpu_pmu->disable	= krait_pmu_disable_event;
 	cpu_pmu->get_event_idx	= krait_pmu_get_event_idx;
 	cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
-	return armv7_probe_num_events(cpu_pmu);
+	return armv7_probe_pmu(cpu_pmu);
 }
 
 /*
@@ -1983,7 +2032,7 @@
 	cpu_pmu->disable	= scorpion_pmu_disable_event;
 	cpu_pmu->get_event_idx	= scorpion_pmu_get_event_idx;
 	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
-	return armv7_probe_num_events(cpu_pmu);
+	return armv7_probe_pmu(cpu_pmu);
 }
 
 static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
@@ -1996,7 +2045,7 @@
 	cpu_pmu->disable	= scorpion_pmu_disable_event;
 	cpu_pmu->get_event_idx	= scorpion_pmu_get_event_idx;
 	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
-	return armv7_probe_num_events(cpu_pmu);
+	return armv7_probe_pmu(cpu_pmu);
 }
 
 static const struct of_device_id armv7_pmu_of_device_ids[] = {
@@ -2010,6 +2059,7 @@
 	{.compatible = "qcom,krait-pmu",	.data = krait_pmu_init},
 	{.compatible = "qcom,scorpion-pmu",	.data = scorpion_pmu_init},
 	{.compatible = "qcom,scorpion-mp-pmu",	.data = scorpion_mp_pmu_init},
+	{.compatible = "arm,armv8-pmuv3",      .data = armv8_pmuv3_pmu_init},
 	{},
 };
 
@@ -2022,8 +2072,24 @@
 
 static int armv7_pmu_device_probe(struct platform_device *pdev)
 {
-	return arm_pmu_device_probe(pdev, armv7_pmu_of_device_ids,
+	int ret;
+	struct armv7_pmu_idle_nb *pmu_idle_nb;
+
+	pmu_idle_nb = devm_kzalloc(&pdev->dev, sizeof(*pmu_idle_nb),
+				    GFP_KERNEL);
+	if (!pmu_idle_nb)
+		return -ENOMEM;
+
+	ret = arm_pmu_device_probe(pdev, armv7_pmu_of_device_ids,
 				    armv7_pmu_probe_table);
+	if (ret)
+		return ret;
+
+	pmu_idle_nb->cpu_pmu = (struct arm_pmu *) platform_get_drvdata(pdev);
+	pmu_idle_nb->perf_cpu_idle_nb.notifier_call = armv7_pmu_idle_notifier;
+	idle_notifier_register(&pmu_idle_nb->perf_cpu_idle_nb);
+
+	return 0;
 }
 
 static struct platform_driver armv7_pmu_driver = {
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index c6324b5..38ad8b9 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -129,13 +129,13 @@
 		for (j = 0; j < 8; j++) {
 			u32	data;
 			if (probe_kernel_address(p, data)) {
-				printk(" ********");
+				pr_cont(" ********");
 			} else {
-				printk(" %08x", data);
+				pr_cont(" %08x", data);
 			}
 			++p;
 		}
-		printk("\n");
+		pr_cont("\n");
 	}
 }
 
diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c
index 29286fb..8ff6674 100644
--- a/arch/arm/kernel/psci_smp.c
+++ b/arch/arm/kernel/psci_smp.c
@@ -112,6 +112,12 @@
 	return 0;
 }
 
+static bool psci_cpu_can_disable(unsigned int cpu)
+{
+	/* Hotplug of any CPU is supported */
+	return true;
+}
+
 #endif
 
 bool __init psci_smp_available(void)
@@ -126,5 +132,6 @@
 	.cpu_disable		= psci_cpu_disable,
 	.cpu_die		= psci_cpu_die,
 	.cpu_kill		= psci_cpu_kill,
+	.cpu_can_disable	= psci_cpu_can_disable,
 #endif
 };
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 1b30489..aa316a7 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -19,6 +19,7 @@
 #include <linux/uaccess.h>
 #include <linux/hardirq.h>
 #include <linux/kdebug.h>
+#include <linux/kprobes.h>
 #include <linux/module.h>
 #include <linux/kexec.h>
 #include <linux/bug.h>
@@ -415,7 +416,8 @@
 	raw_spin_unlock_irqrestore(&undef_lock, flags);
 }
 
-static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
+static nokprobe_inline
+int call_undef_hook(struct pt_regs *regs, unsigned int instr)
 {
 	struct undef_hook *hook;
 	unsigned long flags;
@@ -488,6 +490,7 @@
 
 	arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6);
 }
+NOKPROBE_SYMBOL(do_undefinstr)
 
 /*
  * Handle FIQ similarly to NMI on x86 systems.
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index 53cf86c..8904397 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -39,6 +39,8 @@
 
 static struct page **vdso_text_pagelist;
 
+extern char vdso_start[], vdso_end[];
+
 /* Total number of pages needed for the data and text portions of the VDSO. */
 unsigned int vdso_total_pages __ro_after_init;
 
@@ -179,13 +181,13 @@
 	unsigned int text_pages;
 	int i;
 
-	if (memcmp(&vdso_start, "\177ELF", 4)) {
+	if (memcmp(vdso_start, "\177ELF", 4)) {
 		pr_err("VDSO is not a valid ELF object!\n");
 		return -ENOEXEC;
 	}
 
-	text_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
-	pr_debug("vdso: %i text pages at base %p\n", text_pages, &vdso_start);
+	text_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
+	pr_debug("vdso: %i text pages at base %p\n", text_pages, vdso_start);
 
 	/* Allocate the VDSO text pagelist */
 	vdso_text_pagelist = kcalloc(text_pages, sizeof(struct page *),
@@ -200,7 +202,7 @@
 	for (i = 0; i < text_pages; i++) {
 		struct page *page;
 
-		page = virt_to_page(&vdso_start + i * PAGE_SIZE);
+		page = virt_to_page(vdso_start + i * PAGE_SIZE);
 		vdso_text_pagelist[i] = page;
 	}
 
@@ -211,7 +213,7 @@
 
 	cntvct_ok = cntvct_functional();
 
-	patch_vdso(&vdso_start);
+	patch_vdso(vdso_start);
 
 	return 0;
 }
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index c38bfbe..ef6595c 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -29,6 +29,7 @@
 #include <linux/kvm.h>
 #include <trace/events/kvm.h>
 #include <kvm/arm_pmu.h>
+#include <kvm/arm_psci.h>
 
 #define CREATE_TRACE_POINTS
 #include "trace.h"
@@ -44,7 +45,6 @@
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_coproc.h>
-#include <asm/kvm_psci.h>
 #include <asm/sections.h>
 
 #ifdef REQUIRES_VIRT
@@ -1088,7 +1088,7 @@
 	pgd_ptr = kvm_mmu_get_httbr();
 	stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
 	hyp_stack_ptr = stack_page + PAGE_SIZE;
-	vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
+	vector_ptr = (unsigned long)kvm_get_hyp_vector();
 
 	__cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
 	__cpu_init_stage2();
@@ -1345,6 +1345,13 @@
 		goto out_err;
 	}
 
+
+	err = kvm_map_vectors();
+	if (err) {
+		kvm_err("Cannot map vectors\n");
+		goto out_err;
+	}
+
 	/*
 	 * Map the Hyp stack pages
 	 */
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 9aca920..630117d 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -22,6 +22,7 @@
 #include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
+#include <kvm/arm_psci.h>
 #include <asm/cputype.h>
 #include <asm/uaccess.h>
 #include <asm/kvm.h>
@@ -176,6 +177,7 @@
 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
 {
 	return num_core_regs() + kvm_arm_num_coproc_regs(vcpu)
+		+ kvm_arm_get_fw_num_regs(vcpu)
 		+ NUM_TIMER_REGS;
 }
 
@@ -196,6 +198,11 @@
 		uindices++;
 	}
 
+	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
+	if (ret)
+		return ret;
+	uindices += kvm_arm_get_fw_num_regs(vcpu);
+
 	ret = copy_timer_indices(vcpu, uindices);
 	if (ret)
 		return ret;
@@ -214,6 +221,9 @@
 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
 		return get_core_reg(vcpu, reg);
 
+	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
+		return kvm_arm_get_fw_reg(vcpu, reg);
+
 	if (is_timer_reg(reg->id))
 		return get_timer_reg(vcpu, reg);
 
@@ -230,6 +240,9 @@
 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
 		return set_core_reg(vcpu, reg);
 
+	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
+		return kvm_arm_set_fw_reg(vcpu, reg);
+
 	if (is_timer_reg(reg->id))
 		return set_timer_reg(vcpu, reg);
 
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 4e57ebc..de1aedc 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -21,7 +21,7 @@
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_coproc.h>
 #include <asm/kvm_mmu.h>
-#include <asm/kvm_psci.h>
+#include <kvm/arm_psci.h>
 #include <trace/events/kvm.h>
 
 #include "trace.h"
@@ -36,7 +36,7 @@
 		      kvm_vcpu_hvc_get_imm(vcpu));
 	vcpu->stat.hvc_exit_stat++;
 
-	ret = kvm_psci_call(vcpu);
+	ret = kvm_hvc_call_handler(vcpu);
 	if (ret < 0) {
 		vcpu_set_reg(vcpu, 0, ~0UL);
 		return 1;
diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c
index 624a510..ebd2dd4 100644
--- a/arch/arm/kvm/hyp/switch.c
+++ b/arch/arm/kvm/hyp/switch.c
@@ -237,8 +237,10 @@
 
 		vcpu = (struct kvm_vcpu *)read_sysreg(HTPIDR);
 		host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+		__timer_save_state(vcpu);
 		__deactivate_traps(vcpu);
 		__deactivate_vm(vcpu);
+		__banked_restore_state(host_ctxt);
 		__sysreg_restore_state(host_ctxt);
 	}
 
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index a08d7a9..8a9c654 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -15,16 +15,17 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/arm-smccc.h>
 #include <linux/preempt.h>
 #include <linux/kvm_host.h>
+#include <linux/uaccess.h>
 #include <linux/wait.h>
 
 #include <asm/cputype.h>
 #include <asm/kvm_emulate.h>
-#include <asm/kvm_psci.h>
 #include <asm/kvm_host.h>
 
-#include <uapi/linux/psci.h>
+#include <kvm/arm_psci.h>
 
 /*
  * This is an implementation of the Power State Coordination Interface
@@ -33,6 +34,38 @@
 
 #define AFFINITY_MASK(level)	~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)
 
+static u32 smccc_get_function(struct kvm_vcpu *vcpu)
+{
+	return vcpu_get_reg(vcpu, 0);
+}
+
+static unsigned long smccc_get_arg1(struct kvm_vcpu *vcpu)
+{
+	return vcpu_get_reg(vcpu, 1);
+}
+
+static unsigned long smccc_get_arg2(struct kvm_vcpu *vcpu)
+{
+	return vcpu_get_reg(vcpu, 2);
+}
+
+static unsigned long smccc_get_arg3(struct kvm_vcpu *vcpu)
+{
+	return vcpu_get_reg(vcpu, 3);
+}
+
+static void smccc_set_retval(struct kvm_vcpu *vcpu,
+			     unsigned long a0,
+			     unsigned long a1,
+			     unsigned long a2,
+			     unsigned long a3)
+{
+	vcpu_set_reg(vcpu, 0, a0);
+	vcpu_set_reg(vcpu, 1, a1);
+	vcpu_set_reg(vcpu, 2, a2);
+	vcpu_set_reg(vcpu, 3, a3);
+}
+
 static unsigned long psci_affinity_mask(unsigned long affinity_level)
 {
 	if (affinity_level <= 3)
@@ -75,7 +108,7 @@
 	unsigned long context_id;
 	phys_addr_t target_pc;
 
-	cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
+	cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
 	if (vcpu_mode_is_32bit(source_vcpu))
 		cpu_id &= ~((u32) 0);
 
@@ -88,14 +121,14 @@
 	if (!vcpu)
 		return PSCI_RET_INVALID_PARAMS;
 	if (!vcpu->arch.power_off) {
-		if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
+		if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1)
 			return PSCI_RET_ALREADY_ON;
 		else
 			return PSCI_RET_INVALID_PARAMS;
 	}
 
-	target_pc = vcpu_get_reg(source_vcpu, 2);
-	context_id = vcpu_get_reg(source_vcpu, 3);
+	target_pc = smccc_get_arg2(source_vcpu);
+	context_id = smccc_get_arg3(source_vcpu);
 
 	kvm_reset_vcpu(vcpu);
 
@@ -114,7 +147,7 @@
 	 * NOTE: We always update r0 (or x0) because for PSCI v0.1
 	 * the general purpose registers are undefined upon CPU_ON.
 	 */
-	vcpu_set_reg(vcpu, 0, context_id);
+	smccc_set_retval(vcpu, context_id, 0, 0, 0);
 	vcpu->arch.power_off = false;
 	smp_mb();		/* Make sure the above is visible */
 
@@ -134,8 +167,8 @@
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_vcpu *tmp;
 
-	target_affinity = vcpu_get_reg(vcpu, 1);
-	lowest_affinity_level = vcpu_get_reg(vcpu, 2);
+	target_affinity = smccc_get_arg1(vcpu);
+	lowest_affinity_level = smccc_get_arg2(vcpu);
 
 	/* Determine target affinity mask */
 	target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
@@ -198,18 +231,10 @@
 	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
 }
 
-int kvm_psci_version(struct kvm_vcpu *vcpu)
-{
-	if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
-		return KVM_ARM_PSCI_0_2;
-
-	return KVM_ARM_PSCI_0_1;
-}
-
 static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = vcpu->kvm;
-	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
+	unsigned long psci_fn = smccc_get_function(vcpu);
 	unsigned long val;
 	int ret = 1;
 
@@ -219,7 +244,7 @@
 		 * Bits[31:16] = Major Version = 0
 		 * Bits[15:0] = Minor Version = 2
 		 */
-		val = 2;
+		val = KVM_ARM_PSCI_0_2;
 		break;
 	case PSCI_0_2_FN_CPU_SUSPEND:
 	case PSCI_0_2_FN64_CPU_SUSPEND:
@@ -276,14 +301,56 @@
 		break;
 	}
 
-	vcpu_set_reg(vcpu, 0, val);
+	smccc_set_retval(vcpu, val, 0, 0, 0);
+	return ret;
+}
+
+static int kvm_psci_1_0_call(struct kvm_vcpu *vcpu)
+{
+	u32 psci_fn = smccc_get_function(vcpu);
+	u32 feature;
+	unsigned long val;
+	int ret = 1;
+
+	switch(psci_fn) {
+	case PSCI_0_2_FN_PSCI_VERSION:
+		val = KVM_ARM_PSCI_1_0;
+		break;
+	case PSCI_1_0_FN_PSCI_FEATURES:
+		feature = smccc_get_arg1(vcpu);
+		switch(feature) {
+		case PSCI_0_2_FN_PSCI_VERSION:
+		case PSCI_0_2_FN_CPU_SUSPEND:
+		case PSCI_0_2_FN64_CPU_SUSPEND:
+		case PSCI_0_2_FN_CPU_OFF:
+		case PSCI_0_2_FN_CPU_ON:
+		case PSCI_0_2_FN64_CPU_ON:
+		case PSCI_0_2_FN_AFFINITY_INFO:
+		case PSCI_0_2_FN64_AFFINITY_INFO:
+		case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
+		case PSCI_0_2_FN_SYSTEM_OFF:
+		case PSCI_0_2_FN_SYSTEM_RESET:
+		case PSCI_1_0_FN_PSCI_FEATURES:
+		case ARM_SMCCC_VERSION_FUNC_ID:
+			val = 0;
+			break;
+		default:
+			val = PSCI_RET_NOT_SUPPORTED;
+			break;
+		}
+		break;
+	default:
+		return kvm_psci_0_2_call(vcpu);
+	}
+
+	smccc_set_retval(vcpu, val, 0, 0, 0);
 	return ret;
 }
 
 static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = vcpu->kvm;
-	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
+	unsigned long psci_fn = smccc_get_function(vcpu);
 	unsigned long val;
 
 	switch (psci_fn) {
@@ -301,7 +368,7 @@
 		break;
 	}
 
-	vcpu_set_reg(vcpu, 0, val);
+	smccc_set_retval(vcpu, val, 0, 0, 0);
 	return 1;
 }
 
@@ -319,9 +386,11 @@
  * Errors:
  * -EINVAL: Unrecognized PSCI function
  */
-int kvm_psci_call(struct kvm_vcpu *vcpu)
+static int kvm_psci_call(struct kvm_vcpu *vcpu)
 {
-	switch (kvm_psci_version(vcpu)) {
+	switch (kvm_psci_version(vcpu, vcpu->kvm)) {
+	case KVM_ARM_PSCI_1_0:
+		return kvm_psci_1_0_call(vcpu);
 	case KVM_ARM_PSCI_0_2:
 		return kvm_psci_0_2_call(vcpu);
 	case KVM_ARM_PSCI_0_1:
@@ -330,3 +399,89 @@
 		return -EINVAL;
 	};
 }
+
+int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
+{
+	u32 func_id = smccc_get_function(vcpu);
+	u32 val = PSCI_RET_NOT_SUPPORTED;
+	u32 feature;
+
+	switch (func_id) {
+	case ARM_SMCCC_VERSION_FUNC_ID:
+		val = ARM_SMCCC_VERSION_1_1;
+		break;
+	case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
+		feature = smccc_get_arg1(vcpu);
+		switch(feature) {
+		case ARM_SMCCC_ARCH_WORKAROUND_1:
+			if (kvm_arm_harden_branch_predictor())
+				val = 0;
+			break;
+		}
+		break;
+	default:
+		return kvm_psci_call(vcpu);
+	}
+
+	smccc_set_retval(vcpu, val, 0, 0, 0);
+	return 1;
+}
+
+int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
+{
+	return 1;		/* PSCI version */
+}
+
+int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+	if (put_user(KVM_REG_ARM_PSCI_VERSION, uindices))
+		return -EFAULT;
+
+	return 0;
+}
+
+int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+	if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
+		void __user *uaddr = (void __user *)(long)reg->addr;
+		u64 val;
+
+		val = kvm_psci_version(vcpu, vcpu->kvm);
+		if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
+			return -EFAULT;
+
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+	if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
+		void __user *uaddr = (void __user *)(long)reg->addr;
+		bool wants_02;
+		u64 val;
+
+		if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
+			return -EFAULT;
+
+		wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);
+
+		switch (val) {
+		case KVM_ARM_PSCI_0_1:
+			if (wants_02)
+				return -EINVAL;
+			vcpu->kvm->arch.psci_version = val;
+			return 0;
+		case KVM_ARM_PSCI_0_2:
+		case KVM_ARM_PSCI_1_0:
+			if (!wants_02)
+				return -EINVAL;
+			vcpu->kvm->arch.psci_version = val;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index df73914..746e780 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -38,6 +38,7 @@
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_1)
+_ASM_NOKPROBE(__get_user_1)
 
 ENTRY(__get_user_2)
 	check_uaccess r0, 2, r1, r2, __get_user_bad
@@ -58,6 +59,7 @@
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_2)
+_ASM_NOKPROBE(__get_user_2)
 
 ENTRY(__get_user_4)
 	check_uaccess r0, 4, r1, r2, __get_user_bad
@@ -65,6 +67,7 @@
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_4)
+_ASM_NOKPROBE(__get_user_4)
 
 ENTRY(__get_user_8)
 	check_uaccess r0, 8, r1, r2, __get_user_bad8
@@ -78,6 +81,7 @@
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_8)
+_ASM_NOKPROBE(__get_user_8)
 
 #ifdef __ARMEB__
 ENTRY(__get_user_32t_8)
@@ -91,6 +95,7 @@
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_32t_8)
+_ASM_NOKPROBE(__get_user_32t_8)
 
 ENTRY(__get_user_64t_1)
 	check_uaccess r0, 1, r1, r2, __get_user_bad8
@@ -98,6 +103,7 @@
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_64t_1)
+_ASM_NOKPROBE(__get_user_64t_1)
 
 ENTRY(__get_user_64t_2)
 	check_uaccess r0, 2, r1, r2, __get_user_bad8
@@ -114,6 +120,7 @@
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_64t_2)
+_ASM_NOKPROBE(__get_user_64t_2)
 
 ENTRY(__get_user_64t_4)
 	check_uaccess r0, 4, r1, r2, __get_user_bad8
@@ -121,6 +128,7 @@
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_64t_4)
+_ASM_NOKPROBE(__get_user_64t_4)
 #endif
 
 __get_user_bad8:
@@ -131,6 +139,8 @@
 	ret	lr
 ENDPROC(__get_user_bad)
 ENDPROC(__get_user_bad8)
+_ASM_NOKPROBE(__get_user_bad)
+_ASM_NOKPROBE(__get_user_bad8)
 
 .pushsection __ex_table, "a"
 	.long	1b, __get_user_bad
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index add3771..9a22d40 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -821,6 +821,8 @@
 	.resource	= da8xx_rproc_resources,
 };
 
+static bool rproc_mem_inited __initdata;
+
 #if IS_ENABLED(CONFIG_DA8XX_REMOTEPROC)
 
 static phys_addr_t rproc_base __initdata;
@@ -859,6 +861,8 @@
 	ret = dma_declare_contiguous(&da8xx_dsp.dev, rproc_size, rproc_base, 0);
 	if (ret)
 		pr_err("%s: dma_declare_contiguous failed %d\n", __func__, ret);
+	else
+		rproc_mem_inited = true;
 }
 
 #else
@@ -873,6 +877,12 @@
 {
 	int ret;
 
+	if (!rproc_mem_inited) {
+		pr_warn("%s: memory not reserved for DSP, not registering DSP device\n",
+			__func__);
+		return -ENOMEM;
+	}
+
 	ret = platform_device_register(&da8xx_dsp);
 	if (ret)
 		pr_err("%s: can't register DSP device: %d\n", __func__, ret);
diff --git a/arch/arm/mach-imx/cpu.c b/arch/arm/mach-imx/cpu.c
index b3347d3..94906ed 100644
--- a/arch/arm/mach-imx/cpu.c
+++ b/arch/arm/mach-imx/cpu.c
@@ -131,6 +131,9 @@
 	case MXC_CPU_IMX6UL:
 		soc_id = "i.MX6UL";
 		break;
+	case MXC_CPU_IMX6ULL:
+		soc_id = "i.MX6ULL";
+		break;
 	case MXC_CPU_IMX7D:
 		soc_id = "i.MX7D";
 		break;
diff --git a/arch/arm/mach-imx/mxc.h b/arch/arm/mach-imx/mxc.h
index 34f2ff6..e00d626 100644
--- a/arch/arm/mach-imx/mxc.h
+++ b/arch/arm/mach-imx/mxc.h
@@ -39,6 +39,7 @@
 #define MXC_CPU_IMX6SX		0x62
 #define MXC_CPU_IMX6Q		0x63
 #define MXC_CPU_IMX6UL		0x64
+#define MXC_CPU_IMX6ULL		0x65
 #define MXC_CPU_IMX7D		0x72
 
 #define IMX_DDR_TYPE_LPDDR2		1
@@ -73,6 +74,11 @@
 	return __mxc_cpu_type == MXC_CPU_IMX6UL;
 }
 
+static inline bool cpu_is_imx6ull(void)
+{
+	return __mxc_cpu_type == MXC_CPU_IMX6ULL;
+}
+
 static inline bool cpu_is_imx6q(void)
 {
 	return __mxc_cpu_type == MXC_CPU_IMX6Q;
diff --git a/arch/arm/mach-omap1/clock.c b/arch/arm/mach-omap1/clock.c
index 4f5fd4a..034b894 100644
--- a/arch/arm/mach-omap1/clock.c
+++ b/arch/arm/mach-omap1/clock.c
@@ -1031,17 +1031,17 @@
 		return -ENOMEM;
 	c->dent = d;
 
-	d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
+	d = debugfs_create_u8("usecount", S_IRUGO, c->dent, &c->usecount);
 	if (!d) {
 		err = -ENOMEM;
 		goto err_out;
 	}
-	d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
+	d = debugfs_create_ulong("rate", S_IRUGO, c->dent, &c->rate);
 	if (!d) {
 		err = -ENOMEM;
 		goto err_out;
 	}
-	d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
+	d = debugfs_create_x8("flags", S_IRUGO, c->dent, &c->flags);
 	if (!d) {
 		err = -ENOMEM;
 		goto err_out;
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index 678d2a3..3202015 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -225,7 +225,7 @@
 	cpu_idle_poll_ctrl(false);
 }
 
-static void omap_pm_finish(void)
+static void omap_pm_wake(void)
 {
 	if (cpu_is_omap34xx())
 		omap_prcm_irq_complete();
@@ -235,7 +235,7 @@
 	.begin		= omap_pm_begin,
 	.end		= omap_pm_end,
 	.enter		= omap_pm_enter,
-	.finish		= omap_pm_finish,
+	.wake		= omap_pm_wake,
 	.valid		= suspend_valid_only_mem,
 };
 
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index b2f2448..a4cab28 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -136,12 +136,6 @@
 	.tick_resume		= omap2_gp_timer_shutdown,
 };
 
-static struct property device_disabled = {
-	.name = "status",
-	.length = sizeof("disabled"),
-	.value = "disabled",
-};
-
 static const struct of_device_id omap_timer_match[] __initconst = {
 	{ .compatible = "ti,omap2420-timer", },
 	{ .compatible = "ti,omap3430-timer", },
@@ -183,8 +177,17 @@
 				  of_get_property(np, "ti,timer-secure", NULL)))
 			continue;
 
-		if (!of_device_is_compatible(np, "ti,omap-counter32k"))
-			of_add_property(np, &device_disabled);
+		if (!of_device_is_compatible(np, "ti,omap-counter32k")) {
+			struct property *prop;
+
+			prop = kzalloc(sizeof(*prop), GFP_KERNEL);
+			if (!prop)
+				return NULL;
+			prop->name = "status";
+			prop->value = "disabled";
+			prop->length = strlen(prop->value);
+			of_add_property(np, prop);
+		}
 		return np;
 	}
 
diff --git a/arch/arm/mach-orion5x/Kconfig b/arch/arm/mach-orion5x/Kconfig
index 89bb0fc..72905a4 100644
--- a/arch/arm/mach-orion5x/Kconfig
+++ b/arch/arm/mach-orion5x/Kconfig
@@ -57,7 +57,6 @@
 
 config MACH_DNS323
 	bool "D-Link DNS-323"
-	select GENERIC_NET_UTILS
 	select I2C_BOARDINFO if I2C
 	help
 	  Say 'Y' here if you want your kernel to support the
@@ -65,7 +64,6 @@
 
 config MACH_TS209
 	bool "QNAP TS-109/TS-209"
-	select GENERIC_NET_UTILS
 	help
 	  Say 'Y' here if you want your kernel to support the
 	  QNAP TS-109/TS-209 platform.
@@ -107,7 +105,6 @@
 
 config MACH_TS409
 	bool "QNAP TS-409"
-	select GENERIC_NET_UTILS
 	help
 	  Say 'Y' here if you want your kernel to support the
 	  QNAP TS-409 platform.
diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c
index cd483bf..d13344b 100644
--- a/arch/arm/mach-orion5x/dns323-setup.c
+++ b/arch/arm/mach-orion5x/dns323-setup.c
@@ -173,10 +173,42 @@
 	.phy_addr = MV643XX_ETH_PHY_ADDR(8),
 };
 
+/* dns323_parse_hex_*() taken from tsx09-common.c; should a common copy of these
+ * functions be kept somewhere?
+ */
+static int __init dns323_parse_hex_nibble(char n)
+{
+	if (n >= '0' && n <= '9')
+		return n - '0';
+
+	if (n >= 'A' && n <= 'F')
+		return n - 'A' + 10;
+
+	if (n >= 'a' && n <= 'f')
+		return n - 'a' + 10;
+
+	return -1;
+}
+
+static int __init dns323_parse_hex_byte(const char *b)
+{
+	int hi;
+	int lo;
+
+	hi = dns323_parse_hex_nibble(b[0]);
+	lo = dns323_parse_hex_nibble(b[1]);
+
+	if (hi < 0 || lo < 0)
+		return -1;
+
+	return (hi << 4) | lo;
+}
+
 static int __init dns323_read_mac_addr(void)
 {
 	u_int8_t addr[6];
-	void __iomem *mac_page;
+	int i;
+	char *mac_page;
 
 	/* MAC address is stored as a regular ol' string in /dev/mtdblock4
 	 * (0x007d0000-0x00800000) starting at offset 196480 (0x2ff80).
@@ -185,8 +217,23 @@
 	if (!mac_page)
 		return -ENOMEM;
 
-	if (!mac_pton((__force const char *) mac_page, addr))
-		goto error_fail;
+	/* Sanity check the string we're looking at */
+	for (i = 0; i < 5; i++) {
+		if (*(mac_page + (i * 3) + 2) != ':') {
+			goto error_fail;
+		}
+	}
+
+	for (i = 0; i < 6; i++)	{
+		int byte;
+
+		byte = dns323_parse_hex_byte(mac_page + (i * 3));
+		if (byte < 0) {
+			goto error_fail;
+		}
+
+		addr[i] = byte;
+	}
 
 	iounmap(mac_page);
 	printk("DNS-323: Found ethernet MAC address: %pM\n", addr);
diff --git a/arch/arm/mach-orion5x/tsx09-common.c b/arch/arm/mach-orion5x/tsx09-common.c
index 8977498..905d4f2 100644
--- a/arch/arm/mach-orion5x/tsx09-common.c
+++ b/arch/arm/mach-orion5x/tsx09-common.c
@@ -53,12 +53,53 @@
 	.phy_addr	= MV643XX_ETH_PHY_ADDR(8),
 };
 
+static int __init qnap_tsx09_parse_hex_nibble(char n)
+{
+	if (n >= '0' && n <= '9')
+		return n - '0';
+
+	if (n >= 'A' && n <= 'F')
+		return n - 'A' + 10;
+
+	if (n >= 'a' && n <= 'f')
+		return n - 'a' + 10;
+
+	return -1;
+}
+
+static int __init qnap_tsx09_parse_hex_byte(const char *b)
+{
+	int hi;
+	int lo;
+
+	hi = qnap_tsx09_parse_hex_nibble(b[0]);
+	lo = qnap_tsx09_parse_hex_nibble(b[1]);
+
+	if (hi < 0 || lo < 0)
+		return -1;
+
+	return (hi << 4) | lo;
+}
+
 static int __init qnap_tsx09_check_mac_addr(const char *addr_str)
 {
 	u_int8_t addr[6];
+	int i;
 
-	if (!mac_pton(addr_str, addr))
-		return -1;
+	for (i = 0; i < 6; i++) {
+		int byte;
+
+		/*
+		 * Enforce "xx:xx:xx:xx:xx:xx\n" format.
+		 */
+		if (addr_str[(i * 3) + 2] != ((i < 5) ? ':' : '\n'))
+			return -1;
+
+		byte = qnap_tsx09_parse_hex_byte(addr_str + (i * 3));
+		if (byte < 0)
+			return -1;
+		addr[i] = byte;
+	}
 
 	printk(KERN_INFO "tsx09: found ethernet mac address %pM\n", addr);
 
@@ -77,12 +118,12 @@
 	unsigned long addr;
 
 	for (addr = mem_base; addr < (mem_base + size); addr += 1024) {
-		void __iomem *nor_page;
+		char *nor_page;
 		int ret = 0;
 
 		nor_page = ioremap(addr, 1024);
 		if (nor_page != NULL) {
-			ret = qnap_tsx09_check_mac_addr((__force const char *)nor_page);
+			ret = qnap_tsx09_check_mac_addr(nor_page);
 			iounmap(nor_page);
 		}
 
diff --git a/arch/arm/mach-qcom/Kconfig b/arch/arm/mach-qcom/Kconfig
index 1ab1fbb..d96863e 100644
--- a/arch/arm/mach-qcom/Kconfig
+++ b/arch/arm/mach-qcom/Kconfig
@@ -65,6 +65,17 @@
 	select HAVE_CLK_PREPARE
 	select COMMON_CLK_MSM
 
+config ARCH_MSM8953_BOOT_ORDERING
+	bool "Enable support for MSM8953 device boot ordering"
+	default n
+	help
+	  Populate devices from devicetree at late_init, after
+	  drivers for all platform devices have been registered.
+	  This causes devices to be probed in the order they are
+	  listed in devicetree, which gives greater control over
+	  the probe ordering and makes it possible to reduce the
+	  overall boot time.
+
 config ARCH_MSM8937
 	bool "Enable support for MSM8937"
 	select CPU_V7
@@ -83,7 +94,7 @@
 	select MAY_HAVE_SPARSE_IRQ
 	select PINCTRL_MSM_TLMM
 	select USE_PINCTRL_IRQ
-	select MSM_PM if PM
+	select MSM_PM_LEGACY if PM
 	select MSM_RPM_SMD
 	select MSM_RPM_STATS_LOG
 	select MSM_RPM_LOG
@@ -166,6 +177,40 @@
 	select CPU_FREQ_QCOM
 	select COMMON_CLK_MSM
 
+config ARCH_SDM670
+        bool "Enable Support for SDM670"
+	select CPU_V7
+	select CLKDEV_LOOKUP
+	select HAVE_CLK
+	select HAVE_CLK_PREPARE
+	select PM_OPP
+	select SOC_BUS
+	select MSM_IRQ
+	select THERMAL_WRITABLE_TRIPS
+	select ARM_GIC_V3
+	select ARM_AMBA
+	select SPARSE_IRQ
+	select MULTI_IRQ_HANDLER
+	select HAVE_ARM_ARCH_TIMER
+	select MAY_HAVE_SPARSE_IRQ
+	select COMMON_CLK
+	select COMMON_CLK_QCOM
+	select QCOM_GDSC
+	select PINCTRL_MSM_TLMM
+	select PCI
+	select USE_PINCTRL_IRQ
+	select MSM_PM if PM
+	select QMI_ENCDEC
+	select CPU_FREQ
+	select PM_DEVFREQ
+	select MSM_DEVFREQ_DEVBW
+	select DEVFREQ_SIMPLE_DEV
+	select DEVFREQ_GOV_MSM_BW_HWMON
+	select MSM_BIMC_BWMON
+	select MSM_QDSP6V2_CODECS
+	select MSM_AUDIO_QDSP6V2 if SND_SOC
+	select GENERIC_IRQ_MIGRATION
+
 config ARCH_MDM9650
 	bool "MDM9650"
 	select ARM_GIC
diff --git a/arch/arm/mach-qcom/Makefile b/arch/arm/mach-qcom/Makefile
index 5b93fa3..3ef169f 100644
--- a/arch/arm/mach-qcom/Makefile
+++ b/arch/arm/mach-qcom/Makefile
@@ -1,5 +1,6 @@
 obj-$(CONFIG_USE_OF) += board-dt.o
 obj-$(CONFIG_SMP)	+= platsmp.o
+obj-$(CONFIG_MSM_PM_LEGACY)	+= hotplug.o
 obj-$(CONFIG_ARCH_SDXPOORWILLS) += board-poorwills.o
 obj-$(CONFIG_ARCH_MSM8953) += board-msm8953.o
 obj-$(CONFIG_ARCH_MSM8937) += board-msm8937.o
@@ -11,3 +12,4 @@
 obj-$(CONFIG_ARCH_MDM9650) += board-9650.o
 obj-$(CONFIG_ARCH_MDM9607) += board-9607.o
 obj-$(CONFIG_ARCH_SDM632) += board-sdm632.o
+obj-$(CONFIG_ARCH_SDM670) += board-sdm670.o
diff --git a/arch/arm/mach-qcom/board-msm8917.c b/arch/arm/mach-qcom/board-msm8917.c
index 63bc43b..0bd6984 100644
--- a/arch/arm/mach-qcom/board-msm8917.c
+++ b/arch/arm/mach-qcom/board-msm8917.c
@@ -17,6 +17,7 @@
 
 static const char *msm8917_dt_match[] __initconst = {
 	"qcom,msm8917",
+	"qcom,apq8017",
 	NULL
 };
 
diff --git a/arch/arm/mach-qcom/board-msm8953.c b/arch/arm/mach-qcom/board-msm8953.c
index 04b0bcc..9a82e3a 100644
--- a/arch/arm/mach-qcom/board-msm8953.c
+++ b/arch/arm/mach-qcom/board-msm8953.c
@@ -14,6 +14,7 @@
 #include "board-dt.h"
 #include <asm/mach/map.h>
 #include <asm/mach/arch.h>
+#include <linux/of_platform.h>
 
 static const char *msm8953_dt_match[] __initconst = {
 	"qcom,msm8953",
@@ -23,9 +24,25 @@
 
 static void __init msm8953_init(void)
 {
+	if (IS_ENABLED(CONFIG_ARCH_MSM8953_BOOT_ORDERING))
+		return;
 	board_dt_populate(NULL);
 }
 
+#ifdef CONFIG_ARCH_MSM8953_BOOT_ORDERING
+static int __init msm8953_dt_populate(void)
+{
+	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+
+	/* Explicitly parent the /soc devices to the root node to preserve
+	 * the kernel ABI (sysfs structure, etc) until userspace is updated
+	 */
+	return of_platform_populate(of_find_node_by_path("/soc"),
+				of_default_bus_match_table, NULL, NULL);
+}
+late_initcall(msm8953_dt_populate);
+#endif
+
 DT_MACHINE_START(MSM8953_DT,
 	"Qualcomm Technologies, Inc. MSM8953 (Flattened Device Tree)")
 	.init_machine		= msm8953_init,
diff --git a/arch/arm/mach-qcom/board-sdm670.c b/arch/arm/mach-qcom/board-sdm670.c
new file mode 100644
index 0000000..c77a3ff
--- /dev/null
+++ b/arch/arm/mach-qcom/board-sdm670.c
@@ -0,0 +1,33 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include "board-dt.h"
+#include <asm/mach/map.h>
+#include <asm/mach/arch.h>
+
+static const char *sdm670_dt_match[] __initconst = {
+	"qcom,sdm670",
+	"qcom,qcs605",
+	NULL
+};
+
+static void __init sdm670_init(void)
+{
+	board_dt_populate(NULL);
+}
+
+DT_MACHINE_START(SDM670_DT,
+	"Qualcomm Technologies, Inc. SDM670 (Flattened Device Tree)")
+	.init_machine		= sdm670_init,
+	.dt_compat		= sdm670_dt_match,
+MACHINE_END
diff --git a/arch/arm/mach-qcom/hotplug.c b/arch/arm/mach-qcom/hotplug.c
new file mode 100644
index 0000000..c038f4b
--- /dev/null
+++ b/arch/arm/mach-qcom/hotplug.c
@@ -0,0 +1,109 @@
+/*
+ *  Copyright (C) 2002 ARM Ltd.
+ *  All Rights Reserved
+ *  Copyright (c) 2011-2014, 2016, 2018, The Linux Foundation.
+ *  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+#include <soc/qcom/spm.h>
+#include <soc/qcom/pm-legacy.h>
+#include <asm/smp_plat.h>
+#include "platsmp.h"
+#include <soc/qcom/jtag.h>
+
+static cpumask_t cpu_dying_mask;
+static DEFINE_PER_CPU(unsigned int, warm_boot_flag);
+
+static inline void cpu_enter_lowpower(void)
+{
+}
+
+static inline void cpu_leave_lowpower(void)
+{
+}
+
+static inline void platform_do_lowpower(unsigned int cpu)
+{
+	lpm_cpu_hotplug_enter(cpu);
+	/*
+	 * Getting here means that we have come out of low power mode
+	 * without having been woken up - this should not happen.
+	 *
+	 */
+	pr_err("%s: CPU%u has failed to hotplug\n", __func__, cpu);
+}
+
+int qcom_cpu_kill_legacy(unsigned int cpu)
+{
+	int ret = 0;
+
+	if (cpumask_test_and_clear_cpu(cpu, &cpu_dying_mask))
+		ret = msm_pm_wait_cpu_shutdown(cpu);
+
+	return ret ? 0 : 1;
+}
+
+/*
+ * platform-specific code to shutdown a CPU
+ *
+ * Called with IRQs disabled
+ */
+void __ref qcom_cpu_die_legacy(unsigned int cpu)
+{
+	if (unlikely(cpu != smp_processor_id())) {
+		pr_crit("%s: running on %u, should be %u\n",
+			__func__, smp_processor_id(), cpu);
+		WARN_ON(cpu);
+	}
+	/*
+	 * we're ready for shutdown now, so do it
+	 */
+	cpu_enter_lowpower();
+	platform_do_lowpower(cpu);
+
+	pr_debug("CPU%u: %s: normal wakeup\n", cpu, __func__);
+	cpu_leave_lowpower();
+}
+
+int msm_platform_secondary_init(unsigned int cpu)
+{
+	int ret;
+	unsigned int *warm_boot = this_cpu_ptr(&warm_boot_flag);
+
+	if (!(*warm_boot)) {
+		*warm_boot = 1;
+		/*
+		 * All CPU0 boots are considered warm boots (restore needed)
+		 * since CPU0 is the system boot CPU and never cold-booted
+		 * by the kernel.
+		 */
+		if (cpu)
+			return 0;
+	}
+	msm_jtag_restore_state();
+	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
+
+	return ret;
+}
+
+static int hotplug_dying_cpu(unsigned int cpu)
+{
+	cpumask_set_cpu(cpu, &cpu_dying_mask);
+	return 0;
+}
+
+static int __init init_hotplug_dying(void)
+{
+	cpuhp_setup_state(CPUHP_AP_QCOM_SLEEP_STARTING,
+		 "AP_QCOM_HOTPLUG_STARTING", NULL, hotplug_dying_cpu);
+
+	return 0;
+}
+early_initcall(init_hotplug_dying);
diff --git a/arch/arm/mach-qcom/platsmp.c b/arch/arm/mach-qcom/platsmp.c
index c422ac3..803804d 100644
--- a/arch/arm/mach-qcom/platsmp.c
+++ b/arch/arm/mach-qcom/platsmp.c
@@ -22,6 +22,9 @@
 #include <asm/smp_plat.h>
 #include <asm/fixmap.h>
 #include "platsmp.h"
+#ifdef CONFIG_MSM_PM_LEGACY
+#include <soc/qcom/pm-legacy.h>
+#endif
 
 #define MSM_APCS_IDR 0x0B011030
 
@@ -62,10 +65,18 @@
 {
 	wfi();
 }
+
+static bool qcom_cpu_can_disable(unsigned int cpu)
+{
+	return true; /* Hotplug of any CPU is supported */
+}
 #endif
 
 static void qcom_secondary_init(unsigned int cpu)
 {
+#ifdef CONFIG_MSM_PM_LEGACY
+	WARN_ON(msm_platform_secondary_init(cpu));
+#endif
 	/*
 	 * Synchronise with the boot thread.
 	 */
@@ -472,8 +483,14 @@
 	.smp_secondary_init = qcom_secondary_init,
 	.smp_boot_secondary = msm8909_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
+#ifdef CONFIG_MSM_PM_LEGACY
+	.cpu_die		= qcom_cpu_die_legacy,
+	.cpu_kill		= qcom_cpu_kill_legacy,
+#else
 	.cpu_die		= qcom_cpu_die,
 #endif
+	.cpu_can_disable	= qcom_cpu_can_disable,
+#endif
 };
 
 CPU_METHOD_OF_DECLARE(qcom_smp_8909, "qcom,apss-8909", &msm8909_smp_ops);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index b57aafc..e346cf3 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2372,6 +2372,7 @@
 
 	mapping->nr_bitmaps = 1;
 	mapping->extensions = extensions;
+	mapping->bits = BITS_PER_BYTE * bitmap_size;
 
 	spin_lock_init(&mapping->lock);
 	mapping->ops = &iommu_ops;
@@ -2609,7 +2610,7 @@
 	}
 
 	if (__arm_iommu_attach_device(dev, mapping)) {
-		pr_warn("Failed to attached device %s to IOMMU_mapping\n",
+		pr_debug("Failed to attach device %s to IOMMU mapping\n",
 				dev_name(dev));
 		arm_iommu_release_mapping(mapping);
 		return false;
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index 7a327bd..ebef8aa 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -890,11 +890,8 @@
 	timer->irq = irq->start;
 	timer->pdev = pdev;
 
-	/* Skip pm_runtime_enable for OMAP1 */
-	if (!(timer->capability & OMAP_TIMER_NEEDS_RESET)) {
-		pm_runtime_enable(dev);
-		pm_runtime_irq_safe(dev);
-	}
+	pm_runtime_enable(dev);
+	pm_runtime_irq_safe(dev);
 
 	if (!timer->reserved) {
 		ret = pm_runtime_get_sync(dev);
diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
index bcdecc2..b2aa9b3 100644
--- a/arch/arm/probes/kprobes/opt-arm.c
+++ b/arch/arm/probes/kprobes/opt-arm.c
@@ -165,13 +165,14 @@
 {
 	unsigned long flags;
 	struct kprobe *p = &op->kp;
-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	struct kprobe_ctlblk *kcb;
 
 	/* Save skipped registers */
 	regs->ARM_pc = (unsigned long)op->kp.addr;
 	regs->ARM_ORIG_r0 = ~0UL;
 
 	local_irq_save(flags);
+	kcb = get_kprobe_ctlblk();
 
 	if (kprobe_running()) {
 		kprobes_inc_nmissed_count(&op->kp);
@@ -191,6 +192,7 @@
 
 	local_irq_restore(flags);
 }
+NOKPROBE_SYMBOL(optimized_callback)
 
 int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *orig)
 {
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index da0b33d..5629d75 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -648,7 +648,7 @@
  */
 static int vfp_dying_cpu(unsigned int cpu)
 {
-	vfp_force_reload(cpu, current_thread_info());
+	vfp_current_hw_state[cpu] = NULL;
 	return 0;
 }
 
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1b135e5..f72ce30 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -15,7 +15,24 @@
 	select ARCH_HAS_KCOV
 	select ARCH_HAS_SG_CHAIN
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+	select ARCH_INLINE_READ_LOCK if !PREEMPT
+	select ARCH_INLINE_READ_LOCK_BH if !PREEMPT
+	select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPT
+	select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPT
+	select ARCH_INLINE_READ_UNLOCK if !PREEMPT
+	select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPT
+	select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPT
+	select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPT
+	select ARCH_INLINE_WRITE_LOCK if !PREEMPT
+	select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPT
+	select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPT
+	select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPT
+	select ARCH_INLINE_WRITE_UNLOCK if !PREEMPT
+	select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPT
+	select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPT
+	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPT
 	select ARCH_USE_CMPXCHG_LOCKREF
+	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_SUPPORTS_LTO_CLANG
 	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_SUPPORTS_NUMA_BALANCING
@@ -112,6 +129,7 @@
 	select SPARSE_IRQ
 	select SYSCTL_EXCEPTION_TRACE
 	select THREAD_INFO_IN_TASK
+	select ARCH_SUPPORTS_SPECULATIVE_PAGE_FAULT
 	help
 	  ARM 64-bit (AArch64) Linux support.
 
@@ -825,6 +843,18 @@
 
 	  If unsure, say Y.
 
+config PSCI_BP_HARDENING
+	depends on HARDEN_BRANCH_PREDICTOR
+	bool "Use PSCI get version to enable branch predictor hardening"
+	help
+	  Enable this option if the firmware supports branch predictor
+	  hardening via the PSCI get version call. Some older firmware
+	  versions may not use the new SMCCC convention; in such cases
+	  the PSCI get version method is used to enable hardening
+	  against branch prediction attacks.
+
+	  If unsure, say N.
+
 menuconfig ARMV8_DEPRECATED
 	bool "Emulate deprecated/obsolete ARMv8 instructions"
 	depends on COMPAT
diff --git a/arch/arm64/boot/dts/qcom/8909-pm8916.dtsi b/arch/arm64/boot/dts/qcom/8909-pm8916.dtsi
index 3247d0d..b073d99 100644
--- a/arch/arm64/boot/dts/qcom/8909-pm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/8909-pm8916.dtsi
@@ -185,16 +185,6 @@
 	qcom,mdss_dsi_pll@1ac8300 {
 		vddio-supply = <&pm8916_l6>;
 	};
-
-	qcom,msm-thermal {
-		vdd-dig-supply = <&pm8916_s1_floor_corner>;
-
-		qcom,vdd-apps-rstr {
-			qcom,vdd-rstr-reg = "vdd-apps";
-			qcom,levels = <800000>;
-			qcom,freq-req;
-		};
-	};
 };
 
 
@@ -296,6 +286,7 @@
 		qcom,scale-function = <2>;
 		qcom,hw-settle-time = <2>;
 		qcom,fast-avg-setup = <0>;
+		qcom,vadc-thermal-node;
 	};
 
 	chan@32 {
@@ -307,6 +298,7 @@
 		qcom,scale-function = <4>;
 		qcom,hw-settle-time = <2>;
 		qcom,fast-avg-setup = <0>;
+		qcom,vadc-thermal-node;
 	};
 
 	chan@3c {
@@ -318,6 +310,7 @@
 		qcom,scale-function = <4>;
 		qcom,hw-settle-time = <2>;
 		qcom,fast-avg-setup = <0>;
+		qcom,vadc-thermal-node;
 	};
 };
 
@@ -350,3 +343,86 @@
 
 #include "msm8909-pm8916-pm.dtsi"
 
+&soc {
+	thermal-zones {
+		xo-therm-buf-adc {
+			polling-delay-passive = <0>;
+			polling-delay = <1000>;
+			thermal-sensors = <&pm8916_vadc 0x3c>;
+			thermal-governor = "user_space";
+
+			trips {
+				active-config0 {
+					temperature = <65000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		xo-therm-adc {
+			polling-delay-passive = <0>;
+			polling-delay = <1000>;
+			thermal-sensors = <&pm8916_vadc 0x32>;
+			thermal-governor = "user_space";
+
+			trips {
+				active-config0 {
+					temperature = <65000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		pa-therm0-adc {
+			polling-delay-passive = <0>;
+			polling-delay = <1000>;
+			thermal-sensors = <&pm8916_vadc 0x36>;
+			thermal-governor = "user_space";
+
+			trips {
+				active-config0 {
+					temperature = <65000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+		mdm-lowf {
+			cooling-maps {
+				cx_vdd_cdev {
+					cooling-device = <&pm8916_cx_cdev 0 0>;
+				};
+			};
+		};
+		camera-lowf {
+			cooling-maps {
+				cx_vdd_cdev {
+					cooling-device = <&pm8916_cx_cdev 0 0>;
+				};
+			};
+		};
+		gpu-lowf {
+			cooling-maps {
+				cx_vdd_cdev {
+					cooling-device = <&pm8916_cx_cdev 0 0>;
+				};
+			};
+		};
+		cpu0-2-lowf {
+			cooling-maps {
+				cx_vdd_cdev {
+					cooling-device = <&pm8916_cx_cdev 0 0>;
+				};
+			};
+		};
+		cpu1-3-lowf {
+			cooling-maps {
+				cx_vdd_cdev {
+					cooling-device = <&pm8916_cx_cdev 0 0>;
+				};
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 3c2ae63..3ad1c0e 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -135,17 +135,23 @@
 		sdm670-usbc-external-codec-pm660a-mtp-overlay.dtbo \
 		sda670-cdp-overlay.dtbo \
 		sda670-mtp-overlay.dtbo \
+		sda670-hdk-overlay.dtbo \
 		sda670-pm660a-cdp-overlay.dtbo \
 		sda670-pm660a-mtp-overlay.dtbo \
 		sdm670-tasha-codec-cdp-overlay.dtbo \
 		sdm670-pm660a-tasha-codec-cdp-overlay.dtbo \
 		sdm670-aqt1000-cdp-overlay.dtbo \
 		sdm670-pm660a-aqt1000-cdp-overlay.dtbo \
+		sxr1120-lc-mtp-overlay.dtbo \
+		sxr1120-lc-external-codec-mtp-overlay.dtbo \
+		sxr1120-lc-cdp-overlay.dtbo \
+		sxr1120-lc-external-codec-cdp-overlay.dtbo \
 		qcs605-cdp-overlay.dtbo \
 		qcs605-mtp-overlay.dtbo \
 		qcs605-360camera-overlay.dtbo \
 		qcs605-external-codec-mtp-overlay.dtbo \
 		qcs605-lc-mtp-overlay.dtbo \
+		qcs605-lc-cdp-overlay.dtbo \
 		sdm710-cdp-overlay.dtbo \
 		sdm710-mtp-overlay.dtbo \
 		sdm710-qrd-overlay.dtbo \
@@ -194,13 +200,19 @@
 sdm670-pm660a-aqt1000-cdp-overlay.dtbo-base := sdm670.dtb
 sda670-cdp-overlay.dtbo-base := sda670.dtb
 sda670-mtp-overlay.dtbo-base := sda670.dtb
+sda670-hdk-overlay.dtbo-base := sda670.dtb
 sda670-pm660a-cdp-overlay.dtbo-base := sda670.dtb
 sda670-pm660a-mtp-overlay.dtbo-base := sda670.dtb
+sxr1120-lc-mtp-overlay.dtbo-base := sxr1120-lc.dtb
+sxr1120-lc-external-codec-mtp-overlay.dtbo-base := sxr1120-lc.dtb
+sxr1120-lc-cdp-overlay.dtbo-base := sxr1120-lc.dtb
+sxr1120-lc-external-codec-cdp-overlay.dtbo-base := sxr1120-lc.dtb
 qcs605-cdp-overlay.dtbo-base := qcs605.dtb
 qcs605-mtp-overlay.dtbo-base := qcs605.dtb
 qcs605-external-codec-mtp-overlay.dtbo-base := qcs605.dtb
 qcs605-lc-mtp-overlay.dtbo-base := qcs605-lc.dtb
 qcs605-360camera-overlay.dtbo-base := qcs605.dtb
+qcs605-lc-cdp-overlay.dtbo-base := qcs605-lc-cdp-base.dtb
 sdm710-cdp-overlay.dtbo-base := sdm710.dtb
 sdm710-mtp-overlay.dtbo-base := sdm710.dtb
 sdm710-qrd-overlay.dtbo-base := sdm710.dtb
@@ -245,16 +257,22 @@
 	sdm670-usbc-pm660a-cdp.dtb \
 	sdm670-usbc-pm660a-mtp.dtb \
 	sda670-mtp.dtb \
+	sda670-hdk.dtb \
 	sda670-cdp.dtb \
 	sdm670-tasha-codec-cdp.dtb \
 	sdm670-pm660a-tasha-codec-cdp.dtb \
 	sda670-pm660a-mtp.dtb \
 	sda670-pm660a-cdp.dtb \
+	sxr1120-lc-mtp.dtb \
+	sxr1120-lc-external-codec-mtp.dtb \
+	sxr1120-lc-cdp.dtb \
+	sxr1120-lc-external-codec-cdp.dtb \
 	qcs605-360camera.dtb \
 	qcs605-mtp.dtb \
 	qcs605-cdp.dtb \
 	qcs605-external-codec-mtp.dtb \
 	qcs605-lc-mtp.dtb \
+	qcs605-lc-cdp.dtb \
 	sdm710-mtp.dtb \
 	sdm710-cdp.dtb \
 	sdm710-qrd.dtb \
@@ -308,7 +326,9 @@
 
 dtbo-$(CONFIG_ARCH_SDM439) += sdm439-mtp-overlay.dtbo \
 	sdm439-cdp-overlay.dtbo \
-	sdm439-qrd-overlay.dtbo
+	sdm439-qrd-overlay.dtbo \
+	sdm439-external-codec-mtp-overlay.dtbo \
+	sdm439-rcm-overlay.dtbo
 
 dtbo-$(CONFIG_ARCH_SDM429) += sdm429-mtp-overlay.dtbo \
 	sdm429-cdp-overlay.dtbo \
@@ -368,14 +388,20 @@
 	sdm632-pm8004.dtb
 
 sdm439-mtp-overlay.dtbo-base := sdm439.dtb \
+	sda439.dtb \
 	msm8937-interposer-sdm439.dtb
 sdm439-cdp-overlay.dtbo-base := sdm439.dtb \
+	sda439.dtb \
 	msm8937-interposer-sdm439.dtb
 sdm439-qrd-overlay.dtbo-base := sdm439.dtb \
 	msm8937-interposer-sdm439.dtb
+sdm439-external-codec-mtp-overlay.dtbo-base := sdm439.dtb
+sdm439-rcm-overlay.dtbo-base := sdm439.dtb
 sdm429-mtp-overlay.dtbo-base := sdm429.dtb \
+	sda429.dtb \
 	msm8937-interposer-sdm429.dtb
 sdm429-cdp-overlay.dtbo-base := sdm429.dtb \
+	sda429.dtb \
 	msm8937-interposer-sdm429.dtb
 sdm429-qrd-overlay.dtbo-base := sdm429.dtb \
 	msm8937-interposer-sdm429.dtb
@@ -398,10 +424,10 @@
 	apq8053-iot-mtp.dtb \
 	apq8053-lite-dragon-v1.0.dtb \
 	apq8053-lite-dragon-v2.0.dtb \
-	apq8053-lite-lenovo-v1.0.dtb \
-	apq8053-lite-lenovo-v1.1.dtb \
-	apq8053-lite-harman-v1.0.dtb \
-	apq8053-lite-lge-v1.0.dtb \
+	apq8053-lite-dragon-v2.1.dtb \
+	apq8053-lite-dragon-v2.2.dtb \
+	apq8053-lite-dragon-v2.3.dtb \
+	apq8053-lite-dragon-v2.4.dtb \
 	msm8953-pmi8940-cdp.dtb \
 	msm8953-pmi8940-mtp.dtb \
 	msm8953-pmi8937-cdp.dtb \
@@ -418,16 +444,31 @@
 	msm8937-interposer-sdm429-mtp.dtb
 
 dtb-$(CONFIG_ARCH_MSM8917) += msm8917-pmi8950-mtp.dtb \
+	msm8917-pmi8950-cdp.dtb \
+	msm8917-pmi8950-rcm.dtb \
+	msm8917-pmi8950-ext-codec-cdp.dtb \
+	msm8917-pmi8950-cdp-mirror-lake-touch.dtb \
+	apq8017-pmi8950-cdp-wcd-rome.dtb \
+	apq8017-pmi8950-mtp.dtb \
+	apq8017-pmi8950-cdp.dtb \
 	msm8917-pmi8937-qrd-sku5.dtb \
+	msm8917-pmi8937-mtp.dtb \
+	msm8917-pmi8937-cdp.dtb \
+	msm8917-pmi8937-rcm.dtb \
+	apq8017-pmi8937-mtp.dtb \
+	apq8017-pmi8937-cdp.dtb \
+	apq8017-pmi8937-cdp-wcd-rome.dtb \
 	msm8917-pmi8940-mtp.dtb \
-	msm8917-pmi8937-mtp.dtb
+	msm8917-pmi8940-cdp.dtb \
+	msm8917-pmi8940-rcm.dtb
 
 dtb-$(CONFIG_ARCH_MSM8909) += msm8909w-bg-wtp-v2.dtb \
 	apq8009w-bg-wtp-v2.dtb \
 	apq8009w-bg-alpha.dtb \
 	apq8009-mtp-wcd9326-refboard.dtb \
 	apq8009-robot-som-refboard.dtb \
-	apq8009-dragon.dtb
+	apq8009-dragon.dtb \
+	apq8009-lat-v1.0.dtb
 
 dtb-$(CONFIG_ARCH_SDM450) += sdm450-rcm.dtb \
 	sdm450-cdp.dtb \
@@ -464,7 +505,9 @@
 	sdm439-cdp.dtb \
 	sdm439-qrd.dtb \
 	sda439-mtp.dtb \
-	sda439-cdp.dtb
+	sda439-cdp.dtb \
+	sdm439-external-codec-mtp.dtb \
+	sdm439-rcm.dtb
 
 dtb-$(CONFIG_ARCH_SDM429) += sdm429-mtp.dtb \
 	sdm429-cdp.dtb \
diff --git a/arch/arm64/boot/dts/qcom/apq8009-audio-external_codec.dtsi b/arch/arm64/boot/dts/qcom/apq8009-audio-external_codec.dtsi
index d28e139..9261dfe 100644
--- a/arch/arm64/boot/dts/qcom/apq8009-audio-external_codec.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8009-audio-external_codec.dtsi
@@ -19,6 +19,12 @@
 };
 
 &soc {
+	qcom,msm-audio-apr {
+		compatible = "qcom,msm-audio-apr";
+		msm_audio_apr_dummy {
+			compatible = "qcom,msm-audio-apr-dummy";
+		};
+	};
 	sound-9335 {
 		compatible = "qcom,apq8009-audio-i2s-codec";
 		qcom,model = "apq8009-tashalite-snd-card";
@@ -234,16 +240,17 @@
 		qcom,msm-cpudai-tdm-group-num-ports = <1>;
 		qcom,msm-cpudai-tdm-group-port-id = <36864>;
 		qcom,msm-cpudai-tdm-clk-rate = <12288000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <0>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <0>;
+		qcom,msm-cpudai-tdm-data-delay = <1>;
 		qcom,msm-cpudai-tdm-sec-port-enable;
 		qcom,msm-cpudai-tdm-clk-attribute = /bits/ 16 <1>;
 		dai_pri_tdm_rx_0: qcom,msm-dai-q6-tdm-pri-rx-0 {
 			compatible = "qcom,msm-dai-q6-tdm";
 			qcom,msm-cpudai-tdm-dev-id = <36864>;
-			qcom,msm-cpudai-tdm-sync-mode = <0>;
-			qcom,msm-cpudai-tdm-sync-src = <1>;
-			qcom,msm-cpudai-tdm-data-out = <0>;
-			qcom,msm-cpudai-tdm-invert-sync = <0>;
-			qcom,msm-cpudai-tdm-data-delay = <1>;
 			qcom,msm-cpudai-tdm-data-align = <0>;
 		};
 	};
@@ -254,17 +261,26 @@
 		qcom,msm-cpudai-tdm-group-num-ports = <1>;
 		qcom,msm-cpudai-tdm-group-port-id = <36865>;
 		qcom,msm-cpudai-tdm-clk-rate = <12288000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <0>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <0>;
+		qcom,msm-cpudai-tdm-data-delay = <1>;
 		qcom,msm-cpudai-tdm-sec-port-enable;
 		qcom,msm-cpudai-tdm-clk-attribute = /bits/ 16 <1>;
 		dai_pri_tdm_tx_0: qcom,msm-dai-q6-tdm-pri-tx-0 {
 			compatible = "qcom,msm-dai-q6-tdm";
 			qcom,msm-cpudai-tdm-dev-id = <36865>;
-			qcom,msm-cpudai-tdm-sync-mode = <0>;
-			qcom,msm-cpudai-tdm-sync-src = <1>;
-			qcom,msm-cpudai-tdm-data-out = <0>;
-			qcom,msm-cpudai-tdm-invert-sync = <0>;
-			qcom,msm-cpudai-tdm-data-delay = <1>;
 			qcom,msm-cpudai-tdm-data-align = <0>;
 		};
 	};
 };
+
+&wcd9xxx_intc {
+	status = "okay";
+};
+
+&clock_audio {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8009-dragon.dts b/arch/arm64/boot/dts/qcom/apq8009-dragon.dts
index 12a4363..041563b 100644
--- a/arch/arm64/boot/dts/qcom/apq8009-dragon.dts
+++ b/arch/arm64/boot/dts/qcom/apq8009-dragon.dts
@@ -71,10 +71,88 @@
 		compatible = "qca,qca9379";
 		qca,bt-reset-gpio = <&msm_gpio 47 0>; /* BT_EN */
 	};
+
+	cnss_sdio: qcom,cnss_sdio {
+		compatible = "qcom,cnss_sdio";
+		subsys-name = "AR6320";
+		/**
+		 * There is no vdd-wlan on board and this is not for DSRC.
+		 * IO and XTAL share the same vreg.
+		 **/
+		vdd-wlan-io-supply = <&pm8916_l5>;
+		qcom,cap-tsf-gpio = <&msm_gpio 42 1>;
+		qcom,wlan-ramdump-dynamic = <0x200000>;
+		qcom,msm-bus,name = "msm-cnss";
+		qcom,msm-bus,num-cases = <4>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<79 512 0 0>,             /* No vote */
+				<79 512 6250 200000>,     /* 50 Mbps */
+				<79 512 25000 200000>,    /* 200 Mbps */
+				<79 512 2048000 4096000>; /* MAX */
+	};
+};
+
+&wcnss {
+	status = "disabled";
+};
+
+&msm_gpio {
+	sdc2_wlan_gpio_on: sdc2_wlan_gpio_on {
+		mux {
+			pins = "gpio43";
+			function = "gpio";
+		};
+		config {
+			pins = "gpio43";
+			drive-strength = <10>;
+			bias-pull-up;
+			output-high;
+		};
+	};
+
+	sdc2_wlan_gpio_off: sdc2_wlan_gpio_off {
+		mux {
+			pins = "gpio43";
+			function = "gpio";
+		};
+		config {
+			pins = "gpio43";
+			drive-strength = <2>;
+			bias-disable;
+			output-low;
+		};
+	};
 };
 
 &sdhc_2 {
-	status = "disabled";
+	/delete-property/cd-gpios;
+	#address-cells = <0>;
+	interrupt-parent = <&sdhc_2>;
+	interrupts = <0 1 2>;
+	#interrupt-cells = <1>;
+	interrupt-map-mask = <0xffffffff>;
+	interrupt-map = <0 &intc 0 125 0
+			1 &intc 0 221 0
+			2 &msm_gpio 40 0x1>;
+	interrupt-names = "hc_irq", "pwr_irq", "sdiowakeup_irq";
+
+	qcom,vdd-voltage-level = <1800000 2950000>;
+	qcom,vdd-current-level = <15000 400000>;
+
+	qcom,vdd-io-voltage-level = <1800000 1800000>;
+	qcom,vdd-io-current-level = <200 50000>;
+	qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+	qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on
+			&sdc2_wlan_gpio_on>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off
+			&sdc2_wlan_gpio_off>;
+	qcom,nonremovable;
+	qcom,core_3_0v_support;
+	status = "ok";
 };
 
 &usb_otg {
@@ -133,6 +211,7 @@
 
 &pm8916_chg {
 	status = "ok";
+	qcom,use-default-batt-values;
 };
 
 &pm8916_bms {
diff --git a/arch/arm64/boot/dts/qcom/apq8009-lat-v1.0.dts b/arch/arm64/boot/dts/qcom/apq8009-lat-v1.0.dts
new file mode 100644
index 0000000..f81e369
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8009-lat-v1.0.dts
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8909-mtp.dtsi"
+#include "8909-pm8916.dtsi"
+#include "msm8909-pm8916-mtp.dtsi"
+#include "apq8009-audio-external_codec.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. APQ8009-lat-v1.0 Board";
+	compatible = "qcom,apq8009-mtp", "qcom,apq8009", "qcom,mtp";
+	qcom,msm-id = <265 2>;
+	qcom,board-id= <10 0x1>;
+
+	bluetooth: bt_qca9379 {
+		compatible = "qca,qca9379";
+		qca,bt-reset-gpio = <&msm_gpio 47 0x0>; /* BT_EN */
+	};
+};
+
+&soc {
+	ext-codec {
+		qcom,msm-mbhc-hphl-swh = <0>;
+		qcom,audio-routing =
+			"AIF4 VI", "MCLK",
+			"RX_BIAS", "MCLK",
+			"MADINPUT", "MCLK",
+			"AMIC2", "MIC BIAS2",
+			"MIC BIAS2", "Headset Mic",
+			"DMIC0", "MIC BIAS1",
+			"MIC BIAS1", "Digital Mic0",
+			"DMIC1", "MIC BIAS1",
+			"MIC BIAS1", "Digital Mic1",
+			"DMIC2", "MIC BIAS3",
+			"MIC BIAS3", "Digital Mic2",
+			"DMIC3", "MIC BIAS3",
+			"MIC BIAS3", "Digital Mic3",
+			"SpkrLeft IN", "SPK1 OUT",
+			"SpkrRight IN", "SPK2 OUT";
+	};
+
+	sound-9335 {
+		status = "disabled";
+	};
+
+	i2c@78b8000 {
+		wcd9xxx_codec@d {
+			status = "disabled";
+		};
+	};
+
+	vph_pwr_vreg: vph_pwr_vreg {
+		compatible = "regulator-fixed";
+		status = "ok";
+		regulator-name = "vph_pwr";
+		regulator-always-on;
+	};
+
+	mdss_mdp: qcom,mdss_mdp@1a00000 {
+		status = "disabled";
+	};
+
+	qcom,msm-thermal {
+		qcom,core-control-mask = <0xa>;
+		qcom,freq-mitigation-value = <1190400>;
+		qcom,freq-mitigation-control-mask = <0x05>;
+	};
+};
+
+&sdhc_2 {
+	status = "disabled";
+};
+
+&usb_otg {
+	interrupts = <0 134 0>, <0 140 0>, <0 136 0>;
+	interrupt-names = "core_irq", "async_irq", "phy_irq";
+	qcom,hsusb-otg-mode = <3>;
+	qcom,phy-id-high-as-peripheral;
+	vbus_otg-supply = <&vph_pwr_vreg>;
+};
+
+&external_image_mem {
+	reg = <0x0 0x87a00000 0x0 0x0600000>;
+};
+
+&modem_adsp_mem {
+	reg = <0x0 0x88000000 0x0 0x01e00000>;
+};
+
+&peripheral_mem {
+	reg = <0x0 0x89e00000 0x0 0x0700000>;
+};
+
+&i2c_4 {
+	smb1360_otg_supply: smb1360-chg-fg@14 {
+		compatible = "qcom,smb1360-chg-fg";
+		reg = <0x14>;
+		interrupt-parent = <&msm_gpio>;
+		interrupts = <58 8>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&smb_int_default>;
+		qcom,charging-disabled;
+		qcom,empty-soc-disabled;
+		qcom,chg-inhibit-disabled;
+		qcom,float-voltage-mv = <4200>;
+		qcom,iterm-ma = <200>;
+		qcom,recharge-thresh-mv = <100>;
+		qcom,thermal-mitigation = <1500 700 600 0>;
+		regulator-name = "smb1360_otg_vreg";
+		status= "disabled";
+	};
+};
+
+&firmware {
+	android {
+		compatible = "android,firmware";
+		fstab {
+			compatible = "android,fstab";
+			vendor_fstab: vendor {
+				fsmgr_flags = "wait,slotselect";
+			};
+			/delete-node/ system;
+		};
+	};
+};
+
+&pm8916_chg {
+	status = "ok";
+};
+
+&pm8916_bms {
+	status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8009-mtp-wcd9326-refboard.dts b/arch/arm64/boot/dts/qcom/apq8009-mtp-wcd9326-refboard.dts
index a0a9c54..66e8986 100644
--- a/arch/arm64/boot/dts/qcom/apq8009-mtp-wcd9326-refboard.dts
+++ b/arch/arm64/boot/dts/qcom/apq8009-mtp-wcd9326-refboard.dts
@@ -18,6 +18,8 @@
 #include "apq8009-audio-external_codec.dtsi"
 #include "apq8009-memory.dtsi"
 #include <dt-bindings/pinctrl/qcom,pmic-mpp.h>
+#include "msm8909-pm8916-camera.dtsi"
+#include "msm8909-pm8916-camera-sensor-robot.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. APQ8009 WCD9326 Reference Board";
@@ -93,7 +95,7 @@
 };
 
 &soc {
-	sound-9335 {
+	ext_codec: sound-9335 {
 		qcom,audio-routing =
 			"AIF4 VI", "MCLK",
 			"RX_BIAS", "MCLK",
@@ -110,12 +112,22 @@
 			"MIC BIAS3", "Digital Mic3",
 			"SpkrLeft IN", "SPK1 OUT",
 			"SpkrRight IN", "SPK2 OUT";
-	};
 
-	i2c@78b8000 {
-		wcd9xxx_codec@d {
-			qcom,cdc-reset-gpio = <&msm_gpio 27 0>;
-		};
+		qcom,msm-gpios =
+			"us_eu_gpio";
+		qcom,pinctrl-names =
+			"all_off",
+			"us_eu_gpio_act";
+		pinctrl-names =
+			"all_off",
+			"us_eu_gpio_act";
+		pinctrl-0 = <&cross_conn_det_sus>;
+		pinctrl-1 = <&cross_conn_det_act>;
+		qcom,pri-mi2s-gpios = <&cdc_pri_mi2s_gpios>;
+		qcom,quat-mi2s-gpios = <&cdc_quat_mi2s_gpios>;
+
+		qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight",
+					"SpkrLeft", "SpkrRight";
 	};
 
 	i2c@78b9000 {
@@ -203,6 +215,36 @@
 		qcom,id-det-gpio = <&msm_gpio 110 0>;
 		qcom,dpdm_switch_gpio = <&pm8916_gpios 3 0>;
 	};
+
+	i2c@78b8000 {
+		wcd9xxx_codec@d {
+			status = "okay";
+			qcom,wcd-rst-gpio-node = <&wcd_rst_gpio>;
+		};
+	};
+
+	cdc_pri_mi2s_gpios: msm_cdc_pinctrl_pri {
+		compatible = "qcom,msm-cdc-pinctrl";
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&pri_mi2s_active &pri_mi2s_ws_active
+		     &pri_mi2s_dout_active &pri_mi2s_din_active>;
+		pinctrl-1 = <&pri_mi2s_sleep &pri_mi2s_ws_sleep
+		     &pri_mi2s_dout_sleep &pri_mi2s_din_sleep>;
+	};
+
+	cdc_quat_mi2s_gpios: msm_cdc_pinctrl_quat {
+		compatible = "qcom,msm-cdc-pinctrl";
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&quat_mi2s_active &quat_mi2s_din_active>;
+		pinctrl-1 = <&quat_mi2s_sleep &quat_mi2s_din_sleep>;
+	};
+
+	wcd_rst_gpio: wcd_gpio_ctrl {
+		compatible = "qcom,msm-cdc-pinctrl";
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&cdc_reset_active>;
+		pinctrl-1 = <&cdc_reset_sleep>;
+	};
 };
 
 &wcnss {
@@ -285,10 +327,17 @@
 		qcom,thermal-mitigation = <1500 700 600 0>;
 		regulator-name = "smb1360_otg_vreg";
 		status= "okay";
+		smb1360_vbus: qcom,smb1360-vbus {
+			regulator-name = "qcom,smb1360-vbus";
+		};
 	};
 };
 
 &usb_otg {
+	interrupts = <0 134 0>, <0 140 0>, <0 136 0>;
+	interrupt-names = "core_irq", "async_irq", "phy_irq";
+	qcom,hsusb-otg-mode = <3>;
+	vbus_otg-supply = <&smb1360_vbus>;
 	extcon = <&smb1360_otg_supply>;
 };
 
@@ -325,4 +374,16 @@
 	status = "disabled";
 };
 
+&wcd_rst_gpio {
+	status = "okay";
+};
+
+&ext_codec {
+	status = "okay";
+};
+
+&blsp1_uart2_hs {
+	status = "disabled";
+};
+
 /delete-node/ &cont_splash_mem;
diff --git a/arch/arm64/boot/dts/qcom/apq8009-robot-som-refboard.dts b/arch/arm64/boot/dts/qcom/apq8009-robot-som-refboard.dts
index 72486b2..8f013f9 100644
--- a/arch/arm64/boot/dts/qcom/apq8009-robot-som-refboard.dts
+++ b/arch/arm64/boot/dts/qcom/apq8009-robot-som-refboard.dts
@@ -17,6 +17,8 @@
 #include "8909-pm8916.dtsi"
 #include "msm8909-pm8916-mtp.dtsi"
 #include "apq8009-audio-external_codec.dtsi"
+#include "msm8909-pm8916-camera.dtsi"
+#include "msm8909-pm8916-camera-sensor-robot-som.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. APQ8009 Robot SOM refboard";
@@ -71,10 +73,113 @@
 		compatible = "qca,qca9379";
 		qca,bt-reset-gpio = <&msm_gpio 47 0>; /* BT_EN */
 	};
+	cnss_sdio: qcom,cnss_sdio {
+		compatible = "qcom,cnss_sdio";
+		subsys-name = "AR6320";
+		/**
+		 * There is no vdd-wlan on board and this is not for DSRC.
+		 * IO and XTAL share the same vreg.
+		 */
+		vdd-wlan-io-supply = <&pm8916_l5>;
+		qcom,cap-tsf-gpio = <&msm_gpio 42 1>;
+		qcom,wlan-ramdump-dynamic = <0x200000>;
+		qcom,msm-bus,name = "msm-cnss";
+		qcom,msm-bus,num-cases = <4>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<79 512 0 0>,             /* No vote */
+			<79 512 6250 200000>,     /* 50 Mbps */
+			<79 512 25000 200000>,    /* 200 Mbps */
+			<79 512 2048000 4096000>; /* MAX */
+	};
+
+	gpio_keys {
+		status = "disable";
+	};
+};
+
+&i2c_1 {
+	status = "okay";
+	icm20602@68 {
+		compatible = "invensense,icm20602";
+		reg = <0x68>;
+		interrupt-parent = <&msm_gpio>;
+		interrupts = <12 0>;
+		invensense,icm20602-gpio = <&msm_gpio 12 0x0>;
+		vdd-ldo-supply = <&pm8916_l6>;
+		interrupt-names = "icm20602_irq";
+		pinctrl-names = "imu_active","imu_suspend";
+		pinctrl-0 = <&imu_int_active>;
+		pinctrl-1 = <&imu_int_suspend>;
+		status = "ok";
+	};
+	vl53l0x@29 {
+		compatible = "st,stmvl53l0";
+		reg = <0x29>;
+		status = "ok";
+	};
+};
+
+&wcnss {
+	status = "disabled";
+};
+
+&msm_gpio {
+	sdc2_wlan_gpio_on: sdc2_wlan_gpio_on {
+		mux {
+			pins = "gpio43";
+			function = "gpio";
+		};
+		config {
+			pins = "gpio43";
+			drive-strength = <10>;
+			bias-pull-up;
+			output-high;
+		};
+	};
+
+	sdc2_wlan_gpio_off: sdc2_wlan_gpio_off {
+		mux {
+			pins = "gpio43";
+			function = "gpio";
+		};
+		config {
+			pins = "gpio43";
+			drive-strength = <2>;
+			bias-disable;
+			output-low;
+		};
+	};
 };
 
 &sdhc_2 {
-	status = "disabled";
+	/delete-property/cd-gpios;
+	#address-cells = <0>;
+	interrupt-parent = <&sdhc_2>;
+	interrupts = <0 1 2>;
+	#interrupt-cells = <1>;
+	interrupt-map-mask = <0xffffffff>;
+	interrupt-map = <0 &intc 0 125 0
+		1 &intc 0 221 0
+		2 &msm_gpio 40 0x1>;
+	interrupt-names = "hc_irq", "pwr_irq", "sdiowakeup_irq";
+
+	qcom,vdd-voltage-level = <1800000 2950000>;
+	qcom,vdd-current-level = <15000 400000>;
+
+	qcom,vdd-io-voltage-level = <1800000 1800000>;
+	qcom,vdd-io-current-level = <200 50000>;
+	qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+	qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on
+	&sdc2_wlan_gpio_on>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off
+	&sdc2_wlan_gpio_off>;
+	qcom,nonremovable;
+	qcom,core_3_0v_support;
+	status = "ok";
 };
 
 &usb_otg {
@@ -87,7 +192,7 @@
 };
 
 &external_image_mem {
-	reg = <0x0 0x87a00000 0x0 0x0600000>;
+	reg = <0x0 0x87900000 0x0 0x0700000>;
 };
 
 &modem_adsp_mem {
@@ -95,7 +200,7 @@
 };
 
 &peripheral_mem {
-	reg = <0x0 0x89e00000 0x0 0x0700000>;
+	status = "disabled";
 };
 
 &pm8916_chg {
@@ -109,3 +214,12 @@
 &blsp1_uart2_hs {
 	status = "ok";
 };
+
+&i2c_1 {
+	status = "okay";
+	vl53l0x@29 {
+		compatible = "st,stmvl53l0";
+		reg = <0x29>;
+		status = "ok";
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8009w-bg-alpha.dts b/arch/arm64/boot/dts/qcom/apq8009w-bg-alpha.dts
index 1fe7b15..20878c0 100644
--- a/arch/arm64/boot/dts/qcom/apq8009w-bg-alpha.dts
+++ b/arch/arm64/boot/dts/qcom/apq8009w-bg-alpha.dts
@@ -52,6 +52,7 @@
 	qcom,blackghost {
 		compatible = "qcom,pil-blackghost";
 
+		qcom,pil-force-shutdown;
 		qcom,firmware-name = "bg-wear";
 		/* GPIO inputs from blackghost */
 		qcom,bg2ap-status-gpio = <&msm_gpio 97 0>;
@@ -157,8 +158,11 @@
 		interrupts = <50 0>;
 		interrupt-names = "nfc_irq";
 		pinctrl-names = "nfc_active","nfc_suspend";
-		pinctrl-0 = <&nfcw_int_active &nfcw_disable_active>;
+		pinctrl-0 = <&nfcw_int_active
+			     &nfcw_disable_active
+			     &nfc_clk_default>;
 		pinctrl-1 = <&nfcw_int_suspend &nfcw_disable_suspend>;
+		clocks = <&clock_rpm clk_bb_clk3_pin>;
 		clock-names = "ref_clk";
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/apq8009w-bg-wtp-v2.dts b/arch/arm64/boot/dts/qcom/apq8009w-bg-wtp-v2.dts
index 8113670..e7af39f 100644
--- a/arch/arm64/boot/dts/qcom/apq8009w-bg-wtp-v2.dts
+++ b/arch/arm64/boot/dts/qcom/apq8009w-bg-wtp-v2.dts
@@ -71,6 +71,7 @@
 	qcom,blackghost {
 		compatible = "qcom,pil-blackghost";
 
+		qcom,pil-force-shutdown;
 		qcom,firmware-name = "bg-wear";
 		/* GPIO inputs from blackghost */
 		qcom,bg2ap-status-gpio = <&msm_gpio 97 0>;
@@ -176,8 +177,11 @@
 		interrupts = <50 0>;
 		interrupt-names = "nfc_irq";
 		pinctrl-names = "nfc_active","nfc_suspend";
-		pinctrl-0 = <&nfcw_int_active &nfcw_disable_active>;
+		pinctrl-0 = <&nfcw_int_active
+			     &nfcw_disable_active
+			     &nfc_clk_default>;
 		pinctrl-1 = <&nfcw_int_suspend &nfcw_disable_suspend>;
+		clocks = <&clock_rpm clk_bb_clk3_pin>;
 		clock-names = "ref_clk";
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/apq8017-audio.dtsi b/arch/arm64/boot/dts/qcom/apq8017-audio.dtsi
new file mode 100644
index 0000000..56f69ad
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8017-audio.dtsi
@@ -0,0 +1,345 @@
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&wsa881x_211 {
+	qcom,spkr-sd-n-gpio = <&tlmm 92 0>;
+};
+
+&wsa881x_212 {
+	qcom,spkr-sd-n-gpio = <&tlmm 92 0>;
+};
+
+&wsa881x_213 {
+	qcom,spkr-sd-n-gpio = <&tlmm 92 0>;
+};
+
+&wsa881x_214 {
+	qcom,spkr-sd-n-gpio = <&tlmm 92 0>;
+};
+
+&pm8937_gpios {
+	gpio@c000 {
+		status = "ok";
+		qcom,mode = <1>;
+		qcom,pull = <5>;
+		qcom,vin-sel = <0>;
+		qcom,src-sel = <2>;
+		qcom,master-en = <1>;
+		qcom,out-strength = <2>;
+	};
+
+	gpio@c600 {
+		status = "ok";
+		qcom,mode = <1>;
+		qcom,pull = <5>;
+		qcom,vin-sel = <0>;
+		qcom,src-sel = <0>;
+		qcom,master-en = <1>;
+		qcom,out-strength = <2>;
+	};
+};
+
+&slim_msm {
+	status = "okay";
+};
+
+&wcd9xxx_intc {
+	status = "okay";
+};
+
+&clock_audio {
+	status = "okay";
+};
+
+&wcd9335 {
+	status = "okay";
+	cdc-vdd-mic-bias-supply = <&pm8937_l9>;
+	qcom,cdc-vdd-mic-bias-voltage = <3300000 3300000>;
+	qcom,cdc-vdd-mic-bias-current = <15000>;
+};
+
+&wcd_rst_gpio {
+	status = "okay";
+};
+
+&ext_codec {
+	status = "okay";
+	qcom,msm-mbhc-hphl-swh = <1>;
+	qcom,msm-mbhc-gnd-swh = <1>;
+	qcom,tdm-audio-intf;
+	qcom,afe-rxtx-lb;
+	reg = <0xc051000 0x4>,
+		<0xc051004 0x4>,
+		<0xc055000 0x4>,
+		<0xc052000 0x4>,
+		<0x0c056000 0x4>,
+		<0x0c054000 0x4>,
+		<0x0c053000 0x4>;
+	reg-names = "csr_gp_io_mux_mic_ctl",
+		"csr_gp_io_mux_spkr_ctl",
+		"csr_gp_io_lpaif_pri_pcm_pri_mode_muxsel",
+		"csr_gp_io_mux_quin_ctl",
+		"csr_gp_io_lpaif_qui_pcm_sec_mode_muxsel",
+		"csr_gp_io_mux_mic_ext_clk_ctl",
+		"csr_gp_io_mux_sec_tlmm_ctl";
+	qcom,audio-routing =
+		"AIF4 VI", "MCLK",
+		"AIF4 VI", "MICBIAS_REGULATOR",
+		"RX_BIAS", "MCLK",
+		"BRIDGE RX OUT", "MCLK",
+		"BRIDGE TX IN", "MCLK",
+		"MADINPUT", "MCLK",
+		"AIF4 MAD", "MICBIAS_REGULATOR",
+		"AMIC1", "MIC BIAS3",
+		"MIC BIAS3", "Handset Mic",
+		"AMIC2", "MIC BIAS2",
+		"MIC BIAS2", "Headset Mic",
+		"AMIC3", "MIC BIAS3",
+		"MIC BIAS3", "Secondary Mic",
+		"AMIC4", "MIC BIAS3",
+		"MIC BIAS3", "Analog Mic4",
+		"AMIC5", "MIC BIAS4",
+		"MIC BIAS4", "Analog Mic7",
+		"AMIC6", "MIC BIAS4",
+		"MIC BIAS4", "Analog Mic6",
+		"DMIC0", "MIC BIAS1",
+		"MIC BIAS1", "Digital Mic0",
+		"DMIC1", "MIC BIAS1",
+		"MIC BIAS1", "Digital Mic1",
+		"DMIC2", "MIC BIAS3",
+		"MIC BIAS3", "Digital Mic2",
+		"DMIC3", "MIC BIAS3",
+		"MIC BIAS3", "Digital Mic3",
+		"DMIC4", "MIC BIAS4",
+		"MIC BIAS4", "Digital Mic4",
+		"DMIC5", "MIC BIAS4",
+		"MIC BIAS4", "Digital Mic5",
+		"MIC BIAS3", "MICBIAS_REGULATOR",
+		"MIC BIAS4", "MICBIAS_REGULATOR",
+		"SpkrLeft IN", "SPK1 OUT",
+		"SpkrRight IN", "SPK2 OUT";
+	asoc-cpu = <&dai_pri_auxpcm>,
+		<&dai_mi2s2>, <&dai_mi2s3>, <&dai_mi2s5>,
+		<&sb_0_rx>, <&sb_0_tx>, <&sb_1_rx>, <&sb_1_tx>,
+		<&sb_2_rx>, <&sb_2_tx>, <&sb_3_rx>, <&sb_3_tx>,
+		<&sb_4_rx>, <&sb_4_tx>, <&sb_5_tx>,
+		<&afe_pcm_rx>, <&afe_pcm_tx>,
+		<&afe_proxy_rx>, <&afe_proxy_tx>,
+		<&incall_record_rx>, <&incall_record_tx>,
+		<&incall_music_rx>, <&incall_music_2_rx>,
+		<&sb_5_rx>,  <&bt_sco_rx>,
+		<&bt_sco_tx>, <&int_fm_rx>, <&int_fm_tx>,
+		<&sb_6_rx>, <&dai_pri_tdm_rx_0>, <&dai_pri_tdm_tx_0>,
+		<&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>, <&afe_loopback_tx>;
+	asoc-cpu-names = "msm-dai-q6-auxpcm.1",
+			"msm-dai-q6-mi2s.2",
+			"msm-dai-q6-mi2s.3", "msm-dai-q6-mi2s.5",
+			"msm-dai-q6-dev.16384", "msm-dai-q6-dev.16385",
+			"msm-dai-q6-dev.16386", "msm-dai-q6-dev.16387",
+			"msm-dai-q6-dev.16388", "msm-dai-q6-dev.16389",
+			"msm-dai-q6-dev.16390", "msm-dai-q6-dev.16391",
+			"msm-dai-q6-dev.16392", "msm-dai-q6-dev.16393",
+			"msm-dai-q6-dev.16395", "msm-dai-q6-dev.224",
+			"msm-dai-q6-dev.225", "msm-dai-q6-dev.241",
+			"msm-dai-q6-dev.240", "msm-dai-q6-dev.32771",
+			"msm-dai-q6-dev.32772", "msm-dai-q6-dev.32773",
+			"msm-dai-q6-dev.32770", "msm-dai-q6-dev.16394",
+			"msm-dai-q6-dev.12288", "msm-dai-q6-dev.12289",
+			"msm-dai-q6-dev.12292", "msm-dai-q6-dev.12293",
+			"msm-dai-q6-dev.16396", "msm-dai-q6-tdm.36864",
+			"msm-dai-q6-tdm.36865", "msm-dai-q6-tdm.36880",
+			"msm-dai-q6-tdm.36881", "msm-dai-q6-dev.24577";
+	qcom,msm-gpios =
+		"quin_i2s",
+		"us_eu_gpio",
+		"quat_i2s";
+	qcom,pinctrl-names =
+		"all_off",
+		"quin_i2s_act",
+		"us_eu_gpio_act",
+		"quin_i2s_us_eu_gpio_act",
+		"quat_i2s_act",
+		"quat_i2s_quin_i2s_act",
+		"quat_i2s_us_eu_gpio_act",
+		"quat_i2s_us_eu_gpio_quin_i2s_act";
+	pinctrl-names =
+		"all_off",
+		"quin_i2s_act",
+		"us_eu_gpio_act",
+		"quin_i2s_us_eu_gpio_act",
+		"quat_i2s_act",
+		"quat_i2s_quin_i2s_act",
+		"quat_i2s_us_eu_gpio_act",
+		"quat_i2s_us_eu_gpio_quin_i2s_act";
+
+	pinctrl-0 = <&pri_tlmm_ws_sus
+		&cross_conn_det_sus &pri_mi2s_sd0_sleep
+		&pri_mi2s_sck_sleep &pri_mi2s_sd1_sleep
+		&sec_mi2s_ws_sleep &sec_mi2s_sck_sleep
+		&sec_mi2s_sd1_sleep &sec_mi2s_sd0_sleep>;
+	pinctrl-1 = <&pri_tlmm_ws_act
+		&cross_conn_det_sus &pri_mi2s_sd0_active
+		&pri_mi2s_sck_active &pri_mi2s_sd1_active
+		&sec_mi2s_ws_sleep &sec_mi2s_sck_sleep
+		&sec_mi2s_sd1_sleep &sec_mi2s_sd0_sleep>;
+	pinctrl-2 = <&pri_tlmm_ws_sus
+		&cross_conn_det_act &pri_mi2s_sd0_sleep
+		&pri_mi2s_sck_sleep &pri_mi2s_sd1_sleep
+		&sec_mi2s_ws_sleep &sec_mi2s_sck_sleep
+		&sec_mi2s_sd1_sleep &sec_mi2s_sd0_sleep>;
+	pinctrl-3 = <&pri_tlmm_ws_act
+		&cross_conn_det_act &pri_mi2s_sd0_active
+		&pri_mi2s_sck_active &pri_mi2s_sd1_active
+		&sec_mi2s_ws_sleep &sec_mi2s_sck_sleep
+		&sec_mi2s_sd1_sleep &sec_mi2s_sd0_sleep>;
+	pinctrl-4 = <&pri_tlmm_ws_sus
+		&cross_conn_det_sus &pri_mi2s_sd0_sleep
+		&pri_mi2s_sck_sleep &pri_mi2s_sd1_sleep
+		&sec_mi2s_ws_active &sec_mi2s_sck_active
+		&sec_mi2s_sd1_active &sec_mi2s_sd0_active>;
+	pinctrl-5 = <&pri_tlmm_ws_act
+		&cross_conn_det_sus &pri_mi2s_sd0_active
+		&pri_mi2s_sck_active &pri_mi2s_sd1_active
+		&sec_mi2s_ws_active &sec_mi2s_sck_active
+		&sec_mi2s_sd1_active &sec_mi2s_sd0_active>;
+	pinctrl-6 = <&pri_tlmm_ws_sus
+		&cross_conn_det_act &pri_mi2s_sd0_sleep
+		&pri_mi2s_sck_sleep &pri_mi2s_sd1_sleep
+		&sec_mi2s_ws_active &sec_mi2s_sck_active
+		&sec_mi2s_sd1_active &sec_mi2s_sd0_active>;
+	pinctrl-7 = <&pri_tlmm_ws_act
+		&cross_conn_det_act &pri_mi2s_sd0_active
+		&pri_mi2s_sck_active &pri_mi2s_sd1_active
+		&sec_mi2s_ws_active &sec_mi2s_sck_active
+		&sec_mi2s_sd1_active &sec_mi2s_sd0_active>;
+};
+
+&int_codec {
+	status = "disabled";
+};
+
+&wsa881x_i2c_f {
+	status = "disabled";
+};
+
+&wsa881x_i2c_45 {
+	status = "disabled";
+};
+
+&soc {
+	qcom,msm-dai-tdm-pri-rx {
+		compatible = "qcom,msm-dai-tdm";
+		qcom,msm-cpudai-tdm-group-id = <37120>;
+		qcom,msm-cpudai-tdm-group-num-ports = <1>;
+		qcom,msm-cpudai-tdm-group-port-id = <36864>;
+		qcom,msm-cpudai-tdm-clk-rate = <12288000>;
+		qcom,msm-cpudai-tdm-clk-attribute = /bits/ 16 <1>;
+		dai_pri_tdm_rx_0: qcom,msm-dai-q6-tdm-pri-rx-0 {
+			compatible = "qcom,msm-dai-q6-tdm";
+			qcom,msm-cpudai-tdm-dev-id = <36864>;
+			qcom,msm-cpudai-tdm-sync-mode = <0>;
+			qcom,msm-cpudai-tdm-sync-src = <1>;
+			qcom,msm-cpudai-tdm-data-out = <0>;
+			qcom,msm-cpudai-tdm-invert-sync = <0>;
+			qcom,msm-cpudai-tdm-data-delay = <1>;
+			qcom,msm-cpudai-tdm-data-align = <0>;
+		};
+	};
+
+	qcom,msm-dai-tdm-pri-tx {
+		compatible = "qcom,msm-dai-tdm";
+		qcom,msm-cpudai-tdm-group-id = <37121>;
+		qcom,msm-cpudai-tdm-group-num-ports = <1>;
+		qcom,msm-cpudai-tdm-group-port-id = <36865>;
+		qcom,msm-cpudai-tdm-clk-rate = <12288000>;
+		qcom,msm-cpudai-tdm-clk-attribute = /bits/ 16 <1>;
+		dai_pri_tdm_tx_0: qcom,msm-dai-q6-tdm-pri-tx-0 {
+			compatible = "qcom,msm-dai-q6-tdm";
+			qcom,msm-cpudai-tdm-dev-id = <36865>;
+			qcom,msm-cpudai-tdm-sync-mode = <0>;
+			qcom,msm-cpudai-tdm-sync-src = <1>;
+			qcom,msm-cpudai-tdm-data-out = <0>;
+			qcom,msm-cpudai-tdm-invert-sync = <0>;
+			qcom,msm-cpudai-tdm-data-delay = <1>;
+			qcom,msm-cpudai-tdm-data-align = <0>;
+		};
+	};
+
+	qcom,msm-dai-tdm-sec-rx {
+		compatible = "qcom,msm-dai-tdm";
+		qcom,msm-cpudai-tdm-group-id = <37136>;
+		qcom,msm-cpudai-tdm-group-num-ports = <1>;
+		qcom,msm-cpudai-tdm-group-port-id = <36880>;
+		qcom,msm-cpudai-tdm-clk-rate = <12288000>;
+		qcom,msm-cpudai-tdm-clk-attribute = /bits/ 16 <1>;
+		dai_sec_tdm_rx_0: qcom,msm-dai-q6-tdm-sec-rx-0 {
+			compatible = "qcom,msm-dai-q6-tdm";
+			qcom,msm-cpudai-tdm-dev-id = <36880>;
+			qcom,msm-cpudai-tdm-sync-mode = <0>;
+			qcom,msm-cpudai-tdm-sync-src = <1>;
+			qcom,msm-cpudai-tdm-data-out = <0>;
+			qcom,msm-cpudai-tdm-invert-sync = <0>;
+			qcom,msm-cpudai-tdm-data-delay = <1>;
+			qcom,msm-cpudai-tdm-data-align = <0>;
+		};
+	};
+
+	qcom,msm-dai-tdm-sec-tx {
+		compatible = "qcom,msm-dai-tdm";
+		qcom,msm-cpudai-tdm-group-id = <37137>;
+		qcom,msm-cpudai-tdm-group-num-ports = <1>;
+		qcom,msm-cpudai-tdm-group-port-id = <36881>;
+		qcom,msm-cpudai-tdm-clk-rate = <12288000>;
+		qcom,msm-cpudai-tdm-clk-attribute = /bits/ 16 <1>;
+		dai_sec_tdm_tx_0: qcom,msm-dai-q6-tdm-sec-tx-0 {
+			compatible = "qcom,msm-dai-q6-tdm";
+			qcom,msm-cpudai-tdm-dev-id = <36881>;
+			qcom,msm-cpudai-tdm-sync-mode = <0>;
+			qcom,msm-cpudai-tdm-sync-src = <1>;
+			qcom,msm-cpudai-tdm-data-out = <0>;
+			qcom,msm-cpudai-tdm-invert-sync = <0>;
+			qcom,msm-cpudai-tdm-data-delay = <1>;
+			qcom,msm-cpudai-tdm-data-align = <0>;
+		};
+	};
+};
+
+&dai_pri_auxpcm {
+	qcom,msm-cpudai-afe-clk-ver = <2>;
+};
+
+&tlmm {
+	tlmm_gpio_key {
+		gpio_key_active: gpio_key_active {
+			mux {
+				pins = "gpio91", "gpio127", "gpio128";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio91", "gpio127", "gpio128";
+			};
+		};
+
+		gpio_key_suspend: gpio_key_suspend {
+			mux {
+				pins = "gpio91", "gpio127", "gpio128";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio91", "gpio127", "gpio128";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8017-pmi8937-cdp-wcd-rome.dts b/arch/arm64/boot/dts/qcom/apq8017-pmi8937-cdp-wcd-rome.dts
new file mode 100644
index 0000000..19c24f8
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8017-pmi8937-cdp-wcd-rome.dts
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "apq8017.dtsi"
+#include "msm8917-cdp.dtsi"
+#include "msm8917-pmi8937.dtsi"
+#include "apq8017-rome.dtsi"
+#include "apq8017-audio.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. APQ8017-PMI8937 CDP \
+						with WCD codec/Rome card";
+	compatible = "qcom,apq8017-cdp", "qcom,apq8017", "qcom,cdp";
+	qcom,board-id= <1 2>;
+	qcom,pmic-id = <0x10019 0x020037 0x0 0x0>;
+};
+
+&blsp1_uart1 {
+	status = "ok";
+};
+
+&sdhc_2 {
+	/* device core power supply */
+	/delete-property/vdd-supply;
+	/delete-property/qcom,vdd-voltage-level;
+	/delete-property/qcom,vdd-current-level;
+
+	/* device communication power supply */
+	vdd-io-supply = <&pm8937_l5>;
+	qcom,vdd-io-always-on;
+	qcom,vdd-io-voltage-level = <1800000 1800000>;
+	qcom,vdd-io-current-level = <200 325000>;
+
+	qcom,core_3_0v_support;
+	qcom,nonremovable;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on
+			&sdc2_wlan_gpio_active>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off
+			&sdc2_wlan_gpio_sleep>;
+
+	#address-cells = <0>;
+	interrupt-parent = <&sdhc_2>;
+	interrupts = <0 1 2>;
+	#interrupt-cells = <1>;
+	interrupt-map-mask = <0xffffffff>;
+	interrupt-map = <0 &intc 0 125 0
+		1 &intc 0 221 0
+		2 &tlmm 124 0x4>;
+	interrupt-names = "hc_irq", "pwr_irq", "sdiowakeup_irq";
+
+	/delete-property/cd-gpios;
+	/delete-property/qcom,devfreq,freq-table;
+
+	status = "ok";
+
+};
+
+&mdss_fb0 {
+	/delete-node/ qcom,cont-splash-memory;
+};
+
+/delete-node/ &cont_splash_mem;
+
+&soc {
+	gpio_keys {
+		/delete-node/ home;
+	};
+};
+
+&modem_mem {
+	reg = <0x0 0x86800000 0x0 0x1500000>;
+};
+
+&adsp_fw_mem {
+	reg = <0x0 0x87d00000 0x0 0x1100000>;
+};
+
+&wcnss_fw_mem {
+	reg = <0x0 0x88e00000 0x0 0x700000>;
+};
+
+&secure_mem {
+	status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8017-pmi8937-cdp.dts b/arch/arm64/boot/dts/qcom/apq8017-pmi8937-cdp.dts
new file mode 100644
index 0000000..6faa413
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8017-pmi8937-cdp.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "apq8017.dtsi"
+#include "msm8917-cdp.dtsi"
+#include "msm8917-pmi8937.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. APQ8017-PMI8937 CDP";
+	compatible = "qcom,apq8017-cdp", "qcom,apq8017", "qcom,cdp";
+	qcom,board-id= <1 0>;
+	qcom,pmic-id = <0x10019 0x020037 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8017-pmi8937-mtp.dts b/arch/arm64/boot/dts/qcom/apq8017-pmi8937-mtp.dts
new file mode 100644
index 0000000..01305e6
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8017-pmi8937-mtp.dts
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "apq8017.dtsi"
+#include "msm8917-mtp.dtsi"
+#include "msm8917-pmi8937.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. APQ8017-PMI8937 MTP";
+	compatible = "qcom,apq8017-mtp", "qcom,apq8017", "qcom,mtp";
+	qcom,board-id= <8 0>;
+	qcom,pmic-id = <0x10019 0x020037 0x0 0x0>;
+};
+
+&blsp1_uart1 {
+	status = "ok";
+};
+
+&vendor {
+	mtp_batterydata: qcom,battery-data {
+		qcom,batt-id-range-pct = <15>;
+		#include "batterydata-itech-3000mah.dtsi"
+		#include "batterydata-ascent-3450mAh.dtsi"
+	};
+};
+
+&qpnp_fg {
+	qcom,battery-data = <&mtp_batterydata>;
+};
+
+&qpnp_smbcharger {
+	qcom,battery-data = <&mtp_batterydata>;
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8017-pmi8950-cdp-wcd-rome.dts b/arch/arm64/boot/dts/qcom/apq8017-pmi8950-cdp-wcd-rome.dts
new file mode 100644
index 0000000..8ddb1fc
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8017-pmi8950-cdp-wcd-rome.dts
@@ -0,0 +1,309 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "apq8017.dtsi"
+#include "msm8917-cdp.dtsi"
+#include "msm8917-pmi8950.dtsi"
+#include "apq8017-rome.dtsi"
+#include "apq8017-audio.dtsi"
+#include "apq8017-pmi8950-cdp-wcd-rome.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. APQ8017-PMI8950 CDP \
+						with WCD codec/Rome card";
+	compatible = "qcom,apq8017-cdp", "qcom,apq8017", "qcom,cdp";
+	qcom,board-id= <1 2>;
+	qcom,pmic-id = <0x10019 0x010011 0x0 0x0>;
+};
+
+&blsp1_uart1 {
+	status = "ok";
+};
+
+&wcd9335 {
+	qcom,cdc-mic-unmute-delay = <50>;
+};
+
+&sdhc_2 {
+	/* device core power supply */
+	/delete-property/vdd-supply;
+	/delete-property/qcom,vdd-voltage-level;
+	/delete-property/qcom,vdd-current-level;
+
+	/* device communication power supply */
+	vdd-io-supply = <&pm8937_l5>;
+	qcom,vdd-io-always-on;
+	qcom,vdd-io-voltage-level = <1800000 1800000>;
+	qcom,vdd-io-current-level = <200 325000>;
+
+	qcom,core_3_0v_support;
+	qcom,nonremovable;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on
+			&sdc2_wlan_gpio_active>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off
+			&sdc2_wlan_gpio_sleep>;
+
+	#address-cells = <0>;
+	interrupt-parent = <&sdhc_2>;
+	interrupts = <0 1 2>;
+	#interrupt-cells = <1>;
+	interrupt-map-mask = <0xffffffff>;
+	interrupt-map = <0 &intc 0 125 0
+		1 &intc 0 221 0
+		2 &tlmm 124 0x4>;
+	interrupt-names = "hc_irq", "pwr_irq", "sdiowakeup_irq";
+
+	/delete-property/cd-gpios;
+	/delete-property/qcom,devfreq,freq-table;
+
+	status = "ok";
+
+};
+
+&mdss_fb0 {
+	/delete-node/ qcom,cont-splash-memory;
+};
+
+/delete-node/ &cont_splash_mem;
+
+&soc {
+	gpio_keys {
+		/delete-node/ home;
+	};
+};
+
+&usb_otg {
+	vbus_otg-supply = <0>;
+	qcom,vbus-low-as-hostmode;
+};
+
+&i2c_4 {
+	usb2533@2c {
+		status = "ok";
+	};
+};
+
+&i2c_2 {
+	/* DSI_TO_HDMI I2C configuration */
+	adv7533@39 {
+		compatible = "adv7533";
+		reg = <0x39>;
+		instance_id = <0>;
+		adi,video-mode = <3>; /* 3 = 1080p */
+		adi,main-addr = <0x39>;
+		adi,cec-dsi-addr = <0x3C>;
+		adi,enable-audio;
+		adi,irq-gpio = <&tlmm 0x29 0x2002>;
+		adi,power-down-gpio = <&tlmm 0x7D 0x0>;
+		adi,switch-gpio = <&pm8937_gpios 0x8 0x1>;
+		pinctrl-names = "pmx_adv7533_active",
+					"pmx_adv7533_suspend";
+		pinctrl-0 = <&adv7533_int_active>;
+		pinctrl-1 = <&adv7533_int_suspend>;
+	};
+
+	pericom-type-c@1d {
+		status = "disabled";
+	};
+};
+
+&mdss_dsi {
+	hw-config = "single_dsi";
+};
+
+&mdss_dsi0 {
+	qcom,dsi-pref-prim-pan = <&dsi_adv7533_1080p>;
+	qcom,platform-intf-mux-gpio = <&tlmm 115 0>;
+	status = "ok";
+	qcom,bridge-index = <0>;
+	qcom,pluggable;
+};
+
+&dsi_adv7533_1080p {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+};
+
+&modem_mem {
+	reg = <0x0 0x86800000 0x0 0x1500000>;
+};
+
+&adsp_fw_mem {
+	reg = <0x0 0x87d00000 0x0 0x1100000>;
+};
+
+&wcnss_fw_mem {
+	reg = <0x0 0x88e00000 0x0 0x700000>;
+};
+
+&other_ext_mem {
+	reg = <0x0 0x84f00000 0x0 0x1900000>;
+};
+
+&qcom_seecom {
+	reg = <0x84f00000 0x1400000>;
+};
+
+&secure_mem {
+	status = "disabled";
+};
+
+/* Warning, SPI6 & I2C6 cannot be enabled at the same time due to pin usage. */
+&spi_6 {
+	status = "disabled";
+};
+
+&i2c_6 {
+	status = "ok";
+	qcom,clk-freq-out = <100000>;
+
+	/* TI591XX LED Drivers */
+	tlc59116@60 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "ti,tlc59116";
+		reg = <0x60>;
+		out0@0 {
+			label = "ledsec1_g";
+			reg = <0x0>;
+		};
+		out1@1 {
+			label = "ledsec1_r";
+			reg = <0x1>;
+		};
+		out2@2 {
+			label = "ledsec1_b";
+			reg = <0x2>;
+		};
+		out3@3 {
+			label = "ledsec2_g";
+			reg = <0x3>;
+		};
+		out4@4 {
+			label = "ledsec2_r";
+			reg = <0x4>;
+		};
+		out5@5 {
+			label = "ledsec2_b";
+			reg = <0x5>;
+		};
+		out6@6 {
+			label = "ledsec3_g";
+			reg = <0x6>;
+		};
+		out7@7 {
+			label = "ledsec3_r";
+			reg = <0x7>;
+		};
+		out8@8 {
+			label = "ledsec3_b";
+			reg = <0x8>;
+		};
+		out9@9 {
+			label = "ledsec4_g";
+			reg = <0x9>;
+		};
+		out10@10 {
+			label = "ledsec4_r";
+			reg = <0xa>;
+		};
+		out11@11 {
+			label = "ledsec4_b";
+			reg = <0xb>;
+		};
+		out12@12 {
+			label = "ledsec5_g";
+			reg = <0xc>;
+		};
+		out13@13 {
+			label = "ledsec5_r";
+			reg = <0xd>;
+		};
+		out14@14 {
+			label = "ledsec5_b";
+			reg = <0xe>;
+		};
+	};
+
+	tlc59116@61 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "ti,tlc59116";
+		reg = <0x61>;
+		out0@0 {
+			label = "ledsec6_g";
+			reg = <0x0>;
+		};
+		out1@1 {
+			label = "ledsec6_r";
+			reg = <0x1>;
+		};
+		out2@2 {
+			label = "ledsec6_b";
+			reg = <0x2>;
+		};
+		out3@3 {
+			label = "ledsec7_g";
+			reg = <0x3>;
+		};
+		out4@4 {
+			label = "ledsec7_r";
+			reg = <0x4>;
+		};
+		out5@5 {
+			label = "ledsec7_b";
+			reg = <0x5>;
+		};
+		out6@6 {
+			label = "ledsec8_g";
+			reg = <0x6>;
+		};
+		out7@7 {
+			label = "ledsec8_r";
+			reg = <0x7>;
+		};
+		out8@8 {
+			label = "ledsec8_b";
+			reg = <0x8>;
+		};
+		out9@9 {
+			label = "ledsec9_g";
+			reg = <0x9>;
+		};
+		out10@10 {
+			label = "ledsec9_r";
+			reg = <0xa>;
+		};
+		out11@11 {
+			label = "ledsec9_b";
+			reg = <0xb>;
+		};
+	};
+};
+
+&soc {
+	pinctrl@1000000 {
+		i2c6 {
+			tlc59116_reset: tlc59116_reset {
+				config {
+					pins = "gpio20";
+					drive-strength = <16>;
+					bias-pull-up;
+				};
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8017-pmi8950-cdp-wcd-rome.dtsi b/arch/arm64/boot/dts/qcom/apq8017-pmi8950-cdp-wcd-rome.dtsi
new file mode 100644
index 0000000..3337add
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8017-pmi8950-cdp-wcd-rome.dtsi
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/ {
+	model = "Qualcomm Technologies, Inc. APQ8017-PMI8950 CDP \
+						with WCD codec/Rome card";
+	compatible = "qcom,apq8017-cdp", "qcom,apq8017", "qcom,cdp";
+	qcom,board-id= <1 2>;
+	qcom,pmic-id = <0x10019 0x010011 0x0 0x0>;
+
+	aliases {
+		i2c6 = &i2c_6;
+	};
+};
+
+&soc {
+	i2c_6: i2c@7af6000 { /* BLSP2 QUP2 */
+		compatible = "qcom,i2c-msm-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "qup_phys_addr";
+		reg = <0x7af6000 0x600>;
+		interrupt-names = "qup_irq";
+		interrupts = <0 300 0>;
+		qcom,clk-freq-out = <400000>;
+		qcom,clk-freq-in  = <19200000>;
+		clock-names = "iface_clk", "core_clk";
+		clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
+			<&clock_gcc clk_gcc_blsp2_qup2_i2c_apps_clk>;
+
+		pinctrl-names = "i2c_active", "i2c_sleep";
+		pinctrl-0 = <&i2c_6_active>;
+		pinctrl-1 = <&i2c_6_sleep>;
+		qcom,noise-rjct-scl = <0>;
+		qcom,noise-rjct-sda = <0>;
+		qcom,master-id = <84>;
+		dmas = <&dma_blsp2 6 64 0x20000020 0x20>,
+			<&dma_blsp2 7 32 0x20000020 0x20>;
+		dma-names = "tx", "rx";
+		status = "disabled";
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8017-pmi8950-cdp.dts b/arch/arm64/boot/dts/qcom/apq8017-pmi8950-cdp.dts
new file mode 100644
index 0000000..5c8ef83
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8017-pmi8950-cdp.dts
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "apq8017.dtsi"
+#include "msm8917-cdp.dtsi"
+#include "msm8917-pmi8950.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. APQ8017-PMI8950 CDP";
+	compatible = "qcom,apq8017-cdp", "qcom,apq8017", "qcom,cdp";
+	qcom,board-id= <1 0>;
+	qcom,pmic-id = <0x10019 0x010011 0x0 0x0>;
+};
+
+&mdss_fb0 {
+	/delete-node/ qcom,cont-splash-memory;
+};
+
+/delete-node/ &cont_splash_mem;
diff --git a/arch/arm64/boot/dts/qcom/apq8017-pmi8950-mtp.dts b/arch/arm64/boot/dts/qcom/apq8017-pmi8950-mtp.dts
new file mode 100644
index 0000000..b5b7667
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8017-pmi8950-mtp.dts
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "apq8017.dtsi"
+#include "msm8917-mtp.dtsi"
+#include "msm8917-pmi8950.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. APQ8017-PMI8950 MTP";
+	compatible = "qcom,apq8017-mtp", "qcom,apq8017", "qcom,mtp";
+	qcom,board-id= <8 0>;
+	qcom,pmic-id = <0x10019 0x010011 0x0 0x0>;
+};
+
+&blsp1_uart1 {
+	status = "ok";
+};
+
+&vendor {
+	mtp_batterydata: qcom,battery-data {
+		qcom,batt-id-range-pct = <15>;
+		#include "batterydata-itech-3000mah.dtsi"
+		#include "batterydata-ascent-3450mAh.dtsi"
+	};
+};
+
+&qpnp_fg {
+	qcom,battery-data = <&mtp_batterydata>;
+};
+
+&qpnp_smbcharger {
+	qcom,battery-data = <&mtp_batterydata>;
+};
+
+&i2c_2 {
+	/* DSI_TO_HDMI I2C configuration */
+	adv7533@39 {
+		compatible = "adv7533";
+		reg = <0x39>;
+		instance_id = <0>;
+		adi,video-mode = <3>; /* 3 = 1080p */
+		adi,main-addr = <0x39>;
+		adi,cec-dsi-addr = <0x3C>;
+		adi,enable-audio;
+		adi,irq-gpio = <&tlmm 0x29 0x2002>;
+		adi,power-down-gpio = <&tlmm 0x7D 0x0>;
+		adi,switch-gpio = <&pm8937_gpios 0x8 0x1>;
+		pinctrl-names = "pmx_adv7533_active",
+					"pmx_adv7533_suspend";
+		pinctrl-0 = <&adv7533_int_active>;
+		pinctrl-1 = <&adv7533_int_suspend>;
+	};
+};
+
+&mdss_dsi {
+	hw-config = "single_dsi";
+};
+
+&mdss_dsi0 {
+	qcom,dsi-pref-prim-pan = <&dsi_adv7533_1080p>;
+	qcom,platform-intf-mux-gpio = <&tlmm 115 0>;
+	status = "ok";
+	qcom,bridge-index = <0>;
+	qcom,pluggable;
+};
+
+&dsi_adv7533_1080p {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8017-rome.dtsi b/arch/arm64/boot/dts/qcom/apq8017-rome.dtsi
new file mode 100644
index 0000000..7dfa952
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8017-rome.dtsi
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	qcom,cnss_sdio {
+		compatible = "qcom,cnss_sdio";
+		qcom,wlan-ramdump-dynamic = <0x200000>;
+		subsys-name = "AR6320";
+		qcom,cap-tsf-gpio = <&tlmm 126 1>;
+	};
+};
+
+&pm8937_gpios {
+	gpio@c100 { /* GPIO 2 - Rome Sleep Clock */
+		qcom,mode = <1>;		/* Digital output */
+		qcom,vin-sel = <3>;		/* VIN 3 */
+		qcom,src-sel = <2>;		/* Function 2 */
+		qcom,out-strength = <2>;	/* Medium */
+	qcom,pull = <4>;		/* Pull down */
+		qcom,master-en = <1>;		/* Enable GPIO */
+		status = "okay";
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8017.dtsi b/arch/arm64/boot/dts/qcom/apq8017.dtsi
new file mode 100644
index 0000000..fecc7ce8
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8017.dtsi
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8917.dtsi"
+/ {
+	model = "Qualcomm Technologies, Inc. APQ8017";
+	compatible = "qcom,apq8017";
+	qcom,msm-id = <307 0x0>;
+};
+
+&tlmm {
+	pmx_adv7533_int: pmx_adv7533_int {
+		adv7533_int_active: adv7533_int_active {
+			mux {
+				pins = "gpio41";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio41";
+				function = "gpio";
+				drive-strength = <16>;
+				bias-pull-up; /* pull up */
+			};
+		};
+
+		adv7533_int_suspend: adv7533_int_suspend {
+			mux {
+				pins = "gpio41";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio41";
+				drive-strength = <16>;
+				bias-disable;
+			};
+		};
+	};
+};
+
+&mdss_dsi_active {
+	mux {
+		pins = "gpio60", "gpio98", "gpio115", "gpio133";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio60", "gpio98", "gpio115", "gpio133";
+		drive-strength = <8>; /* 8 mA */
+		bias-disable = <0>; /* no pull */
+		output-high;
+	};
+};
+
+&mdss_dsi_suspend {
+	mux {
+		pins = "gpio60", "gpio98", "gpio115", "gpio133";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio60", "gpio98", "gpio115", "gpio133";
+		drive-strength = <2>; /* 2 mA */
+		bias-pull-down; /* pull down */
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8053-iot-mtp.dts b/arch/arm64/boot/dts/qcom/apq8053-iot-mtp.dts
index 8f7e326..8782001 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-iot-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-iot-mtp.dts
@@ -26,3 +26,72 @@
 	qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
 };
 
+&int_codec {
+	status = "disabled";
+};
+
+&wsa881x_i2c_f {
+	status = "disabled";
+};
+
+&wsa881x_i2c_45 {
+	status = "disabled";
+};
+
+&cdc_pri_mi2s_gpios {
+	status = "disabled";
+};
+
+&wsa881x_analog_vi_gpio {
+	status = "disabled";
+};
+
+&wsa881x_analog_clk_gpio {
+	status = "disabled";
+};
+
+&wsa881x_analog_reset_gpio {
+	status = "disabled";
+};
+
+&cdc_comp_gpios {
+	status = "disabled";
+};
+
+&slim_msm {
+	status = "okay";
+};
+
+&dai_slim {
+	status = "okay";
+};
+
+&wcd9xxx_intc {
+	status = "okay";
+};
+
+&clock_audio {
+	status = "okay";
+};
+
+&wcd9335 {
+	status = "okay";
+};
+
+&wcd_rst_gpio {
+	status = "okay";
+};
+
+&ext_codec {
+	qcom,model = "msm8953-tasha-snd-card";
+	status = "okay";
+};
+
+&soc {
+	usb_detect: usb_detect {
+		compatible = "linux,extcon-usb-gpio";
+		pinctrl-names = "default";
+		pinctrl-0 = <&ssusb_mode_sel>;
+		id-gpio = <&tlmm 12 0>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8053-ipc.dts b/arch/arm64/boot/dts/qcom/apq8053-ipc.dts
index 3381b2a..5004d71 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-ipc.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-ipc.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -15,7 +15,8 @@
 
 #include "apq8053.dtsi"
 #include "msm8953-ipc.dtsi"
-
+#include "pmi8950.dtsi"
+#include "msm8953-pmi8950.dtsi"
 / {
 	model = "Qualcomm Technologies, Inc. APQ8053 + PMI8950 IPC";
 	compatible = "qcom,apq8053-ipc", "qcom,apq8053", "qcom,ipc";
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dts b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.1.dts
similarity index 86%
rename from arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dts
rename to arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.1.dts
index 325accf..6c9c266 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.1.dts
@@ -13,10 +13,10 @@
 
 /dts-v1/;
 
-#include "apq8053-lite-lenovo-v1.0.dtsi"
+#include "apq8053-lite-dragon-v2.1.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. APQ8053 Lite Lenovo v1.0 Board";
+	model = "Qualcomm Technologies, Inc. APQ8053 Lite DragonBoard V2.1";
 	compatible = "qcom,apq8053-lite-dragonboard", "qcom,apq8053",
 			"qcom,dragonboard";
 	qcom,board-id= <0x01010020 0>;
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dtsi b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.1.dtsi
similarity index 97%
rename from arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dtsi
rename to arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.1.dtsi
index 4d9c40c..993799b 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.1.dtsi
@@ -14,7 +14,7 @@
 #include "apq8053-lite-dragon.dtsi"
 
 &mdss_dsi0 {
-	qcom,ext_vdd-gpio = <&tlmm 100 0>;
+	qcom,ext-vdd-gpio = <&tlmm 100 0>;
 	qcom,platform-bklight-en-gpio = <&tlmm 95 0>;
 
 	qcom,platform-lane-config = [00 00 ff 0f
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.1.dts b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.2.dts
similarity index 86%
rename from arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.1.dts
rename to arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.2.dts
index 0c7b557..ecc4fea 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.1.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.2.dts
@@ -13,10 +13,10 @@
 
 /dts-v1/;
 
-#include "apq8053-lite-lenovo-v1.1.dtsi"
+#include "apq8053-lite-dragon-v2.2.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. APQ8053 Lite Lenovo v1.1 Board";
+	model = "Qualcomm Technologies, Inc. APQ8053 Lite DragonBoard V2.2";
 	compatible = "qcom,apq8053-lite-dragonboard", "qcom,apq8053",
 			"qcom,dragonboard";
 	qcom,board-id= <0x01010120 0>;
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dtsi b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.2.dtsi
similarity index 70%
copy from arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dtsi
copy to arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.2.dtsi
index 4d9c40c..1744c90 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.2.dtsi
@@ -13,8 +13,33 @@
 
 #include "apq8053-lite-dragon.dtsi"
 
+&i2c_3 {
+	status = "okay";
+	/delete-node/ himax_ts@48;
+};
+
+&mdss_mdp {
+	qcom,mdss-pref-prim-intf = "dsi";
+};
+
+&mdss_dsi {
+	hw-config = "single_dsi";
+};
+
 &mdss_dsi0 {
-	qcom,ext_vdd-gpio = <&tlmm 100 0>;
+	qcom,dsi-pref-prim-pan = <&dsi_boent51021_1200p_video>;
+	pinctrl-names = "mdss_default", "mdss_sleep";
+	pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
+	pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+
+	vdd-supply = <&pm8953_l10>;
+	vddio-supply = <&pm8953_l6>;
+	lab-supply = <&lab_regulator>;
+	ibb-supply = <&ibb_regulator>;
+
+	qcom,platform-te-gpio = <&tlmm 24 0>;
+	qcom,platform-reset-gpio = <&tlmm 61 0>;
+	qcom,ext-vdd-gpio = <&tlmm 100 0>;
 	qcom,platform-bklight-en-gpio = <&tlmm 95 0>;
 
 	qcom,platform-lane-config = [00 00 ff 0f
@@ -24,6 +49,10 @@
 				00 00 ff 8f];
 };
 
+&mdss_dsi1 {
+	status = "disabled";
+};
+
 &eeprom0 {
 	gpios = <&tlmm 26 0>,
 		<&tlmm 40 0>,
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-harman-v1.0.dts b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.3.dts
similarity index 86%
rename from arch/arm64/boot/dts/qcom/apq8053-lite-harman-v1.0.dts
rename to arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.3.dts
index 203b6b8..e3f80be 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-lite-harman-v1.0.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.3.dts
@@ -13,10 +13,10 @@
 
 /dts-v1/;
 
-#include "apq8053-lite-harman-v1.0.dtsi"
+#include "apq8053-lite-dragon-v2.3.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. APQ8053 Lite Harman v1.0 Board";
+	model = "Qualcomm Technologies, Inc. APQ8053 Lite DragonBoard V2.3";
 	compatible = "qcom,apq8053-lite-dragonboard", "qcom,apq8053",
 			"qcom,dragonboard";
 	qcom,board-id= <0x01020020 0>;
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-harman-v1.0.dtsi b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.3.dtsi
similarity index 100%
rename from arch/arm64/boot/dts/qcom/apq8053-lite-harman-v1.0.dtsi
rename to arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.3.dtsi
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-lge-v1.0.dts b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.4.dts
similarity index 86%
rename from arch/arm64/boot/dts/qcom/apq8053-lite-lge-v1.0.dts
rename to arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.4.dts
index 70952dc..1f40ef8 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-lite-lge-v1.0.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.4.dts
@@ -13,10 +13,10 @@
 
 /dts-v1/;
 
-#include "apq8053-lite-lge-v1.0.dtsi"
+#include "apq8053-lite-dragon-v2.4.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. APQ8053 Lite LGE v1.0 Board";
+	model = "Qualcomm Technologies, Inc. APQ8053 Lite DragonBoard V2.4";
 	compatible = "qcom,apq8053-lite-dragonboard", "qcom,apq8053",
 			"qcom,dragonboard";
 	qcom,board-id= <0x01030020 0>;
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-lge-v1.0.dtsi b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.4.dtsi
similarity index 100%
rename from arch/arm64/boot/dts/qcom/apq8053-lite-lge-v1.0.dtsi
rename to arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.4.dtsi
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-dragon.dtsi b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon.dtsi
index e5b4c84..00dc555 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-lite-dragon.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon.dtsi
@@ -103,6 +103,21 @@
 		pinctrl-1 = <&sec_tlmm_lines_sus>;
 	};
 
+	gpio_keys {
+		compatible = "gpio-keys";
+		input-name = "gpio-keys";
+		pinctrl-names = "default";
+		pinctrl-0 = <&gpio_key_active>;
+		vol_up {
+			label = "volume_up";
+			gpios = <&tlmm 85 0x1>;
+			linux,input-type = <1>;
+			linux,code = <115>;
+			debounce-interval = <15>;
+			linux,can-disable;
+			gpio-key,wakeup;
+		};
+	};
 };
 
 &firmware {
@@ -339,6 +354,7 @@
 &sdhc_2 {
 	/* device communication power supply */
 	vdd-io-supply = <&pm8953_l12>;
+	qcom,vdd-io-always-on;
 	qcom,vdd-io-voltage-level = <1800000 1800000>;
 	qcom,vdd-io-current-level = <200 22000>;
 
@@ -357,14 +373,6 @@
 };
 
 &spmi_bus {
-	qcom,pm8953@0 {
-		qcom,power-on@800 {
-			qcom,resin-gpiobase = <1019>;
-			qcom,pon_2 {
-				/delete-property/ linux,code;
-			};
-		};
-	};
 	qcom,pmi8950@2 {
 		qcom,leds@a100 {
 			compatible = "qcom,leds-qpnp";
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.1.dtsi b/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.1.dtsi
deleted file mode 100644
index 396fd55..0000000
--- a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.1.dtsi
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include "apq8053-lite-dragon.dtsi"
-
-&i2c_3 {
-	status = "okay";
-	/delete-node/ himax_ts@48;
-};
-
-&eeprom0 {
-	gpios = <&tlmm 26 0>,
-		<&tlmm 40 0>,
-		<&tlmm 118 0>,
-		<&tlmm 119 0>,
-		<&tlmm 39 0>;
-	qcom,gpio-vdig = <3>;
-	qcom,gpio-vana = <4>;
-	qcom,gpio-req-tbl-num = <0 1 2 3 4>;
-	qcom,gpio-req-tbl-flags = <1 0 0 0 0>;
-	qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
-			"CAM_RESET0",
-			"CAM_VDIG",
-			"CAM_VANA",
-			"CAM_STANDBY0";
-};
-
-&camera0 {
-	qcom,mount-angle = <270>;
-	gpios = <&tlmm 26 0>,
-		<&tlmm 40 0>,
-		<&tlmm 39 0>,
-		<&tlmm 118 0>,
-		<&tlmm 119 0>;
-	qcom,gpio-vdig = <3>;
-	qcom,gpio-vana = <4>;
-	qcom,gpio-req-tbl-num = <0 1 2 3 4>;
-	qcom,gpio-req-tbl-flags = <1 0 0 0 0>;
-	qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
-			"CAM_RESET0",
-			"CAM_STANDBY0",
-			"CAM_VDIG",
-			"CAM_VANA";
-};
-
-&camera1 {
-	qcom,mount-angle = <270>;
-};
-
-&camera2{
-	qcom,mount-angle = <270>;
-};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-390p-auo-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-390p-auo-cmd.dtsi
index b4ac287..06fc5a4 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-390p-auo-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-390p-auo-cmd.dtsi
@@ -70,6 +70,10 @@
 			29 01 00 00 00 00 05 2a 00 04 01 89
 			/* Reset row start address */
 			29 01 00 00 00 00 05 2b 00 00 01 85
+			15 01 00 00 00 00 02 fe 01
+			15 01 00 00 00 00 02 04 00
+			15 01 00 00 00 00 02 fe 00
+			15 01 00 00 00 00 02 3a 77
 			];
 		qcom,mdss-dsi-traffic-mode = "burst_mode";
 		qcom,mdss-dsi-lane-map = "lane_map_0123";
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-boent51021-1200p-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-boent51021-1200p-video.dtsi
new file mode 100644
index 0000000..04accd8
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-boent51021-1200p-video.dtsi
@@ -0,0 +1,92 @@
+/* Novatek Android Driver Sample Code for Novatek chipset
+ *
+ * Copyright (C) 2015-2018 Novatek Microelectronics Corp.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+&mdss_mdp {
+	dsi_boent51021_1200p_video: qcom,mdss_dsi_boent51021_1200p_video {
+		qcom,mdss-dsi-panel-name =
+			"boent51021 1200p video mode dsi panel";
+		qcom,mdss-dsi-panel-controller = <&mdss_dsi0>;
+		qcom,mdss-dsi-panel-type = "dsi_video_mode";
+		qcom,mdss-dsi-panel-framerate = <60>;
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-panel-width = <1200>;
+		qcom,mdss-dsi-panel-height = <1920>;
+		qcom,mdss-dsi-h-front-porch = <100>;
+		qcom,mdss-dsi-h-back-porch = <32>;
+		qcom,mdss-dsi-h-pulse-width = <1>;
+		qcom,mdss-dsi-h-sync-skew = <0>;
+		qcom,mdss-dsi-v-back-porch = <14>;
+		qcom,mdss-dsi-v-front-porch = <25>;
+		qcom,mdss-dsi-v-pulse-width = <1>;
+		qcom,mdss-dsi-h-left-border = <0>;
+		qcom,mdss-dsi-h-right-border = <0>;
+		qcom,mdss-dsi-v-top-border = <0>;
+		qcom,mdss-dsi-v-bottom-border = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-on-command = [
+			23 01 00 00 01 00 02 8f a5
+			23 01 00 00 00 00 02 83 00
+			23 01 00 00 00 00 02 84 00
+			23 01 00 00 00 00 02 8c 80
+			23 01 00 00 00 00 02 cd 6c
+			23 01 00 00 00 00 02 c8 fc
+			23 01 00 00 00 00 02 97 00
+			23 01 00 00 00 00 02 8b 10
+			23 01 00 00 00 00 02 a9 20
+			23 01 00 00 00 00 02 83 aa
+			23 01 00 00 00 00 02 84 11
+			23 01 00 00 00 00 02 a9 4b
+			23 01 00 00 00 00 02 85 04
+			23 01 00 00 00 00 02 86 08
+			23 01 00 00 00 00 02 9c 10
+			05 01 00 00 00 00 02 11 00
+			23 01 00 00 00 00 02 8f 00
+		];
+		qcom,mdss-dsi-off-command = [
+			23 01 00 00 00 00 02 83 00
+			23 01 00 00 78 00 02 84 00
+			05 01 00 00 78 00 02 10 00
+		];
+		qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+		qcom,mdss-dsi-off-command-state = "dsi_lp_mode";
+		qcom,mdss-dsi-h-sync-pulse = <1>;
+		qcom,mdss-dsi-traffic-mode = "burst_mode";
+		qcom,mdss-dsi-lane-map = "lane_map_0123";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-panel-timings = [
+			f2 3a 28 00 6c 70 2c 3e 2e 03 04 00];
+		qcom,mdss-dsi-t-clk-post = <0x0e>;
+		qcom,mdss-dsi-t-clk-pre = <0x33>;
+		qcom,mdss-dsi-bl-min-level = <1>;
+		qcom,mdss-dsi-bl-max-level = <4095>;
+		qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-force-clock-lane-hs;
+		qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+		qcom,mdss-pan-physical-width-dimension = <135>;
+		qcom,mdss-pan-physical-height-dimension = <216>;
+		qcom,mdss-dsi-lp11-init;
+		qcom,mdss-dsi-post-init-delay = <1>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-hx8399c-hd-plus-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-hx8399c-hd-plus-video.dtsi
index 237684e..89c5178 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-hx8399c-hd-plus-video.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-hx8399c-hd-plus-video.dtsi
@@ -20,13 +20,13 @@
 		qcom,mdss-dsi-stream = <0>;
 		qcom,mdss-dsi-panel-width = <720>;
 		qcom,mdss-dsi-panel-height = <1440>;
-		qcom,mdss-dsi-h-front-porch = <24>;
-		qcom,mdss-dsi-h-back-porch = <24>;
+		qcom,mdss-dsi-h-front-porch = <48>;
+		qcom,mdss-dsi-h-back-porch = <48>;
 		qcom,mdss-dsi-h-pulse-width = <16>;
 		qcom,mdss-dsi-h-sync-skew = <0>;
 		qcom,mdss-dsi-v-back-porch = <40>;
-		qcom,mdss-dsi-v-front-porch = <36>;
-		qcom,mdss-dsi-v-pulse-width = <2>;
+		qcom,mdss-dsi-v-front-porch = <60>;
+		qcom,mdss-dsi-v-pulse-width = <4>;
 		qcom,mdss-dsi-h-left-border = <0>;
 		qcom,mdss-dsi-h-right-border = <0>;
 		qcom,mdss-dsi-v-top-border = <0>;
@@ -40,21 +40,21 @@
 				b9 ff 83 99
 			39 01 00 00 00 00 02
 				d2 88
-			39 01 00 00 00 00 10
-				b1 02 04 74 94 01 32 33
-				11 11 e6 5d 56 73 02 02
+			39 01 00 00 00 00 0c
+				b1 02 04 72 92 01
+				32 aa 11 11 52 57
 			39 01 00 00 00 00 10
 				b2 00 80 80 cc 05 07 5a
-				11 10 10 00 1e 70 03 D4
+				11 10 10 00 1e 70 03 d4
 			39 01 00 00 00 00 2d
-				b4 00 ff 59 59 0c ac 00
-				00 0c 00 07 0a 00 28 07
-				08 0c 21 03 00 00 00 ae
-				87 59 59 0c ac 00 00 0c
-				00 07 0a 00 28 07 08 0c
-				01 00 00 ae 01
+				b4 00 ff 59 59 01 ab 00
+				00 09 00 03 05 00 28 03
+				0b 0d 21 03 02 00 0c a3
+				80 59 59 02 ab 00 00 09
+				00 03 05 00 28 03 0b 0d
+				02 00 0c a3 01
 			39 01 00 00 05 00 22
-				d3 00 00 01 01 00 00 10
+				d3 00 0c 03 03 00 00 10
 				10 00 00 03 00 03 00 08
 				78 08 78 00 00 00 00 00
 				24 02 05 05 03 00 00 00
@@ -80,8 +80,8 @@
 			39 01 00 00 00 00 02
 				bd 01
 			39 01 00 00 00 00 11
-				d8 82 ea aa aa 82 ea aa
-				aa 82 ea aa aa 82 ea aa
+				d8 00 00 00 00 00 00 00
+				00 82 ea aa aa 82 ea aa
 				aa
 			39 01 00 00 00 00 02
 				bd 02
@@ -90,25 +90,23 @@
 				3f
 			39 01 00 00 00 00 02
 				bd 00
-			39 01 00 00 00 00 02
-				dd 03
 			39 01 00 00 05 00 37
-				e0 08 2a 39 35 74 7c 87
-				7f 84 8a 8e 91 93 96 9b
-				9c 9e a5 a6 ae a1 af b2
-				5c 58 63 74 08 2a 39 35
-				74 7c 87 7f 84 8a 8e 91
-				93 96 9b 9c 9e a5 a6 ae
-				a1 af b2 5c 58 63 74
+				e0 01 21 31 2d 66 6f 7b
+				75 7a 81 86 89 8c 90 95
+				97 9a a1 a2 aa 9e ad b0
+				5b 57 63 7a 01 21 31 2d
+				66 6f 7b 75 7a 81 86 89
+				8c 90 95 97 9a a1 a2 aa
+				9e ad b0 5b 57 63 7a
 			39 01 00 00 00 00 03
 				b6 7e 7e
 			39 01 00 00 00 00 02
 				cc 08
-			39 01 00 00 00 00 06
-				c7 00 08 00 01 08
-			39 01 00 00 00 00 03
-				c0 25 5a
-			05 01 00 00 78 00 02 11 00
+			39 01 00 00 00 00 02
+				35 00
+			39 01 00 00 00 00 02
+				dd 03
+			05 01 00 00 96 00 02 11 00
 			05 01 00 00 14 00 02 29 00];
 		qcom,mdss-dsi-off-command = [
 			05 01 00 00 14 00 02 28 00
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-icn9706-720-1440p-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-icn9706-720-1440p-video.dtsi
new file mode 100644
index 0000000..d50fe3b
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-icn9706-720-1440p-video.dtsi
@@ -0,0 +1,98 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+	dsi_icn9706_720_1440_vid: qcom,mdss_dsi_icn9706_720_1440p_video {
+		qcom,mdss-dsi-panel-name =
+			"icn9706 720 1440p video mode dsi panel";
+		qcom,mdss-dsi-panel-controller = <&mdss_dsi0>;
+		qcom,mdss-dsi-panel-destination = "display_1";
+		qcom,mdss-dsi-panel-type = "dsi_video_mode";
+		qcom,mdss-dsi-panel-framerate = <60>;
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-panel-width = <720>;
+		qcom,mdss-dsi-panel-height = <1440>;
+		qcom,mdss-dsi-h-front-porch = <84>;
+		qcom,mdss-dsi-h-back-porch = <84>;
+		qcom,mdss-dsi-h-pulse-width = <24>;
+		qcom,mdss-dsi-h-sync-skew = <0>;
+		qcom,mdss-dsi-v-back-porch = <20>;
+		qcom,mdss-dsi-v-front-porch = <24>;
+		qcom,mdss-dsi-v-pulse-width = <8>;
+		qcom,mdss-dsi-h-left-border = <0>;
+		qcom,mdss-dsi-h-right-border = <0>;
+		qcom,mdss-dsi-v-top-border = <0>;
+		qcom,mdss-dsi-v-bottom-border = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-h-sync-pulse = <1>;
+		qcom,mdss-dsi-traffic-mode = "burst_mode";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-map = "lane_map_0123";
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-panel-timings = [8b 1e 14 00 44 48 18 22 19
+						03 04 00];
+		qcom,mdss-dsi-t-clk-post = <0x04>;
+		qcom,mdss-dsi-t-clk-pre = <0x1c>;
+		qcom,mdss-dsi-bl-min-level = <1>;
+		qcom,mdss-dsi-bl-max-level = <4095>;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-on-command = [39 01 00 00 64 00 02 01 00
+			39 01 00 00 00 00 03 f0 5a 5a
+			39 01 00 00 00 00 03 f1 5a 5a
+			39 01 00 00 00 00 03 f0 b4 4b
+			39 01 00 00 00 00 03 b6 10 10
+			39 01 00 00 00 00 15 b4 0a 08 12 10 0e 0c 00 00
+				00 03 00 03 03 03 03 03 03 03 04 06
+			39 01 00 00 00 00 15 b3 0b 09 13 11 0f 0d 00 00
+				00 03 00 03 03 03 03 03 03 03 05 07
+			39 01 00 00 00 00 0d b0 54 32 23 45 44 44 44 44
+				60 01 60 01
+			39 01 00 00 00 00 09 b1 32 84 02 83 15 01 57 01
+			39 01 00 00 00 00 02 b2 33
+			39 01 00 00 00 00 07 bd 54 14 6a 6a 20 19
+			39 01 00 00 00 00 12 b7 01 01 09 11 0d 15 19 0d
+				21 1d 00 00 20 00 02 ff 3c
+			39 01 00 00 00 00 06 b8 23 01 30 34 53
+			39 01 00 00 00 00 05 b9 a1 2c ff c4
+			39 01 00 00 00 00 03 ba 88 23
+			39 01 00 00 00 00 07 c1 16 16 04 0c 10 04
+			39 01 00 00 00 00 03 c2 12 68
+			39 01 00 00 00 00 04 c3 22 31 04
+			39 01 00 00 00 00 06 c7 05 23 6b 41 00
+			39 01 00 00 00 00 27 c8 7c 54 3d 2d 26 16 1b 08
+				25 28 2d 4f 3e 48 3d 3d 35 25 06 7c 54 3d 2d
+				26 16 1b 08 25 28 2d 4f 3e 48 3d 3d 35 25 06
+			39 01 00 00 00 00 09 c6 00 00 68 00 00 60 36 00
+			05 01 00 00 64 00 02 11 00
+			05 01 00 00 32 00 02 29 00];
+
+		qcom,mdss-dsi-off-command = [05 01 00 00 32 00 02 28 00
+			05 01 00 00 32 00 02 10 00];
+		qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+		qcom,mdss-dsi-off-command-state = "dsi_lp_mode";
+		qcom,esd-check-enabled;
+		qcom,mdss-dsi-panel-status-check-mode = "bta_check";
+		qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+		qcom,mdss-dsi-reset-sequence = <1 2>, <0 20>, <1 50>;
+		qcom,mdss-dsi-tx-eot-append;
+		qcom,mdss-pan-physical-width-dimension = <63>;
+		qcom,mdss-pan-physical-height-dimension = <112>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-mlp466076-3250mah.dtsi b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-mlp466076-3250mah.dtsi
new file mode 100644
index 0000000..2f48301
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-mlp466076-3250mah.dtsi
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+qcom,mlp466076_3250mah_averaged_masterslave_jun15th2018 {
+	/* #mlp466076_3250mAh_averaged_MasterSlave_Jun15th2018 */
+	qcom,max-voltage-uv = <4400000>;
+	qcom,nom-batt-capacity-mah = <3250>;
+	qcom,fastchg-current-ma = <6000000>;
+	qcom,jeita-fcc-ranges = <0  150  650000
+				151 450  4875000
+				451 550  1625000>;
+	qcom,jeita-fv-ranges =  <0   150 4150000
+				151 450 4400000
+				451 550 4150000>;
+	qcom,batt-id-kohm = <133>;
+	qcom,battery-beta = <4250>;
+	qcom,fg-cc-cv-threshold-mv = <4390>;
+	qcom,battery-type = "mlp466076_3250mah_jun15th2018";
+	qcom,checksum = <0x8905>;
+	qcom,gui-version = "PM660GUI - 0.0.0.45";
+	qcom,fg-profile-data = [
+		5E 21 D2 0D
+		E3 0B 04 05
+		EC 1C 8B 01
+		4F 05 31 03
+		80 18 D2 22
+		C2 45 73 52
+		90 00 00 00
+		13 00 00 00
+		00 00 82 C3
+		A3 CC 92 BC
+		2F 00 08 00
+		14 DA CE E5
+		B0 04 41 02
+		C5 F4 C4 12
+		0C 07 3F 32
+		2B 06 09 20
+		27 00 14 00
+		4C 20 E0 04
+		1A 0B A1 05
+		C4 1C E7 02
+		3E 0C 02 12
+		9D 18 4C 23
+		DC 44 15 5A
+		70 00 00 00
+		10 00 00 00
+		00 00 F6 07
+		1D CB 02 B4
+		20 00 00 00
+		5B E3 CE E5
+		C8 05 54 01
+		A6 06 BD FB
+		35 F4 47 23
+		C5 33 CC FF
+		07 10 00 00
+		38 0D 66 46
+		20 00 40 00
+		61 01 0A FA
+		FF 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+	];
+};
diff --git a/arch/arm64/boot/dts/qcom/msm-audio-lpass-msm8909.dtsi b/arch/arm64/boot/dts/qcom/msm-audio-lpass-msm8909.dtsi
new file mode 100644
index 0000000..fe9094f
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm-audio-lpass-msm8909.dtsi
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	pcm0: qcom,msm-pcm {
+		compatible = "qcom,msm-pcm-dsp";
+		qcom,msm-pcm-dsp-id = <0>;
+	};
+
+	routing: qcom,msm-pcm-routing {
+		compatible = "qcom,msm-pcm-routing";
+	};
+
+	compr: qcom,msm-compr-dsp {
+		compatible = "qcom,msm-compr-dsp";
+	};
+
+	pcm1: qcom,msm-pcm-low-latency {
+		compatible = "qcom,msm-pcm-dsp";
+		qcom,msm-pcm-dsp-id = <1>;
+		qcom,msm-pcm-low-latency;
+		qcom,latency-level = "regular";
+	};
+
+	pcm2: qcom,msm-ultra-low-latency {
+		compatible = "qcom,msm-pcm-dsp";
+		qcom,msm-pcm-dsp-id = <2>;
+		qcom,msm-pcm-low-latency;
+		qcom,latency-level = "ultra";
+	};
+
+	compress: qcom,msm-compress-dsp {
+		compatible = "qcom,msm-compress-dsp";
+	};
+
+	voip: qcom,msm-voip-dsp {
+		compatible = "qcom,msm-voip-dsp";
+	};
+
+	voice: qcom,msm-pcm-voice {
+		compatible = "qcom,msm-pcm-voice";
+		qcom,destroy-cvd;
+	};
+
+	stub_codec: qcom,msm-stub-codec {
+		compatible = "qcom,msm-stub-codec";
+	};
+
+	qcom,msm-dai-fe {
+		compatible = "qcom,msm-dai-fe";
+	};
+
+	afe: qcom,msm-pcm-afe {
+		compatible = "qcom,msm-pcm-afe";
+	};
+
+	loopback: qcom,msm-pcm-loopback {
+		compatible = "qcom,msm-pcm-loopback";
+	};
+
+	lsm: qcom,msm-lsm-client {
+		compatible = "qcom,msm-lsm-client";
+	};
+
+	qcom,msm-dai-q6 {
+		compatible = "qcom,msm-dai-q6";
+		bt_sco_rx: qcom,msm-dai-q6-bt-sco-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <12288>;
+		};
+
+		bt_sco_tx: qcom,msm-dai-q6-bt-sco-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <12289>;
+		};
+
+		int_fm_rx: qcom,msm-dai-q6-int-fm-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <12292>;
+		};
+
+		int_fm_tx: qcom,msm-dai-q6-int-fm-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <12293>;
+		};
+
+		afe_pcm_rx: qcom,msm-dai-q6-be-afe-pcm-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <224>;
+		};
+
+		afe_pcm_tx: qcom,msm-dai-q6-be-afe-pcm-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <225>;
+		};
+
+		afe_proxy_rx: qcom,msm-dai-q6-afe-proxy-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <241>;
+		};
+
+		afe_proxy_tx: qcom,msm-dai-q6-afe-proxy-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <240>;
+		};
+
+		incall_record_rx: qcom,msm-dai-q6-incall-record-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <32771>;
+		};
+
+		incall_record_tx: qcom,msm-dai-q6-incall-record-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <32772>;
+		};
+
+		incall_music_rx: qcom,msm-dai-q6-incall-music-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <32773>;
+		};
+
+		incall_music_2_rx: qcom,msm-dai-q6-incall-music-2-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <32770>;
+		};
+
+		usb_audio_rx: qcom,msm-dai-q6-usb-audio-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <28672>;
+		};
+
+		usb_audio_tx: qcom,msm-dai-q6-usb-audio-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <28673>;
+		};
+	};
+
+	hostless: qcom,msm-pcm-hostless {
+		compatible = "qcom,msm-pcm-hostless";
+	};
+
+};
diff --git a/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi b/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
index 9467297..13e5187 100644
--- a/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
@@ -304,6 +304,7 @@
 
 	audio_apr: qcom,msm-audio-apr {
 		compatible = "qcom,msm-audio-apr";
+		qcom,subsys-name = "apr_adsp";
 	};
 
 	dai_pri_auxpcm: qcom,msm-pri-auxpcm {
diff --git a/arch/arm64/boot/dts/qcom/msm8909-audio-bg_codec.dtsi b/arch/arm64/boot/dts/qcom/msm8909-audio-bg_codec.dtsi
index f2cea32..fa6498e 100644
--- a/arch/arm64/boot/dts/qcom/msm8909-audio-bg_codec.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8909-audio-bg_codec.dtsi
@@ -9,8 +9,15 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
+#include "msm-audio-lpass-msm8909.dtsi"
 
 &soc {
+	qcom,msm-audio-apr {
+		compatible = "qcom,msm-audio-apr";
+		msm_audio_apr_dummy {
+			compatible = "qcom,msm-audio-apr-dummy";
+		};
+	};
 	audio_codec_bg: sound {
 		status = "disabled";
 		compatible = "qcom,msm-bg-audio-codec";
@@ -28,6 +35,8 @@
 		qcom,msm-ext-pa = "primary";
 		qcom,tdm-audio-intf;
 		qcom,msm-afe-clk-ver = <1>;
+		qcom,split-a2dp;
+		qcom,pri-mi2s-gpios = <&cdc_dmic_gpios>;
 		asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
 				<&loopback>, <&compress>, <&hostless>,
 				<&afe>, <&lsm>, <&routing>, <&lpa>,
@@ -69,7 +78,12 @@
 		asoc-codec = <&stub_codec>;
 		asoc-codec-names = "msm-stub-codec.1";
 	};
-
+	cdc_dmic_gpios: cdc_dmic_pinctrl {
+		compatible = "qcom,msm-cdc-pinctrl";
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&quat_mi2s_active &quat_mi2s_din_active>;
+		pinctrl-1 = <&quat_mi2s_sleep &quat_mi2s_din_sleep>;
+	};
 	pri_tdm_rx: qcom,msm-dai-tdm-pri-rx {
 		compatible = "qcom,msm-dai-tdm";
 		qcom,msm-cpudai-tdm-group-id = <37120>;
@@ -77,49 +91,32 @@
 		qcom,msm-cpudai-tdm-group-port-id = <36864 36866 36868 36870>;
 		qcom,msm-cpudai-tdm-clk-rate = <0>;
 		qcom,msm-cpudai-tdm-afe-ebit-unsupported;
+		qcom,msm-cpudai-tdm-clk-internal = <0>;
+		qcom,msm-cpudai-tdm-sync-mode = <0>;
+		qcom,msm-cpudai-tdm-sync-src = <0>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <0>;
+		qcom,msm-cpudai-tdm-data-delay = <0>;
 		qcom,msm-cpudai-tdm-sec-port-enable;
 		qcom,msm-cpudai-tdm-clk-attribute = /bits/ 16 <1>;
-		pinctrl-names = "default", "sleep";
-		pinctrl-0 = <&quat_mi2s_active &quat_mi2s_din_active>;
-		pinctrl-1 = <&quat_mi2s_sleep &quat_mi2s_din_sleep>;
 		dai_pri_tdm_rx_0: qcom,msm-dai-q6-tdm-pri-rx-0 {
 			compatible = "qcom,msm-dai-q6-tdm";
 			qcom,msm-cpudai-tdm-dev-id = <36864>;
-			qcom,msm-cpudai-tdm-sync-mode = <0>;
-			qcom,msm-cpudai-tdm-sync-src = <0>;
-			qcom,msm-cpudai-tdm-data-out = <0>;
-			qcom,msm-cpudai-tdm-invert-sync = <0>;
-			qcom,msm-cpudai-tdm-data-delay = <0>;
 			qcom,msm-cpudai-tdm-data-align = <0>;
 		};
 		dai_pri_tdm_rx_1: qcom,msm-dai-q6-tdm-pri-rx-1 {
 			compatible = "qcom,msm-dai-q6-tdm";
 			qcom,msm-cpudai-tdm-dev-id = <36866>;
-			qcom,msm-cpudai-tdm-sync-mode = <0>;
-			qcom,msm-cpudai-tdm-sync-src = <0>;
-			qcom,msm-cpudai-tdm-data-out = <0>;
-			qcom,msm-cpudai-tdm-invert-sync = <0>;
-			qcom,msm-cpudai-tdm-data-delay = <0>;
 			qcom,msm-cpudai-tdm-data-align = <0>;
 		};
 		dai_pri_tdm_rx_2: qcom,msm-dai-q6-tdm-pri-rx-2 {
 			compatible = "qcom,msm-dai-q6-tdm";
 			qcom,msm-cpudai-tdm-dev-id = <36868>;
-			qcom,msm-cpudai-tdm-sync-mode = <0>;
-			qcom,msm-cpudai-tdm-sync-src = <0>;
-			qcom,msm-cpudai-tdm-data-out = <0>;
-			qcom,msm-cpudai-tdm-invert-sync = <0>;
-			qcom,msm-cpudai-tdm-data-delay = <0>;
 			qcom,msm-cpudai-tdm-data-align = <0>;
 		};
 		dai_pri_tdm_rx_3: qcom,msm-dai-q6-tdm-pri-rx-3 {
 			compatible = "qcom,msm-dai-q6-tdm";
 			qcom,msm-cpudai-tdm-dev-id = <36870>;
-			qcom,msm-cpudai-tdm-sync-mode = <0>;
-			qcom,msm-cpudai-tdm-sync-src = <0>;
-			qcom,msm-cpudai-tdm-data-out = <0>;
-			qcom,msm-cpudai-tdm-invert-sync = <0>;
-			qcom,msm-cpudai-tdm-data-delay = <0>;
 			qcom,msm-cpudai-tdm-data-align = <0>;
 		};
 	};
@@ -131,49 +128,32 @@
 		qcom,msm-cpudai-tdm-group-port-id = <36865 36867 36869 36871>;
 		qcom,msm-cpudai-tdm-clk-rate = <0>;
 		qcom,msm-cpudai-tdm-afe-ebit-unsupported;
+		qcom,msm-cpudai-tdm-clk-internal = <0>;
+		qcom,msm-cpudai-tdm-sync-mode = <0>;
+		qcom,msm-cpudai-tdm-sync-src = <0>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <0>;
+		qcom,msm-cpudai-tdm-data-delay = <0>;
 		qcom,msm-cpudai-tdm-sec-port-enable;
 		qcom,msm-cpudai-tdm-clk-attribute = /bits/ 16 <1>;
-		pinctrl-names = "default", "sleep";
-		pinctrl-0 = <&quat_mi2s_active &quat_mi2s_din_active>;
-		pinctrl-1 = <&quat_mi2s_sleep &quat_mi2s_din_sleep>;
 		dai_pri_tdm_tx_0: qcom,msm-dai-q6-tdm-pri-tx-0 {
 			compatible = "qcom,msm-dai-q6-tdm";
 			qcom,msm-cpudai-tdm-dev-id = <36865>;
-			qcom,msm-cpudai-tdm-sync-mode = <0>;
-			qcom,msm-cpudai-tdm-sync-src = <0>;
-			qcom,msm-cpudai-tdm-data-out = <0>;
-			qcom,msm-cpudai-tdm-invert-sync = <0>;
-			qcom,msm-cpudai-tdm-data-delay = <0>;
 			qcom,msm-cpudai-tdm-data-align = <0>;
 		};
 		dai_pri_tdm_tx_1: qcom,msm-dai-q6-tdm-pri-tx-1 {
 			compatible = "qcom,msm-dai-q6-tdm";
 			qcom,msm-cpudai-tdm-dev-id = <36867>;
-			qcom,msm-cpudai-tdm-sync-mode = <0>;
-			qcom,msm-cpudai-tdm-sync-src = <0>;
-			qcom,msm-cpudai-tdm-data-out = <0>;
-			qcom,msm-cpudai-tdm-invert-sync = <0>;
-			qcom,msm-cpudai-tdm-data-delay = <0>;
 			qcom,msm-cpudai-tdm-data-align = <0>;
 		};
 		dai_pri_tdm_tx_2: qcom,msm-dai-q6-tdm-pri-tx-2 {
 			compatible = "qcom,msm-dai-q6-tdm";
 			qcom,msm-cpudai-tdm-dev-id = <36869>;
-			qcom,msm-cpudai-tdm-sync-mode = <0>;
-			qcom,msm-cpudai-tdm-sync-src = <0>;
-			qcom,msm-cpudai-tdm-data-out = <0>;
-			qcom,msm-cpudai-tdm-invert-sync = <0>;
-			qcom,msm-cpudai-tdm-data-delay = <0>;
 			qcom,msm-cpudai-tdm-data-align = <0>;
 		};
 		dai_pri_tdm_tx_3: qcom,msm-dai-q6-tdm-pri-tx-3 {
 			compatible = "qcom,msm-dai-q6-tdm";
 			qcom,msm-cpudai-tdm-dev-id = <36871>;
-			qcom,msm-cpudai-tdm-sync-mode = <0>;
-			qcom,msm-cpudai-tdm-sync-src = <0>;
-			qcom,msm-cpudai-tdm-data-out = <0>;
-			qcom,msm-cpudai-tdm-invert-sync = <0>;
-			qcom,msm-cpudai-tdm-data-delay = <0>;
 			qcom,msm-cpudai-tdm-data-align = <0>;
 		};
 	};
@@ -187,6 +167,7 @@
 		status = "disabled";
 		compatible = "qcom,bg-codec";
 		qcom,subsys-name = "modem";
+		qcom,bg-speaker-connected;
 		qcom,bg-glink {
 			compatible = "qcom,bg-cdc-glink";
 			qcom,msm-glink-channels = <4>;
diff --git a/arch/arm64/boot/dts/qcom/msm8909-coresight.dtsi b/arch/arm64/boot/dts/qcom/msm8909-coresight.dtsi
index 4e7d6f8..9d35b06 100644
--- a/arch/arm64/boot/dts/qcom/msm8909-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8909-coresight.dtsi
@@ -1,413 +1,694 @@
-/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
+ * it under the terms of the GNU General Public License version 2 and
  * only version 2 as published by the Free Software Foundation.
  *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  * GNU General Public License for more details.
  */
 
 &soc {
 	tmc_etr: tmc@826000 {
-		compatible = "arm,coresight-tmc";
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b961>;
+
 		reg = <0x826000 0x1000>,
 		      <0x884000 0x15000>;
 		reg-names = "tmc-base", "bam-base";
+
 		interrupts = <0 166 0>;
 		interrupt-names = "byte-cntr-irq";
 
-		qcom,memory-size = <0x100000>;
-		qcom,sg-enable;
+		arm,buffer-size = <0x100000>;
+		arm,sg-enable;
 
-		coresight-id = <0>;
 		coresight-name = "coresight-tmc-etr";
-		coresight-nr-inports = <1>;
 		coresight-ctis = <&cti0 &cti8>;
-		clocks = <&clock_rpm clk_qdss_clk>,
-			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
-	};
-
-	tpiu: tpiu@820000 {
-		compatible = "arm,coresight-tpiu";
-		reg = <0x820000 0x1000>,
-		      <0x1100000 0xb0000>;
-		reg-names = "tpiu-base", "nidnt-base";
-
-		coresight-id = <1>;
-		coresight-name = "coresight-tpiu";
-		coresight-nr-inports = <1>;
-
-		qcom,nidnthw;
-		qcom,nidnt-swduart;
-		qcom,nidnt-swdtrc;
-		qcom,nidnt-jtag;
-		qcom,nidnt-spmi;
-		nidnt-gpio = <38>;
-		nidnt-gpio-polarity = <1>;
-
-		interrupts = <0 82 0>;
-		interrupt-names = "nidnt-irq";
-
-		vdd-supply = <&pm8909_l11>;
-		qcom,vdd-voltage-level = <2950000 2950000>;
-		qcom,vdd-current-level = <15000 400000>;
-
-		vdd-io-supply = <&pm8909_l12>;
-		qcom,vdd-io-voltage-level = <2950000 2950000>;
-		qcom,vdd-io-current-level = <200 50000>;
+		coresight-csr = <&csr>;
 
 		clocks = <&clock_rpm clk_qdss_clk>,
 			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
-	};
-
-	replicator: replicator@824000 {
-		compatible = "qcom,coresight-replicator";
-		reg = <0x824000 0x1000>;
-		reg-names = "replicator-base";
-
-		coresight-id = <2>;
-		coresight-name = "coresight-replicator";
-		coresight-nr-inports = <1>;
-		coresight-outports = <0 1>;
-		coresight-child-list = <&tmc_etr &tpiu>;
-		coresight-child-ports = <0 0>;
-		clocks = <&clock_rpm clk_qdss_clk>,
-			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
+		clock-names = "apb_pclk";
+		port {
+			tmc_etr_in_replicator: endpoint {
+				slave-mode;
+				remote-endpoint = <&replicator_out_tmc_etr>;
+			};
+		};
 	};
 
 	tmc_etf: tmc@825000 {
-		compatible = "arm,coresight-tmc";
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b961>;
+
 		reg = <0x825000 0x1000>;
 		reg-names = "tmc-base";
 
-		coresight-id = <3>;
 		coresight-name = "coresight-tmc-etf";
-		coresight-nr-inports = <1>;
-		coresight-outports = <0>;
-		coresight-child-list = <&replicator>;
-		coresight-child-ports = <0>;
-		coresight-default-sink;
+
+		arm,default-sink;
 		coresight-ctis = <&cti0 &cti8>;
+		coresight-csr = <&csr>;
 
 		clocks = <&clock_rpm clk_qdss_clk>,
 			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				tmc_etf_out_replicator: endpoint {
+					remote-endpoint =
+						<&replicator_in_tmc_etf>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				tmc_etf_in_funnel_in0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_in0_out_tmc_etf>;
+				};
+			};
+		};
+	};
+
+	replicator: replicator@824000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b909>;
+
+		reg = <0x824000 0x1000>;
+		reg-names = "replicator-base";
+
+		coresight-name = "coresight-replicator";
+
+		clocks = <&clock_rpm clk_qdss_clk>,
+			 <&clock_rpm clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				replicator_out_tmc_etr: endpoint {
+					remote-endpoint =
+						<&tmc_etr_in_replicator>;
+				};
+			};
+			port@1 {
+				reg = <0>;
+				replicator_in_tmc_etf: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tmc_etf_out_replicator>;
+				};
+			};
+		};
 	};
 
 	funnel_in0: funnel@821000 {
-		compatible = "arm,coresight-funnel";
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
 		reg = <0x821000 0x1000>;
 		reg-names = "funnel-base";
 
-		coresight-id = <4>;
 		coresight-name = "coresight-funnel-in0";
-		coresight-nr-inports = <8>;
-		coresight-outports = <0>;
-		coresight-child-list = <&tmc_etf>;
-		coresight-child-ports = <0>;
+
 		clocks = <&clock_rpm clk_qdss_clk>,
 			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				funnel_in0_out_tmc_etf: endpoint {
+					remote-endpoint =
+						<&tmc_etf_in_funnel_in0>;
+				};
+			};
+			port@1 {
+				reg = <7>;
+				funnel_in0_in_stm: endpoint {
+					slave-mode;
+					remote-endpoint = <&stm_out_funnel_in0>;
+				};
+			};
+
+			port@2 {
+				reg = <3>;
+				funnel_in0_in_funnel_center: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_center_out_funnel_in0>;
+				};
+			};
+
+			port@3 {
+				reg = <4>;
+				funnel_in0_in_funnel_right: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_right_out_funnel_in0>;
+				};
+			};
+
+			port@4 {
+
+				reg = <0>;
+				funnel_in0_in_rpm_etm0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&rpm_etm0_out_funnel_center>;
+				};
+			};
+
+			port@5 {
+				reg = <1>;
+				funnel_in0_in_modem_etm0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&modem_etm0_out_funnel_right>;
+				};
+			};
+		};
 	};
 
-	funnel_in2: funnel@869000 {
-		compatible = "arm,coresight-funnel";
+	funnel_center: funnel@869000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
 		reg = <0x869000 0x1000>;
 		reg-names = "funnel-base";
 
-		coresight-id = <5>;
-		coresight-name = "coresight-funnel-in2";
-		coresight-nr-inports = <8>;
-		coresight-outports = <0>;
-		coresight-child-list = <&funnel_in0>;
-		coresight-child-ports = <6>;
+		coresight-name = "coresight-funnel-center";
+
 		clocks = <&clock_rpm clk_qdss_clk>,
 			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				funnel_center_out_funnel_in0: endpoint {
+					remote-endpoint =
+						<&funnel_in0_in_funnel_center>;
+				};
+			};
+
+			port@2 {
+				reg = <2>;
+				funnel_center_in_dbgui: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&dbgui_out_funnel_center>;
+				};
+			};
+		};
 	};
 
-	funnel_in3: funnel@868000 {
-		compatible = "arm,coresight-funnel";
+	funnel_right: funnel@868000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
 		reg = <0x868000 0x1000>;
 		reg-names = "funnel-base";
 
-		coresight-id = <6>;
-		coresight-name = "coresight-funnel-in3";
-		coresight-nr-inports = <2>;
-		coresight-outports = <0>;
-		coresight-child-list = <&funnel_in2>;
-		coresight-child-ports = <7>;
+		coresight-name = "coresight-funnel-right";
+
 		clocks = <&clock_rpm clk_qdss_clk>,
 			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				funnel_right_out_funnel_in0: endpoint {
+					remote-endpoint =
+						<&funnel_in0_in_funnel_right>;
+				};
+			};
+
+			port@2 {
+				reg = <2>;
+				funnel_right_in_funnel_apss: endpoint {
+					slave-mode;
+					remote-endpoint =
+					       <&funnel_apss_out_funnel_right>;
+				};
+			};
+
+			port@3 {
+				reg = <0>;
+				funnel_right_in_wcn_etm0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&wcn_etm0_out_funnel_mm>;
+				};
+			};
+		};
 	};
 
-	cti0: cti@810000 {
-		compatible = "arm,coresight-cti";
-		reg = <0x810000 0x1000>;
-		reg-names = "cti-base";
+	funnel_apss: funnel@855000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
 
-		coresight-id = <7>;
-		coresight-name = "coresight-cti0";
-		coresight-nr-inports = <0>;
+		reg = <0x855000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-apss";
+
 		clocks = <&clock_rpm clk_qdss_clk>,
 			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				funnel_apss_out_funnel_right: endpoint {
+					remote-endpoint =
+						<&funnel_right_in_funnel_apss>;
+				};
+			};
+
+			port@1 {
+				reg = <4>;
+				funnel_apss0_in_etm0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm0_out_funnel_apss0>;
+				};
+			};
+
+			port@2 {
+				reg = <5>;
+				funnel_apss0_in_etm1: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm1_out_funnel_apss0>;
+				};
+			};
+
+			port@3 {
+				reg = <6>;
+				funnel_apss0_in_etm2: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm2_out_funnel_apss0>;
+				};
+			};
+
+			port@4 {
+				reg = <7>;
+				funnel_apss0_in_etm3: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm3_out_funnel_apss0>;
+				};
+			};
+
+		};
+
 	};
 
-	cti1: cti@811000 {
-		compatible = "arm,coresight-cti";
-		reg = <0x811000 0x1000>;
-		reg-names = "cti-base";
+	tpiu: tpiu@820000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b912>;
+		coresight-name = "coresight-tpiu";
 
-		coresight-id = <8>;
-		coresight-name = "coresight-cti1";
-		coresight-nr-inports = <0>;
+		reg = <0x820000 0x1000>,
+		      <0x1100000 0xb0000>;
+		reg-names = "tpiu-base", "nidnt-base";
 		clocks = <&clock_rpm clk_qdss_clk>,
 			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
+		clock-names = "apb_pclk";
 	};
 
-	cti2: cti@812000 {
-		compatible = "arm,coresight-cti";
-		reg = <0x812000 0x1000>;
-		reg-names = "cti-base";
+	etm0: etm@84c000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b955>;
 
-		coresight-id = <9>;
-		coresight-name = "coresight-cti2";
-		coresight-nr-inports = <0>;
+		reg = <0x84c000 0x1000>;
+		cpu = <&CPU0>;
+		coresight-name = "coresight-etm0";
+
 		clocks = <&clock_rpm clk_qdss_clk>,
 			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
+		clock-names = "apb_pclk";
+
+		port {
+			etm0_out_funnel_apss0: endpoint {
+				remote-endpoint = <&funnel_apss0_in_etm0>;
+			};
+		};
 	};
 
-	cti3: cti@813000 {
-		compatible = "arm,coresight-cti";
-		reg = <0x813000 0x1000>;
-		reg-names = "cti-base";
+	etm1: etm@84d000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b955>;
 
-		coresight-id = <10>;
-		coresight-name = "coresight-cti3";
-		coresight-nr-inports = <0>;
+		reg = <0x84d000 0x1000>;
+		cpu = <&CPU1>;
+		coresight-name = "coresight-etm1";
+
 		clocks = <&clock_rpm clk_qdss_clk>,
 			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
+		clock-names = "apb_pclk";
+
+		port {
+			etm1_out_funnel_apss0: endpoint {
+				remote-endpoint = <&funnel_apss0_in_etm1>;
+			};
+		};
 	};
 
-	cti4: cti@814000 {
-		compatible = "arm,coresight-cti";
-		reg = <0x814000 0x1000>;
-		reg-names = "cti-base";
+	etm2: etm@84e000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b955>;
 
-		coresight-id = <11>;
-		coresight-name = "coresight-cti4";
-		coresight-nr-inports = <0>;
+		reg = <0x84e000 0x1000>;
+		cpu = <&CPU2>;
+		coresight-name = "coresight-etm2";
+
 		clocks = <&clock_rpm clk_qdss_clk>,
 			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
+		clock-names = "apb_pclk";
+
+		port {
+			etm2_out_funnel_apss0: endpoint {
+				remote-endpoint = <&funnel_apss0_in_etm2>;
+			};
+		};
 	};
 
-	cti5: cti@815000 {
-		compatible = "arm,coresight-cti";
-		reg = <0x815000 0x1000>;
-		reg-names = "cti-base";
+	etm3: etm@84f000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b955>;
 
-		coresight-id = <12>;
-		coresight-name = "coresight-cti5";
-		coresight-nr-inports = <0>;
-		clocks = <&clock_rpm clk_qdss_clk>,
-			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
-	};
-
-	cti6: cti@816000 {
-		compatible = "arm,coresight-cti";
-		reg = <0x816000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-id = <13>;
-		coresight-name = "coresight-cti6";
-		coresight-nr-inports = <0>;
-		clocks = <&clock_rpm clk_qdss_clk>,
-			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
-
-		qcom,cti-gpio-trigout = <2>;
-		pinctrl-names = "cti-trigout-pctrl";
-		pinctrl-0 = <&trigout_a0>;
-	};
-
-	cti7: cti@817000 {
-		compatible = "arm,coresight-cti";
-		reg = <0x817000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-id = <14>;
-		coresight-name = "coresight-cti7";
-		coresight-nr-inports = <0>;
-		clocks = <&clock_rpm clk_qdss_clk>,
-			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
-	};
-
-	cti8: cti@818000 {
-		compatible = "arm,coresight-cti";
-		reg = <0x818000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-id = <15>;
-		coresight-name = "coresight-cti8";
-		coresight-nr-inports = <0>;
-		clocks = <&clock_rpm clk_qdss_clk>,
-			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
-	};
-
-	cti_cpu0: cti@851000 {
-		compatible = "arm,coresight-cti";
-		reg = <0x851000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-id = <16>;
-		coresight-name = "coresight-cti-cpu0";
-		coresight-nr-inports = <0>;
-		coresight-cti-cpu = <&CPU0>;
-
-		qcom,cti-save;
+		reg = <0x84f000 0x1000>;
+		coresight-name = "coresight-etm3";
+		cpu = <&CPU3>;
 
 		clocks = <&clock_rpm clk_qdss_clk>,
 			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
-	};
+		clock-names = "apb_pclk";
 
-	cti_cpu1: cti@852000 {
-		compatible = "arm,coresight-cti";
-		reg = <0x852000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-id = <17>;
-		coresight-name = "coresight-cti-cpu1";
-		coresight-nr-inports = <0>;
-		coresight-cti-cpu = <&CPU1>;
-
-		qcom,cti-save;
-
-		clocks = <&clock_rpm clk_qdss_clk>,
-			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
-	};
-
-	cti_cpu2: cti@853000 {
-		compatible = "arm,coresight-cti";
-		reg = <0x853000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-id = <18>;
-		coresight-name = "coresight-cti-cpu2";
-		coresight-nr-inports = <0>;
-		coresight-cti-cpu = <&CPU2>;
-
-		qcom,cti-save;
-
-		clocks = <&clock_rpm clk_qdss_clk>,
-			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
-	};
-
-	cti_cpu3: cti@854000 {
-		compatible = "arm,coresight-cti";
-		reg = <0x854000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-id = <19>;
-		coresight-name = "coresight-cti-cpu3";
-		coresight-nr-inports = <0>;
-		coresight-cti-cpu = <&CPU3>;
-
-		qcom,cti-save;
-
-		clocks = <&clock_rpm clk_qdss_clk>,
-			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
-	};
-
-	cti_rpm_cpu0: cti@83c000 {
-		compatible = "arm,coresight-cti";
-		reg = <0x83c000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-id = <20>;
-		coresight-name = "coresight-cti-rpm-cpu0";
-		coresight-nr-inports = <0>;
-
-		clocks = <&clock_rpm clk_qdss_clk>,
-			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
-	};
-
-	cti_modem_cpu0: cti@838000 {
-		compatible = "arm,coresight-cti";
-		reg = <0x838000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-id = <21>;
-		coresight-name = "coresight-cti-modem-cpu0";
-		coresight-nr-inports = <0>;
-
-		clocks = <&clock_rpm clk_qdss_clk>,
-			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
-	};
-
-	cti_wcn_cpu0: cti@835000 {
-		compatible = "arm,coresight-cti";
-		reg = <0x835000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-id = <22>;
-		coresight-name = "coresight-cti-wcn-cpu0";
-		coresight-nr-inports = <0>;
-
-		clocks = <&clock_rpm clk_qdss_clk>,
-			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
-	};
-
-	cti_video_cpu0: cti@830000 {
-		compatible = "arm,coresight-cti";
-		reg = <0x830000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-id = <23>;
-		coresight-name = "coresight-cti-video-cpu0";
-		coresight-nr-inports = <0>;
-
-		clocks = <&clock_rpm clk_qdss_clk>,
-			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
+		port {
+			etm3_out_funnel_apss0: endpoint {
+				remote-endpoint = <&funnel_apss0_in_etm3>;
+			};
+		};
 	};
 
 	stm: stm@802000 {
-		compatible = "arm,coresight-stm";
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b962>;
+
 		reg = <0x802000 0x1000>,
 		      <0x9280000 0x180000>;
-		reg-names = "stm-base", "stm-data-base";
+		reg-names = "stm-base", "stm-stimulus-base";
 
-		coresight-id = <24>;
 		coresight-name = "coresight-stm";
-		coresight-nr-inports = <0>;
-		coresight-outports = <0>;
-		coresight-child-list = <&funnel_in0>;
-		coresight-child-ports = <7>;
+
 		clocks = <&clock_rpm clk_qdss_clk>,
 			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
+		clock-names = "apb_pclk";
+
+		port {
+			stm_out_funnel_in0: endpoint {
+				remote-endpoint = <&funnel_in0_in_stm>;
+			};
+		};
+	};
+
+	cti0: cti@810000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x810000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti0";
+
+		clocks = <&clock_rpm clk_qdss_clk>,
+			 <&clock_rpm clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti1: cti@811000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x811000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti1";
+
+		clocks = <&clock_rpm clk_qdss_clk>,
+			 <&clock_rpm clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti2: cti@812000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x812000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti2";
+
+		clocks = <&clock_rpm clk_qdss_clk>,
+			 <&clock_rpm clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti3: cti@813000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x813000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti3";
+
+		clocks = <&clock_rpm clk_qdss_clk>,
+			 <&clock_rpm clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti4: cti@814000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x814000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti4";
+
+		clocks = <&clock_rpm clk_qdss_clk>,
+			 <&clock_rpm clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti5: cti@815000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x815000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti5";
+
+		clocks = <&clock_rpm clk_qdss_clk>,
+			 <&clock_rpm clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti6: cti@816000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x816000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti6";
+
+		clocks = <&clock_rpm clk_qdss_clk>,
+			 <&clock_rpm clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti7: cti@817000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x817000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti7";
+
+		clocks = <&clock_rpm clk_qdss_clk>,
+			 <&clock_rpm clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti8: cti@818000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x818000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti8";
+
+		clocks = <&clock_rpm clk_qdss_clk>,
+			 <&clock_rpm clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_cpu0: cti@851000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x851000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti-cpu0";
+		cpu = <&CPU0>;
+		qcom,cti-save;
+
+		clocks = <&clock_rpm clk_qdss_clk>,
+			 <&clock_rpm clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_cpu1: cti@852000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x852000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti-cpu1";
+		cpu = <&CPU1>;
+		qcom,cti-save;
+
+		clocks = <&clock_rpm clk_qdss_clk>,
+			 <&clock_rpm clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_cpu2: cti@853000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x853000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti-cpu2";
+		cpu = <&CPU2>;
+		qcom,cti-save;
+
+		clocks = <&clock_rpm clk_qdss_clk>,
+			 <&clock_rpm clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_cpu3: cti@854000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x854000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti-cpu3";
+		cpu = <&CPU3>;
+		qcom,cti-save;
+
+		clocks = <&clock_rpm clk_qdss_clk>,
+			 <&clock_rpm clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_modem_cpu0: cti@838000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x838000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti-modem-cpu0";
+
+		clocks = <&clock_rpm clk_qdss_clk>,
+			 <&clock_rpm clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	/* Venus CTI */
+	cti_video_cpu0: cti@830000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x830000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti-video-cpu0";
+
+		clocks = <&clock_rpm clk_qdss_clk>,
+			 <&clock_rpm clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	/* Pronto CTI */
+	cti_wcn_cpu0: cti@835000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x835000 0x1000>;
+		reg-names = "cti-base";
+		coresight-name = "coresight-cti-wcn-cpu0";
+
+		clocks = <&clock_rpm clk_qdss_clk>,
+			 <&clock_rpm clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	wcn_etm0 {
+		compatible = "qcom,coresight-remote-etm";
+		coresight-name = "coresight-wcn-etm0";
+		qcom,inst-id = <3>;
+
+		port {
+			wcn_etm0_out_funnel_mm: endpoint {
+				remote-endpoint = <&funnel_right_in_wcn_etm0>;
+			};
+		};
+	};
+
+	/* MSS_SCL */
+	modem_etm0 {
+		compatible = "qcom,coresight-remote-etm";
+		coresight-name = "coresight-modem-etm0";
+		qcom,inst-id = <11>;
+
+		port {
+			modem_etm0_out_funnel_right: endpoint {
+				remote-endpoint = <&funnel_in0_in_modem_etm0>;
+			};
+		};
+	};
+
+	rpm_etm0 {
+		compatible = "qcom,coresight-remote-etm";
+		coresight-name = "coresight-rpm-etm0";
+		qcom,inst-id = <4>;
+
+		port {
+			rpm_etm0_out_funnel_center: endpoint {
+				remote-endpoint = <&funnel_in0_in_rpm_etm0>;
+			};
+		};
 	};
 
 	csr: csr@801000 {
@@ -415,209 +696,52 @@
 		reg = <0x801000 0x1000>;
 		reg-names = "csr-base";
 
-		coresight-id = <25>;
 		coresight-name = "coresight-csr";
-		coresight-nr-inports = <0>;
 
+		qcom,usb-bam-support;
+		qcom,hwctrl-set-support;
+		qcom,set-byte-cntr-support;
 		qcom,blk-size = <1>;
-		clocks = <&clock_rpm clk_qdss_clk>,
-			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
-	};
-
-	funnel_apss: funnel@855000 {
-		compatible = "arm,coresight-funnel";
-		reg = <0x855000 0x1000>;
-		reg-names = "funnel-base";
-
-		coresight-id = <26>;
-		coresight-name = "coresight-funnel-apss";
-		coresight-nr-inports = <4>;
-		coresight-outports = <0>;
-		coresight-child-list = <&funnel_in0>;
-		coresight-child-ports = <4>;
-
-		clocks = <&clock_rpm clk_qdss_clk>,
-			<&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
-	};
-
-	etm0: etm@84c000 {
-		compatible = "arm,coresight-etm";
-		reg = <0x84c000 0x1000>;
-		reg-names = "etm-base";
-
-		coresight-id = <27>;
-		coresight-name = "coresight-etm0";
-		coresight-nr-inports = <0>;
-		coresight-outports = <0>;
-		coresight-child-list = <&funnel_apss>;
-		coresight-child-ports = <0>;
-		coresight-etm-cpu = <&CPU0>;
-
-		clocks = <&clock_rpm clk_qdss_clk>,
-			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
-	};
-
-	etm1: etm@84d000 {
-		compatible = "arm,coresight-etm";
-		reg = <0x84d000 0x1000>;
-		reg-names = "etm-base";
-
-		coresight-id = <28>;
-		coresight-name = "coresight-etm1";
-		coresight-nr-inports = <0>;
-		coresight-outports = <0>;
-		coresight-child-list = <&funnel_apss>;
-		coresight-child-ports = <1>;
-		coresight-etm-cpu = <&CPU1>;
-
-		clocks = <&clock_rpm clk_qdss_clk>,
-			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
-	};
-
-	etm2: etm@84e000 {
-		compatible = "arm,coresight-etm";
-		reg = <0x84e000 0x1000>;
-		reg-names = "etm-base";
-
-		coresight-id = <29>;
-		coresight-name = "coresight-etm2";
-		coresight-nr-inports = <0>;
-		coresight-outports = <0>;
-		coresight-child-list = <&funnel_apss>;
-		coresight-child-ports = <2>;
-		coresight-etm-cpu = <&CPU2>;
-
-		clocks = <&clock_rpm clk_qdss_clk>,
-			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
-	};
-
-	etm3: etm@84f000 {
-		compatible = "arm,coresight-etm";
-		reg = <0x84f000 0x1000>;
-		reg-names = "etm-base";
-
-		coresight-id = <30>;
-		coresight-name = "coresight-etm3";
-		coresight-nr-inports = <0>;
-		coresight-outports = <0>;
-		coresight-child-list = <&funnel_apss>;
-		coresight-child-ports = <3>;
-		coresight-etm-cpu = <&CPU3>;
-
-		clocks = <&clock_rpm clk_qdss_clk>,
-			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
-	};
-
-	hwevent: hwevent@86c000 {
-		compatible = "qcom,coresight-hwevent";
-		reg = <0x86c000 0x108>,
-		      <0x86cfb0 0x4>,
-		      <0x78c5010 0x4>,
-		      <0x7885010 0x4>;
-		reg-names = "wrapper-mux", "wrapper-lockaccess", "usbbam-mux",
-				"blsp-mux";
-		coresight-id = <31>;
-		coresight-name = "coresight-hwevent";
-		coresight-nr-inports = <0>;
-
-		clocks = <&clock_rpm clk_qdss_clk>,
-			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
-	};
-
-	rpm_etm0 {
-		compatible = "qcom,coresight-remote-etm";
-
-		coresight-id = <32>;
-		coresight-name = "coresight-rpm-etm0";
-		coresight-nr-inports = <0>;
-		coresight-outports = <0>;
-		coresight-child-list = <&funnel_in0>;
-		coresight-child-ports = <0>;
-
-		qcom,inst-id = <4>;
-	};
-
-	wcn_etm0 {
-		compatible = "qcom,coresight-remote-etm";
-
-		coresight-id = <33>;
-		coresight-name = "coresight-wcn-etm0";
-		coresight-nr-inports = <0>;
-		coresight-outports = <0>;
-		coresight-child-list = <&funnel_in3>;
-		coresight-child-ports = <0>;
-
-		qcom,inst-id = <3>;
-	};
-
-	modem_etm0 {
-		compatible = "qcom,coresight-remote-etm";
-
-		coresight-id = <34>;
-		coresight-name = "coresight-modem-etm0";
-		coresight-nr-inports = <0>;
-		coresight-outports = <0>;
-		coresight-child-list = <&funnel_in0>;
-		coresight-child-ports = <2>;
-
-		qcom,inst-id = <2>;
-	};
-
-	fuse: fuse@5e01c {
-		compatible = "arm,coresight-fuse-v2";
-		reg = <0x5e01c 0x8>,
-		      <0x58040 0x4>,
-		      <0x5e00c 0x4>;
-		reg-names = "fuse-base", "nidnt-fuse-base", "qpdi-fuse-base";
-
-		coresight-id = <35>;
-		coresight-name = "coresight-fuse";
-		coresight-nr-inports = <0>;
-	};
-
-	qpdi: qpdi@1941000 {
-		compatible = "qcom,coresight-qpdi";
-		reg = <0x1941000 0x4>;
-		reg-names = "qpdi-base";
-
-		coresight-id = <36>;
-		coresight-name = "coresight-qpdi";
-		coresight-nr-inports = <0>;
-
-		vdd-supply = <&pm8909_l11>;
-		qcom,vdd-voltage-level = <2800000 2950000>;
-		qcom,vdd-current-level = <15000 400000>;
-
-		vdd-io-supply = <&pm8909_l12>;
-		qcom,vdd-io-voltage-level = <1800000 2950000>;
-		qcom,vdd-io-current-level = <200 50000>;
 	};
 
 	dbgui: dbgui@86d000 {
 		compatible = "qcom,coresight-dbgui";
 		reg = <0x86d000 0x1000>;
 		reg-names = "dbgui-base";
-
-		coresight-id = <37>;
 		coresight-name = "coresight-dbgui";
-		coresight-nr-inports = <0>;
-		coresight-outports = <0>;
-		coresight-child-list = <&funnel_in2>;
-		coresight-child-ports = <2>;
 
 		qcom,dbgui-addr-offset = <0x30>;
-		qcom,dbgui-data-offset = <0xB0>;
-		qcom,dbgui-size = <32>;
+		qcom,dbgui-data-offset = <0x130>;
+		qcom,dbgui-size = <64>;
 
 		clocks = <&clock_rpm clk_qdss_clk>,
 			 <&clock_rpm clk_qdss_a_clk>;
-		clock-names = "core_clk", "core_a_clk";
+		clock-names = "apb_pclk";
+
+		port {
+			dbgui_out_funnel_center: endpoint {
+				remote-endpoint = <&funnel_center_in_dbgui>;
+			};
+		};
 	};
+
+	hwevent: hwevent@86c000 {
+		compatible = "qcom,coresight-hwevent";
+
+		reg = <0x86c000 0x108>,
+		      <0x86cfb0 0x4>,
+		      <0x78c5010 0x4>,
+		      <0x7885010 0x4>;
+
+		reg-names = "center-wrapper-mux", "center-wrapper-lockaccess",
+				"right-wrapper-mux", "right-wrapper-lockaccess";
+
+		coresight-csr = <&csr>;
+		coresight-name = "coresight-hwevent";
+
+		clocks = <&clock_rpm clk_qdss_clk>,
+			 <&clock_rpm clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8909-gpu.dtsi b/arch/arm64/boot/dts/qcom/msm8909-gpu.dtsi
index dc95570..96d9ea7 100644
--- a/arch/arm64/boot/dts/qcom/msm8909-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8909-gpu.dtsi
@@ -19,26 +19,15 @@
 	/* To use BIMC based bus governor */
 	gpubw: qcom,gpubw {
 		compatible = "qcom,devbw";
-		governor = "bw_hwmon";
+		governor = "bw_vbif";
 		qcom,src-dst-ports = <26 512>;
 		qcom,bw-tbl =
 			<    0 >,	/*   9.6 MHz */
-			<  381 >,	/*  50.0 MHz */
-			<  762 >,	/* 100.0 MHz */
 			< 1525 >,	/* 200.0 MHz */
 			< 3051 >,	/* 400.0 MHz */
 			< 4066 >;	/* 533.0 MHz */
 	};
 
-	qcom,gpu-bwmon@410000 {
-		compatible = "qcom,bimc-bwmon2";
-		reg = <0x00410000 0x300>, <0x00401000 0x200>;
-		reg-names = "base", "global_base";
-		interrupts = <0 183 4>;
-		qcom,mport = <2>;
-		qcom,target-dev = <&gpubw>;
-	};
-
 	msm_gpu: qcom,kgsl-3d0@01c00000 {
 		label = "kgsl-3d0";
 		compatible = "qcom,kgsl-3d0", "qcom,kgsl-3d";
@@ -92,6 +81,9 @@
 		qcom,pm-qos-active-latency = <701>;
 		qcom,pm-qos-wakeup-latency = <701>;
 
+		/* Enable gpu cooling device */
+		#cooling-cells = <2>;
+
 		/* Power levels */
 		qcom,gpu-pwrlevels {
 			#address-cells = <1>;
@@ -103,24 +95,32 @@
 				reg = <0>;
 				qcom,gpu-freq = <456000000>;
 				qcom,bus-freq = <3>;
+				qcom,bus-min = <3>;
+				qcom,bus-max = <3>;
 			};
 
 			qcom,gpu-pwrlevel@1 {
 				reg = <1>;
 				qcom,gpu-freq = <307200000>;
 				qcom,bus-freq = <2>;
+				qcom,bus-min = <2>;
+				qcom,bus-max = <3>;
 			};
 
 			qcom,gpu-pwrlevel@2 {
 				reg = <2>;
 				qcom,gpu-freq = <200000000>;
-				qcom,bus-freq = <1>;
+				qcom,bus-freq = <2>;
+				qcom,bus-min = <1>;
+				qcom,bus-max = <2>;
 			};
 
 			qcom,gpu-pwrlevel@3 {
 				reg = <3>;
 				qcom,gpu-freq = <19200000>;
 				qcom,bus-freq = <0>;
+				qcom,bus-min = <0>;
+				qcom,bus-max = <0>;
 			};
 		};
 
diff --git a/arch/arm64/boot/dts/qcom/msm8909-ion.dtsi b/arch/arm64/boot/dts/qcom/msm8909-ion.dtsi
index 858429b..cd21e6e 100644
--- a/arch/arm64/boot/dts/qcom/msm8909-ion.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8909-ion.dtsi
@@ -33,12 +33,6 @@
 			qcom,ion-heap-type = "DMA";
 		};
 
-		modem_heap: qcom,ion-heap@26 { /* MODEM HEAP */
-			reg = <26>;
-			memory-region = <&modem_adsp_mem>;
-			qcom,ion-heap-type = "DMA";
-		};
-
 		qcom,ion-heap@19 { /* QSEECOM TA HEAP */
 			reg = <19>;
 			memory-region = <&qseecom_ta_mem>;
diff --git a/arch/arm64/boot/dts/qcom/msm8909-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8909-mtp.dtsi
index 18fc575..e2fbc9e 100644
--- a/arch/arm64/boot/dts/qcom/msm8909-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8909-mtp.dtsi
@@ -410,27 +410,3 @@
 		};
 	};
 };
-
-/* CoreSight */
-&tpiu {
-	pinctrl-names = "sdcard", "trace", "swduart",
-			"swdtrc", "jtag", "spmi";
-	/* NIDnT */
-	pinctrl-0 = <&qdsd_clk_sdcard &qdsd_cmd_sdcard
-		     &qdsd_data0_sdcard &qdsd_data1_sdcard
-		     &qdsd_data2_sdcard &qdsd_data3_sdcard>;
-	pinctrl-1 = <&qdsd_clk_trace &qdsd_cmd_trace
-		     &qdsd_data0_trace &qdsd_data1_trace
-		     &qdsd_data2_trace &qdsd_data3_trace>;
-	pinctrl-2 = <&qdsd_cmd_swduart &qdsd_data0_swduart
-		     &qdsd_data1_swduart &qdsd_data2_swduart
-		     &qdsd_data3_swduart>;
-	pinctrl-3 = <&qdsd_clk_swdtrc &qdsd_cmd_swdtrc
-		     &qdsd_data0_swdtrc &qdsd_data1_swdtrc
-		     &qdsd_data2_swdtrc &qdsd_data3_swdtrc>;
-	pinctrl-4 = <&qdsd_cmd_jtag &qdsd_data0_jtag
-		     &qdsd_data1_jtag &qdsd_data2_jtag
-		     &qdsd_data3_jtag>;
-	pinctrl-5 = <&qdsd_clk_spmi &qdsd_cmd_spmi
-		     &qdsd_data0_spmi &qdsd_data3_spmi>;
-};
diff --git a/arch/arm64/boot/dts/qcom/msm8909-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/msm8909-pinctrl.dtsi
index c86da64..743cf1c 100644
--- a/arch/arm64/boot/dts/qcom/msm8909-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8909-pinctrl.dtsi
@@ -545,6 +545,33 @@
 			};
 		};
 
+		imu {
+			imu_int_active: imu_int_active {
+				mux {
+					pins = "gpio12";
+					function = "gpio";
+				};
+				config {
+					pins = "gpio12";
+					drive-strength = <6>;
+					bias-pull-up;
+				};
+			};
+
+			imu_int_suspend: imu_int_suspend {
+				mux {
+					pins = "gpio12";
+					function = "gpio";
+				};
+				config {
+					pins = "gpio12";
+					drive-strength = <6>;
+					bias-pull-up;
+				};
+			};
+
+		};
+
 		nfc {
 			nfcw_int_active: nfcw_int_active {
 				mux {
@@ -1890,6 +1917,34 @@
 			};
 		};
 
+		cdc_reset_ctrl {
+			cdc_reset_sleep: cdc_reset_sleep {
+				mux {
+					pins = "gpio27";
+					function = "gpio";
+				};
+				config {
+					pins = "gpio27";
+					drive-strength = <16>;
+					bias-disable;
+					output-low;
+				};
+			};
+			cdc_reset_active: cdc_reset_active {
+				mux {
+					pins = "gpio27";
+					function = "gpio";
+				};
+				config {
+					pins = "gpio27"; /* gpio67 old */
+					drive-strength = <16>;
+					bias-pull-down;
+					output-high;
+				};
+			};
+		};
+
+
 		cdc-dmic-lines {
 			cdc_dmic0_clk_act: dmic0_clk_on {
 				mux {
diff --git a/arch/arm64/boot/dts/qcom/msm8909-pm8916-camera-sensor-robot-som.dtsi b/arch/arm64/boot/dts/qcom/msm8909-pm8916-camera-sensor-robot-som.dtsi
new file mode 100644
index 0000000..310b53a
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8909-pm8916-camera-sensor-robot-som.dtsi
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&i2c_3 {
+	status = "ok";
+};
+
+&i2c_3 {
+	qcom,camera@0 {
+		cell-index = <0>;
+		compatible = "qcom,camera";
+		reg = <0x2>;
+		qcom,csiphy-sd-index = <0>;
+		qcom,csid-sd-index = <0>;
+		qcom,mount-angle = <90>;
+		cam_vdig-supply = <&pm8916_l2>;
+		cam_vana-supply = <&pm8916_l17>;
+		cam_vio-supply = <&pm8916_l6>;
+		qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
+		qcom,cam-vreg-min-voltage = <1200000 1800000 2850000>;
+		qcom,cam-vreg-max-voltage = <1200000 1800000 2850000>;
+		qcom,cam-vreg-op-mode = <200000 0 80000>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_default
+				&cam_sensor_rear_default>;
+		pinctrl-1 = <&cam_sensor_mclk0_sleep &cam_sensor_rear_sleep>;
+		gpios = <&msm_gpio 26 0>,
+			<&msm_gpio 35 0>,
+			<&msm_gpio 34 0>;
+		qcom,gpio-reset = <1>;
+		qcom,gpio-standby = <2>;
+		qcom,gpio-req-tbl-num = <0 1 2>;
+		qcom,gpio-req-tbl-flags = <1 0 0>;
+		qcom,gpio-req-tbl-label = "CAMIF_MCLK",
+			"CAM_RESET",
+			"CAM_STANDBY";
+		qcom,sensor-position = <0>;
+		qcom,sensor-mode = <0>;
+		status = "ok";
+		clocks = <&clock_gcc clk_mclk0_clk_src>,
+				<&clock_gcc clk_gcc_camss_mclk0_clk>;
+		clock-names = "cam_src_clk", "cam_clk";
+		qcom,clock-rates = <24000000 0>;
+	};
+
+	qcom,camera@1 {
+		cell-index = <1>;
+		compatible = "qcom,camera";
+		reg = <0x1>;
+		qcom,csiphy-sd-index = <0>;
+		qcom,csid-sd-index = <1>;
+		qcom,mount-angle = <90>;
+		cam_vana-supply = <&pm8916_l17>;
+		cam_vio-supply = <&pm8916_l6>;
+		qcom,cam-vreg-name = "cam_vio","cam_vana";
+		qcom,cam-vreg-min-voltage = <1800000 2850000>;
+		qcom,cam-vreg-max-voltage = <1800000 2850000>;
+		qcom,cam-vreg-op-mode = <0 80000>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_default
+				&cam_sensor_rear_default>;
+		pinctrl-1 = <&cam_sensor_mclk0_sleep &cam_sensor_rear_sleep>;
+		gpios = <&msm_gpio 26 0>,
+			<&msm_gpio 91 0>;
+		qcom,gpio-reset = <1>;
+
+		qcom,gpio-req-tbl-num = <0 1>;
+		qcom,gpio-req-tbl-flags = <1 0>;
+		qcom,gpio-req-tbl-label = "CAMIF_MCLK",
+			"CAM_RESET";
+		qcom,sensor-position = <0>;
+		qcom,sensor-mode = <0>;
+		status = "ok";
+		clocks = <&clock_gcc clk_mclk0_clk_src>,
+				<&clock_gcc clk_gcc_camss_mclk0_clk>;
+		clock-names = "cam_src_clk", "cam_clk";
+		qcom,clock-rates = <24000000 0>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8909-pm8916-camera-sensor-robot.dtsi b/arch/arm64/boot/dts/qcom/msm8909-pm8916-camera-sensor-robot.dtsi
new file mode 100644
index 0000000..6f6655a
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8909-pm8916-camera-sensor-robot.dtsi
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&i2c_3 {
+	status = "ok";
+};
+
+&i2c_3 {
+	qcom,camera@0 {
+		cell-index = <0>;
+		compatible = "qcom,camera";
+		reg = <0x2>;
+		qcom,csiphy-sd-index = <0>;
+		qcom,csid-sd-index = <0>;
+		qcom,mount-angle = <90>;
+		cam_vdig-supply = <&pm8916_l2>;
+		cam_vana-supply = <&pm8916_l17>;
+		cam_vio-supply = <&pm8916_l6>;
+		qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
+		qcom,cam-vreg-min-voltage = <1200000 1800000 2850000>;
+		qcom,cam-vreg-max-voltage = <1200000 1800000 2850000>;
+		qcom,cam-vreg-op-mode = <200000 0 80000>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_default
+				&cam_sensor_rear_default>;
+		pinctrl-1 = <&cam_sensor_mclk0_sleep &cam_sensor_rear_sleep>;
+		gpios = <&msm_gpio 26 0>,
+			<&msm_gpio 35 0>,
+			<&msm_gpio 34 0>;
+		qcom,gpio-reset = <1>;
+		qcom,gpio-standby = <2>;
+		qcom,gpio-req-tbl-num = <0 1 2>;
+		qcom,gpio-req-tbl-flags = <1 0 0>;
+		qcom,gpio-req-tbl-label = "CAMIF_MCLK",
+			"CAM_RESET",
+			"CAM_STANDBY";
+		qcom,sensor-position = <0>;
+		qcom,sensor-mode = <0>;
+		status = "ok";
+		clocks = <&clock_gcc clk_mclk0_clk_src>,
+				<&clock_gcc clk_gcc_camss_mclk0_clk>;
+		clock-names = "cam_src_clk", "cam_clk";
+		qcom,clock-rates = <24000000 0>;
+	};
+
+	qcom,camera@1 {
+		cell-index = <1>;
+		compatible = "qcom,camera";
+		reg = <0x1>;
+		qcom,csiphy-sd-index = <0>;
+		qcom,csid-sd-index = <0>;
+		qcom,mount-angle = <90>;
+		cam_vana-supply = <&pm8916_l17>;
+		cam_vio-supply = <&pm8916_l6>;
+		qcom,cam-vreg-name = "cam_vio","cam_vana";
+		qcom,cam-vreg-min-voltage = <1800000 2850000>;
+		qcom,cam-vreg-max-voltage = <1800000 2850000>;
+		qcom,cam-vreg-op-mode = <0 80000>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_default
+				&cam_sensor_rear_default>;
+		pinctrl-1 = <&cam_sensor_mclk0_sleep &cam_sensor_rear_sleep>;
+		gpios = <&msm_gpio 26 0>,
+			<&msm_gpio 35 0>,
+			<&msm_gpio 34 0>;
+		qcom,gpio-reset = <1>;
+		qcom,gpio-standby = <2>;
+		qcom,gpio-req-tbl-num = <0 1 2>;
+		qcom,gpio-req-tbl-flags = <1 0 0>;
+		qcom,gpio-req-tbl-label = "CAMIF_MCLK",
+			"CAM_RESET",
+			"CAM_STANDBY";
+		qcom,sensor-position = <0>;
+		qcom,sensor-mode = <0>;
+		status = "ok";
+		clocks = <&clock_gcc clk_mclk0_clk_src>,
+				<&clock_gcc clk_gcc_camss_mclk0_clk>;
+		clock-names = "cam_src_clk", "cam_clk";
+		qcom,clock-rates = <24000000 0>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8909-pm8916-camera.dtsi b/arch/arm64/boot/dts/qcom/msm8909-pm8916-camera.dtsi
new file mode 100644
index 0000000..0b648ec
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8909-pm8916-camera.dtsi
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	qcom,msm-cam@1800000 {
+		compatible = "qcom,msm-cam";
+		reg = <0x1b00000 0x40000>;
+		reg-names = "msm-cam";
+		status = "ok";
+		bus-vectors = "suspend", "svs", "nominal", "turbo";
+		qcom,bus-votes = <0 320000000 640000000 640000000>;
+	};
+
+	qcom,csiphy@1b0ac00 {
+		cell-index = <0>;
+		compatible = "qcom,csiphy-v3.1", "qcom,csiphy";
+		reg = <0x1b0ac00 0x200>,
+			<0x1b00030 0x4>;
+		reg-names = "csiphy", "csiphy_clk_mux";
+		interrupts = <0 78 0>;
+		interrupt-names = "csiphy";
+		clocks = <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+			<&clock_gcc clk_csi0phytimer_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi0phytimer_clk>,
+			<&clock_gcc clk_camss_top_ahb_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi0phy_clk>,
+			<&clock_gcc clk_gcc_camss_csi1phy_clk>,
+			<&clock_gcc clk_gcc_camss_ahb_clk>;
+		clock-names = "camss_top_ahb_clk", "ispif_ahb_clk",
+			"csiphy_timer_src_clk", "csiphy_timer_clk",
+			"camss_ahb_src", "csi0_phy_clk", "csi1_phy_clk",
+			"camss_ahb_clk";
+		qcom,clock-rates = <0 0 200000000 0 0 0 0 0>;
+	};
+
+	qcom,csid@1b08000 {
+		cell-index = <0>;
+		compatible = "qcom,csid-v3.1", "qcom,csid";
+		reg = <0x1b08000 0x100>;
+		reg-names = "csid";
+		interrupts = <0 49 0>;
+		interrupt-names = "csid";
+		qcom,csi-vdd-voltage = <1200000>;
+		qcom,mipi-csi-vdd-supply = <&pm8916_l2>;
+		clocks = <&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_top_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_csi0_ahb_clk>,
+			<&clock_gcc clk_csi0_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi0_clk>,
+			<&clock_gcc clk_gcc_camss_csi0pix_clk>,
+			<&clock_gcc clk_gcc_camss_csi0rdi_clk>,
+			<&clock_gcc clk_gcc_camss_ahb_clk>;
+		clock-names = "ispif_ahb_clk", "camss_top_ahb_clk",
+			"csi_ahb_clk", "csi_src_clk",
+			"csi_clk",  "csi_pix_clk",
+			"csi_rdi_clk", "camss_ahb_clk";
+		qcom,clock-rates = <40000000 0 0 200000000 0 0 0 0>;
+	};
+
+	qcom,csid@1b08400 {
+		cell-index = <1>;
+		compatible = "qcom,csid-v3.1", "qcom,csid";
+		reg = <0x1b08400 0x100>;
+		reg-names = "csid";
+		interrupts = <0 50 0>;
+		interrupt-names = "csid";
+		qcom,csi-vdd-voltage = <1200000>;
+		qcom,mipi-csi-vdd-supply = <&pm8916_l2>;
+		clocks = <&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_top_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_csi1_ahb_clk>,
+			<&clock_gcc clk_csi1_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi1_clk>,
+			<&clock_gcc clk_gcc_camss_csi1pix_clk>,
+			<&clock_gcc clk_gcc_camss_csi1rdi_clk>,
+			<&clock_gcc clk_gcc_camss_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_csi1phy_clk>;
+		clock-names = "ispif_ahb_clk", "camss_top_ahb_clk",
+			"csi_ahb_clk", "csi_src_clk",
+			"csi_clk", "csi_pix_clk",
+			"csi_rdi_clk", "camss_ahb_clk", "camss_csi1_phy";
+		qcom,clock-rates = <40000000 0 0 200000000 0 0 0 0 0>;
+	};
+
+	qcom,ispif@1b0a000 {
+		cell-index = <0>;
+		compatible = "qcom,ispif";
+		reg = <0x1b0a000 0x500>,
+			<0x1b00020 0x10>;
+		reg-names = "ispif", "csi_clk_mux";
+		interrupts = <0 51 0>;
+		interrupt-names = "ispif";
+		qcom,num-isps = <0x1>;
+		vfe0_vdd_supply = <&gdsc_vfe>;
+		clocks = <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+
+			<&clock_gcc clk_csi0_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi0_clk>,
+			<&clock_gcc clk_gcc_camss_csi0rdi_clk>,
+			<&clock_gcc clk_gcc_camss_csi0pix_clk>,
+			<&clock_gcc clk_csi1_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi1_clk>,
+			<&clock_gcc clk_gcc_camss_csi1rdi_clk>,
+			<&clock_gcc clk_gcc_camss_csi1pix_clk>,
+			<&clock_gcc clk_vfe0_clk_src>,
+			<&clock_gcc clk_gcc_camss_vfe0_clk>,
+			<&clock_gcc clk_gcc_camss_csi_vfe0_clk>;
+
+		clock-names = "camss_top_ahb_clk", "ispif_ahb_clk",
+			"csi0_src_clk", "csi0_clk",
+			"csi0_rdi_clk", "csi0_pix_clk",
+			"csi1_src_clk", "csi1_clk",
+			"csi1_rdi_clk", "csi1_pix_clk",
+			"vfe0_clk_src", "camss_vfe_vfe0_clk",
+			"camss_csi_vfe0_clk";
+		qcom,clock-rates = <0 40000000
+			200000000 0 0 0
+			200000000 0 0 0
+			0 0 0>;
+		qcom,clock-control = "NO_SET_RATE", "SET_RATE",
+			"SET_RATE", "NO_SET_RATE", "NO_SET_RATE", "NO_SET_RATE",
+			"SET_RATE", "NO_SET_RATE", "NO_SET_RATE", "NO_SET_RATE",
+			"INIT_RATE", "NO_SET_RATE", "NO_SET_RATE";
+	};
+
+	qcom,vfe@1b10000 {
+		cell-index = <0>;
+		compatible = "qcom,vfe32";
+		reg = <0x1b10000 0x830>,
+			<0x1b40000 0x200>;
+		reg-names = "vfe", "vfe_vbif";
+		interrupts = <0 52 0>;
+		interrupt-names = "vfe";
+		vdd-supply = <&gdsc_vfe>;
+		clocks = <&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+			<&clock_gcc clk_vfe0_clk_src>,
+			<&clock_gcc clk_gcc_camss_vfe0_clk>,
+			<&clock_gcc clk_gcc_camss_csi_vfe0_clk>,
+			<&clock_gcc clk_gcc_camss_vfe_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_vfe_axi_clk>,
+			<&clock_gcc clk_gcc_camss_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_top_ahb_clk>;
+		clock-names = "camss_top_ahb_clk", "vfe_clk_src",
+			"camss_vfe_vfe_clk", "camss_csi_vfe_clk", "iface_clk",
+			"bus_clk", "camss_ahb_clk", "ispif_ahb_clk";
+		qcom,clock-rates = <40000000 266670000 0 0 0 0 0 0>;
+
+		qos-entries = <8>;
+		qos-regs = <0x7BC 0x7C0 0x7C4 0x7C8 0x7CC 0x7D0
+				0x7D4 0x798>;
+		qos-settings = <0xAAA5AAA5 0xAAA5AAA5 0xAAA5AAA5
+				0xAAA5AAA5 0xAAA5AAA5 0xAAA5AAA5
+				0xAAA5AAA5 0x00010000>;
+		vbif-entries = <1>;
+		vbif-regs = <0x04>;
+		vbif-settings = <0x1>;
+		ds-entries = <15>;
+		ds-regs = <0x7D8 0x7DC 0x7E0 0x7E4 0x7E8
+			0x7EC 0x7F0 0x7F4 0x7F8 0x7FC 0x800
+			0x804 0x808 0x80C 0x810>;
+		ds-settings = <0xCCCC1111 0xCCCC1111 0xCCCC1111
+				0xCCCC1111 0xCCCC1111 0xCCCC1111
+				0xCCCC1111 0xCCCC1111 0xCCCC1111
+				0xCCCC1111 0xCCCC1111 0xCCCC1111
+				0xCCCC1111 0xCCCC1111 0x00000103>;
+
+		bus-util-factor = <1024>;
+	};
+
+	qcom,cam_smmu {
+		status = "ok";
+		compatible = "qcom,msm-cam-smmu";
+		msm_cam_smmu_cb1: msm_cam_smmu_cb1 {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&apps_iommu 0x400 0x00>;
+			label = "vfe";
+			qcom,scratch-buf-support;
+		};
+	};
+
+	qcom,irqrouter@1b00000 {
+		status = "ok";
+		cell-index = <0>;
+		compatible = "qcom,irqrouter";
+		reg = <0x1b00000 0x100>;
+		reg-names = "irqrouter";
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8909-regulator.dtsi b/arch/arm64/boot/dts/qcom/msm8909-regulator.dtsi
index 7197f88..70d4939 100644
--- a/arch/arm64/boot/dts/qcom/msm8909-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8909-regulator.dtsi
@@ -48,6 +48,12 @@
 			qcom,init-voltage = <1>;
 			qcom,use-voltage-corner;
 		};
+		pm8909_cx_cdev: regulator-cx-cdev {
+			compatible = "qcom,regulator-cooling-device";
+			regulator-cdev-supply = <&pm8909_s1_floor_corner>;
+			regulator-levels = <5 1>;
+			#cooling-cells = <2>;
+		};
 	};
 
 	rpm-regulator-smpa2 {
diff --git a/arch/arm64/boot/dts/qcom/msm8909-thermal.dtsi b/arch/arm64/boot/dts/qcom/msm8909-thermal.dtsi
new file mode 100644
index 0000000..21c393b
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8909-thermal.dtsi
@@ -0,0 +1,477 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/thermal/thermal.h>
+
+&soc {
+	qmi-tmd-devices {
+		compatible = "qcom,qmi_cooling_devices";
+
+		modem {
+			qcom,instance-id = <0x0>;
+
+			modem_pa: modem_pa {
+				qcom,qmi-dev-name = "pa";
+				#cooling-cells = <2>;
+			};
+
+			modem_proc: modem_proc {
+				qcom,qmi-dev-name = "modem";
+				#cooling-cells = <2>;
+			};
+
+			modem_current: modem_current {
+				qcom,qmi-dev-name = "modem_current";
+				#cooling-cells = <2>;
+			};
+
+			modem_vdd: modem_vdd {
+				qcom,qmi-dev-name = "cpuv_restriction_cold";
+				#cooling-cells = <2>;
+			};
+		};
+	};
+};
+
+&thermal_zones {
+	mdm-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&tsens0 0>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	camera-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&tsens0 1>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	gpu-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&tsens0 2>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	cpu0-2-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&tsens0 3>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	cpu1-3-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&tsens0 4>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	pop-mem-step {
+		polling-delay-passive = <250>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 0>;
+		thermal-governor = "step_wise";
+		trips {
+			pop_trip: pop-trip {
+				temperature = <75000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			pop_cdev0 {
+				trip = <&pop_trip>;
+				cooling-device =
+					<&CPU0 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			pop_cdev1 {
+				trip = <&pop_trip>;
+				cooling-device =
+					<&CPU1 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			pop_cdev2 {
+				trip = <&pop_trip>;
+				cooling-device =
+					<&CPU2 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			pop_cdev3 {
+				trip = <&pop_trip>;
+				cooling-device =
+					<&CPU3 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+		};
+	};
+
+	gpu-step {
+		polling-delay-passive = <250>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 2>;
+		thermal-governor = "step_wise";
+		trips {
+			gpu_step_trip: gpu-step-trip {
+				temperature = <80000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			gpu_cdev0 {
+				trip = <&gpu_step_trip>;
+				cooling-device =
+					<&msm_gpu THERMAL_NO_LIMIT
+						THERMAL_NO_LIMIT>;
+			};
+		};
+	};
+
+	cpu0-2-step {
+		polling-delay-passive = <65>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 3>;
+		thermal-governor = "step_wise";
+		trips {
+			cpu0_2_step_trip: cpu0-2-step-trip {
+				temperature = <85000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_cdev {
+				trip = <&cpu0_2_step_trip>;
+				cooling-device =
+					<&CPU0 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			cpu1_cdev {
+				trip = <&cpu0_2_step_trip>;
+				cooling-device =
+					<&CPU1 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			cpu2_cdev {
+				trip = <&cpu0_2_step_trip>;
+				cooling-device =
+					<&CPU2 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			cpu3_cdev {
+				trip = <&cpu0_2_step_trip>;
+				cooling-device =
+					<&CPU3 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+		};
+	};
+
+	cpu1-3-step {
+		polling-delay-passive = <65>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 4>;
+		thermal-governor = "step_wise";
+		trips {
+			cpu1_3_step_trip: cpu1-3-step-trip {
+				temperature = <85000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_cdev {
+				trip = <&cpu1_3_step_trip>;
+				cooling-device =
+					<&CPU0 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			cpu1_cdev {
+				trip = <&cpu1_3_step_trip>;
+				cooling-device =
+					<&CPU1 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			cpu2_cdev {
+				trip = <&cpu1_3_step_trip>;
+				cooling-device =
+					<&CPU2 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			cpu3_cdev {
+				trip = <&cpu1_3_step_trip>;
+				cooling-device =
+					<&CPU3 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+		};
+	};
+	mdm-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 0>;
+		tracks-low;
+		trips {
+			mdm_trip: mdm-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&mdm_trip>;
+				cooling-device =
+					<&CPU0 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cpu1_vdd_cdev {
+				trip = <&mdm_trip>;
+				cooling-device =
+					<&CPU1 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cpu2_vdd_cdev {
+				trip = <&mdm_trip>;
+				cooling-device =
+					<&CPU2 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cpu3_vdd_cdev {
+				trip = <&mdm_trip>;
+				cooling-device =
+					<&CPU3 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cx_vdd_cdev {
+				trip = <&mdm_trip>;
+				cooling-device = <&pm8909_cx_cdev 0 0>;
+			};
+		};
+	};
+	camera-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 1>;
+		tracks-low;
+		trips {
+			camera_trip: camera-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&camera_trip>;
+				cooling-device =
+					<&CPU0 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cpu1_vdd_cdev {
+				trip = <&camera_trip>;
+				cooling-device =
+					<&CPU1 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cpu2_vdd_cdev {
+				trip = <&camera_trip>;
+				cooling-device =
+					<&CPU2 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cpu3_vdd_cdev {
+				trip = <&camera_trip>;
+				cooling-device =
+					<&CPU3 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cx_vdd_cdev {
+				trip = <&camera_trip>;
+				cooling-device = <&pm8909_cx_cdev 0 0>;
+			};
+		};
+	};
+	gpu-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 2>;
+		tracks-low;
+		trips {
+			gpu_trip: gpu-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&gpu_trip>;
+				cooling-device =
+					<&CPU0 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cpu1_vdd_cdev {
+				trip = <&gpu_trip>;
+				cooling-device =
+					<&CPU1 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cpu2_vdd_cdev {
+				trip = <&gpu_trip>;
+				cooling-device =
+					<&CPU2 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cpu3_vdd_cdev {
+				trip = <&gpu_trip>;
+				cooling-device =
+					<&CPU3 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cx_vdd_cdev {
+				trip = <&gpu_trip>;
+				cooling-device = <&pm8909_cx_cdev 0 0>;
+			};
+		};
+	};
+	cpu0-2-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 3>;
+		tracks-low;
+		trips {
+			cpu0_2_trip: cpu0-2-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&cpu0_2_trip>;
+				cooling-device =
+					<&CPU0 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cpu1_vdd_cdev {
+				trip = <&cpu0_2_trip>;
+				cooling-device =
+					<&CPU1 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cpu2_vdd_cdev {
+				trip = <&cpu0_2_trip>;
+				cooling-device =
+					<&CPU2 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cpu3_vdd_cdev {
+				trip = <&cpu0_2_trip>;
+				cooling-device =
+					<&CPU3 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cx_vdd_cdev {
+				trip = <&cpu0_2_trip>;
+				cooling-device = <&pm8909_cx_cdev 0 0>;
+			};
+		};
+	};
+	cpu1-3-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 4>;
+		tracks-low;
+		trips {
+			cpu1_3_trip: cpu1-3-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&cpu1_3_trip>;
+				cooling-device =
+					<&CPU0 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cpu1_vdd_cdev {
+				trip = <&cpu1_3_trip>;
+				cooling-device =
+					<&CPU1 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cpu2_vdd_cdev {
+				trip = <&cpu1_3_trip>;
+				cooling-device =
+					<&CPU2 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cpu3_vdd_cdev {
+				trip = <&cpu1_3_trip>;
+				cooling-device =
+					<&CPU3 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cx_vdd_cdev {
+				trip = <&cpu1_3_trip>;
+				cooling-device = <&pm8909_cx_cdev 0 0>;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8909-vidc.dtsi b/arch/arm64/boot/dts/qcom/msm8909-vidc.dtsi
new file mode 100644
index 0000000..5eeaa21
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8909-vidc.dtsi
@@ -0,0 +1,163 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	qcom,vidc@1d00000 {
+		compatible = "qcom,msm-vidc";
+		reg = <0x01d00000 0xff000>;
+		interrupts = <0 44 0>;
+		qcom,hfi-version = "3xx";
+		venus-supply = <&gdsc_venus>;
+		venus-core0-supply = <&gdsc_venus_core0>;
+		clocks = <&clock_gcc clk_gcc_venus0_vcodec0_clk>,
+			<&clock_gcc clk_gcc_venus0_core0_vcodec0_clk>,
+			<&clock_gcc clk_gcc_venus0_ahb_clk>,
+			<&clock_gcc clk_gcc_venus0_axi_clk>;
+		clock-names = "core_clk", "core0_clk", "iface_clk", "bus_clk";
+		qcom,clock-configs = <0x1 0x0 0x0 0x0>;
+		qcom,sw-power-collapse;
+		qcom,slave-side-cp;
+		qcom,hfi = "venus";
+		qcom,reg-presets = <0xe0020 0x05555556>,
+			<0xe0024 0x05555556>,
+			<0x80124 0x00000003>;
+		qcom,qdss-presets = <0x826000 0x1000>,
+			<0x827000 0x1000>,
+			<0x822000 0x1000>,
+			<0x803000 0x1000>,
+			<0x9180000 0x1000>,
+			<0x9181000 0x1000>;
+		qcom,max-hw-load = <244800>; /* 1080p@30 + 720p@30 */
+		qcom,firmware-name = "venus";
+		qcom,allowed-clock-rates = <307200000 266670000 133330000>;
+		qcom,clock-freq-tbl {
+			qcom,profile-enc {
+				qcom,codec-mask = <0x55555555>;
+				qcom,cycles-per-mb = <2316>;
+				qcom,low-power-mode-factor = <32768>;
+			};
+			qcom,profile-dec {
+				qcom,codec-mask = <0xf3ffffff>;
+				qcom,cycles-per-mb = <788>;
+			};
+			qcom,profile-hevcdec {
+				qcom,codec-mask = <0x0c000000>;
+				qcom,cycles-per-mb = <1015>;
+			};
+		};
+
+		/* MMUs */
+		non_secure_cb {
+			compatible = "qcom,msm-vidc,context-bank";
+			label = "venus_ns";
+			iommus = <&apps_iommu 0x800 0x00>,
+				<&apps_iommu 0x807 0x00>,
+				<&apps_iommu 0x808 0x27>,
+				<&apps_iommu 0x811 0x0>;
+			buffer-types = <0xfff>;
+			virtual-addr-pool = <0x5dc00000 0x8f000000>;
+		};
+
+		secure_bitstream_cb {
+			compatible = "qcom,msm-vidc,context-bank";
+			label = "venus_sec_bitstream";
+			iommus = <&apps_iommu 0x90c 0x20>;
+			buffer-types = <0x241>;
+			virtual-addr-pool = <0x4b000000 0x12c00000>;
+			qcom,secure-context-bank;
+		};
+
+		secure_pixel_cb {
+			compatible = "qcom,msm-vidc,context-bank";
+			label = "venus_sec_pixel";
+			iommus = <&apps_iommu 0x900 0x00>,
+				<&apps_iommu 0x909 0x20>,
+				<&apps_iommu 0x90a 0x00>,
+				<&apps_iommu 0x90b 0x20>,
+				<&apps_iommu 0x90e 0x00>;
+			buffer-types = <0x106>;
+			virtual-addr-pool = <0x25800000 0x25800000>;
+			qcom,secure-context-bank;
+		};
+
+		secure_non_pixel_cb {
+			compatible = "qcom,msm-vidc,context-bank";
+			label = "venus_sec_non_pixel";
+			iommus = <&apps_iommu 0x9c0 0x00>,
+				<&apps_iommu 0x907 0x00>,
+				<&apps_iommu 0x908 0x20>,
+				<&apps_iommu 0x90d 0x20>,
+				<&apps_iommu 0x90f 0x00>;
+			buffer-types = <0x480>;
+			virtual-addr-pool = <0x1000000 0x24800000>;
+			qcom,secure-context-bank;
+		};
+
+		/* Buses */
+		venus_bus_ddr {
+			compatible = "qcom,msm-vidc,bus";
+			label = "venus-ddr";
+			qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
+			qcom,bus-slave = <MSM_BUS_SLAVE_EBI_CH0>;
+			qcom,bus-governor = "venus-ddr-gov";
+			qcom,bus-range-kbps = <1000 917000>;
+		};
+
+		arm9_bus_ddr {
+			compatible = "qcom,msm-vidc,bus";
+			label = "venus-arm9-ddr";
+			qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
+			qcom,bus-slave = <MSM_BUS_SLAVE_EBI_CH0>;
+			qcom,bus-governor = "performance";
+			qcom,bus-range-kbps = <1 1>;
+		};
+	};
+
+	venus-ddr-gov {
+		compatible = "qcom,msm-vidc,governor,table";
+		name = "venus-ddr-gov";
+		status = "ok";
+		qcom,bus-freq-table {
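+			/* Each entry is a <codec load  bus bandwidth in KBps> pair */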
+			qcom,profile-enc {
+				qcom,codec-mask = <0x55555555>;
+				qcom,load-busfreq-tbl =
+					<244800 841000>,   /* 1080p30E   */
+					<216000 740000>,   /* 720p60E    */
+					<194400 680000>,   /* FWVGA120E  */
+					<144000 496000>,   /* VGA120E    */
+					<108000 370000>,   /* 720p30E    */
+					<97200  340000>,   /* FWVGA60E   */
+					<48600  170000>,   /* FWVGA30E   */
+					<72000  248000>,   /* VGA60E     */
+					<36000  124000>,   /* VGA30E     */
+					<18000  70000>,    /* QVGA60E    */
+					<9000   35000>,    /* QVGA30E    */
+					<0      0>;
+			};
+			qcom,profile-dec {
+				qcom,codec-mask = <0xffffffff>;
+				qcom,load-busfreq-tbl =
+					<244800 605000>,   /* 1080p30D   */
+					<216000 540000>,   /* 720p60D    */
+					<194400 484000>,   /* FWVGA120D  */
+					<144000 360000>,   /* VGA120D    */
+					<108000 270000>,   /* 720p30D    */
+					<97200  242000>,   /* FWVGA60D   */
+					<48600  121000>,   /* FWVGA30D   */
+					<72000  180000>,   /* VGA60D     */
+					<36000  90000>,    /* VGA30D     */
+					<18000  45000>,    /* HVGA30D    */
+					<0      0>;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8909.dtsi b/arch/arm64/boot/dts/qcom/msm8909.dtsi
index a6d42f1..c48756c 100644
--- a/arch/arm64/boot/dts/qcom/msm8909.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8909.dtsi
@@ -77,7 +77,7 @@
 			efficiency = <1024>;
 			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
 			qcom,sleep-status = <&cpu0_slp_sts>;
-			qcom,limits-info = <&mitigation_profile0>;
+			#cooling-cells = <2>;
 		};
 
 		CPU1: cpu@1 {
@@ -87,7 +87,7 @@
 			efficiency = <1024>;
 			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
 			qcom,sleep-status = <&cpu1_slp_sts>;
-			qcom,limits-info = <&mitigation_profile2>;
+			#cooling-cells = <2>;
 		};
 
 		CPU2: cpu@2 {
@@ -97,7 +97,7 @@
 			efficiency = <1024>;
 			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
 			qcom,sleep-status = <&cpu2_slp_sts>;
-			qcom,limits-info = <&mitigation_profile1>;
+			#cooling-cells = <2>;
 		};
 
 		CPU3: cpu@3 {
@@ -107,7 +107,7 @@
 			efficiency = <1024>;
 			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
 			qcom,sleep-status = <&cpu3_slp_sts>;
-			qcom,limits-info = <&mitigation_profile2>;
+			#cooling-cells = <2>;
 		};
 	};
 
@@ -239,6 +239,7 @@
 #include "msm8909-gpu.dtsi"
 #include "msm8909-coresight.dtsi"
 #include "msm8909-bus.dtsi"
+#include "msm8909-vidc.dtsi"
 #include "msm8909-mdss.dtsi"
 #include "msm8909-mdss-pll.dtsi"
 
@@ -356,6 +357,7 @@
 		reg = <0x1800000 0x80000>,
 		      <0xb016000 0x00040>;
 		reg-names = "cc_base", "apcs_base";
+		qcom,gfx3d_clk_src-opp-store-vcorner = <&msm_gpu>;
 		vdd_dig-supply = <&pm8909_s1_corner>;
 		vdd_sr2_dig-supply = <&pm8909_s1_corner_ao>;
 		vdd_sr2_pll-supply = <&pm8909_l7_ao>;
@@ -558,77 +560,16 @@
 
 	};
 
-	qcom,sensor-information {
-		compatible = "qcom,sensor-information";
-		sensor_information0: qcom,sensor-information-0 {
-			qcom,sensor-type = "tsens";
-			qcom,sensor-name = "tsens_tz_sensor0";
-			qcom,alias-name = "pop_mem";
-		};
-
-		sensor_information1: qcom,sensor-information-1 {
-			qcom,sensor-type =  "tsens";
-			qcom,sensor-name = "tsens_tz_sensor1";
-		};
-
-		sensor_information2: qcom,sensor-information-2 {
-			qcom,sensor-type =  "tsens";
-			qcom,sensor-name = "tsens_tz_sensor2";
-		};
-
-		sensor_information3: qcom,sensor-information-3 {
-			qcom,sensor-type =  "tsens";
-			qcom,sensor-name = "tsens_tz_sensor3";
-		};
-
-		sensor_information4: qcom,sensor-information-4 {
-			qcom,sensor-type = "tsens";
-			qcom,sensor-name = "tsens_tz_sensor4";
-		};
-
-		sensor_information5: qcom,sensor-information-5 {
-			qcom,sensor-type = "adc";
-			qcom,sensor-name = "pa_therm0";
-		};
-
-		sensor_information6: qcom,sensor-information-6 {
-			qcom,sensor-type = "adc";
-			qcom,sensor-name = "case_therm";
-		};
-
-		sensor_information7: qcom,sensor-information-7 {
-			qcom,sensor-type = "alarm";
-			qcom,sensor-name = "pm8909_tz";
-			qcom,scaling-factor = <1000>;
-		};
-
-		sensor_information8: qcom,sensor-information-8 {
-			qcom,sensor-type = "adc";
-			qcom,sensor-name = "xo_therm";
-		};
-
-		sensor_information9: qcom,sensor-information-9 {
-			qcom,sensor-type = "adc";
-			qcom,sensor-name = "xo_therm_buf";
-		};
-	};
-
-	mitigation_profile0: qcom,limit_info-0 {
-		qcom,temperature-sensor = <&sensor_information3>;
-		qcom,boot-frequency-mitigate;
-		qcom,emergency-frequency-mitigate;
-	};
-
-	mitigation_profile1: qcom,limit_info-1 {
-		qcom,temperature-sensor = <&sensor_information3>;
-		qcom,boot-frequency-mitigate;
-		qcom,hotplug-mitigation-enable;
-	};
-
-	mitigation_profile2: qcom,limit_info-2 {
-		qcom,temperature-sensor = <&sensor_information4>;
-		qcom,boot-frequency-mitigate;
-		qcom,hotplug-mitigation-enable;
+	tsens0: tsens@4a8000 {
+		compatible = "qcom,msm8909-tsens";
+		reg = <0x4a8000 0x1000>,
+			<0x4a9000 0x1000>,
+			<0x5c000  0x1000>;
+		reg-names = "tsens_srot_physical",
+			"tsens_tm_physical", "tsens_eeprom_physical";
+		interrupts = <0 184 0>;
+		interrupt-names = "tsens-upper-lower";
+		#thermal-sensor-cells = <1>;
 	};
 
 	qcom,ipc-spinlock@1905000 {
@@ -1411,7 +1352,7 @@
 		clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
 			 <&clock_gcc clk_gcc_blsp1_qup1_i2c_apps_clk>;
 		clock-names = "iface_clk", "core_clk";
-		qcom,clk-freq-out = <100000>;
+		qcom,clk-freq-out = <400000>;
 		qcom,clk-freq-in  = <19200000>;
 		pinctrl-names = "i2c_active", "i2c_sleep";
 		pinctrl-0 = <&i2c_1_active>;
@@ -1947,19 +1888,6 @@
 		memory-region = <&modem_adsp_mem>;
 	};
 
-	mcd {
-		compatible = "qcom,mcd";
-		qcom,ce-hw-instance = <0>;
-		qcom,ce-device = <0>;
-		clocks = <&clock_gcc clk_crypto_clk_src>,
-			 <&clock_gcc clk_gcc_crypto_clk>,
-			 <&clock_gcc clk_gcc_crypto_ahb_clk>,
-			 <&clock_gcc clk_gcc_crypto_axi_clk>;
-		clock-names = "core_clk_src", "core_clk",
-				"iface_clk", "bus_clk";
-		qcom,ce-opp-freq = <100000000>;
-	};
-
 	cpu0_slp_sts: cpu-sleep-status@b088008 {
 		reg = <0xb088008 0x100>;
 		qcom,sleep-status-mask= <0x80000>;
@@ -2015,3 +1943,4 @@
 	clocks = <&clock_gcc clk_gcc_oxili_gfx3d_clk>;
 	status = "okay";
 };
+#include "msm8909-thermal.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/msm8909w-bg-wtp-v2.dts b/arch/arm64/boot/dts/qcom/msm8909w-bg-wtp-v2.dts
index 9dd80f0..255c146 100644
--- a/arch/arm64/boot/dts/qcom/msm8909w-bg-wtp-v2.dts
+++ b/arch/arm64/boot/dts/qcom/msm8909w-bg-wtp-v2.dts
@@ -54,6 +54,8 @@
 			synaptics,power-delay-ms = <200>;
 			synaptics,reset-delay-ms = <200>;
 			synaptics,max-y-for-2d = <389>;
+			synaptics,bus-lpm-cur-uA = <450>;
+			synaptics,do-not-disable-regulators;
 			synaptics,wakeup-gestures-en;
 			synaptics,resume-in-workqueue;
 			synaptics,x-flip;
@@ -71,6 +73,7 @@
 
 	qcom,blackghost {
 		compatible = "qcom,pil-blackghost";
+		qcom,pil-force-shutdown;
 		qcom,firmware-name = "bg-wear";
 		/* GPIO inputs from blackghost */
 		qcom,bg2ap-status-gpio = <&msm_gpio 97 0>;
@@ -221,6 +224,7 @@
 			     &nfcw_disable_active
 			     &nfc_clk_default>;
 		pinctrl-1 = <&nfcw_int_suspend &nfcw_disable_suspend>;
+		clocks = <&clock_rpm clk_bb_clk3_pin>;
 		clock-names = "ref_clk";
 	};
 };
@@ -241,6 +245,33 @@
 	status = "disabled";
 };
 
+&sdc1_clk_off {
+	config {
+		pins = "sdc1_clk";
+		bias-disable; /* NO pull */
+		drive-strength = <2>; /* 2 MA */
+		output-low;
+	};
+};
+
+&sdc1_cmd_off {
+	config {
+		pins = "sdc1_cmd";
+		bias-disable; /* NO pull */
+		drive-strength = <2>; /* 2 MA */
+		output-low;
+	};
+};
+
+&sdc1_data_off {
+	config {
+		pins = "sdc1_data";
+		bias-disable; /* NO pull */
+		drive-strength = <2>; /* 2 MA */
+		output-low;
+	};
+};
+
 &sdhc_2 {
 	status = "disabled";
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8909w-pm660-regulator.dtsi b/arch/arm64/boot/dts/qcom/msm8909w-pm660-regulator.dtsi
index 512b0fb..ecf28c5 100644
--- a/arch/arm64/boot/dts/qcom/msm8909w-pm660-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8909w-pm660-regulator.dtsi
@@ -52,6 +52,13 @@
 			qcom,init-voltage = <1>;
 			qcom,use-voltage-corner;
 		};
+
+		pm660_cx_cdev: regulator-cx-cdev {
+			compatible = "qcom,regulator-cooling-device";
+			regulator-cdev-supply = <&pm660_s2_floor_corner>;
+			regulator-levels = <5 1>;
+			#cooling-cells = <2>;
+		};
 	};
 
 	/* MX supply */
diff --git a/arch/arm64/boot/dts/qcom/msm8909w.dtsi b/arch/arm64/boot/dts/qcom/msm8909w.dtsi
index c2e28d1..7229564 100644
--- a/arch/arm64/boot/dts/qcom/msm8909w.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8909w.dtsi
@@ -105,3 +105,127 @@
 	qcom,platform-reset-gpio = <&msm_gpio 25 0>;
 	qcom,platform-bklight-en-gpio = <&msm_gpio 37 0>;
 };
+
+&thermal_zones {
+	gpu-step {
+		trips {
+			gpu-step-trip {
+				temperature = <70000>;
+			};
+		};
+	};
+	cpu0-2-step {
+		trips {
+			cpu0-2-step-trip {
+				temperature = <60000>;
+			};
+		};
+	};
+	cpu1-3-step {
+		trips {
+			cpu1-3-step-trip {
+				temperature = <60000>;
+			};
+		};
+	};
+
+	case-therm-step {
+		polling-delay-passive = <5000>;
+		polling-delay = <0>;
+		thermal-sensors = <&pm660_adc_tm 0x51>;
+		thermal-governor = "step_wise";
+		trips {
+			cpu_freq_trip: cpu-freq-trip {
+				temperature = <40000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+			modem_mon_trip0: modem-mon-trip0 {
+				temperature = <47000>;
+				hysteresis = <2000>;
+				type = "passive";
+			};
+			modem_mon_trip1: modem-mon-trip1 {
+				temperature = <55000>;
+				hysteresis = <2000>;
+				type = "passive";
+			};
+			cpu1_hotplug_trip: cpu1-hotplug-trip {
+				temperature = <49000>;
+				hysteresis = <3000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_cdev {
+				trip = <&cpu_freq_trip>;
+				cooling-device =
+					<&CPU0 THERMAL_NO_LIMIT 2>;
+			};
+			cpu1_cdev {
+				trip = <&cpu_freq_trip>;
+				cooling-device =
+					<&CPU1 THERMAL_NO_LIMIT 2>;
+			};
+			cpu2_cdev {
+				trip = <&cpu_freq_trip>;
+				cooling-device =
+					<&CPU2 THERMAL_NO_LIMIT 2>;
+			};
+			cpu3_cdev {
+				trip = <&cpu_freq_trip>;
+				cooling-device =
+					<&CPU3 THERMAL_NO_LIMIT 2>;
+			};
+			modem_lvl1 {
+				trip = <&modem_mon_trip0>;
+				cooling-device = <&modem_pa 2 2>;
+			};
+			modem_lvl2 {
+				trip = <&modem_mon_trip1>;
+				cooling-device = <&modem_pa 3 3>;
+			};
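+			/* Pin CPU1 to its deepest cooling state at the hotplug trip */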
+			hotplug_cpu1_cdev {
+				trip = <&cpu1_hotplug_trip>;
+				cooling-device =
+					<&CPU1 THERMAL_MAX_LIMIT
+						THERMAL_MAX_LIMIT>;
+			};
+		};
+	};
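+	/* The *-lowf zones below each map their trip to the pm660 CX regulator cooling device */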
+	mdm-lowf {
+		cooling-maps {
+			cx_vdd_cdev {
+				cooling-device = <&pm660_cx_cdev 0 0>;
+			};
+		};
+	};
+	camera-lowf {
+		cooling-maps {
+			cx_vdd_cdev {
+				cooling-device = <&pm660_cx_cdev 0 0>;
+			};
+		};
+	};
+	gpu-lowf {
+		cooling-maps {
+			cx_vdd_cdev {
+				cooling-device = <&pm660_cx_cdev 0 0>;
+			};
+		};
+	};
+	cpu0-2-lowf {
+		cooling-maps {
+			cx_vdd_cdev {
+				cooling-device = <&pm660_cx_cdev 0 0>;
+			};
+		};
+	};
+	cpu1-3-lowf {
+		cooling-maps {
+			cx_vdd_cdev {
+				cooling-device = <&pm660_cx_cdev 0 0>;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-regulator.dtsi b/arch/arm64/boot/dts/qcom/msm8916-regulator.dtsi
index 0313ebd..36a67af 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-regulator.dtsi
@@ -141,6 +141,12 @@
 			qcom,use-voltage-floor-corner;
 			qcom,always-send-voltage;
 		};
+		pm8916_cx_cdev: regulator-cx-cdev {
+			compatible = "qcom,regulator-cooling-device";
+			regulator-cdev-supply = <&pm8916_s1_floor_corner>;
+			regulator-levels = <5 1>;
+			#cooling-cells = <2>;
+		};
 	};
 
 	rpm-regulator-smpa3 {
diff --git a/arch/arm64/boot/dts/qcom/msm8917-audio-cdp.dtsi b/arch/arm64/boot/dts/qcom/msm8917-audio-cdp.dtsi
new file mode 100644
index 0000000..465859a
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8917-audio-cdp.dtsi
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&int_codec {
+	status = "okay";
+	qcom,msm-hs-micbias-type = "external";
+
+	asoc-wsa-codec-names = "wsa881x-i2c-codec.2-000f";
+	asoc-wsa-codec-prefixes = "SpkrMono";
+
+	msm-vdd-wsa-switch-supply = <&pm8937_l13>;
+	qcom,msm-vdd-wsa-switch-voltage = <3075000>;
+	qcom,msm-vdd-wsa-switch-current = <5000>;
+};
+
+&wsa881x_i2c_f {
+	status = "okay";
+};
+
+&wsa881x_i2c_45 {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8917-camera-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/msm8917-camera-pinctrl.dtsi
new file mode 100644
index 0000000..33cc8ae
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8917-camera-pinctrl.dtsi
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+cci {
+	cci0_active: cci0_active {
+		/* cci0 active state */
+		mux {
+			/* CLK, DATA */
+			pins = "gpio29", "gpio30";
+			function = "cci_i2c";
+		};
+
+		config {
+			pins = "gpio29", "gpio30";
+			drive-strength = <2>; /* 2 MA */
+			bias-disable; /* No PULL */
+		};
+	};
+
+	cci0_suspend: cci0_suspend {
+		/* cci0 suspended state */
+		mux {
+			/* CLK, DATA */
+			pins = "gpio29", "gpio30";
+			function = "cci_i2c";
+		};
+
+		config {
+			pins = "gpio29", "gpio30";
+			drive-strength = <2>; /* 2 MA */
+			bias-disable; /* No PULL */
+		};
+	};
+
+	cci1_active: cci1_active {
+		/* cci1 active state */
+		mux {
+			/* CLK, DATA */
+			pins = "gpio31", "gpio32";
+			function = "cci_i2c";
+		};
+
+		config {
+			pins = "gpio31", "gpio32";
+			drive-strength = <2>; /* 2 MA */
+			bias-disable; /* No PULL */
+		};
+	};
+
+	cci1_suspend: cci1_suspend {
+		/* cci1 suspended state */
+		mux {
+			/* CLK, DATA */
+			pins = "gpio31", "gpio32";
+			function = "cci_i2c";
+		};
+
+		config {
+			pins = "gpio31", "gpio32";
+			drive-strength = <2>; /* 2 MA */
+			bias-disable; /* No PULL */
+		};
+	};
+};
+
+/* sensors */
+cam_sensor_mclk0_default: cam_sensor_mclk0_default {
+	/* MCLK0 */
+	mux {
+		/* CLK, DATA */
+		pins = "gpio26";
+		function = "cam_mclk";
+	};
+
+	config {
+		pins = "gpio26";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+cam_sensor_mclk0_sleep: cam_sensor_mclk0_sleep {
+	/* MCLK0 */
+	mux {
+		/* CLK, DATA */
+		pins = "gpio26";
+		function = "cam_mclk";
+	};
+
+	config {
+		pins = "gpio26";
+		bias-pull-down; /* PULL DOWN */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+cam_sensor_rear_default: cam_sensor_rear_default {
+	/* RESET, STANDBY */
+	mux {
+		pins = "gpio36", "gpio35";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio36", "gpio35";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+cam_sensor_rear_sleep: cam_sensor_rear_sleep {
+	/* RESET, STANDBY */
+	mux {
+		pins = "gpio36", "gpio35";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio36", "gpio35";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+cam_sensor_rear_vdig: cam_sensor_rear_vdig {
+	/* VDIG */
+	mux {
+		pins = "gpio62";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio62";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+cam_sensor_rear_vdig_sleep: cam_sensor_rear_vdig_sleep {
+	/* VDIG */
+	mux {
+		pins = "gpio62";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio62";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+cam_sensor_mclk1_default: cam_sensor_mclk1_default {
+	/* MCLK1 */
+	mux {
+		/* CLK, DATA */
+		pins = "gpio27";
+		function = "cam_mclk";
+	};
+
+	config {
+		pins = "gpio27";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+cam_sensor_mclk1_sleep: cam_sensor_mclk1_sleep {
+	/* MCLK1 */
+	mux {
+		/* CLK, DATA */
+		pins = "gpio27";
+		function = "cam_mclk";
+	};
+
+	config {
+		pins = "gpio27";
+		bias-pull-down; /* PULL DOWN */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+cam_sensor_front_default: cam_sensor_front_default {
+	/* RESET, STANDBY */
+	mux {
+		pins = "gpio38", "gpio50";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio38", "gpio50";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+cam_sensor_front_sleep: cam_sensor_front_sleep {
+	/* RESET, STANDBY */
+	mux {
+		pins = "gpio38", "gpio50";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio38", "gpio50";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+cam_sensor_mclk2_default: cam_sensor_mclk2_default {
+	/* MCLK2 */
+	mux {
+		/* CLK, DATA */
+		pins = "gpio28";
+		function = "cam_mclk";
+	};
+
+	config {
+		pins = "gpio28";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+cam_sensor_mclk2_sleep: cam_sensor_mclk2_sleep {
+	/* MCLK2 */
+	mux {
+		/* CLK, DATA */
+		pins = "gpio28";
+		function = "cam_mclk";
+	};
+
+	config {
+		pins = "gpio28";
+		bias-pull-down; /* PULL DOWN */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+cam_sensor_front1_default: cam_sensor_front1_default {
+	/* RESET, STANDBY */
+	mux {
+		pins = "gpio40", "gpio39";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio40", "gpio39";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+cam_sensor_front1_sleep: cam_sensor_front1_sleep {
+	/* RESET, STANDBY */
+	mux {
+		pins = "gpio40", "gpio39";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio40", "gpio39";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8917-camera-sensor-qrd.dtsi b/arch/arm64/boot/dts/qcom/msm8917-camera-sensor-qrd.dtsi
new file mode 100644
index 0000000..dbaccfa
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8917-camera-sensor-qrd.dtsi
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&cci {
+	actuator0: qcom,actuator@0 {
+		cell-index = <0>;
+		reg = <0x0>;
+		compatible = "qcom,actuator";
+		qcom,cci-master = <0>;
+		cam_vaf-supply = <&pm8937_l17>;
+		qcom,cam-vreg-name = "cam_vaf";
+		qcom,cam-vreg-min-voltage = <2850000>;
+		qcom,cam-vreg-max-voltage = <2850000>;
+		qcom,cam-vreg-op-mode = <80000>;
+	};
+
+	eeprom0: qcom,eeprom@0 {
+		cell-index = <0>;
+		compatible = "qcom,eeprom";
+		qcom,cci-master = <0>;
+		reg = <0x0>;
+		cam_vdig-supply = <&pm8937_l23>;
+		cam_vana-supply = <&pm8937_l22>;
+		cam_vio-supply = <&pm8937_l6>;
+		cam_vaf-supply = <&pm8937_l17>;
+		qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+							"cam_vaf";
+		qcom,cam-vreg-min-voltage = <1200000 0 2800000 2850000>;
+		qcom,cam-vreg-max-voltage = <1200000 0 2800000 2850000>;
+		qcom,cam-vreg-op-mode = <200000 0 80000 100000>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_default
+				&cam_sensor_rear_default>;
+		pinctrl-1 = <&cam_sensor_mclk0_sleep &cam_sensor_rear_sleep>;
+		gpios = <&tlmm 26 0>,
+			<&tlmm 36 0>,
+			<&tlmm 35 0>;
+		qcom,gpio-reset = <1>;
+		qcom,gpio-standby = <2>;
+		qcom,gpio-req-tbl-num = <0 1 2>;
+		qcom,gpio-req-tbl-flags = <1 0 0>;
+		qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+			"CAM_RESET0",
+			"CAM_STANDBY0";
+		status = "ok";
+		clocks = <&clock_gcc clk_mclk0_clk_src>,
+				<&clock_gcc clk_gcc_camss_mclk0_clk>;
+		clock-names = "cam_src_clk", "cam_clk";
+		qcom,clock-rates = <19200000 0>;
+	};
+
+	eeprom2: qcom,eeprom@2 {
+		cell-index = <2>;
+		compatible = "qcom,eeprom";
+		reg = <0x02>;
+		cam_vdig-supply = <&pm8937_l23>;
+		cam_vana-supply = <&pm8937_l22>;
+		cam_vio-supply = <&pm8937_l6>;
+		cam_vaf-supply = <&pm8937_l17>;
+		qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+					"cam_vaf";
+		qcom,cam-vreg-min-voltage = <1200000 0 2800000 2850000>;
+		qcom,cam-vreg-max-voltage = <1200000 0 2800000 2850000>;
+		qcom,cam-vreg-op-mode = <105000 0 80000 100000>;
+		qcom,gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_default
+				&cam_sensor_front1_default>;
+		pinctrl-1 = <&cam_sensor_mclk2_sleep
+				&cam_sensor_front1_sleep>;
+		gpios = <&tlmm 28 0>,
+			<&tlmm 40 0>,
+			<&tlmm 39 0>;
+		qcom,gpio-reset = <1>;
+		qcom,gpio-standby = <2>;
+		qcom,gpio-req-tbl-num = <0 1 2>;
+		qcom,gpio-req-tbl-flags = <1 0 0>;
+		qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+					  "CAM_RESET2",
+					  "CAM_STANDBY2";
+		qcom,cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_gcc clk_mclk2_clk_src>,
+			<&clock_gcc clk_gcc_camss_mclk2_clk>;
+		clock-names = "cam_src_clk", "cam_clk";
+		qcom,clock-rates = <19200000 0>;
+	};
+
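+	/* Rear camera slot: MCLK0 with the rear sensor pinctrl states, actuator, flash and eeprom0 */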
+	qcom,camera@0 {
+		cell-index = <0>;
+		compatible = "qcom,camera";
+		reg = <0x0>;
+		qcom,csiphy-sd-index = <0>;
+		qcom,csid-sd-index = <0>;
+		qcom,mount-angle = <90>;
+		qcom,actuator-src = <&actuator0>;
+		qcom,led-flash-src = <&led_flash0>;
+		qcom,eeprom-src = <&eeprom0>;
+		cam_vdig-supply = <&pm8937_l23>;
+		cam_vana-supply = <&pm8937_l22>;
+		cam_vio-supply = <&pm8937_l6>;
+		cam_vaf-supply = <&pm8937_l17>;
+		qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+							"cam_vaf";
+		qcom,cam-vreg-min-voltage = <1200000 0 2800000 2850000>;
+		qcom,cam-vreg-max-voltage = <1200000 0 2800000 2850000>;
+		qcom,cam-vreg-op-mode = <200000 0 80000 100000>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_default
+				&cam_sensor_rear_default>;
+		pinctrl-1 = <&cam_sensor_mclk0_sleep
+				&cam_sensor_rear_sleep>;
+		gpios = <&tlmm 26 0>,
+			<&tlmm 36 0>,
+			<&tlmm 35 0>;
+		qcom,gpio-reset = <1>;
+		qcom,gpio-standby = <2>;
+		qcom,gpio-req-tbl-num = <0 1 2>;
+		qcom,gpio-req-tbl-flags = <1 0 0>;
+		qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+			"CAM_RESET0",
+			"CAM_STANDBY0";
+		qcom,sensor-position = <0>;
+		qcom,sensor-mode = <0>;
+		qcom,cci-master = <0>;
+		clocks = <&clock_gcc clk_mclk0_clk_src>,
+			<&clock_gcc clk_gcc_camss_mclk0_clk>;
+		clock-names = "cam_src_clk", "cam_clk";
+		qcom,clock-rates = <24000000 0>;
+	};
+
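+	/* Front camera slot: MCLK2 with the front1 sensor pinctrl states and eeprom2 */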
+	qcom,camera@2 {
+		cell-index = <2>;
+		compatible = "qcom,camera";
+		reg = <0x02>;
+		qcom,csiphy-sd-index = <1>;
+		qcom,csid-sd-index = <1>;
+		qcom,eeprom-src = <&eeprom2>;
+		qcom,mount-angle = <270>;
+		cam_vdig-supply = <&pm8937_l23>;
+		cam_vana-supply = <&pm8937_l22>;
+		cam_vio-supply = <&pm8937_l6>;
+		qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
+		qcom,cam-vreg-min-voltage = <1200000 0 2800000>;
+		qcom,cam-vreg-max-voltage = <1200000 0 2800000>;
+		qcom,cam-vreg-op-mode = <105000 0 80000>;
+		qcom,gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_default
+				&cam_sensor_front1_default>;
+		pinctrl-1 = <&cam_sensor_mclk2_sleep
+				&cam_sensor_front1_sleep>;
+		gpios = <&tlmm 28 0>,
+			<&tlmm 40 0>,
+			<&tlmm 39 0>;
+		qcom,gpio-reset = <1>;
+		qcom,gpio-standby = <2>;
+		qcom,gpio-req-tbl-num = <0 1 2>;
+		qcom,gpio-req-tbl-flags = <1 0 0>;
+		qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+					  "CAM_RESET2",
+					  "CAM_STANDBY2";
+		qcom,sensor-position = <1>;
+		qcom,sensor-mode = <0>;
+		qcom,cci-master = <0>;
+		clocks = <&clock_gcc clk_mclk2_clk_src>,
+			<&clock_gcc clk_gcc_camss_mclk2_clk>;
+		clock-names = "cam_src_clk", "cam_clk";
+		qcom,clock-rates = <24000000 0>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8917-camera.dtsi b/arch/arm64/boot/dts/qcom/msm8917-camera.dtsi
new file mode 100644
index 0000000..4991ff7
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8917-camera.dtsi
@@ -0,0 +1,533 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	qcom,msm-cam@1b00000 {
+		compatible = "qcom,msm-cam";
+		reg = <0x1b00000 0x40000>;
+		reg-names = "msm-cam";
+		status = "ok";
+		bus-vectors = "suspend", "svs", "nominal", "turbo";
+		qcom,bus-votes = <0 160000000 320000000 320000000>;
+	};
+
+	qcom,csiphy@1b34000 {
+		status = "ok";
+		cell-index = <0>;
+		compatible = "qcom,csiphy-v3.4.2", "qcom,csiphy";
+		reg = <0x1b34000 0x1000>,
+			<0x1b00030 0x4>;
+		reg-names = "csiphy", "csiphy_clk_mux";
+		interrupts = <0 78 0>;
+		interrupt-names = "csiphy";
+		clocks = <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+			<&clock_gcc clk_csi0phytimer_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi0phytimer_clk>,
+			<&clock_gcc clk_camss_top_ahb_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi0phy_clk>,
+			<&clock_gcc clk_gcc_camss_ahb_clk>;
+		clock-names = "camss_top_ahb_clk", "ispif_ahb_clk",
+			"csiphy_timer_src_clk", "csiphy_timer_clk",
+			"camss_ahb_src", "csi_phy_clk",
+			"camss_ahb_clk";
+		qcom,clock-rates = <0 61540000 200000000 0 0 0 0>;
+	};
+
+	qcom,csiphy@1b35000 {
+		status = "ok";
+		cell-index = <1>;
+		compatible = "qcom,csiphy-v3.4.2", "qcom,csiphy";
+		reg = <0x1b35000 0x1000>,
+			<0x1b00038 0x4>;
+		reg-names = "csiphy", "csiphy_clk_mux";
+		interrupts = <0 79 0>;
+		interrupt-names = "csiphy";
+		clocks = <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+			<&clock_gcc clk_csi1phytimer_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi1phytimer_clk>,
+			<&clock_gcc clk_camss_top_ahb_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi1phy_clk>,
+			<&clock_gcc clk_gcc_camss_ahb_clk>;
+		clock-names = "camss_top_ahb_clk", "ispif_ahb_clk",
+			"csiphy_timer_src_clk", "csiphy_timer_clk",
+			"camss_ahb_src", "csi_phy_clk",
+			"camss_ahb_clk";
+		qcom,clock-rates = <0 61540000 200000000 0 0 0 0>;
+	};
+
+	qcom,csid@1b30000 {
+		status = "ok";
+		cell-index = <0>;
+		compatible = "qcom,csid-v3.4.3", "qcom,csid";
+		reg = <0x1b30000 0x400>;
+		reg-names = "csid";
+		interrupts = <0 51 0>;
+		interrupt-names = "csid";
+		qcom,csi-vdd-voltage = <1200000>;
+		qcom,mipi-csi-vdd-supply = <&pm8937_l2>;
+		clocks = <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_csi0_ahb_clk>,
+			<&clock_gcc clk_csi0_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi0_clk>,
+			<&clock_gcc clk_gcc_camss_csi0pix_clk>,
+			<&clock_gcc clk_gcc_camss_csi0rdi_clk>,
+			<&clock_gcc clk_gcc_camss_ahb_clk>;
+		clock-names = "camss_top_ahb_clk",
+			"ispif_ahb_clk", "csi_ahb_clk", "csi_src_clk",
+			"csi_clk", "csi_pix_clk",
+			"csi_rdi_clk", "camss_ahb_clk";
+		qcom,clock-rates = <0 61540000 0 200000000 0 0 0 0>;
+	};
+
+	qcom,csid@1b30400 {
+		status = "ok";
+		cell-index = <1>;
+		compatible = "qcom,csid-v3.4.3", "qcom,csid";
+		reg = <0x1b30400 0x400>;
+		reg-names = "csid";
+		interrupts = <0 52 0>;
+		interrupt-names = "csid";
+		qcom,csi-vdd-voltage = <1200000>;
+		qcom,mipi-csi-vdd-supply = <&pm8937_l2>;
+		clocks = <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_csi1_ahb_clk>,
+			<&clock_gcc clk_csi1_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi1_clk>,
+			<&clock_gcc clk_gcc_camss_csi1pix_clk>,
+			<&clock_gcc clk_gcc_camss_csi1rdi_clk>,
+			<&clock_gcc clk_gcc_camss_ahb_clk>;
+		clock-names = "camss_top_ahb_clk",
+			"ispif_ahb_clk", "csi_ahb_clk", "csi_src_clk",
+			"csi_clk", "csi_pix_clk",
+			"csi_rdi_clk", "camss_ahb_clk";
+		qcom,clock-rates = <0 61540000 0 200000000 0 0 0 0>;
+	};
+
+	qcom,csid@1b30800 {
+		status = "ok";
+		cell-index = <2>;
+		compatible = "qcom,csid-v3.4.3", "qcom,csid";
+		reg = <0x1b30800 0x400>;
+		reg-names = "csid";
+		interrupts = <0 153 0>;
+		interrupt-names = "csid";
+		qcom,csi-vdd-voltage = <1200000>;
+		qcom,mipi-csi-vdd-supply = <&pm8937_l2>;
+		clocks = <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_csi2_ahb_clk>,
+			<&clock_gcc clk_csi2_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi2_clk>,
+			<&clock_gcc clk_gcc_camss_csi2pix_clk>,
+			<&clock_gcc clk_gcc_camss_csi2rdi_clk>,
+			<&clock_gcc clk_gcc_camss_ahb_clk>;
+		clock-names = "camss_top_ahb_clk",
+			"ispif_ahb_clk", "csi_ahb_clk", "csi_src_clk",
+			"csi_clk", "csi_pix_clk",
+			"csi_rdi_clk", "camss_ahb_clk";
+		qcom,clock-rates = <0 61540000 0 200000000 0 0 0 0>;
+	};
+
+	qcom,ispif@1b31000 {
+		cell-index = <0>;
+		compatible = "qcom,ispif-v3.0", "qcom,ispif";
+		reg = <0x1b31000 0x500>,
+			<0x1b00020 0x10>;
+		reg-names = "ispif", "csi_clk_mux";
+		interrupts = <0 55 0>;
+		interrupt-names = "ispif";
+		qcom,num-isps = <0x2>;
+		vfe0-vdd-supply = <&gdsc_vfe>;
+		vfe1-vdd-supply = <&gdsc_vfe1>;
+		qcom,vdd-names = "vfe0-vdd", "vfe1-vdd";
+		clocks = <&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_top_ahb_clk>,
+			<&clock_gcc clk_camss_top_ahb_clk_src>,
+			<&clock_gcc clk_csi0_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi0_clk>,
+			<&clock_gcc clk_gcc_camss_csi0rdi_clk>,
+			<&clock_gcc clk_gcc_camss_csi0pix_clk>,
+			<&clock_gcc clk_csi1_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi1_clk>,
+			<&clock_gcc clk_gcc_camss_csi1rdi_clk>,
+			<&clock_gcc clk_gcc_camss_csi1pix_clk>,
+			<&clock_gcc clk_csi2_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi2_clk>,
+			<&clock_gcc clk_gcc_camss_csi2rdi_clk>,
+			<&clock_gcc clk_gcc_camss_csi2pix_clk>,
+			<&clock_gcc clk_vfe0_clk_src>,
+			<&clock_gcc clk_gcc_camss_vfe0_clk>,
+			<&clock_gcc clk_gcc_camss_csi_vfe0_clk>,
+			<&clock_gcc clk_vfe1_clk_src>,
+			<&clock_gcc clk_gcc_camss_vfe1_clk>,
+			<&clock_gcc clk_gcc_camss_csi_vfe1_clk>;
+		clock-names = "ispif_ahb_clk",
+			"camss_ahb_clk", "camss_top_ahb_clk",
+			"camss_ahb_src",
+			"csi0_src_clk", "csi0_clk",
+			"csi0_rdi_clk", "csi0_pix_clk",
+			"csi1_src_clk", "csi1_clk",
+			"csi1_rdi_clk", "csi1_pix_clk",
+			"csi2_src_clk", "csi2_clk",
+			"csi2_rdi_clk", "csi2_pix_clk",
+			"vfe0_clk_src", "camss_vfe_vfe0_clk",
+			"camss_csi_vfe0_clk", "vfe1_clk_src",
+			"camss_vfe_vfe1_clk", "camss_csi_vfe1_clk";
+		qcom,clock-rates = <61540000 0 0 0
+			200000000 0 0 0
+			200000000 0 0 0
+			200000000 0 0 0
+			0 0 0
+			0 0 0>;
+		qcom,clock-cntl-support;
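+		/* One clock-control entry per clock in the clocks list above */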
+		qcom,clock-control = "SET_RATE", "NO_SET_RATE", "NO_SET_RATE",
+			"NO_SET_RATE", "SET_RATE", "NO_SET_RATE",
+			"NO_SET_RATE", "NO_SET_RATE", "SET_RATE",
+			"NO_SET_RATE", "NO_SET_RATE", "NO_SET_RATE",
+			"SET_RATE", "NO_SET_RATE", "NO_SET_RATE",
+			"NO_SET_RATE", "INIT_RATE", "NO_SET_RATE",
+			"NO_SET_RATE", "INIT_RATE", "NO_SET_RATE",
+			"NO_SET_RATE";
+	};
+
+	vfe0: qcom,vfe0@1b10000 {
+		cell-index = <0>;
+		compatible = "qcom,vfe40";
+		reg = <0x1b10000 0x1000>,
+			<0x1b40000 0x200>;
+		reg-names = "vfe", "vfe_vbif";
+		interrupts = <0 57 0>;
+		interrupt-names = "vfe";
+		vdd-supply = <&gdsc_vfe>;
+		clocks = <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_ahb_clk>,
+			<&clock_gcc clk_vfe0_clk_src>,
+			<&clock_gcc clk_gcc_camss_vfe0_clk>,
+			<&clock_gcc clk_gcc_camss_csi_vfe0_clk>,
+			<&clock_gcc clk_gcc_camss_vfe_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_vfe_axi_clk>,
+			<&clock_gcc clk_gcc_camss_ispif_ahb_clk>;
+		clock-names = "camss_top_ahb_clk", "camss_ahb_clk",
+			"vfe_clk_src", "camss_vfe_vfe_clk",
+			"camss_csi_vfe_clk", "iface_clk",
+			"bus_clk", "iface_ahb_clk";
+		qcom,clock-rates = <0 0 266670000 0 0 0 0 0>;
+		qos-entries = <8>;
+		qos-regs = <0x2c4 0x2c8 0x2cc 0x2d0 0x2d4 0x2d8
+			0x2dc 0x2e0>;
+		qos-settings = <0xaa55aa55
+			0xaa55aa55 0xaa55aa55
+			0xaa55aa55 0xaa55aa55
+			0xaa55aa55 0xaa55aa55
+			0xaa55aa55>;
+		vbif-entries = <1>;
+		vbif-regs = <0x124>;
+		vbif-settings = <0x3>;
+		ds-entries = <17>;
+		ds-regs = <0x988 0x98c 0x990 0x994 0x998
+			0x99c 0x9a0 0x9a4 0x9a8 0x9ac 0x9b0
+			0x9b4 0x9b8 0x9bc 0x9c0 0x9c4 0x9c8>;
+		ds-settings = <0xcccc1111
+			0xcccc1111 0xcccc1111
+			0xcccc1111 0xcccc1111
+			0xcccc1111 0xcccc1111
+			0xcccc1111 0xcccc1111
+			0xcccc1111 0xcccc1111
+			0xcccc1111 0xcccc1111
+			0xcccc1111 0xcccc1111
+			0xcccc1111 0x00000110>;
+		max-clk-nominal = <400000000>;
+		max-clk-turbo = <432000000>;
+	};
+
+	vfe1: qcom,vfe1@1b14000 {
+		cell-index = <1>;
+		compatible = "qcom,vfe40";
+		reg = <0x1b14000 0x1000>,
+			<0x1ba0000 0x200>;
+		reg-names = "vfe", "vfe_vbif";
+		interrupts = <0 29 0>;
+		interrupt-names = "vfe";
+		vdd-supply = <&gdsc_vfe1>;
+		clocks = <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_ahb_clk>,
+			<&clock_gcc clk_vfe1_clk_src>,
+			<&clock_gcc clk_gcc_camss_vfe1_clk>,
+			<&clock_gcc clk_gcc_camss_csi_vfe1_clk>,
+			<&clock_gcc clk_gcc_camss_vfe1_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_vfe1_axi_clk>,
+			<&clock_gcc clk_gcc_camss_ispif_ahb_clk>;
+		clock-names = "camss_top_ahb_clk", "camss_ahb_clk",
+			"vfe_clk_src", "camss_vfe_vfe_clk",
+			"camss_csi_vfe_clk", "iface_clk",
+			"bus_clk", "iface_ahb_clk";
+		qcom,clock-rates = <0 0 266670000 0 0 0 0 0>;
+		qos-entries = <8>;
+		qos-regs = <0x2c4 0x2c8 0x2cc 0x2d0 0x2d4 0x2d8
+			0x2dc 0x2e0>;
+		qos-settings = <0xaa55aa55
+			0xaa55aa55 0xaa55aa55
+			0xaa55aa55 0xaa55aa55
+			0xaa55aa55 0xaa55aa55
+			0xaa55aa55>;
+		vbif-entries = <1>;
+		vbif-regs = <0x124>;
+		vbif-settings = <0x3>;
+		ds-entries = <17>;
+		ds-regs = <0x988 0x98c 0x990 0x994 0x998
+			0x99c 0x9a0 0x9a4 0x9a8 0x9ac 0x9b0
+			0x9b4 0x9b8 0x9bc 0x9c0 0x9c4 0x9c8>;
+		ds-settings = <0xcccc1111
+			0xcccc1111 0xcccc1111
+			0xcccc1111 0xcccc1111
+			0xcccc1111 0xcccc1111
+			0xcccc1111 0xcccc1111
+			0xcccc1111 0xcccc1111
+			0xcccc1111 0xcccc1111
+			0xcccc1111 0xcccc1111
+			0xcccc1111 0x00000110>;
+		max-clk-nominal = <400000000>;
+		max-clk-turbo = <432000000>;
+	};
+
+	qcom,vfe {
+		compatible = "qcom,vfe";
+		num_child = <2>;
+	};
+
+	qcom,cam_smmu {
+		status = "ok";
+		compatible = "qcom,msm-cam-smmu";
+		msm_cam_smmu_cb1: msm_cam_smmu_cb1 {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&apps_iommu 0x400 0x00>,
+				<&apps_iommu 0x2400 0x00>;
+			label = "vfe";
+			qcom,scratch-buf-support;
+		};
+
+		msm_cam_smmu_cb2: msm_cam_smmu_cb2 {
+			compatible = "qcom,msm-cam-smmu-cb";
+			label = "vfe_secure";
+			qcom,secure-context;
+		};
+
+		msm_cam_smmu_cb3: msm_cam_smmu_cb3 {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&apps_iommu 0x1c00 0x00>;
+			label = "cpp";
+		};
+
+		msm_cam_smmu_cb4: msm_cam_smmu_cb4 {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&apps_iommu 0x1800 0x00>;
+			label = "jpeg_enc0";
+		};
+	};
+
+	qcom,jpeg@1b1c000 {
+		status = "ok";
+		cell-index = <0>;
+		compatible = "qcom,jpeg";
+		reg = <0x1b1c000 0x400>,
+			<0x1b60000 0xc30>;
+		reg-names = "jpeg_hw", "jpeg_vbif";
+		interrupts = <0 59 0>;
+		interrupt-names = "jpeg";
+		vdd-supply = <&gdsc_jpeg>;
+		qcom,vdd-names = "vdd";
+		clock-names =  "core_clk", "iface_clk", "bus_clk0",
+			"camss_top_ahb_clk", "camss_ahb_clk";
+		clocks = <&clock_gcc clk_gcc_camss_jpeg0_clk>,
+			<&clock_gcc clk_gcc_camss_jpeg_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_jpeg_axi_clk>,
+			<&clock_gcc clk_gcc_camss_top_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_ahb_clk>;
+		qcom,clock-rates = <266670000 0 0 0 0>;
+		qcom,qos-reg-settings = <0x28 0x0000555e>,
+			<0xc8 0x00005555>;
+		qcom,msm-bus,name = "msm_camera_jpeg0";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps = <62 512 0 0>,
+			<62 512 800000 800000>;
+		qcom,vbif-reg-settings = <0xc0 0x10101000>,
+			<0xb0 0x10100010>;
+	};
+
+	qcom,irqrouter@1b00000 {
+		status = "ok";
+		cell-index = <0>;
+		compatible = "qcom,irqrouter";
+		reg = <0x1b00000 0x100>;
+		reg-names = "irqrouter";
+	};
+
+	qcom,cpp@1b04000 {
+		status = "ok";
+		cell-index = <0>;
+		compatible = "qcom,cpp";
+		reg = <0x1b04000 0x100>,
+			<0x1b80000 0x200>,
+			<0x1b18000 0x018>,
+			<0x1858078 0x4>;
+		reg-names = "cpp", "cpp_vbif", "cpp_hw", "camss_cpp";
+		interrupts = <0 49 0>;
+		interrupt-names = "cpp";
+		vdd-supply = <&gdsc_cpp>;
+		qcom,vdd-names = "vdd";
+		clocks = <&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+			<&clock_gcc clk_cpp_clk_src>,
+			<&clock_gcc clk_gcc_camss_top_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_cpp_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_cpp_axi_clk>,
+			<&clock_gcc clk_gcc_camss_cpp_clk>,
+			<&clock_gcc clk_gcc_camss_micro_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_ahb_clk>;
+		clock-names = "ispif_ahb_clk", "cpp_core_clk",
+			"camss_top_ahb_clk", "camss_vfe_cpp_ahb_clk",
+			"camss_vfe_cpp_axi_clk", "camss_vfe_cpp_clk",
+			"micro_iface_clk", "camss_ahb_clk";
+		qcom,clock-rates = <61540000 180000000 0 0 0 180000000 0 0>;
+		qcom,min-clock-rate = <133000000>;
+		resets = <&clock_gcc GCC_CAMSS_MICRO_BCR>;
+		reset-names = "micro_iface_reset";
+		qcom,bus-master = <1>;
+		qcom,msm-bus,name = "msm_camera_cpp";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<106 512 0 0>,
+			<106 512 0 0>;
+		qcom,msm-bus-vector-dyn-vote;
+		qcom,micro-reset;
+		qcom,cpp-fw-payload-info {
+			qcom,stripe-base = <156>;
+			qcom,plane-base = <141>;
+			qcom,stripe-size = <27>;
+			qcom,plane-size = <5>;
+			qcom,fe-ptr-off = <5>;
+			qcom,we-ptr-off = <11>;
+		};
+	};
+
+	cci: qcom,cci@1b0c000 {
+		status = "ok";
+		cell-index = <0>;
+		compatible = "qcom,cci";
+		reg = <0x1b0c000 0x4000>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "cci";
+		interrupts = <0 50 0>;
+		interrupt-names = "cci";
+		clocks = <&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+			<&clock_gcc clk_cci_clk_src>,
+			<&clock_gcc clk_gcc_camss_cci_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_cci_clk>,
+			<&clock_gcc clk_gcc_camss_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_top_ahb_clk>;
+		clock-names = "ispif_ahb_clk", "cci_src_clk",
+			"cci_ahb_clk", "camss_cci_clk",
+			"camss_ahb_clk", "camss_top_ahb_clk";
+		qcom,clock-rates = <61540000 19200000 0 0 0 0>,
+				<61540000 37500000 0 0 0 0>;
+		pinctrl-names = "cci_default", "cci_suspend";
+		pinctrl-0 = <&cci0_active &cci1_active>;
+		pinctrl-1 = <&cci0_suspend &cci1_suspend>;
+		gpios = <&tlmm 29 0>,
+			<&tlmm 30 0>,
+			<&tlmm 31 0>,
+			<&tlmm 32 0>;
+		qcom,gpio-tbl-num = <0 1 2 3>;
+		qcom,gpio-tbl-flags = <1 1 1 1>;
+		qcom,gpio-tbl-label = "CCI_I2C_DATA0",
+						"CCI_I2C_CLK0",
+						"CCI_I2C_DATA1",
+						"CCI_I2C_CLK1";
+		i2c_freq_100Khz: qcom,i2c_standard_mode {
+			status = "disabled";
+		};
+		i2c_freq_400Khz: qcom,i2c_fast_mode {
+			status = "disabled";
+		};
+		i2c_freq_custom: qcom,i2c_custom_mode {
+			status = "disabled";
+		};
+
+		i2c_freq_1Mhz: qcom,i2c_fast_plus_mode {
+			status = "disabled";
+		};
+	};
+};
+
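+/* CCI I2C timing parameters for each supported bus speed mode */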
+&i2c_freq_100Khz {
+	qcom,hw-thigh = <78>;
+	qcom,hw-tlow = <114>;
+	qcom,hw-tsu-sto = <28>;
+	qcom,hw-tsu-sta = <28>;
+	qcom,hw-thd-dat = <10>;
+	qcom,hw-thd-sta = <77>;
+	qcom,hw-tbuf = <118>;
+	qcom,hw-scl-stretch-en = <0>;
+	qcom,hw-trdhld = <6>;
+	qcom,hw-tsp = <1>;
+};
+
+&i2c_freq_400Khz {
+	qcom,hw-thigh = <20>;
+	qcom,hw-tlow = <28>;
+	qcom,hw-tsu-sto = <21>;
+	qcom,hw-tsu-sta = <21>;
+	qcom,hw-thd-dat = <13>;
+	qcom,hw-thd-sta = <18>;
+	qcom,hw-tbuf = <32>;
+	qcom,hw-scl-stretch-en = <0>;
+	qcom,hw-trdhld = <6>;
+	qcom,hw-tsp = <3>;
+	status = "ok";
+};
+
+&i2c_freq_custom {
+	qcom,hw-thigh = <15>;
+	qcom,hw-tlow = <28>;
+	qcom,hw-tsu-sto = <21>;
+	qcom,hw-tsu-sta = <21>;
+	qcom,hw-thd-dat = <13>;
+	qcom,hw-thd-sta = <18>;
+	qcom,hw-tbuf = <25>;
+	qcom,hw-scl-stretch-en = <1>;
+	qcom,hw-trdhld = <6>;
+	qcom,hw-tsp = <3>;
+	status = "ok";
+};
+
+&i2c_freq_1Mhz {
+	qcom,hw-thigh = <16>;
+	qcom,hw-tlow = <22>;
+	qcom,hw-tsu-sto = <17>;
+	qcom,hw-tsu-sta = <18>;
+	qcom,hw-thd-dat = <16>;
+	qcom,hw-thd-sta = <15>;
+	qcom,hw-tbuf = <19>;
+	qcom,hw-scl-stretch-en = <1>;
+	qcom,hw-trdhld = <3>;
+	qcom,hw-tsp = <3>;
+	qcom,cci-clk-src = <37500000>;
+	status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8917-cdp-mirror-lake-touch.dtsi b/arch/arm64/boot/dts/qcom/msm8917-cdp-mirror-lake-touch.dtsi
new file mode 100644
index 0000000..1f19e20
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8917-cdp-mirror-lake-touch.dtsi
@@ -0,0 +1,256 @@
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8917-pinctrl.dtsi"
+/* #include "msm8917-camera-sensor-cdp.dtsi" */
+
+&soc {
+	gpio_keys {
+		compatible = "gpio-keys";
+		input-name = "gpio-keys";
+		pinctrl-names = "tlmm_gpio_key_active", "tlmm_gpio_key_suspend";
+		pinctrl-0 = <&gpio_key_active>;
+		pinctrl-1 = <&gpio_key_suspend>;
+
+		camera_focus {
+			label = "camera_focus";
+			gpios = <&tlmm 128 0x1>;
+			linux,input-type = <1>;
+			linux,code = <0x210>;
+			debounce-interval = <15>;
+		};
+
+		camera_snapshot {
+			label = "camera_snapshot";
+			gpios = <&tlmm 127 0x1>;
+			linux,input-type = <1>;
+			linux,code = <0x2fe>;
+			debounce-interval = <15>;
+		};
+
+		vol_up {
+			label = "volume_up";
+			gpios = <&tlmm 91 0x1>;
+			linux,input-type = <1>;
+			linux,code = <115>;
+			debounce-interval = <15>;
+		};
+
+		home {
+			label = "home";
+			gpios = <&tlmm 86 0x1>;
+			linux,input-type = <1>;
+			linux,code = <102>;
+			debounce-interval = <15>;
+		};
+	};
+
+	hbtp {
+		compatible = "qcom,hbtp-input";
+		vcc_ana-supply = <&pm8937_l10>;
+		vcc_dig-supply = <&pm8937_l5>;
+		qcom,afe-load = <50000>;
+		qcom,afe-vtg-min = <2850000>;
+		qcom,afe-vtg-max = <2850000>;
+		qcom,dig-load = <15000>;
+		qcom,dig-vtg-min = <1800000>;
+		qcom,dig-vtg-max = <1800000>;
+	};
+
+	usb_detect {
+		compatible = "qcom,gpio-usbdetect";
+		interrupt-names = "vbus_det_irq";
+		interrupt-parent = <&tlmm>;
+		interrupts = <130 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&usb_mode_select>;
+		qcom,gpio-mode-sel = <&tlmm 130 0>;
+		qcom,notify-host-mode;
+		status = "disabled";
+	};
+};
+
+&flash_led {
+	compatible = "qcom,qpnp-flash-led";
+	reg = <0xd300 0x100>;
+	pinctrl-names = "flash_led_enable","flash_led_disable";
+	pinctrl-0 = <&rear_flash_led_enable>;
+	pinctrl-1 = <&rear_flash_led_disable>;
+	qcom,follow-otst2-rb-disabled;
+};
+
+&wled {
+	qcom,cons-sync-write-delay-us = <1000>;
+};
+
+&pmi_haptic {
+	qcom,actuator-type = "lra";
+	qcom,wave-play-rate-us = <4165>;
+	qcom,lra-auto-res-mode = "qwd";
+	qcom,lra-high-z = "opt1";
+	qcom,lra-res-cal-period = <0>;
+};
+
+&blsp1_uart2 {
+	status = "ok";
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart_console_active>;
+};
+
+#include "msm8937-mdss-panels.dtsi"
+
+&mdss_mdp {
+	qcom,mdss-pref-prim-intf = "dsi";
+};
+
+&mdss_dsi {
+	hw-config = "single_dsi";
+};
+
+&mdss_dsi0 {
+	qcom,dsi-pref-prim-pan = <&dsi_truly_720_vid>;
+	pinctrl-names = "mdss_default", "mdss_sleep";
+	pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
+	pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+
+	qcom,platform-te-gpio = <&tlmm 24 0>;
+	qcom,platform-reset-gpio = <&tlmm 60 0>;
+	qcom,platform-bklight-en-gpio = <&tlmm 98 0>;
+};
+
+&dsi_truly_720_vid {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-pan-enable-dynamic-fps;
+	qcom,mdss-dsi-pan-fps-update = "dfps_immediate_porch_mode_vfp";
+};
+
+&dsi_truly_720_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,ulps-enabled;
+	qcom,partial-update-enabled;
+	qcom,panel-roi-alignment = <2 2 2 2 2 2>;
+};
+
+&dsi_icn9706_720_1440_vid {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-pan-enable-dynamic-fps;
+	qcom,mdss-dsi-pan-fps-update = "dfps_immediate_porch_mode_vfp";
+};
+
+&tlmm {
+	tlmm_gpio_key {
+		gpio_key_active: gpio_key_active {
+			mux {
+				pins = "gpio86", "gpio91", "gpio127", "gpio128";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio86", "gpio91", "gpio127", "gpio128";
+			};
+		};
+
+		gpio_key_suspend: gpio_key_suspend {
+			mux {
+				pins = "gpio86", "gpio91", "gpio127", "gpio128";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio86", "gpio91", "gpio127", "gpio128";
+			};
+		};
+	};
+};
+
+&sdhc_1 {
+	/* device core power supply */
+	vdd-supply = <&pm8937_l8>;
+	qcom,vdd-voltage-level = <2900000 2900000>;
+	qcom,vdd-current-level = <200 570000>;
+
+	/* device communication power supply */
+	vdd-io-supply = <&pm8937_l5>;
+	qcom,vdd-io-always-on;
+	qcom,vdd-io-lpm-sup;
+	qcom,vdd-io-voltage-level = <1800000 1800000>;
+	qcom,vdd-io-current-level = <200 325000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on &sdc1_rclk_on>;
+	pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off &sdc1_rclk_off>;
+
+	qcom,clk-rates = <400000 20000000 25000000 50000000 100000000 192000000
+								384000000>;
+	qcom,nonremovable;
+	qcom,bus-speed-mode = "HS400_1p8v", "HS200_1p8v", "DDR_1p8v";
+
+	status = "ok";
+};
+
+&sdhc_2 {
+	/* device core power supply */
+	vdd-supply = <&pm8937_l11>;
+	qcom,vdd-voltage-level = <2950000 2950000>;
+	qcom,vdd-current-level = <15000 800000>;
+
+	/* device communication power supply */
+	vdd-io-supply = <&pm8937_l12>;
+	qcom,vdd-io-voltage-level = <1800000 2950000>;
+	qcom,vdd-io-current-level = <200 22000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
+
+	#address-cells = <0>;
+	interrupt-parent = <&sdhc_2>;
+	interrupts = <0 1 2>;
+	#interrupt-cells = <1>;
+	interrupt-map-mask = <0xffffffff>;
+	interrupt-map = <0 &intc 0 125 0
+		1 &intc 0 221 0
+		2 &tlmm 67 0>;
+	interrupt-names = "hc_irq", "pwr_irq", "status_irq";
+	cd-gpios = <&tlmm 67 0x1>;
+
+	qcom,clk-rates = <400000 20000000 25000000 50000000 100000000
+								200000000>;
+	qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+
+	status = "ok";
+};
+
+&i2c_3 {
+	status = "okay";
+	synaptics@22 {
+		compatible = "synaptics,dsx";
+		reg = <0x22>;
+		interrupt-parent = <&tlmm>;
+		interrupts = <65 0x2008>;
+		avdd-supply = <&pm8937_l10>;
+		vdd-supply = <&pm8937_l5>;
+		synaptics,vdd-voltage = <1880000 1880000>;
+		synaptics,avdd-voltage = <3008000 3008000>;
+		synaptics,vdd-current = <40000>;
+		synaptics,avdd-current = <20000>;
+		pinctrl-names = "pmx_ts_active","pmx_ts_suspend";
+		pinctrl-0 = <&ts_int_active &ts_reset_active>;
+		pinctrl-1 = <&ts_int_suspend &ts_reset_suspend>;
+		synaptics,display-coords = <0 0 719 1439>;
+		synaptics,panel-coords  = <0 0 719 1439>;
+		synaptics,reset-gpio = <&tlmm 64 0x00>;
+		synaptics,irq-gpio = <&tlmm 65 0x2008>;
+		synaptics,disable-gpios;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8917-cdp.dtsi b/arch/arm64/boot/dts/qcom/msm8917-cdp.dtsi
new file mode 100644
index 0000000..513e995
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8917-cdp.dtsi
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "msm8917-pinctrl.dtsi"
+/* #include "msm8917-camera-sensor-cdp.dtsi" */
+
+&soc {
+	gpio_keys {
+		compatible = "gpio-keys";
+		input-name = "gpio-keys";
+		pinctrl-names = "default";
+		pinctrl-0 = <&gpio_key_active>;
+
+		camera_focus {
+			label = "camera_focus";
+			gpios = <&tlmm 128 0x1>;
+			linux,input-type = <1>;
+			linux,code = <0x210>;
+			debounce-interval = <15>;
+			linux,can-disable;
+			gpio-key,wakeup;
+		};
+
+		camera_snapshot {
+			label = "camera_snapshot";
+			gpios = <&tlmm 127 0x1>;
+			linux,input-type = <1>;
+			linux,code = <0x2fe>;
+			debounce-interval = <15>;
+			linux,can-disable;
+			gpio-key,wakeup;
+		};
+
+		vol_up {
+			label = "volume_up";
+			gpios = <&tlmm 91 0x1>;
+			linux,input-type = <1>;
+			linux,code = <115>;
+			debounce-interval = <15>;
+			linux,can-disable;
+			gpio-key,wakeup;
+		};
+
+		home {
+			label = "home";
+			gpios = <&tlmm 86 0x1>;
+			linux,input-type = <1>;
+			linux,code = <102>;
+			debounce-interval = <15>;
+			linux,can-disable;
+			gpio-key,wakeup;
+		};
+	};
+
+	hbtp {
+		compatible = "qcom,hbtp-input";
+		vcc_ana-supply = <&pm8937_l10>;
+		vcc_dig-supply = <&pm8937_l5>;
+		qcom,afe-load = <50000>;
+		qcom,afe-vtg-min = <2850000>;
+		qcom,afe-vtg-max = <2850000>;
+		qcom,dig-load = <15000>;
+		qcom,dig-vtg-min = <1800000>;
+		qcom,dig-vtg-max = <1800000>;
+	};
+
+	usb_detect {
+		compatible = "qcom,gpio-usbdetect";
+		interrupt-names = "vbus_det_irq";
+		interrupt-parent = <&tlmm>;
+		interrupts = <130 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&usb_mode_select>;
+		qcom,gpio-mode-sel = <&tlmm 130 0>;
+		qcom,notify-host-mode;
+		status = "disabled";
+	};
+};
+
+&blsp1_uart2 {
+	status = "ok";
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart_console_active>;
+};
+
+#include "msm8937-mdss-panels.dtsi"
+
+&mdss_mdp {
+	qcom,mdss-pref-prim-intf = "dsi";
+};
+
+&mdss_dsi {
+	hw-config = "single_dsi";
+};
+
+&mdss_dsi0 {
+	qcom,dsi-pref-prim-pan = <&dsi_truly_720_vid>;
+	pinctrl-names = "mdss_default", "mdss_sleep";
+	pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
+	pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+
+	qcom,platform-te-gpio = <&tlmm 24 0>;
+	qcom,platform-reset-gpio = <&tlmm 60 0>;
+	qcom,platform-bklight-en-gpio = <&tlmm 98 0>;
+};
+
+&dsi_truly_720_vid {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-pan-enable-dynamic-fps;
+	qcom,mdss-dsi-pan-fps-update = "dfps_immediate_porch_mode_vfp";
+};
+
+&dsi_truly_720_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,ulps-enabled;
+	qcom,partial-update-enabled;
+	qcom,panel-roi-alignment = <2 2 2 2 2 2>;
+};
+
+&tlmm {
+	tlmm_gpio_key {
+		gpio_key_active: gpio_key_active {
+			mux {
+				pins = "gpio86", "gpio91", "gpio127", "gpio128";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio86", "gpio91", "gpio127", "gpio128";
+			};
+		};
+
+		gpio_key_suspend: gpio_key_suspend {
+			mux {
+				pins = "gpio86", "gpio91", "gpio127", "gpio128";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio86", "gpio91", "gpio127", "gpio128";
+			};
+		};
+	};
+};
+
+&sdhc_1 {
+	/* device core power supply */
+	vdd-supply = <&pm8937_l8>;
+	qcom,vdd-voltage-level = <2900000 2900000>;
+	qcom,vdd-current-level = <200 570000>;
+
+	/* device communication power supply */
+	vdd-io-supply = <&pm8937_l5>;
+	qcom,vdd-io-always-on;
+	qcom,vdd-io-lpm-sup;
+	qcom,vdd-io-voltage-level = <1800000 1800000>;
+	qcom,vdd-io-current-level = <200 325000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on &sdc1_rclk_on>;
+	pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off &sdc1_rclk_off>;
+
+	qcom,clk-rates = <400000 20000000 25000000 50000000 100000000 192000000
+								384000000>;
+	qcom,nonremovable;
+	qcom,bus-speed-mode = "HS400_1p8v", "HS200_1p8v", "DDR_1p8v";
+
+	status = "ok";
+};
+
+&sdhc_2 {
+	/* device core power supply */
+	vdd-supply = <&pm8937_l11>;
+	qcom,vdd-voltage-level = <2950000 2950000>;
+	qcom,vdd-current-level = <15000 800000>;
+
+	/* device communication power supply */
+	vdd-io-supply = <&pm8937_l12>;
+	qcom,vdd-io-voltage-level = <1800000 2950000>;
+	qcom,vdd-io-current-level = <200 22000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
+
+	#address-cells = <0>;
+	interrupt-parent = <&sdhc_2>;
+	interrupts = <0 1 2>;
+	#interrupt-cells = <1>;
+	interrupt-map-mask = <0xffffffff>;
+	interrupt-map = <0 &intc 0 125 0
+		1 &intc 0 221 0
+		2 &tlmm 67 0>;
+	interrupt-names = "hc_irq", "pwr_irq", "status_irq";
+	cd-gpios = <&tlmm 67 0x1>;
+
+	qcom,clk-rates = <400000 20000000 25000000 50000000 100000000
+								200000000>;
+	qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+
+	status = "ok";
+};
+
diff --git a/arch/arm64/boot/dts/qcom/msm8917-coresight.dtsi b/arch/arm64/boot/dts/qcom/msm8917-coresight.dtsi
new file mode 100644
index 0000000..87303c5
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8917-coresight.dtsi
@@ -0,0 +1,1039 @@
+/*
+ * Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
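+	/* Trace path: source funnels and STM feed funnel-in0 -> TMC-ETF (default sink) -> replicator -> TMC-ETR */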
+	tmc_etr: tmc@6028000 {
+		compatible = "arm,primecell";
+		reg = <0x6028000 0x1000>,
+		      <0x6044000 0x15000>;
+		reg-names = "tmc-base", "bam-base";
+
+		interrupts = <0 166 0>;
+		interrupt-names = "byte-cntr-irq";
+
+		arm,buffer-size = <0x100000>;
+		arm,sg-enable;
+		qcom,force-reg-dump;
+
+		coresight-name = "coresight-tmc-etr";
+		coresight-csr = <&csr>;
+		coresight-ctis = <&cti0 &cti8>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		port {
+			tmc_etr_in_replicator: endpoint {
+				slave-mode;
+				remote-endpoint = <&replicator_out_tmc_etr>;
+			};
+		};
+	};
+
+	replicator: replicator@6026000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b909>;
+
+		reg = <0x6026000 0x1000>;
+		reg-names = "replicator-base";
+
+		coresight-name = "coresight-replicator";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				replicator_out_tmc_etr: endpoint {
+					remote-endpoint =
+						<&tmc_etr_in_replicator>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				replicator_in_tmc_etf: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tmc_etf_out_replicator>;
+				};
+			};
+		};
+	};
+
+	tmc_etf: tmc@6027000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b961>;
+
+		reg = <0x6027000 0x1000>;
+		reg-names = "tmc-base";
+
+		coresight-name = "coresight-tmc-etf";
+		coresight-csr = <&csr>;
+
+		arm,default-sink;
+		qcom,force-reg-dump;
+
+		coresight-ctis = <&cti0 &cti8>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				tmc_etf_out_replicator: endpoint {
+					remote-endpoint =
+						<&replicator_in_tmc_etf>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				tmc_etf_in_funnel_in0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_in0_out_tmc_etf>;
+				};
+			};
+		};
+	};
+
+	funnel_in0: funnel@6021000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6021000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-in0";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				funnel_in0_out_tmc_etf: endpoint {
+					remote-endpoint =
+						<&tmc_etf_in_funnel_in0>;
+				};
+			};
+
+			port@1 {
+				reg = <7>;
+				funnel_in0_in_stm: endpoint {
+					slave-mode;
+					remote-endpoint = <&stm_out_funnel_in0>;
+				};
+			};
+
+			port@2 {
+				reg = <6>;
+				funnel_in0_in_tpda: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpda_out_funnel_in0>;
+				};
+			};
+
+			port@3 {
+				reg = <3>;
+				funnel_in0_in_funnel_center: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_center_out_funnel_in0>;
+				};
+			};
+
+			port@4 {
+				reg = <4>;
+				funnel_in0_in_funnel_right: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_right_out_funnel_in0>;
+				};
+			};
+
+			port@5 {
+				reg = <5>;
+				funnel_in0_in_funnel_mm: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_mm_out_funnel_in0>;
+				};
+			};
+		};
+	};
+
+	funnel_mm: funnel@6130000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6130000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-mm";
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				funnel_mm_out_funnel_in0: endpoint {
+					remote-endpoint =
+						<&funnel_in0_in_funnel_mm>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				funnel_mm_in_wcn_etm0: endpoint {
+					slave-mode;
+					remote-endpoint =
+					<&wcn_etm0_out_funnel_mm>;
+				};
+			};
+
+			port@2 {
+				reg = <4>;
+				funnel_mm_in_funnel_cam: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_cam_out_funnel_mm>;
+				};
+			};
+
+			port@3 {
+				reg = <5>;
+				funnel_mm_in_audio_etm0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&audio_etm0_out_funnel_mm>;
+				};
+			};
+		};
+	};
+
+	funnel_center: funnel@6100000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6100000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-center";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				funnel_center_out_funnel_in0: endpoint {
+					remote-endpoint =
+						<&funnel_in0_in_funnel_center>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				funnel_center_in_rpm_etm0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&rpm_etm0_out_funnel_center>;
+				};
+			};
+
+			port@2 {
+				reg = <2>;
+				funnel_center_in_dbgui: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&dbgui_out_funnel_center>;
+				};
+			};
+		};
+	};
+
+	funnel_right: funnel@6120000 {
+		compatible = "arm,primecell";
+
+		reg = <0x6120000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-right";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				funnel_right_out_funnel_in0: endpoint {
+					remote-endpoint =
+						<&funnel_in0_in_funnel_right>;
+				};
+			};
+
+			port@1 {
+				reg = <1>;
+				funnel_right_in_modem_etm0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&modem_etm0_out_funnel_right>;
+				};
+			};
+
+			port@2 {
+				reg = <2>;
+				funnel_right_in_funnel_apss: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_apss_out_funnel_right>;
+				};
+			};
+		};
+	};
+
+	funnel_cam: funnel@6132000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6132000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-cam";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		port {
+			funnel_cam_out_funnel_mm: endpoint {
+				remote-endpoint = <&funnel_mm_in_funnel_cam>;
+			};
+		};
+	};
+
+	funnel_apss: funnel@61a1000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x61a1000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-apss";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				funnel_apss_out_funnel_right: endpoint {
+					remote-endpoint =
+						<&funnel_right_in_funnel_apss>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				funnel_apss0_in_etm0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm0_out_funnel_apss0>;
+				};
+			};
+
+			port@2 {
+				reg = <1>;
+				funnel_apss0_in_etm1: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm1_out_funnel_apss0>;
+				};
+			};
+
+			port@3 {
+				reg = <2>;
+				funnel_apss0_in_etm2: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm2_out_funnel_apss0>;
+				};
+			};
+
+			port@4 {
+				reg = <3>;
+				funnel_apss0_in_etm3: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&etm3_out_funnel_apss0>;
+				};
+			};
+		};
+	};
+
+	etm0: etm@61bc000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb95d>;
+
+		reg = <0x61bc000 0x1000>;
+		cpu = <&CPU0>;
+		reg-names = "etm-base";
+
+		coresight-name = "coresight-etm0";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+		port {
+			etm0_out_funnel_apss0: endpoint {
+				remote-endpoint = <&funnel_apss0_in_etm0>;
+			};
+		};
+	};
+
+	etm1: etm@61bd000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb95d>;
+
+		reg = <0x61bd000 0x1000>;
+		cpu = <&CPU1>;
+		coresight-name = "coresight-etm1";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		port {
+			etm1_out_funnel_apss0: endpoint {
+				remote-endpoint = <&funnel_apss0_in_etm1>;
+			};
+		};
+	};
+
+	etm2: etm@61be000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb95d>;
+
+		reg = <0x61be000 0x1000>;
+		cpu = <&CPU2>;
+		coresight-name = "coresight-etm2";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		port {
+			etm2_out_funnel_apss0: endpoint {
+				remote-endpoint = <&funnel_apss0_in_etm2>;
+			};
+		};
+	};
+
+	etm3: etm@61bf000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb95d>;
+
+		reg = <0x61bf000 0x1000>;
+		cpu = <&CPU3>;
+		coresight-name = "coresight-etm3";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		port {
+			etm3_out_funnel_apss0: endpoint {
+				remote-endpoint = <&funnel_apss0_in_etm3>;
+			};
+		};
+	};
+
+	stm: stm@6002000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b962>;
+
+		reg = <0x6002000 0x1000>,
+		      <0x9280000 0x180000>;
+		reg-names = "stm-base", "stm-data-base";
+
+		coresight-name = "coresight-stm";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		port {
+			stm_out_funnel_in0: endpoint {
+				remote-endpoint = <&funnel_in0_in_stm>;
+			};
+		};
+	};
+
+	cti0: cti@6010000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x6010000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti0";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti1: cti@6011000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x6011000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti1";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti2: cti@6012000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x6012000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti2";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti3: cti@6013000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x6013000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti3";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti4: cti@6014000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x6014000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti4";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti5: cti@6015000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x6015000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti5";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti6: cti@6016000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x6016000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti6";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti7: cti@6017000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x6017000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti7";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti8: cti@6018000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x6018000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti8";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti9: cti@6019000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x6019000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti9";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti10: cti@601a000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x601a000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti10";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti11: cti@601b000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x601b000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti11";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti12: cti@601c000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x601c000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti12";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti13: cti@601d000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x601d000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti13";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti14: cti@601e000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x601e000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti14";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti15: cti@601f000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x601f000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti15";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_cpu0: cti@61b8000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x61b8000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-cpu0";
+		cpu = <&CPU0>;
+		qcom,cti-save;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_cpu1: cti@61b9000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x61b9000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-cpu1";
+		cpu = <&CPU1>;
+		qcom,cti-save;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_cpu2: cti@61ba000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x61ba000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-cpu2";
+		cpu = <&CPU2>;
+		qcom,cti-save;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_cpu3: cti@61bb000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x61bb000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-cpu3";
+		cpu = <&CPU3>;
+		qcom,cti-save;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_modem_cpu0: cti@6124000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x6124000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-modem-cpu0";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	/* Proto CTI */
+	cti_wcn_cpu0: cti@6139000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x6139000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-wcn-cpu0";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	/* Venus CTI */
+	cti_video_cpu0: cti@6134000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x6134000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-video-cpu0";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	/* LPASS CTI */
+	cti_audio_cpu0: cti@613c000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x613c000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-audio-cpu0";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	/* RPM CTI */
+	cti_rpm_cpu0: cti@610c000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+
+		reg = <0x610c000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-rpm-cpu0";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	/* Proto ETM */
+	wcn_etm0 {
+		compatible = "qcom,coresight-remote-etm";
+		coresight-name = "coresight-wcn-etm0";
+		qcom,inst-id = <3>;
+
+		port {
+			wcn_etm0_out_funnel_mm: endpoint {
+				remote-endpoint = <&funnel_mm_in_wcn_etm0>;
+			};
+		};
+	};
+
+	rpm_etm0 {
+		compatible = "qcom,coresight-remote-etm";
+		coresight-name = "coresight-rpm-etm0";
+		qcom,inst-id = <4>;
+
+		port {
+			rpm_etm0_out_funnel_center: endpoint {
+				remote-endpoint = <&funnel_center_in_rpm_etm0>;
+			};
+		};
+	};
+
+	/* LPASS ETM */
+	audio_etm0 {
+		compatible = "qcom,coresight-remote-etm";
+		coresight-name = "coresight-audio-etm0";
+		qcom,inst-id = <5>;
+
+		port {
+			audio_etm0_out_funnel_mm: endpoint {
+				remote-endpoint = <&funnel_mm_in_audio_etm0>;
+			};
+		};
+	};
+
+	modem_etm0 {
+		compatible = "qcom,coresight-remote-etm";
+		coresight-name = "coresight-modem-etm0";
+		qcom,inst-id = <11>;
+
+		port {
+			modem_etm0_out_funnel_right: endpoint {
+				remote-endpoint = <&funnel_right_in_modem_etm0>;
+			};
+		};
+	};
+
+	csr: csr@6001000 {
+		compatible = "qcom,coresight-csr";
+		reg = <0x6001000 0x1000>;
+		reg-names = "csr-base";
+
+		coresight-name = "coresight-csr";
+
+		qcom,usb-bam-support;
+		qcom,hwctrl-set-support;
+		qcom,set-byte-cntr-support;
+
+		qcom,blk-size = <1>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+
+	dbgui: dbgui@6108000 {
+		compatible = "qcom,coresight-dbgui";
+		reg = <0x6108000 0x1000>;
+		reg-names = "dbgui-base";
+
+		coresight-name = "coresight-dbgui";
+
+		qcom,dbgui-addr-offset = <0x30>;
+		qcom,dbgui-data-offset = <0x130>;
+		qcom,dbgui-size = <32>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		port {
+			dbgui_out_funnel_center: endpoint {
+				remote-endpoint = <&funnel_center_in_dbgui>;
+			};
+		};
+	};
+
+	tpda: tpda@6003000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b969>;
+
+		reg = <0x6003000 0x1000>;
+		reg-names = "tpda-base";
+
+		coresight-name = "coresight-tpda";
+
+		qcom,tpda-atid = <64>;
+		qcom,cmb-elem-size = <0 32>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				tpda_out_funnel_in0: endpoint {
+					remote-endpoint = <&funnel_in0_in_tpda>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				tpda_in_tpdm_dcc: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_dcc_out_tpda>;
+				};
+			};
+		};
+	};
+
+	tpdm_dcc: tpdm@6110000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b968>;
+
+		reg = <0x6110000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-dcc";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+
+		port {
+			tpdm_dcc_out_tpda: endpoint {
+				remote-endpoint = <&tpda_in_tpdm_dcc>;
+			};
+		};
+	};
+
+	hwevent: hwevent@6101000 {
+		compatible = "qcom,coresight-hwevent";
+		reg = <0x6101000 0x148>,
+		      <0x6101fb0 0x4>,
+		      <0x6121000 0x148>,
+		      <0x6121fb0 0x4>,
+		      <0x6131000 0x148>,
+		      <0x6131fb0 0x4>,
+		      <0x78c5010 0x4>,
+		      <0x7885010 0x4>;
+		reg-names = "center-wrapper-mux", "center-wrapper-lockaccess",
+			    "right-wrapper-mux", "right-wrapper-lockaccess",
+			    "mm-wrapper-mux", "mm-wrapper-lockaccess",
+			    "usbbam-mux", "blsp-mux";
+
+		coresight-name = "coresight-hwevent";
+		coresight-csr = <&csr>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "apb_pclk";
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8917-cpu.dtsi b/arch/arm64/boot/dts/qcom/msm8917-cpu.dtsi
index 792d5d1..5a242db 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-cpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-cpu.dtsi
@@ -117,3 +117,46 @@
 
 	};
 };
+
+&soc {
+	cpuss_dump {
+		compatible = "qcom,cpuss-dump";
+		qcom,l2_dump1 {
+			/* L2 cache dump for A53 cluster */
+			qcom,dump-node = <&L2_1>;
+			qcom,dump-id = <0xC1>;
+		};
+		qcom,l1_i_cache100 {
+			qcom,dump-node = <&L1_I_100>;
+			qcom,dump-id = <0x60>;
+		};
+		qcom,l1_i_cache101 {
+			qcom,dump-node = <&L1_I_101>;
+			qcom,dump-id = <0x61>;
+		};
+		qcom,l1_i_cache102 {
+			qcom,dump-node = <&L1_I_102>;
+			qcom,dump-id = <0x62>;
+		};
+		qcom,l1_i_cache103 {
+			qcom,dump-node = <&L1_I_103>;
+			qcom,dump-id = <0x63>;
+		};
+		qcom,l1_d_cache100 {
+			qcom,dump-node = <&L1_D_100>;
+			qcom,dump-id = <0x80>;
+		};
+		qcom,l1_d_cache101 {
+			qcom,dump-node = <&L1_D_101>;
+			qcom,dump-id = <0x81>;
+		};
+		qcom,l1_d_cache102 {
+			qcom,dump-node = <&L1_D_102>;
+			qcom,dump-id = <0x82>;
+		};
+		qcom,l1_d_cache103 {
+			qcom,dump-node = <&L1_D_103>;
+			qcom,dump-id = <0x83>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8917-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8917-mtp.dtsi
index 800ea1c..c79b9f8 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-mtp.dtsi
@@ -68,6 +68,57 @@
 	status = "ok";
 };
 
+&soc {
+	hbtp {
+		compatible = "qcom,hbtp-input";
+		vcc_ana-supply = <&pm8937_l10>;
+		vcc_dig-supply = <&pm8937_l5>;
+		qcom,afe-load = <50000>;
+		qcom,afe-vtg-min = <2850000>;
+		qcom,afe-vtg-max = <2850000>;
+		qcom,dig-load = <15000>;
+		qcom,dig-vtg-min = <1800000>;
+		qcom,dig-vtg-max = <1800000>;
+	};
+
+	gpio_keys {
+		compatible = "gpio-keys";
+		input-name = "gpio-keys";
+		pinctrl-names = "default";
+		pinctrl-0 = <&gpio_key_active>;
+
+		camera_focus {
+			label = "camera_focus";
+			gpios = <&tlmm 128 0x1>;
+			linux,input-type = <1>;
+			linux,code = <0x210>;
+			debounce-interval = <15>;
+			linux,can-disable;
+			gpio-key,wakeup;
+		};
+
+		camera_snapshot {
+			label = "camera_snapshot";
+			gpios = <&tlmm 127 0x1>;
+			linux,input-type = <1>;
+			linux,code = <0x2fe>;
+			debounce-interval = <15>;
+			linux,can-disable;
+			gpio-key,wakeup;
+		};
+
+		vol_up {
+			label = "volume_up";
+			gpios = <&tlmm 91 0x1>;
+			linux,input-type = <1>;
+			linux,code = <115>;
+			debounce-interval = <15>;
+			linux,can-disable;
+			gpio-key,wakeup;
+		};
+	};
+};
+
 #include "msm8937-mdss-panels.dtsi"
 
 &mdss_mdp {
diff --git a/arch/arm64/boot/dts/qcom/msm8917-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/msm8917-pinctrl.dtsi
index b9229e1..858936d 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-pinctrl.dtsi
@@ -15,13 +15,14 @@
 	tlmm: pinctrl@1000000 {
 		compatible = "qcom,msm8917-pinctrl";
 		reg = <0x1000000 0x300000>;
-		interrupts = <0 208 0>;
+		interrupts-extended = <&wakegic GIC_SPI 208 IRQ_TYPE_NONE>;
 		gpio-controller;
 		#gpio-cells = <2>;
 		interrupt-controller;
+		interrupt-parent = <&wakegpio>;
 		#interrupt-cells = <2>;
 
-
+#include "msm8917-camera-pinctrl.dtsi"
 		/* add pingrp for touchscreen */
 		pmx_ts_int_active {
 			ts_int_active: ts_int_active {
diff --git a/arch/arm64/boot/dts/qcom/msm8917-pm.dtsi b/arch/arm64/boot/dts/qcom/msm8917-pm.dtsi
index 6200b4e..a3b4679 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-pm.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-pm.dtsi
@@ -31,7 +31,6 @@
 
 	qcom,lpm-levels {
 		compatible = "qcom,lpm-levels";
-		qcom,use-psci;
 		#address-cells = <1>;
 		#size-cells = <0>;
 
@@ -40,8 +39,6 @@
 			#address-cells = <1>;
 			#size-cells = <0>;
 			label = "perf";
-			qcom,spm-device-names = "l2";
-			qcom,default-level=<0>;
 			qcom,psci-mode-shift = <4>;
 			qcom,psci-mode-mask = <0xf>;
 
@@ -98,11 +95,12 @@
 				#size-cells = <0>;
 				qcom,psci-mode-shift = <0>;
 				qcom,psci-mode-mask = <0xf>;
+				qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3>;
 
 				qcom,pm-cpu-level@0 {
 					reg = <0>;
-					qcom,psci-cpu-mode = <0>;
 					label = "wfi";
+					qcom,psci-cpu-mode = <1>;
 					qcom,latency-us = <12>;
 					qcom,ss-power = <463>;
 					qcom,energy-overhead = <23520>;
@@ -111,8 +109,8 @@
 
 				qcom,pm-cpu-level@1 {
 					reg = <1>;
-					qcom,psci-cpu-mode = <3>;
 					label = "pc";
+					qcom,psci-cpu-mode = <3>;
 					qcom,latency-us = <180>;
 					qcom,ss-power = <429>;
 					qcom,energy-overhead = <162991>;
@@ -125,7 +123,17 @@
 		};
 	};
 
-	qcom,cpu-sleep-status {
-		compatible = "qcom,cpu-sleep-status";
+	qcom,rpm-stats@29dba0 {
+		compatible = "qcom,rpm-stats";
+		reg = <0x200000 0x1000>, <0x290014 0x4>;
+		reg-names = "phys_addr_base", "offset_addr";
+	};
+
+	qcom,rpm-master-stats@60150 {
+		compatible = "qcom,rpm-master-stats";
+		reg = <0x60150 0x5000>;
+		qcom,masters = "APSS", "MPSS", "PRONTO", "TZ", "LPASS";
+		qcom,master-stats-version = <2>;
+		qcom,master-offset = <4096>;
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8917-pmi8937-cdp.dts b/arch/arm64/boot/dts/qcom/msm8917-pmi8937-cdp.dts
new file mode 100644
index 0000000..9d8a2eb
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8917-pmi8937-cdp.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8917.dtsi"
+#include "msm8917-cdp.dtsi"
+#include "msm8917-pmi8937.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. MSM8917-PMI8937 CDP";
+	compatible = "qcom,msm8917-cdp", "qcom,msm8917", "qcom,cdp";
+	qcom,board-id= <1 0>;
+	qcom,pmic-id = <0x10019 0x020037 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8917-pmi8937-qrd-sku5.dts b/arch/arm64/boot/dts/qcom/msm8917-pmi8937-qrd-sku5.dts
index 29ef47c..832a0ab 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-pmi8937-qrd-sku5.dts
+++ b/arch/arm64/boot/dts/qcom/msm8917-pmi8937-qrd-sku5.dts
@@ -114,4 +114,13 @@
 		qcom,key-codes = <139 172 158>;
 		qcom,y-offset = <0>;
 	};
+
+	led_flash0: qcom,camera-flash {
+		cell-index = <0>;
+		compatible = "qcom,camera-flash";
+		qcom,flash-type = <1>;
+		qcom,flash-source = <&pmi8937_flash0>;
+		qcom,torch-source = <&pmi8937_torch0>;
+		qcom,switch-source = <&pmi8937_switch>;
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8917-pmi8937-rcm.dts b/arch/arm64/boot/dts/qcom/msm8917-pmi8937-rcm.dts
new file mode 100644
index 0000000..c7fe115
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8917-pmi8937-rcm.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8917.dtsi"
+#include "msm8917-cdp.dtsi"
+#include "msm8917-pmi8937.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. MSM8917-PMI8937 RCM";
+	compatible = "qcom,msm8917-cdp", "qcom,msm8917", "qcom,cdp";
+	qcom,board-id= <21 0>;
+	qcom,pmic-id = <0x10019 0x020037 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8917-pmi8937.dtsi b/arch/arm64/boot/dts/qcom/msm8917-pmi8937.dtsi
index 3b24ab7..91a290f 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-pmi8937.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-pmi8937.dtsi
@@ -16,7 +16,7 @@
 &qpnp_smbcharger {
 	qcom,chg-led-sw-controls;
 	qcom,chg-led-support;
-	/delete-property/ dpdm-supply;
+	dpdm-supply = <&usb_otg>;
 };
 
 &usb_otg {
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dts b/arch/arm64/boot/dts/qcom/msm8917-pmi8940-cdp.dts
similarity index 66%
copy from arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dts
copy to arch/arm64/boot/dts/qcom/msm8917-pmi8940-cdp.dts
index 325accf..9785a4f 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dts
+++ b/arch/arm64/boot/dts/qcom/msm8917-pmi8940-cdp.dts
@@ -13,15 +13,13 @@
 
 /dts-v1/;
 
-#include "apq8053-lite-lenovo-v1.0.dtsi"
+#include "msm8917.dtsi"
+#include "msm8917-cdp.dtsi"
+#include "msm8917-pmi8940.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. APQ8053 Lite Lenovo v1.0 Board";
-	compatible = "qcom,apq8053-lite-dragonboard", "qcom,apq8053",
-			"qcom,dragonboard";
-	qcom,board-id= <0x01010020 0>;
-};
-
-&blsp2_uart0 {
-	status = "okay";
+	model = "Qualcomm Technologies, Inc. MSM8917-PMI8940 CDP";
+	compatible = "qcom,msm8917-cdp", "qcom,msm8917", "qcom,cdp";
+	qcom,board-id = <1 0>;
+	qcom,pmic-id = <0x10019 0x020040 0x0 0x0>;
 };
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dts b/arch/arm64/boot/dts/qcom/msm8917-pmi8940-rcm.dts
similarity index 66%
copy from arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dts
copy to arch/arm64/boot/dts/qcom/msm8917-pmi8940-rcm.dts
index 325accf..2cab716 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dts
+++ b/arch/arm64/boot/dts/qcom/msm8917-pmi8940-rcm.dts
@@ -13,15 +13,13 @@
 
 /dts-v1/;
 
-#include "apq8053-lite-lenovo-v1.0.dtsi"
+#include "msm8917.dtsi"
+#include "msm8917-cdp.dtsi"
+#include "msm8917-pmi8940.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. APQ8053 Lite Lenovo v1.0 Board";
-	compatible = "qcom,apq8053-lite-dragonboard", "qcom,apq8053",
-			"qcom,dragonboard";
-	qcom,board-id= <0x01010020 0>;
-};
-
-&blsp2_uart0 {
-	status = "okay";
+	model = "Qualcomm Technologies, Inc. MSM8917-PMI8940 RCM";
+	compatible = "qcom,msm8917-cdp", "qcom,msm8917", "qcom,cdp";
+	qcom,board-id = <21 0>;
+	qcom,pmic-id = <0x10019 0x020040 0x0 0x0>;
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8917-pmi8940.dtsi b/arch/arm64/boot/dts/qcom/msm8917-pmi8940.dtsi
index 528bde4..90226da 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-pmi8940.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-pmi8940.dtsi
@@ -16,7 +16,7 @@
 &qpnp_smbcharger {
 	qcom,chg-led-sw-controls;
 	qcom,chg-led-support;
-	/delete-property/ dpdm-supply;
+	dpdm-supply = <&usb_otg>;
 };
 
 &usb_otg {
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dts b/arch/arm64/boot/dts/qcom/msm8917-pmi8950-cdp-mirror-lake-touch.dts
similarity index 62%
copy from arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dts
copy to arch/arm64/boot/dts/qcom/msm8917-pmi8950-cdp-mirror-lake-touch.dts
index 325accf..4a38b6f 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dts
+++ b/arch/arm64/boot/dts/qcom/msm8917-pmi8950-cdp-mirror-lake-touch.dts
@@ -13,15 +13,14 @@
 
 /dts-v1/;
 
-#include "apq8053-lite-lenovo-v1.0.dtsi"
+#include "msm8917.dtsi"
+#include "msm8917-cdp.dtsi"
+#include "msm8917-pmi8950.dtsi"
+#include "msm8917-pmi8950-cdp-mirror-lake-touch.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. APQ8053 Lite Lenovo v1.0 Board";
-	compatible = "qcom,apq8053-lite-dragonboard", "qcom,apq8053",
-			"qcom,dragonboard";
-	qcom,board-id= <0x01010020 0>;
-};
-
-&blsp2_uart0 {
-	status = "okay";
+	model = "Qualcomm Technologies, Inc. MSM8917-PMI8950 CDP ML Touch";
+	compatible = "qcom,msm8917-cdp", "qcom,msm8917", "qcom,cdp";
+	qcom,board-id = <1 4>;
+	qcom,pmic-id = <0x10019 0x010011 0x0 0x0>;
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8917-pmi8950-cdp-mirror-lake-touch.dtsi b/arch/arm64/boot/dts/qcom/msm8917-pmi8950-cdp-mirror-lake-touch.dtsi
new file mode 100644
index 0000000..4701101
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8917-pmi8950-cdp-mirror-lake-touch.dtsi
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "pmi8950.dtsi"
+#include "msm8917-cdp-mirror-lake-touch.dtsi"
+#include "msm8917-audio-cdp.dtsi"
+
+&soc {
+	led_flash0: qcom,camera-flash {
+		cell-index = <0>;
+		compatible = "qcom,camera-flash";
+		qcom,flash-type = <1>;
+		qcom,flash-source = <&pmi8950_flash0 &pmi8950_flash1>;
+		qcom,torch-source = <&pmi8950_torch0 &pmi8950_torch1>;
+		qcom,switch-source = <&pmi8950_switch>;
+	};
+
+	bluetooth: bt_qca6174 {
+		compatible = "qca,qca6174";
+		qca,bt-reset-gpio = <&tlmm 129 0>; /* BT_EN */
+	};
+};
+
+&qpnp_smbcharger {
+	/delete-property/ dpdm-supply;
+};
+
+&pm8937_gpios {
+	gpio@c400 {
+		qcom,mode = <0>;
+		qcom,output-type = <0>;
+		qcom,pull = <0>;
+		qcom,vin-sel = <2>;
+		qcom,out-strength = <3>;
+		qcom,src-sel = <0>;
+		qcom,master-en = <1>;
+		status = "okay";
+	};
+};
+
+&i2c_5 { /* BLSP2 QUP1 */
+	nq@28 {
+		compatible = "qcom,nq-nci";
+		reg = <0x28>;
+		qcom,nq-irq = <&tlmm 17 0x00>;
+		qcom,nq-ven = <&tlmm 16 0x00>;
+		qcom,nq-firm = <&tlmm 130 0x00>;
+		qcom,nq-clkreq = <&pm8937_gpios 5 0x00>;
+		interrupt-parent = <&tlmm>;
+		qcom,clk-src = "BBCLK2";
+		interrupts = <17 0>;
+		interrupt-names = "nfc_irq";
+		pinctrl-names = "nfc_active", "nfc_suspend";
+		pinctrl-0 = <&nfc_int_active &nfc_disable_active>;
+		pinctrl-1 = <&nfc_int_suspend &nfc_disable_suspend>;
+		clocks = <&clock_gcc clk_bb_clk2_pin>;
+		clock-names = "ref_clk";
+	};
+};
+
+&mdss_dsi0 {
+	lab-supply = <&lab_regulator>;
+	ibb-supply = <&ibb_regulator>;
+};
+
+&labibb {
+	status = "ok";
+	qpnp,qpnp-labibb-mode = "lcd";
+};
+
+&ibb_regulator {
+	qcom,qpnp-ibb-discharge-resistor = <32>;
+};
+
+&dsi_panel_pwr_supply {
+	qcom,panel-supply-entry@2 {
+		reg = <2>;
+		qcom,supply-name = "lab";
+		qcom,supply-min-voltage = <4600000>;
+		qcom,supply-max-voltage = <6000000>;
+		qcom,supply-enable-load = <100000>;
+		qcom,supply-disable-load = <100>;
+	};
+
+	qcom,panel-supply-entry@3 {
+		reg = <3>;
+		qcom,supply-name = "ibb";
+		qcom,supply-min-voltage = <4600000>;
+		qcom,supply-max-voltage = <6000000>;
+		qcom,supply-enable-load = <100000>;
+		qcom,supply-disable-load = <100>;
+		qcom,supply-post-on-sleep = <20>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8917-pmi8950-cdp.dts b/arch/arm64/boot/dts/qcom/msm8917-pmi8950-cdp.dts
new file mode 100644
index 0000000..a32e2b3
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8917-pmi8950-cdp.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8917.dtsi"
+#include "msm8917-cdp.dtsi"
+#include "msm8917-pmi8950.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. MSM8917-PMI8950 CDP";
+	compatible = "qcom,msm8917-cdp", "qcom,msm8917", "qcom,cdp";
+	qcom,board-id= <1 0>;
+	qcom,pmic-id = <0x10019 0x010011 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8917-pmi8950-ext-codec-cdp.dts b/arch/arm64/boot/dts/qcom/msm8917-pmi8950-ext-codec-cdp.dts
new file mode 100644
index 0000000..91c4f3a
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8917-pmi8950-ext-codec-cdp.dts
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8917.dtsi"
+#include "msm8917-cdp.dtsi"
+#include "msm8917-pmi8950.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. MSM8917 External Audio Codec CDP";
+	compatible = "qcom,msm8917-cdp", "qcom,msm8917", "qcom,cdp";
+	qcom,board-id= <1 1>;
+	qcom,pmic-id = <0x10019 0x010011 0x0 0x0>;
+};
+
+&pm8937_gpios {
+	gpio@c000 {
+		status = "ok";
+		qcom,mode = <1>;
+		qcom,pull = <5>;
+		qcom,vin-sel = <0>;
+		qcom,src-sel = <2>;
+		qcom,master-en = <1>;
+		qcom,out-strength = <2>;
+	};
+
+	gpio@c600 {
+		status = "ok";
+		qcom,mode = <1>;
+		qcom,pull = <5>;
+		qcom,vin-sel = <0>;
+		qcom,src-sel = <0>;
+		qcom,master-en = <1>;
+		qcom,out-strength = <2>;
+	};
+};
+
+&slim_msm {
+	status = "okay";
+};
+
+&wcd9xxx_intc {
+	status = "okay";
+};
+
+&clock_audio {
+	status = "okay";
+};
+
+&wcd9335 {
+	status = "okay";
+};
+
+&wcd_rst_gpio {
+	status = "okay";
+};
+
+&ext_codec {
+	status = "okay";
+};
+
+&int_codec {
+	status = "disabled";
+};
+
+&wsa881x_i2c_f {
+	status = "disabled";
+};
+
+&wsa881x_i2c_45 {
+	status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8917-pmi8950-mtp.dts b/arch/arm64/boot/dts/qcom/msm8917-pmi8950-mtp.dts
index 3fe60a3..5b458b2 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-pmi8950-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8917-pmi8950-mtp.dts
@@ -14,7 +14,8 @@
 /dts-v1/;
 
 #include "msm8917.dtsi"
-#include "msm8917-pmi8950-mtp.dtsi"
+#include "msm8917-mtp.dtsi"
+#include "msm8917-pmi8950.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. MSM8917-PMI8950 MTP";
@@ -22,3 +23,23 @@
 	qcom,board-id= <8 0>;
 	qcom,pmic-id = <0x10019 0x010011 0x0 0x0>;
 };
+
+&blsp1_uart1 {
+	status = "ok";
+};
+
+&vendor {
+	mtp_batterydata: qcom,battery-data {
+		qcom,batt-id-range-pct = <15>;
+		#include "batterydata-itech-3000mah.dtsi"
+		#include "batterydata-ascent-3450mAh.dtsi"
+	};
+};
+
+&qpnp_fg {
+	qcom,battery-data = <&mtp_batterydata>;
+};
+
+&qpnp_smbcharger {
+	qcom,battery-data = <&mtp_batterydata>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8917-pmi8950-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8917-pmi8950-mtp.dtsi
index 4da1384..f882917 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-pmi8950-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-pmi8950-mtp.dtsi
@@ -59,7 +59,7 @@
 	qcom,battery-data = <&mtp_batterydata>;
 	qcom,chg-led-sw-controls;
 	qcom,chg-led-support;
-	/delete-property/ dpdm-supply;
+	dpdm-supply = <&usb_otg>;
 };
 
 &usb_otg {
diff --git a/arch/arm64/boot/dts/qcom/msm8917-pmi8950-rcm.dts b/arch/arm64/boot/dts/qcom/msm8917-pmi8950-rcm.dts
new file mode 100644
index 0000000..a91bea1
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8917-pmi8950-rcm.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8917.dtsi"
+#include "msm8917-cdp.dtsi"
+#include "msm8917-pmi8950.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. MSM8917-PMI8950 RCM";
+	compatible = "qcom,msm8917-cdp", "qcom,msm8917", "qcom,cdp";
+	qcom,board-id= <21 0>;
+	qcom,pmic-id = <0x10019 0x010011 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8917-pmi8950.dtsi b/arch/arm64/boot/dts/qcom/msm8917-pmi8950.dtsi
new file mode 100644
index 0000000..e86b9d7
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8917-pmi8950.dtsi
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "pmi8950.dtsi"
+
+&qpnp_smbcharger {
+	qcom,chg-led-sw-controls;
+	qcom,chg-led-support;
+	dpdm-supply = <&usb_otg>;
+};
+
+&usb_otg {
+	extcon = <&qpnp_smbcharger>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8917-qrd.dtsi b/arch/arm64/boot/dts/qcom/msm8917-qrd.dtsi
index fae258d0..1e5393b 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-qrd.dtsi
@@ -12,6 +12,7 @@
  */
 
 #include "msm8917-pinctrl.dtsi"
+#include "msm8917-camera-sensor-qrd.dtsi"
 
 &blsp1_uart2 {
 	status = "ok";
@@ -87,9 +88,8 @@
 	gpio_keys {
 		compatible = "gpio-keys";
 		input-name = "gpio-keys";
-		pinctrl-names = "tlmm_gpio_key_active","tlmm_gpio_key_suspend";
+		pinctrl-names = "default";
 		pinctrl-0 = <&gpio_key_active>;
-		pinctrl-1 = <&gpio_key_suspend>;
 
 		vol_up {
 			label = "volume_up";
@@ -97,6 +97,8 @@
 			linux,input-type = <1>;
 			linux,code = <115>;
 			debounce-interval = <15>;
+			linux,can-disable;
+			gpio-key,wakeup;
 		};
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8917.dtsi b/arch/arm64/boot/dts/qcom/msm8917.dtsi
index 36db486..1d5fe79 100644
--- a/arch/arm64/boot/dts/qcom/msm8917.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917.dtsi
@@ -21,7 +21,7 @@
 	model = "Qualcomm Technologies, Inc. MSM8917";
 	compatible = "qcom,msm8917";
 	qcom,msm-id = <303 0x0>, <308 0x0>, <309 0x0>;
-	interrupt-parent = <&intc>;
+	interrupt-parent = <&wakegic>;
 
 	chosen {
 		bootargs = "sched_enable_hmp=1";
@@ -157,10 +157,12 @@
 };
 
 #include "msm8917-pinctrl.dtsi"
+#include "msm8917-camera.dtsi"
 #include "msm8917-cpu.dtsi"
 #include "msm8917-pm.dtsi"
 #include "msm8917-ion.dtsi"
 #include "msm8917-smp2p.dtsi"
+#include "msm8917-coresight.dtsi"
 #include "msm8917-bus.dtsi"
 #include "msm8917-mdss.dtsi"
 #include "msm8917-mdss-pll.dtsi"
@@ -183,20 +185,32 @@
 			<0x0b002000 0x1000>;
 	};
 
+	dcc: dcc@b3000 {
+		compatible = "qcom,dcc";
+		reg = <0xb3000 0x1000>,
+			<0xb4000 0x2000>;
+		reg-names = "dcc-base", "dcc-ram-base";
+
+		clocks = <&clock_gcc clk_gcc_dcc_clk>;
+		clock-names = "apb_pclk";
+
+		qcom,save-reg;
+	};
+
 	wakegic: wake-gic {
-		compatible = "qcom,mpm-gic", "qcom,mpm-gic-msm8937";
+		compatible = "qcom,mpm-gic-msm8937", "qcom,mpm-gic";
 		interrupts = <GIC_SPI 171 IRQ_TYPE_EDGE_RISING>;
 		reg = <0x601d0 0x1000>,
 			<0xb011008 0x4>;  /* MSM_APCS_GCC_BASE 4K */
 		reg-names = "vmpm", "ipc";
-		qcom,num-mpm-irqs = <96>;
+		qcom,num-mpm-irqs = <64>;
 		interrupt-controller;
 		interrupt-parent = <&intc>;
 		#interrupt-cells = <3>;
 	};
 
 	wakegpio: wake-gpio {
-		compatible = "qcom,mpm-gpio", "qcom,mpm-gpio-msm8937";
+		compatible = "qcom,mpm-gpio-msm8937", "qcom,mpm-gpio";
 		interrupt-controller;
 		interrupt-parent = <&intc>;
 		#interrupt-cells = <2>;
@@ -482,6 +496,23 @@
 		#clock-cells = <1>;
 	};
 
+	msm_cpufreq: qcom,msm-cpufreq {
+		compatible = "qcom,msm-cpufreq";
+		clock-names = "cpu0_clk", "cpu1_clk", "cpu2_clk",
+				"cpu3_clk";
+		clocks = <&clock_cpu clk_a53_bc_clk>,
+			 <&clock_cpu clk_a53_bc_clk>,
+			 <&clock_cpu clk_a53_bc_clk>,
+			 <&clock_cpu clk_a53_bc_clk>;
+
+		qcom,cpufreq-table =
+			 <  960000 >,
+			 < 1094400 >,
+			 < 1248000 >,
+			 < 1401000 >,
+			 < 1497600 >;
+	};
+
 	i2c_2: i2c@78b6000 { /* BLSP1 QUP2 */
 		compatible = "qcom,i2c-msm-v2";
 		#address-cells = <1>;
@@ -713,6 +744,24 @@
 		qcom,target-dev = <&cpubw>;
 	};
 
+	devfreq-cpufreq {
+		cpubw-cpufreq {
+		target-dev = <&cpubw>;
+		cpu-to-dev-map =
+			<  998400  4248 >,
+			< 1094400  4541 >,
+			< 1497600  5645 >;
+		};
+
+		mincpubw-cpufreq {
+			target-dev = <&mincpubw>;
+			cpu-to-dev-map =
+				<  998400 2270 >,
+				< 1094400 4248 >,
+				< 1497600 4248 >;
+		};
+	};
+
 	qcom,wdt@b017000 {
 		compatible = "qcom,msm-watchdog";
 		reg = <0xb017000 0x1000>;
@@ -724,6 +773,32 @@
 		qcom,wakeup-enable;
 	};
 
+	qcom,memshare {
+		compatible = "qcom,memshare";
+
+		qcom,client_1 {
+			compatible = "qcom,memshare-peripheral";
+			qcom,peripheral-size = <0x200000>;
+			qcom,client-id = <0>;
+			qcom,allocate-boot-time;
+			label = "modem";
+		};
+
+		qcom,client_2 {
+			compatible = "qcom,memshare-peripheral";
+			qcom,peripheral-size = <0x300000>;
+			qcom,client-id = <2>;
+			label = "modem";
+		};
+
+		mem_client_3_size: qcom,client_3 {
+			compatible = "qcom,memshare-peripheral";
+			qcom,peripheral-size = <0x0>;
+			qcom,client-id = <1>;
+			label = "modem";
+		};
+	};
+
 	spmi_bus: qcom,spmi@200f000 {
 		compatible = "qcom,spmi-pmic-arb";
 		reg = <0x200f000 0x1000>,
@@ -782,6 +857,64 @@
 
 	};
 
+	jtag_fuse: jtagfuse@a601c {
+		compatible = "qcom,jtag-fuse-v2";
+		reg = <0xa601c 0x8>;
+		reg-names = "fuse-base";
+	};
+
+	jtag_mm0: jtagmm@61bc000 {
+		compatible = "qcom,jtagv8-mm";
+		reg = <0x61bc000 0x1000>,
+			<0x61b0000 0x1000>;
+		reg-names = "etm-base", "debug-base";
+
+		qcom,coresight-jtagmm-cpu = <&CPU0>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	jtag_mm1: jtagmm@61bd000 {
+		compatible = "qcom,jtagv8-mm";
+		reg = <0x61bd000 0x1000>,
+			<0x61b2000 0x1000>;
+		reg-names = "etm-base", "debug-base";
+
+		qcom,coresight-jtagmm-cpu = <&CPU1>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			<&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	jtag_mm2: jtagmm@61be000 {
+		compatible = "qcom,jtagv8-mm";
+		reg = <0x61be000 0x1000>,
+			<0x61b4000 0x1000>;
+		reg-names = "etm-base", "debug-base";
+
+		qcom,coresight-jtagmm-cpu = <&CPU2>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			<&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	jtag_mm3: jtagmm@61bf000 {
+		compatible = "qcom,jtagv8-mm";
+		reg = <0x61bf000 0x1000>,
+			<0x61b6000 0x1000>;
+		reg-names = "etm-base", "debug-base";
+
+		qcom,coresight-jtagmm-cpu = <&CPU3>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
 	qcom,ipc-spinlock@1905000 {
 		compatible = "qcom,ipc-spinlock-sfpb";
 		reg = <0x1905000 0x8000>;
@@ -1141,6 +1274,10 @@
 		qcom,ce-opp-freq = <100000000>;
 	};
 
+	qcom,iris-fm {
+		compatible = "qcom,iris_fm";
+	};
+
 	qcom,mss@4080000 {
 		compatible = "qcom,pil-q6v55-mss";
 		reg = <0x04080000 0x100>,
@@ -1503,6 +1640,10 @@
 		qcom,has-vsys-adc-channel;
 		qcom,wcnss-adc_tm = <&pm8937_adc_tm>;
 	};
+
+	ssc_sensors: qcom,msm-ssc-sensors {
+		compatible = "qcom,msm-ssc-sensors";
+	};
 };
 
 #include "pm8937-rpm-regulator.dtsi"
@@ -1575,3 +1716,141 @@
 	qcom,clk-dis-wait-val = <0x5>;
 	status = "okay";
 };
+
+/* GPU overrides */
+&msm_gpu {
+
+	qcom,gpu-speed-bin = <0x6018 0x80000000 31>;
+	/delete-node/ qcom,gpu-pwrlevels;
+
+	qcom,gpu-pwrlevel-bins {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		compatible="qcom,gpu-pwrlevel-bins";
+
+		qcom,gpu-pwrlevels-0 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,speed-bin = <0>;
+			qcom,initial-pwrlevel = <3>;
+
+			/* TURBO */
+			qcom,gpu-pwrlevel@0 {
+				reg = <0>;
+				qcom,gpu-freq = <598000000>;
+				qcom,bus-freq = <7>;
+				qcom,bus-min = <7>;
+				qcom,bus-max = <7>;
+			};
+
+			/* NOM+ */
+			qcom,gpu-pwrlevel@1 {
+				reg = <1>;
+				qcom,gpu-freq = <523200000>;
+				qcom,bus-freq = <6>;
+				qcom,bus-min = <5>;
+				qcom,bus-max = <7>;
+			};
+
+			/* NOM */
+			qcom,gpu-pwrlevel@2 {
+				reg = <2>;
+				qcom,gpu-freq = <484800000>;
+				qcom,bus-freq = <5>;
+				qcom,bus-min = <4>;
+				qcom,bus-max = <6>;
+			};
+
+			/* SVS+ */
+			qcom,gpu-pwrlevel@3 {
+				reg = <3>;
+				qcom,gpu-freq = <400000000>;
+				qcom,bus-freq = <4>;
+				qcom,bus-min = <3>;
+				qcom,bus-max = <5>;
+			};
+
+			/* SVS */
+			qcom,gpu-pwrlevel@4 {
+				reg = <4>;
+				qcom,gpu-freq = <270000000>;
+				qcom,bus-freq = <3>;
+				qcom,bus-min = <1>;
+				qcom,bus-max = <3>;
+			};
+
+			/* XO */
+			qcom,gpu-pwrlevel@5 {
+				reg = <5>;
+				qcom,gpu-freq = <19200000>;
+				qcom,bus-freq = <0>;
+				qcom,bus-min = <0>;
+				qcom,bus-max = <0>;
+			};
+		};
+
+		qcom,gpu-pwrlevels-1 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,speed-bin = <1>;
+			qcom,initial-pwrlevel = <3>;
+
+			/* TURBO */
+			qcom,gpu-pwrlevel@0 {
+				reg = <0>;
+				qcom,gpu-freq = <650000000>;
+				qcom,bus-freq = <8>;
+				qcom,bus-min = <8>;
+				qcom,bus-max = <8>;
+			};
+
+			/* NOM+ */
+			qcom,gpu-pwrlevel@1 {
+				reg = <1>;
+				qcom,gpu-freq = <523200000>;
+				qcom,bus-freq = <6>;
+				qcom,bus-min = <5>;
+				qcom,bus-max = <7>;
+			};
+
+			/* NOM */
+			qcom,gpu-pwrlevel@2 {
+				reg = <2>;
+				qcom,gpu-freq = <484800000>;
+				qcom,bus-freq = <5>;
+				qcom,bus-min = <4>;
+				qcom,bus-max = <6>;
+			};
+
+			/* SVS+ */
+			qcom,gpu-pwrlevel@3 {
+				reg = <3>;
+				qcom,gpu-freq = <400000000>;
+				qcom,bus-freq = <4>;
+				qcom,bus-min = <3>;
+				qcom,bus-max = <5>;
+			};
+
+			/* SVS */
+			qcom,gpu-pwrlevel@4 {
+				reg = <4>;
+				qcom,gpu-freq = <270000000>;
+				qcom,bus-freq = <3>;
+				qcom,bus-min = <1>;
+				qcom,bus-max = <3>;
+			};
+
+			/* XO */
+			qcom,gpu-pwrlevel@5 {
+				reg = <5>;
+				qcom,gpu-freq = <19200000>;
+				qcom,bus-freq = <0>;
+				qcom,bus-min = <0>;
+				qcom,bus-max = <0>;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8937-audio.dtsi b/arch/arm64/boot/dts/qcom/msm8937-audio.dtsi
index 2e4d38e..ffc266f 100644
--- a/arch/arm64/boot/dts/qcom/msm8937-audio.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8937-audio.dtsi
@@ -240,16 +240,23 @@
 			"SpkrRight IN", "SPK2 OUT";
 
 		qcom,tasha-mclk-clk-freq = <9600000>;
+		qcom,cdc-us-euro-gpios = <&tlmm 63 0>;
+		qcom,msm-mbhc-hphl-swh = <0>;
+		qcom,msm-mbhc-gnd-swh = <0>;
+		qcom,cdc-us-eu-gpios = <&cdc_us_euro_sw>;
+		qcom,quin-mi2s-gpios = <&cdc_quin_mi2s_gpios>;
 
 		asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
 				<&loopback>, <&compress>, <&hostless>,
-				<&afe>, <&lsm>, <&routing>;
+				<&afe>, <&lsm>, <&routing>, <&cpe>,
+				<&pcm_noirq>;
 		asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
 				"msm-pcm-dsp.2", "msm-voip-dsp",
 				"msm-pcm-voice", "msm-pcm-loopback",
 				"msm-compress-dsp", "msm-pcm-hostless",
 				"msm-pcm-afe", "msm-lsm-client",
-				"msm-pcm-routing";
+				"msm-pcm-routing", "msm-cpe-lsm",
+				"msm-pcm-dsp-noirq";
 
 		asoc-cpu = <&dai_pri_auxpcm>,
 				<&dai_mi2s2>, <&dai_mi2s3>, <&dai_mi2s5>,
@@ -281,9 +288,6 @@
 
 		asoc-codec = <&stub_codec>, <&hdmi_dba>;
 		asoc-codec-names = "msm-stub-codec.1", "msm-hdmi-dba-codec-rx";
-		qcom,cdc-us-euro-gpios = <&tlmm 63 0>;
-		qcom,msm-mbhc-hphl-swh = <0>;
-		qcom,msm-mbhc-gnd-swh = <0>;
 
 		qcom,wsa-max-devs = <2>;
 		qcom,wsa-devs = <&wsa881x_211>, <&wsa881x_212>,
@@ -292,11 +296,19 @@
 				"SpkrLeft", "SpkrRight";
 	};
 
+	cpe: qcom,msm-cpe-lsm {
+		compatible = "qcom,msm-cpe-lsm";
+	};
+
 	wcd9xxx_intc: wcd9xxx-irq {
 		status = "disabled";
+		compatible = "qcom,wcd9xxx-irq";
+		interrupt-controller;
+		#interrupt-cells = <1>;
 		interrupt-parent = <&tlmm>;
-		interrupts = <73 0>;
 		qcom,gpio-connect = <&tlmm 73 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&wcd_intr_default>;
 	};
 
 	clock_audio: audio_ext_clk {
@@ -306,28 +318,61 @@
 		qcom,node_has_rpm_clock;
 		#clock-cells = <1>;
 		qcom,audio-ref-clk-gpio = <&pm8937_gpios 1 0>;
-		qcom,lpass-mclk-id = "pri_mclk";
 		clocks = <&clock_gcc clk_div_clk2>;
 		pinctrl-0 = <&cdc_mclk2_sleep>;
 		pinctrl-1 = <&cdc_mclk2_active>;
 	};
 
-	wcd_rst_gpio: wcd_gpio_ctrl {
+	wcd_rst_gpio: msm_cdc_pinctrl {
 		status = "disabled";
-		qcom,cdc-rst-n-gpio = <&tlmm 68 0>;
+		compatible = "qcom,msm-cdc-pinctrl";
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&cdc_reset_active>;
+		pinctrl-1 = <&cdc_reset_sleep>;
 	};
 };
 
 &slim_msm {
 	status = "disabled";
+
+	dai_slim: msm_dai_slim {
+		status = "disabled";
+		compatible = "qcom,msm-dai-slim";
+		elemental-addr = [ff ff ff fe 17 02];
+	};
+
 	wcd9335: tasha_codec {
 		status = "disabled";
 		compatible = "qcom,tasha-slim-pgd";
+		elemental-addr = [00 01 A0 01 17 02];
+
+		qcom,cdc-slim-ifd = "tasha-slim-ifd";
+		qcom,cdc-slim-ifd-elemental-addr = [00 00 A0 01 17 02];
+
+		interrupt-parent = <&wcd9xxx_intc>;
+		interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
+				17 18 19 20 21 22 23 24 25 26 27 28 29 30>;
+
+		qcom,wcd-rst-gpio-node = <&wcd_rst_gpio>;
+
 		clock-names = "wcd_clk", "wcd_native_clk";
 		clocks = <&clock_audio clk_audio_pmi_clk>,
 			<&clock_audio clk_audio_ap_clk2>;
 
-		qcom,cdc-reset-gpio = <&tlmm 68 0>;
+		qcom,cdc-static-supplies =
+				"cdc-vdd-buck",
+				"cdc-buck-sido",
+				"cdc-vdd-tx-h",
+				"cdc-vdd-rx-h",
+				"cdc-vdd-px";
+		qcom,cdc-on-demand-supplies = "cdc-vdd-mic-bias";
+		qcom,cdc-micbias1-mv = <1800>;
+		qcom,cdc-micbias2-mv = <1800>;
+		qcom,cdc-micbias3-mv = <1800>;
+		qcom,cdc-micbias4-mv = <1800>;
+
+		qcom,cdc-dmic-sample-rate = <2400000>;
+		qcom,cdc-mclk-clk-rate = <9600000>;
 
 		cdc-vdd-buck-supply = <&eldo2_pm8937>;
 		qcom,cdc-vdd-buck-voltage = <1800000 1800000>;
@@ -355,18 +400,6 @@
 	};
 };
 
-&pm8937_gpios {
-	gpio@c000 {
-		status = "ok";
-		qcom,mode = <1>;
-		qcom,pull = <5>;
-		qcom,vin-sel = <0>;
-		qcom,src-sel = <2>;
-		qcom,master-en = <1>;
-		qcom,out-strength = <2>;
-	};
-};
-
 &pm8937_1 {
 	pmic_analog_codec: analog-codec@f000 {
 		status = "okay";
diff --git a/arch/arm64/boot/dts/qcom/msm8937-coresight.dtsi b/arch/arm64/boot/dts/qcom/msm8937-coresight.dtsi
index b952908..e64af14 100644
--- a/arch/arm64/boot/dts/qcom/msm8937-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8937-coresight.dtsi
@@ -462,6 +462,8 @@
 
 		reg = <0x619c000 0x1000>;
 		cpu = <&CPU4>;
+
+		qcom,tupwr-disable;
 		coresight-name = "coresight-etm4";
 
 		clocks = <&clock_gcc clk_qdss_clk>,
@@ -481,6 +483,8 @@
 
 		reg = <0x619d000 0x1000>;
 		cpu = <&CPU5>;
+
+		qcom,tupwr-disable;
 		coresight-name = "coresight-etm5";
 
 		clocks = <&clock_gcc clk_qdss_clk>,
@@ -500,6 +504,8 @@
 
 		reg = <0x619e000 0x1000>;
 		cpu = <&CPU6>;
+
+		qcom,tupwr-disable;
 		coresight-name = "coresight-etm6";
 
 		clocks = <&clock_gcc clk_qdss_clk>,
@@ -519,6 +525,8 @@
 
 		reg = <0x619f000 0x1000>;
 		cpu = <&CPU7>;
+
+		qcom,tupwr-disable;
 		coresight-name = "coresight-etm7";
 
 		clocks = <&clock_gcc clk_qdss_clk>,
@@ -538,6 +546,8 @@
 
 		reg = <0x61bc000 0x1000>;
 		cpu = <&CPU0>;
+
+		qcom,tupwr-disable;
 		coresight-name = "coresight-etm0";
 
 		clocks = <&clock_gcc clk_qdss_clk>,
@@ -557,6 +567,8 @@
 
 		reg = <0x61bd000 0x1000>;
 		cpu = <&CPU1>;
+
+		qcom,tupwr-disable;
 		coresight-name = "coresight-etm1";
 
 		clocks = <&clock_gcc clk_qdss_clk>,
@@ -576,6 +588,8 @@
 
 		reg = <0x61be000 0x1000>;
 		cpu = <&CPU2>;
+
+		qcom,tupwr-disable;
 		coresight-name = "coresight-etm2";
 
 		clocks = <&clock_gcc clk_qdss_clk>,
@@ -594,6 +608,8 @@
 		arm,primecell-periphid = <0x000bb95d>;
 
 		reg = <0x61bf000 0x1000>;
+
+		qcom,tupwr-disable;
 		coresight-name = "coresight-etm3";
 		cpu = <&CPU3>;
 
@@ -957,26 +973,13 @@
 		clock-names = "apb_pclk";
 	};
 
-	cti_modem_cpu0: cti@6128000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b966>;
-
-		reg = <0x6128000 0x1000>;
-		reg-names = "cti-base";
-		coresight-name = "coresight-cti-modem-cpu0";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-	};
-
-	cti_modem_cpu1: cti@6124000{
+	cti_modem_cpu0: cti@6124000{
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x0003b966>;
 
 		reg = <0x6124000 0x1000>;
 		reg-names = "cti-base";
-		coresight-name = "coresight-cti-modem-cpu1";
+		coresight-name = "coresight-cti-modem-cpu0";
 
 		clocks = <&clock_gcc clk_qdss_clk>,
 			 <&clock_gcc clk_qdss_a_clk>;
diff --git a/arch/arm64/boot/dts/qcom/msm8937-interposer-sdm439-qrd.dts b/arch/arm64/boot/dts/qcom/msm8937-interposer-sdm439-qrd.dts
index 71157e2..2bad28b 100644
--- a/arch/arm64/boot/dts/qcom/msm8937-interposer-sdm439-qrd.dts
+++ b/arch/arm64/boot/dts/qcom/msm8937-interposer-sdm439-qrd.dts
@@ -22,10 +22,3 @@
 	qcom,board-id = <0xb 2>;
 	qcom,pmic-id = <0x010016 0x25 0x0 0x0>;
 };
-
-&pmi632_vadc {
-	chan@4a {
-		qcom,scale-function = <22>;
-	};
-};
-
diff --git a/arch/arm64/boot/dts/qcom/msm8937-mdss-panels.dtsi b/arch/arm64/boot/dts/qcom/msm8937-mdss-panels.dtsi
index ef4f4b0..528e8aa 100644
--- a/arch/arm64/boot/dts/qcom/msm8937-mdss-panels.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8937-mdss-panels.dtsi
@@ -25,6 +25,8 @@
 #include "dsi-panel-hx8399c-hd-plus-video.dtsi"
 #include "dsi-panel-nt35695b-truly-fhd-video.dtsi"
 #include "dsi-panel-nt35695b-truly-fhd-cmd.dtsi"
+#include "dsi-panel-icn9706-720-1440p-video.dtsi"
+
 &soc {
 	dsi_panel_pwr_supply: dsi_panel_pwr_supply {
 		#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/qcom/msm8937-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8937-mtp.dtsi
index 57823b8..90685e9 100644
--- a/arch/arm64/boot/dts/qcom/msm8937-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8937-mtp.dtsi
@@ -156,6 +156,39 @@
 	};
 };
 
+&pm8937_gpios {
+	nfc_clk {
+		nfc_clk_default: nfc_clk_default {
+			pins = "gpio5";
+			function = "normal";
+			input-enable;
+			power-source = <1>;
+		};
+	};
+};
+
+&i2c_5 { /* BLSP2 QUP1 (NFC) */
+	status = "ok";
+	nq@28 {
+		compatible = "qcom,nq-nci";
+		reg = <0x28>;
+		qcom,nq-irq = <&tlmm 17 0x00>;
+		qcom,nq-ven = <&tlmm 16 0x00>;
+		qcom,nq-firm = <&tlmm 130 0x00>;
+		qcom,nq-clkreq = <&pm8937_gpios 5 0x00>;
+		interrupt-parent = <&tlmm>;
+		qcom,clk-src = "BBCLK2";
+		interrupts = <17 0>;
+		interrupt-names = "nfc_irq";
+		pinctrl-names = "nfc_active", "nfc_suspend";
+		pinctrl-0 = <&nfc_int_active &nfc_disable_active
+						&nfc_clk_default>;
+		pinctrl-1 = <&nfc_int_suspend &nfc_disable_suspend>;
+		clocks = <&clock_gcc clk_bb_clk2_pin>;
+		clock-names = "ref_clk";
+	};
+};
+
 &thermal_zones {
 	quiet-therm-step {
 		status = "disabled";
diff --git a/arch/arm64/boot/dts/qcom/msm8937-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/msm8937-pinctrl.dtsi
index 86e41a3..a730287 100644
--- a/arch/arm64/boot/dts/qcom/msm8937-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8937-pinctrl.dtsi
@@ -319,6 +319,88 @@
 			};
 		};
 
+		spi7 {
+			spi7_default: spi7_default {
+				mux {
+					pins = "gpio85", "gpio86", "gpio88";
+					function = "blsp_spi7";
+				};
+
+				config {
+					pins = "gpio85", "gpio86", "gpio88";
+					drive-strength = <16>;
+					bias-disable = <0>;
+				};
+			};
+
+			spi7_sleep: spi7_sleep {
+				mux {
+					pins = "gpio85", "gpio86", "gpio88";
+					function = "blsp_spi7";
+				};
+
+				config {
+					pins = "gpio85", "gpio86", "gpio88";
+					drive-strength = <16>;
+					bias-disable = <0>;
+				};
+			};
+			spi7_cs0_active: cs0_active {
+				mux {
+					pins = "gpio87";
+					function = "blsp_spi7_cs0";
+				};
+
+				config {
+					pins = "gpio87";
+					drive-strength = <2>;
+					bias-disable = <0>;
+				};
+			};
+		};
+
+		fpc_reset_int {
+			fpc_reset_low: reset_low {
+				mux {
+					pins = "gpio124";
+					function = "fpc_reset_gpio_low";
+				};
+
+				config {
+					pins = "gpio124";
+					drive-strength = <2>;
+					bias-disable;
+					output-low;
+				};
+			};
+
+			fpc_reset_high: reset_high {
+				mux {
+					pins = "gpio124";
+					function = "fpc_reset_gpio_high";
+				};
+
+				config {
+					pins = "gpio124";
+					drive-strength = <2>;
+					bias-disable;
+					output-high;
+				};
+			};
+
+			fpc_int_low: int_low {
+				mux {
+					pins = "gpio48";
+				};
+				config {
+					pins = "gpio48";
+					drive-strength = <2>;
+					bias-pull-down;
+					input-enable;
+				};
+			};
+		};
+
 		wcnss_pmux_5wire {
 			/* Active configuration of bus pins */
 			wcnss_default: wcnss_default {
diff --git a/arch/arm64/boot/dts/qcom/msm8937-pmi8950-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8937-pmi8950-mtp.dtsi
index 1afa230..2d88ad1 100644
--- a/arch/arm64/boot/dts/qcom/msm8937-pmi8950-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8937-pmi8950-mtp.dtsi
@@ -30,7 +30,7 @@
 	qcom,battery-data = <&mtp_batterydata>;
 	qcom,chg-led-sw-controls;
 	qcom,chg-led-support;
-	/delete-property/ dpdm-supply;
+	dpdm-supply = <&usb_otg>;
 };
 
 &usb_otg {
diff --git a/arch/arm64/boot/dts/qcom/msm8937-regulator.dtsi b/arch/arm64/boot/dts/qcom/msm8937-regulator.dtsi
index 44bdfc9..d6f24d9 100644
--- a/arch/arm64/boot/dts/qcom/msm8937-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8937-regulator.dtsi
@@ -62,7 +62,7 @@
 		pm8937_cx_cdev: regulator-cx-cdev {
 			compatible = "qcom,regulator-cooling-device";
 			regulator-cdev-supply = <&pm8937_s2_floor_level>;
-			regulator-levels = <RPM_SMD_REGULATOR_LEVEL_NOM
+			regulator-levels = <RPM_SMD_REGULATOR_LEVEL_NOM_PLUS
 					RPM_SMD_REGULATOR_LEVEL_RETENTION>;
 			#cooling-cells = <2>;
 		};
diff --git a/arch/arm64/boot/dts/qcom/msm8937.dtsi b/arch/arm64/boot/dts/qcom/msm8937.dtsi
index d22101f..85e3043 100644
--- a/arch/arm64/boot/dts/qcom/msm8937.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8937.dtsi
@@ -30,6 +30,11 @@
 	firmware: firmware {
 		android {
 			compatible = "android,firmware";
+			vbmeta {
+				compatible = "android,vbmeta";
+				parts = "vbmeta,boot,system,vendor,dtbo,recovery";
+			};
+
 			fstab {
 				compatible = "android,fstab";
 				vendor {
@@ -37,7 +42,7 @@
 					dev = "/dev/block/platform/soc/7824900.sdhci/by-name/vendor";
 					type = "ext4";
 					mnt_flags = "ro,barrier=1,discard";
-					fsmgr_flags = "wait";
+					fsmgr_flags = "wait,avb";
 					status = "ok";
 				};
 				system {
@@ -45,7 +50,7 @@
 					dev = "/dev/block/platform/soc/7824900.sdhci/by-name/system";
 					type = "ext4";
 					mnt_flags = "ro,barrier=1,discard";
-					fsmgr_flags = "wait";
+					fsmgr_flags = "wait,avb";
 					status = "ok";
 				};
 
@@ -127,7 +132,7 @@
 		dump_mem: mem_dump_region {
 			compatible = "shared-dma-pool";
 			reusable;
-			size = <0 0x2400000>;
+			size = <0 0x400000>;
 		};
 	};
 
@@ -993,7 +998,7 @@
 		reg-names = "wdt-base";
 		interrupts = <0 3 0>, <0 4 0>;
 		qcom,bark-time = <11000>;
-		qcom,pet-time = <10000>;
+		qcom,pet-time = <9360>;
 		qcom,ipi-ping;
 		qcom,wakeup-enable;
 		status = "okay";
diff --git a/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-cdp.dtsi
index 6e961b1..d47dd75 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-cdp.dtsi
@@ -207,8 +207,8 @@
 		cam_v_custom1-supply = <&pm8953_l23>;
 		qcom,cam-vreg-name = "cam_vio", "cam_vdig", "cam_vaf",
 						"cam_vana", "cam_v_custom1";
-		qcom,cam-vreg-min-voltage = <0 1200000 2850000 2800000 1220000>;
-		qcom,cam-vreg-max-voltage = <0 1200000 2850000 2800000 1220000>;
+		qcom,cam-vreg-min-voltage = <0 1200000 2850000 2800000 1200000>;
+		qcom,cam-vreg-max-voltage = <0 1200000 2850000 2800000 1200000>;
 		qcom,cam-vreg-op-mode = <0 105000 100000 80000 105000>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk0_default
diff --git a/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-mtp.dtsi
index 6e961b1..d47dd75 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-mtp.dtsi
@@ -207,8 +207,8 @@
 		cam_v_custom1-supply = <&pm8953_l23>;
 		qcom,cam-vreg-name = "cam_vio", "cam_vdig", "cam_vaf",
 						"cam_vana", "cam_v_custom1";
-		qcom,cam-vreg-min-voltage = <0 1200000 2850000 2800000 1220000>;
-		qcom,cam-vreg-max-voltage = <0 1200000 2850000 2800000 1220000>;
+		qcom,cam-vreg-min-voltage = <0 1200000 2850000 2800000 1200000>;
+		qcom,cam-vreg-max-voltage = <0 1200000 2850000 2800000 1200000>;
 		qcom,cam-vreg-op-mode = <0 105000 100000 80000 105000>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk0_default
diff --git a/arch/arm64/boot/dts/qcom/msm8953-coresight.dtsi b/arch/arm64/boot/dts/qcom/msm8953-coresight.dtsi
index 196a526..d3c2e26 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-coresight.dtsi
@@ -505,6 +505,8 @@
 
 		reg = <0x619c000 0x1000>;
 		cpu = <&CPU0>;
+
+		qcom,tupwr-disable;
 		coresight-name = "coresight-etm0";
 
 		clocks = <&clock_gcc clk_qdss_clk>,
diff --git a/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp-overlay.dts
index b182a25..1b09b3c 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp-overlay.dts
@@ -23,6 +23,31 @@
 	qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
 };
 
+&vendor {
+	mtp_batterydata: qcom,battery-data {
+		qcom,batt-id-range-pct = <15>;
+		#include "batterydata-itech-3000mah.dtsi"
+		#include "batterydata-ascent-3450mAh.dtsi"
+	};
+};
+
+&qpnp_fg {
+	qcom,battery-data = <&mtp_batterydata>;
+};
+
+&pmi_haptic {
+	qcom,lra-auto-res-mode = "qwd";
+	qcom,lra-high-z = "opt1";
+	qcom,lra-res-cal-period = <0>;
+	qcom,wave-play-rate-us = <4165>;
+};
+
+&qpnp_smbcharger {
+	qcom,battery-data = <&mtp_batterydata>;
+	qcom,chg-led-sw-controls;
+	qcom,chg-led-support;
+};
+
 &int_codec {
 	status = "disabled";
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp.dts b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp.dts
index b80583e..e3a5b4a 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp.dts
@@ -25,3 +25,75 @@
 	qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
 };
 
+&int_codec {
+	status = "disabled";
+};
+
+&pmic_analog_codec {
+	status = "disabled";
+};
+
+&wsa881x_i2c_f {
+	status = "disabled";
+};
+
+&wsa881x_i2c_45 {
+	status = "disabled";
+};
+
+&cdc_pri_mi2s_gpios {
+	status = "disabled";
+};
+
+&wsa881x_analog_vi_gpio {
+	status = "disabled";
+};
+
+&wsa881x_analog_clk_gpio {
+	status = "disabled";
+};
+
+&wsa881x_analog_reset_gpio {
+	status = "disabled";
+};
+
+&cdc_comp_gpios {
+	status = "disabled";
+};
+
+&slim_msm {
+	status = "okay";
+};
+
+&dai_slim {
+	status = "okay";
+};
+
+&wcd9xxx_intc {
+	status = "okay";
+};
+
+&clock_audio {
+	status = "okay";
+};
+
+&wcd9335 {
+	status = "okay";
+};
+
+&cdc_us_euro_sw {
+	status = "okay";
+};
+
+&cdc_quin_mi2s_gpios {
+	status = "okay";
+};
+
+&wcd_rst_gpio {
+	status = "okay";
+};
+
+&ext_codec {
+	qcom,model = "msm8953-tasha-snd-card";
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-gpu.dtsi b/arch/arm64/boot/dts/qcom/msm8953-gpu.dtsi
index f82b68d..14c4d2b 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-gpu.dtsi
@@ -231,8 +231,8 @@
 			qcom,gpu-pwrlevel@5 {
 				reg = <5>;
 				qcom,gpu-freq = <216000000>;
-				qcom,bus-freq = <1>;
-				qcom,bus-min = <1>;
+				qcom,bus-freq = <3>;
+				qcom,bus-min = <2>;
 				qcom,bus-max = <4>;
 			};
 
@@ -240,7 +240,7 @@
 			qcom,gpu-pwrlevel@6 {
 				reg = <6>;
 				qcom,gpu-freq = <133300000>;
-				qcom,bus-freq = <1>;
+				qcom,bus-freq = <3>;
 				qcom,bus-min = <1>;
 				qcom,bus-max = <4>;
 			};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-ipc.dts b/arch/arm64/boot/dts/qcom/msm8953-ipc.dts
index 89a54af..43c6441 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-ipc.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-ipc.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -15,11 +15,11 @@
 
 #include "msm8953.dtsi"
 #include "msm8953-ipc.dtsi"
-
+#include "pmi8950.dtsi"
+#include "msm8953-pmi8950.dtsi"
 / {
 	model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 IPC";
 	compatible = "qcom,msm8953-ipc", "qcom,msm8953", "qcom,ipc";
 	qcom,board-id= <12 0>;
 	qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
 };
-
diff --git a/arch/arm64/boot/dts/qcom/msm8953-mdss-panels.dtsi b/arch/arm64/boot/dts/qcom/msm8953-mdss-panels.dtsi
index f7671dc..7bc181c 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-mdss-panels.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-mdss-panels.dtsi
@@ -25,6 +25,7 @@
 #include "dsi-panel-lt8912-1080p-video.dtsi"
 #include "dsi-panel-hx8399c-fhd-plus-video.dtsi"
 #include "dsi-panel-hx83100a-800p-video.dtsi"
+#include "dsi-panel-boent51021-1200p-video.dtsi"
 
 &soc {
 	dsi_panel_pwr_supply: dsi_panel_pwr_supply {
@@ -75,7 +76,13 @@
 		23 1e 08 09 05 03 04 a0
 		23 1a 08 09 05 03 04 a0];
 	qcom,esd-check-enabled;
-	qcom,mdss-dsi-panel-status-check-mode = "bta_check";
+	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+	qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode";
+	qcom,mdss-dsi-panel-status-value = <0x1c>;
+	qcom,mdss-dsi-panel-on-check-value = <0x1c>;
+	qcom,mdss-dsi-panel-status-read-length = <1>;
+	qcom,mdss-dsi-panel-max-error-count = <3>;
 
 };
 
@@ -86,7 +93,13 @@
 		23 1e 08 09 05 03 04 a0
 		23 1a 08 09 05 03 04 a0];
 	qcom,esd-check-enabled;
-	qcom,mdss-dsi-panel-status-check-mode = "bta_check";
+	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+	qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode";
+	qcom,mdss-dsi-panel-status-value = <0x1c>;
+	qcom,mdss-dsi-panel-on-check-value = <0x1c>;
+	qcom,mdss-dsi-panel-status-read-length = <1>;
+	qcom,mdss-dsi-panel-max-error-count = <3>;
 };
 
 &dsi_r69006_1080p_video {
@@ -111,6 +124,12 @@
 		24 1f 08 09 05 03 04 a0
 		24 1f 08 09 05 03 04 a0
 		24 1c 08 09 05 03 04 a0];
+	qcom,mdss-dsi-h-front-porch = <52>;
+	qcom,mdss-dsi-h-back-porch = <48>;
+	qcom,mdss-dsi-h-pulse-width = <8>;
+	qcom,mdss-dsi-v-back-porch = <8>;
+	qcom,mdss-dsi-v-front-porch = <8>;
+	qcom,mdss-dsi-v-pulse-width = <4>;
 	qcom,esd-check-enabled;
 	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
 	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
@@ -157,3 +176,11 @@
 		1f 1c 05 06 03 03 04 a0
 		1f 10 05 06 03 03 04 a0];
 };
+
+&dsi_boent51021_1200p_video {
+	qcom,mdss-dsi-panel-timings-phy-v2 = [25 20 08 0a 06 03 04 a0
+		25 20 08 0a 06 03 04 a0
+		25 20 08 0a 06 03 04 a0
+		25 20 08 0a 06 03 04 a0
+		25 1d 08 0a 06 03 04 a0];
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/msm8953-mtp-overlay.dts
index c6ae512..00614b2 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-mtp-overlay.dts
@@ -21,7 +21,7 @@
 	qcom,board-id = <8 0>;
 };
 
-/{
+&vendor {
 	mtp_batterydata: qcom,battery-data {
 		qcom,batt-id-range-pct = <15>;
 		#include "batterydata-itech-3000mah.dtsi"
@@ -33,6 +33,13 @@
 	qcom,battery-data = <&mtp_batterydata>;
 };
 
+&pmi_haptic {
+	qcom,lra-auto-res-mode = "qwd";
+	qcom,lra-high-z = "opt1";
+	qcom,lra-res-cal-period = <0>;
+	qcom,wave-play-rate-us = <4165>;
+};
+
 &qpnp_smbcharger {
 	qcom,battery-data = <&mtp_batterydata>;
 	qcom,chg-led-sw-controls;
diff --git a/arch/arm64/boot/dts/qcom/msm8953-mtp.dts b/arch/arm64/boot/dts/qcom/msm8953-mtp.dts
index 97c6db3..539ac59 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-mtp.dts
@@ -37,6 +37,13 @@
 	qcom,battery-data = <&mtp_batterydata>;
 };
 
+&pmi_haptic {
+	qcom,lra-auto-res-mode = "qwd";
+	qcom,lra-high-z = "opt1";
+	qcom,lra-res-cal-period = <0>;
+	qcom,wave-play-rate-us = <4165>;
+};
+
 &qpnp_smbcharger {
 	qcom,battery-data = <&mtp_batterydata>;
 	qcom,chg-led-sw-controls;
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/msm8953-pinctrl.dtsi
index cc3c392..e8de4ef 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-pinctrl.dtsi
@@ -1731,5 +1731,19 @@
 				bias-disable;
 			};
 		};
+
+		ssusb_mode_sel: ssusb_mode_sel {
+			mux {
+				pins = "gpio12";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio12";
+				drive-strength = <2>;
+				bias-disable;
+				input-disable;
+			};
+		};
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8953-vidc.dtsi b/arch/arm64/boot/dts/qcom/msm8953-vidc.dtsi
index cb8cdf2..1558010 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-vidc.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-vidc.dtsi
@@ -88,8 +88,8 @@
 				<&apps_iommu 0x82c 0x01>,
 				<&apps_iommu 0x821 0x10>;
 			buffer-types = <0xfff>;
-			virtual-addr-pool = <0x5dc00000 0x7f000000
-				0xdcc00000 0x1000000>;
+			virtual-addr-pool = <0x79000000 0x28000000
+				0xa1000000 0xc9000000>;
 		};
 
 		secure_bitstream_cb {
@@ -102,7 +102,7 @@
 				<&apps_iommu 0x926 0x0>,
 				<&apps_iommu 0x929 0x2>;
 			buffer-types = <0x241>;
-			virtual-addr-pool = <0x4b000000 0x12c00000>;
+			virtual-addr-pool = <0x51000000 0x28000000>;
 			qcom,secure-context-bank;
 		};
 
@@ -113,7 +113,7 @@
 				<&apps_iommu 0x910 0x0>,
 				<&apps_iommu 0x92c 0x0>;
 			buffer-types = <0x106>;
-			virtual-addr-pool = <0x25800000 0x25800000>;
+			virtual-addr-pool = <0x29000000 0x28000000>;
 			qcom,secure-context-bank;
 		};
 
@@ -125,7 +125,7 @@
 				<&apps_iommu 0x925 0x8>,
 				<&apps_iommu 0x928 0x0>;
 			buffer-types = <0x480>;
-			virtual-addr-pool = <0x1000000 0x24800000>;
+			virtual-addr-pool = <0x1000000 0x28000000>;
 			qcom,secure-context-bank;
 		};
 
diff --git a/arch/arm64/boot/dts/qcom/msm8953.dtsi b/arch/arm64/boot/dts/qcom/msm8953.dtsi
index cd0afec..c256518 100644
--- a/arch/arm64/boot/dts/qcom/msm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953.dtsi
@@ -17,6 +17,7 @@
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/regulator/qcom,rpm-smd-regulator.h>
 #include <dt-bindings/clock/msm-clocks-8953.h>
+#include <dt-bindings/msm/msm-bus-ids.h>
 
 / {
 	model = "Qualcomm Technologies, Inc. MSM8953";
@@ -29,9 +30,21 @@
 		bootargs = "core_ctl_disable_cpumask=0-7 kpti=0";
 	};
 
+	vendor: vendor {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges = <0 0 0 0xffffffff>;
+		compatible = "simple-bus";
+	};
+
 	firmware: firmware {
 		android {
 			compatible = "android,firmware";
+			vbmeta {
+				compatible = "android,vbmeta";
+				parts = "vbmeta,boot,system,vendor,dtbo,recovery";
+			};
+
 			fstab {
 				compatible = "android,fstab";
 				vendor {
@@ -39,7 +52,7 @@
 					dev = "/dev/block/platform/soc/7824900.sdhci/by-name/vendor";
 					type = "ext4";
 					mnt_flags = "ro,barrier=1,discard";
-					fsmgr_flags = "wait";
+					fsmgr_flags = "wait,avb";
 					status = "ok";
 				};
 				system {
@@ -47,7 +60,7 @@
 					dev = "/dev/block/platform/soc/7824900.sdhci/by-name/system";
 					type = "ext4";
 					mnt_flags = "ro,barrier=1,discard";
-					fsmgr_flags = "wait";
+					fsmgr_flags = "wait,avb";
 					status = "ok";
 				};
 
@@ -68,7 +81,7 @@
 
 		modem_mem: modem_region@0 {
 			compatible = "removed-dma-pool";
-			no-map-fixup;
+			no-map;
 			reg = <0x0 0x86c00000 0x0 0x6a00000>;
 		};
 
@@ -96,7 +109,7 @@
 			compatible = "shared-dma-pool";
 			reusable;
 			alignment = <0 0x400000>;
-			size = <0 0x09800000>;
+			size = <0 0x0b400000>;
 		};
 
 		qseecom_mem: qseecom_region@0 {
@@ -142,7 +155,7 @@
 		dump_mem: mem_dump_region {
 			compatible = "shared-dma-pool";
 			reusable;
-			size = <0 0x2400000>;
+			size = <0 0x400000>;
 		};
 	};
 
@@ -168,7 +181,18 @@
 		spi3 = &spi_3;
 	};
 
-	soc: soc { };
+	soc: soc {
+		/*
+		 * The ordering of these devices is important to boot time
+		 * for iot projects.
+		 */
+		smem: qcom,smem@86300000 {};
+		rpm_bus: qcom,rpm-smd {};
+		clock_gcc: qcom,gcc@1800000 {};
+		ad_hoc_bus: ad-hoc-bus@580000 {};
+		tlmm: pinctrl@1000000 {};
+		sdhc_1: sdhci@7824900 {};
+	};
 
 };
 
@@ -1218,7 +1242,7 @@
 		reg-names = "wdt-base";
 		interrupts = <0 3 0>, <0 4 0>;
 		qcom,bark-time = <11000>;
-		qcom,pet-time = <10000>;
+		qcom,pet-time = <9360>;
 		qcom,ipi-ping;
 		qcom,wakeup-enable;
 		qcom,scandump-size = <0x40000>;
@@ -1440,8 +1464,10 @@
 
 	sdhc_1: sdhci@7824900 {
 		compatible = "qcom,sdhci-msm";
-		reg = <0x7824900 0x500>, <0x7824000 0x800>, <0x7824e00 0x200>;
-		reg-names = "hc_mem", "core_mem", "cmdq_mem";
+		reg = <0x7824900 0x500>, <0x7824000 0x800>, <0x7824e00 0x200>,
+			<0x0119d000 0x4>;
+		reg-names = "hc_mem", "core_mem", "cmdq_mem",
+				"tlmm_mem";
 
 		interrupts = <0 123 0>, <0 138 0>;
 		interrupt-names = "hc_irq", "pwr_irq";
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index 338f82a..2c93de7 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -326,8 +326,8 @@
 		blsp2_spi5: spi@075ba000{
 			compatible = "qcom,spi-qup-v2.2.1";
 			reg = <0x075ba000 0x600>;
-			interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&gcc GCC_BLSP2_QUP5_SPI_APPS_CLK>,
+			interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP2_QUP6_SPI_APPS_CLK>,
 				 <&gcc GCC_BLSP2_AHB_CLK>;
 			clock-names = "core", "iface";
 			pinctrl-names = "default", "sleep";
diff --git a/arch/arm64/boot/dts/qcom/pm8916.dtsi b/arch/arm64/boot/dts/qcom/pm8916.dtsi
index af29080..5204137 100644
--- a/arch/arm64/boot/dts/qcom/pm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8916.dtsi
@@ -101,12 +101,13 @@
 			reg = <0x3100 0x100>;
 			#address-cells = <1>;
 			#size-cells = <0>;
-			interrupts = <0x0 0x31 0x0 IRQ_TYPE_NONE>;
+			interrupts = <0x0 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
 			interrupt-names = "eoc-int-en-set";
 			qcom,adc-bit-resolution = <15>;
 			qcom,adc-vdd-reference = <1800>;
 			qcom,vadc-poll-eoc;
 			qcom,pmic-revid = <&pm8916_revid>;
+			#thermal-sensor-cells = <1>;
 
 			chan@8 {
 				label = "die_temp";
@@ -145,11 +146,12 @@
 		pm8916_tz: qcom,temp-alarm@2400 {
 			compatible = "qcom,qpnp-temp-alarm";
 			reg = <0x2400 0x100>;
-			interrupts = <0x0 0x24 0x0 IRQ_TYPE_NONE>;
+			interrupts = <0x0 0x24 0x0 IRQ_TYPE_EDGE_RISING>;
 			label = "pm8916_tz";
 			qcom,channel-num = <8>;
 			qcom,threshold-set = <0>;
 			qcom,temp_alarm-vadc = <&pm8916_vadc>;
+			#thermal-sensor-cells = <0>;
 		};
 
 		pm8916_adc_tm: vadc@3400 {
@@ -157,9 +159,9 @@
 			reg = <0x3400 0x100>;
 			#address-cells = <1>;
 			#size-cells = <0>;
-			interrupts = <0x0 0x34 0x0 IRQ_TYPE_NONE>,
-				     <0x0 0x34 0x3 IRQ_TYPE_NONE>,
-				     <0x0 0x34 0x4 IRQ_TYPE_NONE>;
+			interrupts = <0x0 0x34 0x0 IRQ_TYPE_EDGE_RISING>,
+				     <0x0 0x34 0x3 IRQ_TYPE_EDGE_RISING>,
+				     <0x0 0x34 0x4 IRQ_TYPE_EDGE_RISING>;
 			interrupt-names = "eoc-int-en-set",
 					  "high-thr-en-set",
 					  "low-thr-en-set";
diff --git a/arch/arm64/boot/dts/qcom/pmi632.dtsi b/arch/arm64/boot/dts/qcom/pmi632.dtsi
index 3159159..d175b80 100644
--- a/arch/arm64/boot/dts/qcom/pmi632.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi632.dtsi
@@ -44,8 +44,6 @@
 			interrupt-names = "eoc-int-en-set";
 			qcom,adc-vdd-reference = <1875>;
 			qcom,adc-full-scale-code = <0x70e4>;
-			pinctrl-names = "default";
-			pinctrl-0 = <&quiet_therm_default &smb_therm_default>;
 
 			chan@0 {
 				label = "ref_gnd";
@@ -291,35 +289,20 @@
 		pmi632_gpios: pinctrl@c000 {
 			compatible = "qcom,spmi-gpio";
 			reg = <0xc000 0x800>;
-			interrupts = <0x2 0xc1 0 IRQ_TYPE_NONE>,
+			interrupts = <0x2 0xc0 0 IRQ_TYPE_NONE>,
+					<0x2 0xc1 0 IRQ_TYPE_NONE>,
 					<0x2 0xc2 0 IRQ_TYPE_NONE>,
 					<0x2 0xc3 0 IRQ_TYPE_NONE>,
 					<0x2 0xc4 0 IRQ_TYPE_NONE>,
 					<0x2 0xc5 0 IRQ_TYPE_NONE>,
 					<0x2 0xc6 0 IRQ_TYPE_NONE>,
 					<0x2 0xc7 0 IRQ_TYPE_NONE>;
-			interrupt-names = "pmi632_gpio2", "pmi632_gpio3",
-					"pmi632_gpio4", "pmi632_gpio5",
-					"pmi632_gpio6", "pmi632_gpio7",
-					"pmi632_gpio8";
+			interrupt-names = "pmi632_gpio1", "pmi632_gpio2",
+					"pmi632_gpio3", "pmi632_gpio4",
+					"pmi632_gpio5", "pmi632_gpio6",
+					"pmi632_gpio7", "pmi632_gpio8";
 			gpio-controller;
 			#gpio-cells = <2>;
-			qcom,gpios-disallowed = <1>;
-
-			quiet_therm {
-				quiet_therm_default: quiet_therm_default {
-					pins = "gpio3";
-					bias-high-impedance;
-				};
-			};
-
-			smb_therm {
-				smb_therm_default: smb_therm_default {
-					pins = "gpio4";
-					bias-high-impedance;
-				};
-			};
-
 		};
 
 		pmi632_charger: qcom,qpnp-smb5 {
@@ -332,6 +315,12 @@
 			dpdm-supply = <&qusb_phy>;
 			qcom,auto-recharge-soc = <98>;
 			qcom,chg-vadc = <&pmi632_vadc>;
+			qcom,flash-disable-soc = <10>;
+			qcom,sw-jeita-enable;
+			qcom,step-charging-enable;
+			qcom,hw-die-temp-mitigation;
+			qcom,hw-connector-mitigation;
+			qcom,connector-internal-pull-kohm = <100>;
 
 			qcom,thermal-mitigation
 				= <3000000 2500000 2000000 1500000
@@ -408,7 +397,7 @@
 				interrupts =
 					<0x2 0x13 0x0 IRQ_TYPE_EDGE_BOTH>,
 					<0x2 0x13 0x1 IRQ_TYPE_EDGE_BOTH>,
-					<0x2 0x13 0x2 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x13 0x2 IRQ_TYPE_EDGE_RISING>,
 					<0x2 0x13 0x3 IRQ_TYPE_EDGE_BOTH>,
 					<0x2 0x13 0x4 IRQ_TYPE_EDGE_BOTH>,
 					<0x2 0x13 0x5 IRQ_TYPE_EDGE_BOTH>,
@@ -541,6 +530,16 @@
 			compatible = "qcom,msm-bcl-soc";
 			#thermal-sensor-cells = <0>;
 		};
+
+		pmi632_pbs_client3: qcom,pbs@7400 {
+			compatible = "qcom,qpnp-pbs";
+			reg = <0x7400 0x100>;
+		};
+
+		pmi632_sdam7: qcom,sdam@b600 {
+			compatible = "qcom,spmi-sdam";
+			reg = <0xb600 0x100>;
+		};
 	};
 
 	pmi632_3: qcom,pmi632@3 {
@@ -553,7 +552,7 @@
 			compatible = "qcom,qpnp-vibrator-ldo";
 			reg = <0x5700 0x100>;
 			qcom,vib-ldo-volt-uv = <3000000>;
-			qcom,vib-overdrive-volt-uv = <3544000>;
+			qcom,disable-overdrive;
 		};
 
 		pmi632_pwm: qcom,pwms@b300 {
@@ -561,6 +560,36 @@
 			reg = <0xb300 0x500>;
 			reg-names = "lpg-base";
 			#pwm-cells = <2>;
+			nvmem-names = "ppg_sdam";
+			nvmem = <&pmi632_sdam7>;
+			qcom,pbs-client = <&pmi632_pbs_client3>;
+			qcom,lut-sdam-base = <0x80>;
+			qcom,lut-patterns = <0 0 0 14 28 42 56 70 84 100
+						100 84 70 56 42 28 14 0 0 0>;
+			lpg@1 {
+				qcom,lpg-chan-id = <1>;
+				qcom,ramp-step-ms = <200>;
+				qcom,ramp-low-index = <0>;
+				qcom,ramp-high-index = <19>;
+				qcom,ramp-pattern-repeat;
+				qcom,lpg-sdam-base = <0x48>;
+			};
+			lpg@2 {
+				qcom,lpg-chan-id = <2>;
+				qcom,ramp-step-ms = <200>;
+				qcom,ramp-low-index = <0>;
+				qcom,ramp-high-index = <19>;
+				qcom,ramp-pattern-repeat;
+				qcom,lpg-sdam-base = <0x56>;
+			};
+			lpg@3 {
+				qcom,lpg-chan-id = <3>;
+				qcom,ramp-step-ms = <200>;
+				qcom,ramp-low-index = <0>;
+				qcom,ramp-high-index = <19>;
+				qcom,ramp-pattern-repeat;
+				qcom,lpg-sdam-base = <0x64>;
+			};
 		};
 
 		pmi632_rgb: qcom,leds@d000 {
diff --git a/arch/arm64/boot/dts/qcom/pmi8950.dtsi b/arch/arm64/boot/dts/qcom/pmi8950.dtsi
index 8797ea8..fa93918 100644
--- a/arch/arm64/boot/dts/qcom/pmi8950.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8950.dtsi
@@ -587,13 +587,15 @@
 			};
 		};
 
-		pmi_haptic: qcom,haptic@c000 {
+		pmi_haptic: qcom,haptics@c000 {
 			compatible = "qcom,qpnp-haptics";
 			reg = <0xc000 0x100>;
 			interrupts = <0x3 0xc0 0x0 IRQ_TYPE_EDGE_BOTH>,
 					<0x3 0xc0 0x1 IRQ_TYPE_EDGE_BOTH>;
 			interrupt-names = "hap-sc-irq", "hap-play-irq";
 			qcom,pmic-revid = <&pmi8950_revid>;
+			vcc_pon-supply = <&pon_perph_reg>;
+			qcom,int-pwm-freq-khz = <505>;
 			qcom,play-mode = "direct";
 			qcom,wave-play-rate-us = <5263>;
 			qcom,actuator-type = <0>;
diff --git a/arch/arm64/boot/dts/qcom/qcs605-lc-cdp-base.dts b/arch/arm64/boot/dts/qcom/qcs605-lc-cdp-base.dts
new file mode 100644
index 0000000..e08acea
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs605-lc-cdp-base.dts
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "qcs605-lc.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. QCS605 LC CDP Base SoC";
+	compatible = "qcom,qcs605";
+	qcom,board-id = <1 4>;
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs605-lc-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/qcs605-lc-cdp-overlay.dts
new file mode 100644
index 0000000..5987fb3
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs605-lc-cdp-overlay.dts
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include "qcs605-lc-cdp.dtsi"
+#include "qcs605-lc-audio-overlay.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. QCS605 LC Groot + PM8005 CDP";
+	compatible = "qcom,qcs605-cdp", "qcom,qcs605", "qcom,cdp";
+	qcom,msm-id = <347 0x0>;
+	qcom,board-id = <1 4>;
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs605-lc-cdp.dts b/arch/arm64/boot/dts/qcom/qcs605-lc-cdp.dts
new file mode 100644
index 0000000..753b496
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs605-lc-cdp.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "qcs605-lc.dtsi"
+#include "qcs605-lc-cdp.dtsi"
+#include "qcs605-lc-audio-overlay.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. QCS605 LC Groot + PM8005 CDP";
+	compatible = "qcom,qcs605-cdp", "qcom,qcs605", "qcom,cdp";
+	qcom,board-id = <1 4>;
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs605-lc-cdp.dtsi b/arch/arm64/boot/dts/qcom/qcs605-lc-cdp.dtsi
new file mode 100644
index 0000000..e32128c
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs605-lc-cdp.dtsi
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "qcs605-lc-pmic-overlay.dtsi"
+
+&qupv3_se9_2uart {
+	status = "disabled";
+};
+
+&qupv3_se12_2uart {
+	status = "ok";
+};
+
+&qupv3_se8_spi {
+	status = "disabled";
+};
+
+&sdhc_1 {
+	vdd-supply = <&pm660_l19>;
+	qcom,vdd-voltage-level = <2960000 2960000>;
+	qcom,vdd-current-level = <0 570000>;
+
+	vdd-io-supply = <&pm660_l8>;
+	qcom,vdd-io-always-on;
+	qcom,vdd-io-lpm-sup;
+	qcom,vdd-io-voltage-level = <1800000 1800000>;
+	qcom,vdd-io-current-level = <0 325000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc1_clk_on  &sdc1_cmd_on &sdc1_data_on &sdc1_rclk_on>;
+	pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off &sdc1_rclk_off>;
+
+	status = "ok";
+};
+
+&tlmm {
+	sdc2_cd_on: cd_on {
+		mux {
+			pins = "gpio116";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio116";
+			drive-strength = <2>;
+			bias-pull-up;
+		};
+	};
+
+	sdc2_cd_off: cd_off {
+		mux {
+			pins = "gpio116";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio116";
+			drive-strength = <2>;
+			bias-disable;
+		};
+	};
+};
+
+&sdhc_2 {
+	/* VDD external regulator is enabled/disabled by pm660_l18 regulator */
+	vdd-io-supply = <&pm660_l18>;
+	qcom,vdd-io-voltage-level = <1800000 2960000>;
+	qcom,vdd-io-current-level = <0 22000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
+
+	cd-gpios = <&tlmm 116 0x1>;
+
+	status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs605-lc.dtsi b/arch/arm64/boot/dts/qcom/qcs605-lc.dtsi
index f8dde39..7e15752 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-lc.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605-lc.dtsi
@@ -212,6 +212,11 @@
 	vdd_gfx-supply = <&pm8005_s3_level>;
 };
 
+&gpu_cx_gdsc {
+	/delete-property/ parent-supply;
+	parent-supply = <&pm8005_s1_level>;
+};
+
 &gpu_gx_gdsc {
 	/delete-property/ parent-supply;
 	parent-supply = <&pm8005_s3_level>;
diff --git a/arch/arm64/boot/dts/qcom/qcs605-pm660-pm8005-regulator.dtsi b/arch/arm64/boot/dts/qcom/qcs605-pm660-pm8005-regulator.dtsi
index 382ba65..8efb7c1 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-pm660-pm8005-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605-pm660-pm8005-regulator.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -102,7 +102,8 @@
 		pm8005_s1_level: regulator-pm8005-s1-level {
 			regulator-name = "pm8005_s1_level";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
-			regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+			regulator-min-microvolt
+				= <RPMH_REGULATOR_LEVEL_RETENTION>;
 			regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
 			qcom,min-dropout-voltage-level = <(-1)>;
 		};
@@ -110,7 +111,8 @@
 		pm8005_s1_level_ao: regulator-pm8005-s1-level-ao {
 			regulator-name = "pm8005_s1_level_ao";
 			qcom,set = <RPMH_REGULATOR_SET_ACTIVE>;
-			regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+			regulator-min-microvolt
+				= <RPMH_REGULATOR_LEVEL_RETENTION>;
 			regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
 			qcom,min-dropout-voltage-level = <(-1)>;
 		};
diff --git a/arch/arm64/boot/dts/qcom/qcs605.dtsi b/arch/arm64/boot/dts/qcom/qcs605.dtsi
index be88a5d..61a812c 100644
--- a/arch/arm64/boot/dts/qcom/qcs605.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605.dtsi
@@ -19,39 +19,39 @@
 };
 
 &pil_modem_mem {
-	reg = <0 0x8b000000 0 0x3e00000>;
+	reg = <0 0x8b000000 0 0x3100000>;
 };
 
 &pil_video_mem {
-	reg = <0 0x8ee00000 0 0x500000>;
+	reg = <0 0x8e100000 0 0x500000>;
 };
 
 &wlan_msa_mem {
-	reg = <0 0x8f300000 0 0x100000>;
+	reg = <0 0x8e600000 0 0x100000>;
 };
 
 &pil_cdsp_mem {
-	reg = <0 0x8f400000 0 0x800000>;
+	reg = <0 0x8e700000 0 0x800000>;
 };
 
 &pil_mba_mem {
-	reg = <0 0x8fc00000 0 0x200000>;
+	reg = <0 0x8ef00000 0 0x200000>;
 };
 
 &pil_adsp_mem {
-	reg = <0 0x8fe00000 0 0x1e00000>;
+	reg = <0 0x8f100000 0 0x1e00000>;
 };
 
 &pil_ipa_fw_mem {
-	reg = <0 0x91c00000 0 0x10000>;
+	reg = <0 0x90f00000 0 0x10000>;
 };
 
 &pil_ipa_gsi_mem {
-	reg = <0 0x91c10000 0 0x5000>;
+	reg = <0 0x90f10000 0 0x5000>;
 };
 
 &pil_gpu_mem {
-	reg = <0 0x91c15000 0 0x2000>;
+	reg = <0 0x90f15000 0 0x2000>;
 };
 
 &adsp_mem {
@@ -90,8 +90,8 @@
 	lmh-dcvs-00 {
 		trips {
 			active-config {
-				temperature = <105000>;
-				hysteresis = <40000>;
+				temperature = <100000>;
+				hysteresis = <35000>;
 			};
 		};
 	};
@@ -99,8 +99,8 @@
 	lmh-dcvs-01 {
 		trips {
 			active-config {
-				temperature = <105000>;
-				hysteresis = <40000>;
+				temperature = <100000>;
+				hysteresis = <35000>;
 			};
 		};
 	};
@@ -108,4 +108,276 @@
 
 &msm_gpu {
 	/delete-node/qcom,gpu-mempools;
+	/delete-node/qcom,gpu-pwrlevel-bins;
+
+	qcom,gpu-pwrlevel-bins {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		compatible = "qcom,gpu-pwrlevel-bins";
+
+		qcom,gpu-pwrlevels-0 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,speed-bin = <0>;
+
+			qcom,initial-pwrlevel = <4>;
+			qcom,ca-target-pwrlevel = <5>;
+
+			/* TURBO_L1 */
+			qcom,gpu-pwrlevel@0 {
+				reg = <0>;
+				qcom,gpu-freq = <780000000>;
+				qcom,bus-freq = <11>;
+				qcom,bus-min = <10>;
+				qcom,bus-max = <11>;
+			};
+
+			/* TURBO */
+			qcom,gpu-pwrlevel@1 {
+				reg = <1>;
+				qcom,gpu-freq = <750000000>;
+				qcom,bus-freq = <11>;
+				qcom,bus-min = <9>;
+				qcom,bus-max = <11>;
+			};
+
+			/* NOM_L1 */
+			qcom,gpu-pwrlevel@2 {
+				reg = <2>;
+				qcom,gpu-freq = <650000000>;
+				qcom,bus-freq = <10>;
+				qcom,bus-min = <8>;
+				qcom,bus-max = <11>;
+			};
+
+			/* NOM */
+			qcom,gpu-pwrlevel@3 {
+				reg = <3>;
+				qcom,gpu-freq = <565000000>;
+				qcom,bus-freq = <9>;
+				qcom,bus-min = <8>;
+				qcom,bus-max = <10>;
+			};
+
+			/* SVS_L1 */
+			qcom,gpu-pwrlevel@4 {
+				reg = <4>;
+				qcom,gpu-freq = <430000000>;
+				qcom,bus-freq = <8>;
+				qcom,bus-min = <7>;
+				qcom,bus-max = <10>;
+			};
+
+			/* SVS */
+			qcom,gpu-pwrlevel@5 {
+				reg = <5>;
+				qcom,gpu-freq = <355000000>;
+				qcom,bus-freq = <7>;
+				qcom,bus-min = <5>;
+				qcom,bus-max = <8>;
+			};
+
+			/* LOW SVS */
+			qcom,gpu-pwrlevel@6 {
+				reg = <6>;
+				qcom,gpu-freq = <267000000>;
+				qcom,bus-freq = <6>;
+				qcom,bus-min = <4>;
+				qcom,bus-max = <7>;
+			};
+
+			/* MIN SVS */
+			qcom,gpu-pwrlevel@7 {
+				reg = <7>;
+				qcom,gpu-freq = <180000000>;
+				qcom,bus-freq = <4>;
+				qcom,bus-min = <3>;
+				qcom,bus-max = <4>;
+			};
+
+			/* XO */
+			qcom,gpu-pwrlevel@8 {
+				reg = <8>;
+				qcom,gpu-freq = <0>;
+				qcom,bus-freq = <0>;
+				qcom,bus-min = <0>;
+				qcom,bus-max = <0>;
+			};
+		};
+
+		qcom,gpu-pwrlevels-1 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,speed-bin = <146>;
+
+			qcom,initial-pwrlevel = <3>;
+			qcom,ca-target-pwrlevel = <4>;
+
+			/* TURBO */
+			qcom,gpu-pwrlevel@0 {
+				reg = <0>;
+				qcom,gpu-freq = <700000000>;
+				qcom,bus-freq = <11>;
+				qcom,bus-min = <9>;
+				qcom,bus-max = <11>;
+			};
+
+			/* NOM_L1 */
+			qcom,gpu-pwrlevel@1 {
+				reg = <1>;
+				qcom,gpu-freq = <650000000>;
+				qcom,bus-freq = <10>;
+				qcom,bus-min = <8>;
+				qcom,bus-max = <11>;
+			};
+
+			/* NOM */
+			qcom,gpu-pwrlevel@2 {
+				reg = <2>;
+				qcom,gpu-freq = <565000000>;
+				qcom,bus-freq = <9>;
+				qcom,bus-min = <8>;
+				qcom,bus-max = <10>;
+			};
+
+			/* SVS_L1 */
+			qcom,gpu-pwrlevel@3 {
+				reg = <3>;
+				qcom,gpu-freq = <430000000>;
+				qcom,bus-freq = <8>;
+				qcom,bus-min = <7>;
+				qcom,bus-max = <10>;
+			};
+
+			/* SVS */
+			qcom,gpu-pwrlevel@4 {
+				reg = <4>;
+				qcom,gpu-freq = <355000000>;
+				qcom,bus-freq = <7>;
+				qcom,bus-min = <5>;
+				qcom,bus-max = <8>;
+			};
+
+			/* LOW SVS */
+			qcom,gpu-pwrlevel@5 {
+				reg = <5>;
+				qcom,gpu-freq = <267000000>;
+				qcom,bus-freq = <6>;
+				qcom,bus-min = <4>;
+				qcom,bus-max = <7>;
+			};
+
+			/* MIN SVS */
+			qcom,gpu-pwrlevel@6 {
+				reg = <6>;
+				qcom,gpu-freq = <180000000>;
+				qcom,bus-freq = <4>;
+				qcom,bus-min = <3>;
+				qcom,bus-max = <4>;
+			};
+
+			/* XO */
+			qcom,gpu-pwrlevel@7 {
+				reg = <7>;
+				qcom,gpu-freq = <0>;
+				qcom,bus-freq = <0>;
+				qcom,bus-min = <0>;
+				qcom,bus-max = <0>;
+			};
+		};
+
+		qcom,gpu-pwrlevels-2 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,speed-bin = <163>;
+
+			qcom,initial-pwrlevel = <4>;
+			qcom,ca-target-pwrlevel = <5>;
+
+			/* TURBO_L1 */
+			qcom,gpu-pwrlevel@0 {
+				reg = <0>;
+				qcom,gpu-freq = <780000000>;
+				qcom,bus-freq = <11>;
+				qcom,bus-min = <10>;
+				qcom,bus-max = <11>;
+			};
+
+			/* TURBO */
+			qcom,gpu-pwrlevel@1 {
+				reg = <1>;
+				qcom,gpu-freq = <750000000>;
+				qcom,bus-freq = <11>;
+				qcom,bus-min = <9>;
+				qcom,bus-max = <11>;
+			};
+
+			/* NOM_L1 */
+			qcom,gpu-pwrlevel@2 {
+				reg = <2>;
+				qcom,gpu-freq = <650000000>;
+				qcom,bus-freq = <10>;
+				qcom,bus-min = <8>;
+				qcom,bus-max = <11>;
+			};
+
+			/* NOM */
+			qcom,gpu-pwrlevel@3 {
+				reg = <3>;
+				qcom,gpu-freq = <565000000>;
+				qcom,bus-freq = <9>;
+				qcom,bus-min = <8>;
+				qcom,bus-max = <10>;
+			};
+
+			/* SVS_L1 */
+			qcom,gpu-pwrlevel@4 {
+				reg = <4>;
+				qcom,gpu-freq = <430000000>;
+				qcom,bus-freq = <8>;
+				qcom,bus-min = <7>;
+				qcom,bus-max = <10>;
+			};
+
+			/* SVS */
+			qcom,gpu-pwrlevel@5 {
+				reg = <5>;
+				qcom,gpu-freq = <355000000>;
+				qcom,bus-freq = <7>;
+				qcom,bus-min = <5>;
+				qcom,bus-max = <8>;
+			};
+
+			/* LOW SVS */
+			qcom,gpu-pwrlevel@6 {
+				reg = <6>;
+				qcom,gpu-freq = <267000000>;
+				qcom,bus-freq = <6>;
+				qcom,bus-min = <4>;
+				qcom,bus-max = <7>;
+			};
+
+			/* MIN SVS */
+			qcom,gpu-pwrlevel@7 {
+				reg = <7>;
+				qcom,gpu-freq = <180000000>;
+				qcom,bus-freq = <4>;
+				qcom,bus-min = <3>;
+				qcom,bus-max = <4>;
+			};
+
+			/* XO */
+			qcom,gpu-pwrlevel@8 {
+				reg = <8>;
+				qcom,gpu-freq = <0>;
+				qcom,bus-freq = <0>;
+				qcom,bus-min = <0>;
+				qcom,bus-max = <0>;
+			};
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sda429.dts b/arch/arm64/boot/dts/qcom/sda429.dts
new file mode 100644
index 0000000..6a26f23
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda429.dts
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sda429.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDA429 CDP";
+	compatible = "qcom,sda429-cdp", "qcom,sda429", "qcom,cdp";
+	qcom,pmic-id = <0x010016 0x25 0x0 0x0>;
+	qcom,pmic-name = "PMI632";
+};
diff --git a/arch/arm64/boot/dts/qcom/sda439.dts b/arch/arm64/boot/dts/qcom/sda439.dts
new file mode 100644
index 0000000..a124c75
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda439.dts
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sda439.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDA439";
+	compatible = "qcom,sda439";
+	qcom,pmic-id = <0x010016 0x25 0x0 0x0>;
+	qcom,pmic-name = "PMI632";
+};
diff --git a/arch/arm64/boot/dts/qcom/sda450-pmi632-mtp-s3.dts b/arch/arm64/boot/dts/qcom/sda450-pmi632-mtp-s3.dts
index c907977..f20c2ba 100644
--- a/arch/arm64/boot/dts/qcom/sda450-pmi632-mtp-s3.dts
+++ b/arch/arm64/boot/dts/qcom/sda450-pmi632-mtp-s3.dts
@@ -14,8 +14,8 @@
 /dts-v1/;
 
 #include "sda450.dtsi"
-#include "sdm450-pmi632-mtp-s3.dtsi"
 #include "sdm450-pmi632.dtsi"
+#include "sdm450-pmi632-mtp-s3.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDA450 + PMI632 MTP S3";
diff --git a/arch/arm64/boot/dts/qcom/sda670-camera-sensor-hdk.dtsi b/arch/arm64/boot/dts/qcom/sda670-camera-sensor-hdk.dtsi
new file mode 100644
index 0000000..484ed64
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda670-camera-sensor-hdk.dtsi
@@ -0,0 +1,412 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	qcom,camera-flash@0 {
+		cell-index = <0>;
+		reg = <0x00 0x00>;
+		compatible = "qcom,camera-flash";
+		flash-source = <&pm660l_flash0 &pm660l_flash1>;
+		torch-source = <&pm660l_torch0 &pm660l_torch1>;
+		switch-source = <&pm660l_switch0>;
+		status = "ok";
+	};
+
+	qcom,camera-flash@1 {
+		cell-index = <1>;
+		reg = <0x01 0x00>;
+		compatible = "qcom,camera-flash";
+		flash-source = <&pm660l_flash0 &pm660l_flash1>;
+		torch-source = <&pm660l_torch0 &pm660l_torch1>;
+		switch-source = <&pm660l_switch0>;
+		status = "ok";
+	};
+
+	led_flash_front: qcom,camera-flash@2 {
+		cell-index = <2>;
+		reg = <0x02 0x00>;
+		compatible = "qcom,camera-flash";
+		flash-source = <&pm660l_flash2>;
+		torch-source = <&pm660l_torch2>;
+		switch-source = <&pm660l_switch1>;
+		status = "ok";
+	};
+
+	gpio-regulator@0 {
+		compatible = "regulator-fixed";
+		reg = <0x00 0x00>;
+		regulator-name = "actuator_regulator";
+		regulator-min-microvolt = <2800000>;
+		regulator-max-microvolt = <2800000>;
+		regulator-enable-ramp-delay = <100>;
+		enable-active-high;
+		gpio = <&tlmm 27 0>;
+	};
+
+	camera_ldo: gpio-regulator@2 {
+		compatible = "regulator-fixed";
+		reg = <0x02 0x00>;
+		regulator-name = "camera_ldo";
+		regulator-min-microvolt = <1352000>;
+		regulator-max-microvolt = <1352000>;
+		regulator-enable-ramp-delay = <233>;
+		enable-active-high;
+		gpio = <&pm660l_gpios 3 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&camera_dvdd_en_default>;
+		vin-supply = <&pm660_s6>;
+	};
+
+	camera_rear_ldo: gpio-regulator@1 {
+		compatible = "regulator-fixed";
+		reg = <0x01 0x00>;
+		regulator-name = "camera_rear_ldo";
+		regulator-min-microvolt = <1352000>;
+		regulator-max-microvolt = <1352000>;
+		regulator-enable-ramp-delay = <135>;
+		enable-active-high;
+		gpio = <&pm660l_gpios 4 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&camera_rear_dvdd_en_default>;
+		vin-supply = <&pm660_s6>;
+	};
+
+	camera_vio_ldo: gpio-regulator@3 {
+		compatible = "regulator-fixed";
+		reg = <0x03 0x00>;
+		regulator-name = "camera_vio_ldo";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		regulator-enable-ramp-delay = <233>;
+		enable-active-high;
+		gpio = <&tlmm 29 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&cam_sensor_rear_vio>;
+		vin-supply = <&pm660_s4>;
+	};
+
+	camera_vana_ldo: gpio-regulator@4 {
+		compatible = "regulator-fixed";
+		reg = <0x04 0x00>;
+		regulator-name = "camera_vana_ldo";
+		regulator-min-microvolt = <2850000>;
+		regulator-max-microvolt = <2850000>;
+		regulator-enable-ramp-delay = <233>;
+		enable-active-high;
+		gpio = <&tlmm 8 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&cam_sensor_rear_vana>;
+		vin-supply = <&pm660l_bob>;
+	};
+};
+
+&cam_cci {
+	qcom,cam-res-mgr {
+		compatible = "qcom,cam-res-mgr";
+		status = "ok";
+	};
+
+	qcom,actuator@0 {
+		cell-index = <0>;
+		reg = <0x0>;
+		compatible = "qcom,actuator";
+		cci-master = <0>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
+	};
+
+	qcom,actuator@1 {
+		cell-index = <1>;
+		reg = <0x1>;
+		compatible = "qcom,actuator";
+		cci-master = <1>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
+	};
+
+	qcom,actuator@2 {
+		cell-index = <2>;
+		reg = <0x2>;
+		compatible = "qcom,actuator";
+		cci-master = <1>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
+	};
+
+	ois_rear: qcom,ois@0 {
+		cell-index = <0>;
+		reg = <0x0>;
+		compatible = "qcom,ois";
+		cci-master = <0>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
+		status = "disabled";
+	};
+
+	qcom,eeprom@0 {
+		cell-index = <0>;
+		reg = <0>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&camera_vio_ldo>;
+		cam_vana-supply = <&camera_vana_ldo>;
+		cam_vdig-supply = <&camera_rear_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1800000 2850000 1352000 0 2800000>;
+		rgltr-max-voltage = <1800000 2850000 1352000 0 2800000>;
+		rgltr-load-current = <0 80000 105000 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_active
+				&cam_sensor_rear_active>;
+		pinctrl-1 = <&cam_sensor_mclk0_suspend
+				&cam_sensor_rear_suspend>;
+		gpios = <&tlmm 13 0>,
+			<&tlmm 30 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
+					"CAM_RESET0";
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,eeprom@1 {
+		cell-index = <1>;
+		reg = <0x1>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&camera_vio_ldo>;
+		cam_vana-supply = <&camera_vana_ldo>;
+		cam_vdig-supply = <&camera_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1352000 1800000 2850000 0 2800000>;
+		rgltr-max-voltage = <1352000 1800000 2850000 0 2800000>;
+		rgltr-load-current = <105000 0 80000 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				&cam_sensor_rear2_active>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				&cam_sensor_rear2_suspend>;
+		gpios = <&tlmm 14 0>,
+			<&tlmm 28 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1";
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,eeprom@2 {
+		cell-index = <2>;
+		reg = <0x2>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&camera_vio_ldo>;
+		cam_vana-supply = <&camera_vana_ldo>;
+		cam_vdig-supply = <&camera_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1352000 1800000 2850000 0 2800000>;
+		rgltr-max-voltage = <1352000 1800000 2850000 0 2800000>;
+		rgltr-load-current = <105000 0 80000 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				&cam_sensor_rear2_active>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				&cam_sensor_rear2_suspend>;
+		gpios = <&tlmm 14 0>,
+			<&tlmm 28 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1";
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor@0 {
+		cell-index = <0>;
+		compatible = "qcom,cam-sensor";
+		reg = <0x0>;
+		csiphy-sd-index = <0>;
+		sensor-position-roll = <270>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		led-flash-src = <&led_flash_rear>;
+		actuator-src = <&actuator_rear>;
+		ois-src = <&ois_rear>;
+		eeprom-src = <&eeprom_rear>;
+		cam_vio-supply = <&camera_vio_ldo>;
+		cam_vana-supply = <&camera_vana_ldo>;
+		cam_vdig-supply = <&camera_rear_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1800000 2850000 1352000 0>;
+		rgltr-max-voltage = <1800000 2850000 1352000 0>;
+		rgltr-load-current = <0 80000 105000 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_active
+				&cam_sensor_rear_active>;
+		pinctrl-1 = <&cam_sensor_mclk0_suspend
+				&cam_sensor_rear_suspend>;
+		gpios = <&tlmm 13 0>,
+			<&tlmm 30 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
+					"CAM_RESET0";
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor@1 {
+		cell-index = <1>;
+		compatible = "qcom,cam-sensor";
+		reg = <0x1>;
+		csiphy-sd-index = <1>;
+		sensor-position-roll = <270>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		led-flash-src = <&led_flash_rear_aux>;
+		actuator-src = <&actuator_rear_aux>;
+		eeprom-src = <&eeprom_rear_aux>;
+		cam_vio-supply = <&camera_vio_ldo>;
+		cam_vana-supply = <&camera_vana_ldo>;
+		cam_vdig-supply = <&camera_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1352000 1800000 2850000 0>;
+		rgltr-max-voltage = <1352000 1800000 2850000 0>;
+		rgltr-load-current = <105000 0 80000 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				&cam_sensor_rear2_active>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				&cam_sensor_rear2_suspend>;
+		gpios = <&tlmm 14 0>,
+			<&tlmm 28 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1";
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor@2 {
+		cell-index = <2>;
+		compatible = "qcom,cam-sensor";
+		reg = <0x02>;
+		csiphy-sd-index = <2>;
+		sensor-position-roll = <270>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <0>;
+		eeprom-src = <&eeprom_front>;
+		actuator-src = <&actuator_front>;
+		led-flash-src = <&led_flash_front>;
+		cam_vio-supply = <&camera_vio_ldo>;
+		cam_vana-supply = <&camera_vana_ldo>;
+		cam_vdig-supply = <&camera_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1352000 1800000 2850000 0>;
+		rgltr-max-voltage = <1352000 1800000 2850000 0>;
+		rgltr-load-current = <105000 0 80000 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				&cam_sensor_rear2_active>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				&cam_sensor_rear2_suspend>;
+		gpios = <&tlmm 14 0>,
+			<&tlmm 28 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1";
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sda670-hdk-overlay.dts b/arch/arm64/boot/dts/qcom/sda670-hdk-overlay.dts
new file mode 100644
index 0000000..a7299a4
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda670-hdk-overlay.dts
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include "sda670-hdk.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDA670 HDK";
+	compatible = "qcom,sda670-hdk", "qcom,sda670", "qcom,hdk";
+	qcom,board-id = <0x01001F 0x00>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sda670-hdk.dts b/arch/arm64/boot/dts/qcom/sda670-hdk.dts
new file mode 100644
index 0000000..ed9eec9
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda670-hdk.dts
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sda670.dtsi"
+#include "sda670-hdk.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDA670 HDK";
+	compatible = "qcom,sda670-hdk", "qcom,sda670", "qcom,hdk";
+	qcom,board-id = <0x01001F 0x00>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sda670-hdk.dtsi b/arch/arm64/boot/dts/qcom/sda670-hdk.dtsi
new file mode 100644
index 0000000..ef8667b
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda670-hdk.dtsi
@@ -0,0 +1,106 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "sdm670-qrd.dtsi"
+#include "sdm670-external-codec.dtsi"
+#include "sda670-camera-sensor-hdk.dtsi"
+
+&dsi_dual_nt36850_truly_cmd_display {
+	/delete-property/ qcom,dsi-display-active;
+};
+
+&qrd_batterydata {
+	#include "fg-gen3-batterydata-mlp466076-3250mah.dtsi"
+};
+
+&dsi_hx8399_truly_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,panel-mode-gpio = <&tlmm 76 0>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+};
+
+&dsi_hx8399_truly_cmd_display {
+	qcom,dsi-display-active;
+};
+
+&ufsphy_mem {
+	compatible = "qcom,ufs-phy-qmp-v3";
+
+	vdda-phy-supply = <&pm660l_l1>; /* 0.88v */
+	vdda-pll-supply = <&pm660_l1>; /* 1.2v */
+	vdda-phy-max-microamp = <62900>;
+	vdda-pll-max-microamp = <18300>;
+
+	status = "ok";
+};
+
+&ufshc_mem {
+	vdd-hba-supply = <&ufs_phy_gdsc>;
+	vdd-hba-fixed-regulator;
+	vcc-supply = <&pm660l_l4>;
+	vcc-voltage-level = <2960000 2960000>;
+	vccq2-supply = <&pm660_l8>;
+	vcc-max-microamp = <600000>;
+	vccq2-max-microamp = <600000>;
+
+	qcom,vddp-ref-clk-supply = <&pm660_l1>;
+	qcom,vddp-ref-clk-max-microamp = <100>;
+
+	status = "ok";
+};
+
+&qusb_phy0 {
+	qcom,qusb-phy-host-init-seq =
+			/* <value reg_offset> */
+			   <0x23 0x210 /* PWR_CTRL1 */
+			    0x03 0x04  /* PLL_ANALOG_CONTROLS_TWO */
+			    0x7c 0x18c /* PLL_CLOCK_INVERTERS */
+			    0x80 0x2c  /* PLL_CMODE */
+			    0x0a 0x184 /* PLL_LOCK_DELAY */
+			    0x19 0xb4  /* PLL_DIGITAL_TIMERS_TWO */
+			    0x40 0x194 /* PLL_BIAS_CONTROL_1 */
+			    0x20 0x198 /* PLL_BIAS_CONTROL_2 */
+			    0x21 0x214 /* PWR_CTRL2 */
+			    0x00 0x220 /* IMP_CTRL1 */
+			    0x58 0x224 /* IMP_CTRL2 */
+			    0x77 0x240 /* TUNE1 */
+			    0x29 0x244 /* TUNE2 */
+			    0xca 0x248 /* TUNE3 */
+			    0x04 0x24c /* TUNE4 */
+			    0x03 0x250 /* TUNE5 */
+			    0x00 0x23c /* CHG_CTRL2 */
+			    0x22 0x210>; /* PWR_CTRL1 */
+	qcom,qusb-phy-init-seq =
+			/* <value reg_offset> */
+			   <0x23 0x210 /* PWR_CTRL1 */
+			    0x03 0x04  /* PLL_ANALOG_CONTROLS_TWO */
+			    0x7c 0x18c /* PLL_CLOCK_INVERTERS */
+			    0x80 0x2c  /* PLL_CMODE */
+			    0x0a 0x184 /* PLL_LOCK_DELAY */
+			    0x19 0xb4  /* PLL_DIGITAL_TIMERS_TWO */
+			    0x40 0x194 /* PLL_BIAS_CONTROL_1 */
+			    0x20 0x198 /* PLL_BIAS_CONTROL_2 */
+			    0x21 0x214 /* PWR_CTRL2 */
+			    0x25 0x220 /* IMP_CTRL1 */
+			    0x58 0x224 /* IMP_CTRL2 */
+			    0x65 0x240 /* TUNE1 */
+			    0x29 0x244 /* TUNE2 */
+			    0xca 0x248 /* TUNE3 */
+			    0x04 0x24c /* TUNE4 */
+			    0x03 0x250 /* TUNE5 */
+			    0x00 0x23c /* CHG_CTRL2 */
+			    0x22 0x210>; /* PWR_CTRL1 */
+};
diff --git a/arch/arm64/boot/dts/qcom/sda670.dtsi b/arch/arm64/boot/dts/qcom/sda670.dtsi
index d19aac3..0861718 100644
--- a/arch/arm64/boot/dts/qcom/sda670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sda670.dtsi
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -17,3 +17,13 @@
 	model = "Qualcomm Technologies, Inc. SDA670";
 	qcom,msm-id = <337 0x0>;
 };
+
+&soc {
+	qcom,rmnet-ipa {
+		status = "disabled";
+	};
+};
+
+&ipa_hw {
+	status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/qcom/sda845-sdxpoorwills.dtsi b/arch/arm64/boot/dts/qcom/sda845-sdxpoorwills.dtsi
index 0b678a8..c4b4a50 100644
--- a/arch/arm64/boot/dts/qcom/sda845-sdxpoorwills.dtsi
+++ b/arch/arm64/boot/dts/qcom/sda845-sdxpoorwills.dtsi
@@ -32,7 +32,6 @@
 			/* MDM PON conrol*/
 			pins = "gpio10";
 			function = "normal";
-			output-low;
 			power-source = <0>;
 		};
 	};
diff --git a/arch/arm64/boot/dts/qcom/sda845-svr-pinctrl-overlay.dtsi b/arch/arm64/boot/dts/qcom/sda845-svr-pinctrl-overlay.dtsi
index c76ef2b..19d1370 100644
--- a/arch/arm64/boot/dts/qcom/sda845-svr-pinctrl-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sda845-svr-pinctrl-overlay.dtsi
@@ -10,7 +10,7 @@
  * GNU General Public License for more details.
  */
 
-&cam_sensor_mclk0_active{
+&cam_sensor_mclk0_active {
 	/* MCLK0 */
 	mux {
 		pins = "gpio13";
@@ -20,7 +20,7 @@
 	config {
 		pins = "gpio13";
 		bias-disable; /* No PULL */
-		drive-strength = <8>; /* 2 MA */
+		drive-strength = <8>; /* 8 MA */
 	};
 };
 
@@ -34,19 +34,75 @@
 	config {
 		pins = "gpio13";
 		bias-pull-down; /* PULL DOWN */
-		drive-strength = <8>; /* 2 MA */
+		drive-strength = <8>; /* 8 MA */
+	};
+};
+
+&cam_sensor_mclk1_active {
+	/* MCLK1 */
+	mux {
+		pins = "gpio14";
+		function = "cam_mclk";
+	};
+
+	config {
+		pins = "gpio14";
+		bias-disable; /* No PULL */
+		drive-strength = <8>; /* 8 MA */
+	};
+};
+
+&cam_sensor_mclk1_suspend {
+	/* MCLK1 */
+	mux {
+		pins = "gpio14";
+		function = "cam_mclk";
+	};
+
+	config {
+		pins = "gpio14";
+		bias-pull-down; /* PULL DOWN */
+		drive-strength = <8>; /* 8 MA */
+	};
+};
+
+&cam_sensor_mclk2_active {
+	/* MCLK2 */
+	mux {
+		pins = "gpio15";
+		function = "cam_mclk";
+	};
+
+	config {
+		pins = "gpio15";
+		bias-disable; /* No PULL */
+		drive-strength = <8>; /* 8 MA */
+	};
+};
+
+&cam_sensor_mclk2_suspend {
+	/* MCLK2 */
+	mux {
+		pins = "gpio15";
+		function = "cam_mclk";
+	};
+
+	config {
+		pins = "gpio15";
+		bias-pull-down; /* PULL DOWN */
+		drive-strength = <8>; /* 8 MA */
 	};
 };
 
 &cam_sensor_rear_active {
 	/* RESET, AVDD LDO */
 	mux {
-		pins = "gpio8","gpio79";
+		pins = "gpio8", "gpio29";
 		function = "gpio";
 	};
 
 	config {
-		pins = "gpio8","gpio79";
+		pins = "gpio8", "gpio29";
 		bias-disable; /* No PULL */
 		drive-strength = <2>; /* 2 MA */
 	};
@@ -55,43 +111,115 @@
 &cam_sensor_rear_suspend {
 	/* RESET, AVDD LDO */
 	mux {
-		pins = "gpio8","gpio79";
+		pins = "gpio8", "gpio29";
 		function = "gpio";
 	};
 
 	config {
-		pins = "gpio8","gpio79";
+		pins = "gpio8", "gpio29";
 		bias-pull-down; /* PULL DOWN */
 		drive-strength = <2>; /* 2 MA */
 		output-low;
 	};
 };
 
-&cam_sensor_front_active{
-	/* RESET  AVDD_LDO*/
+&cam_sensor_front_active {
+	/* RESET  AVDD_LDO */
 	mux {
-		pins = "gpio26", "gpio8";
+		pins = "gpio26", "gpio12";
 		function = "gpio";
 	};
 
 	config {
-		pins = "gpio26", "gpio8";
+		pins = "gpio26", "gpio12";
 		bias-disable; /* No PULL */
 		drive-strength = <2>; /* 2 MA */
 	};
 };
 
-&cam_sensor_front_suspend{
+&cam_sensor_front_suspend {
 	/* RESET */
 	mux {
-		pins = "gpio26", "gpio8";
+		pins = "gpio26", "gpio12";
 		function = "gpio";
 	};
 
 	config {
-		pins = "gpio26", "gpio8";
+		pins = "gpio26", "gpio12";
 		bias-pull-down; /* PULL DOWN */
 		drive-strength = <2>; /* 2 MA */
 		output-low;
 	};
 };
+
+&cam_sensor_iris_active {
+	/* RESET  AVDD_LDO */
+	mux {
+		pins = "gpio21", "gpio122";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio21", "gpio122";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+&cam_sensor_iris_suspend {
+	/* RESET  AVDD_LDO */
+	mux {
+		pins = "gpio21", "gpio122";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio21", "gpio122";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+		output-low;
+	};
+};
+
+&cam_sensor_rear_vana {
+	/* AVDD_LDO */
+	mux {
+		pins = "gpio7";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio7";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+&cam_res_mgr_active {
+	/* AVDD_LDO */
+	mux {
+		pins = "gpio79";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio79";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+&cam_res_mgr_suspend {
+	/* AVDD_LDO */
+	mux {
+		pins = "gpio79";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio79";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+		output-low;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sda845-svr.dtsi b/arch/arm64/boot/dts/qcom/sda845-svr.dtsi
index ce62781..1062ca6 100644
--- a/arch/arm64/boot/dts/qcom/sda845-svr.dtsi
+++ b/arch/arm64/boot/dts/qcom/sda845-svr.dtsi
@@ -14,9 +14,12 @@
 #include "sdm845-pinctrl-overlay.dtsi"
 #include "sda845-svr-pinctrl-overlay.dtsi"
 #include "sdm845-camera-sensor-svr.dtsi"
-#include "smb1355.dtsi"
 #include <dt-bindings/clock/mdss-10nm-pll-clk.h>
 
+&qupv3_se10_i2c {
+#include "smb1355.dtsi"
+};
+
 &vendor {
 	bluetooth: bt_wcn3990 {
 		compatible = "qca,wcn3990";
@@ -249,6 +252,24 @@
 	#cooling-cells = <2>;
 };
 
+&pm8998_l10 {
+	regulator-min-microvolt = <1800000>;
+	regulator-max-microvolt = <1800000>;
+	qcom,init-voltage = <1800000>;
+};
+
+&pm8998_l15 {
+	regulator-min-microvolt = <1504000>;
+	regulator-max-microvolt = <1504000>;
+	qcom,init-voltage = <1504000>;
+};
+
+&pm8998_l16 {
+	regulator-min-microvolt = <3312000>;
+	regulator-max-microvolt = <3312000>;
+	qcom,init-voltage = <3312000>;
+};
+
 &ufsphy_mem {
 	compatible = "qcom,ufs-phy-qmp-v3";
 
diff --git a/arch/arm64/boot/dts/qcom/sdm429-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm429-cdp-overlay.dts
index 93a9ae9..5f0db81 100644
--- a/arch/arm64/boot/dts/qcom/sdm429-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm429-cdp-overlay.dts
@@ -14,13 +14,9 @@
 /dts-v1/;
 /plugin/;
 
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/clock/msm-clocks-8953.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
 #include "sdm429-cdp.dtsi"
 
 / {
 	model = "CDP";
 	qcom,board-id = <1 3>;
-	qcom,msm-id = <354 0x0>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm429-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm429-mtp-overlay.dts
index 3a339da..571f0fc 100644
--- a/arch/arm64/boot/dts/qcom/sdm429-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm429-mtp-overlay.dts
@@ -14,13 +14,9 @@
 /dts-v1/;
 /plugin/;
 
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/clock/msm-clocks-8953.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
 #include "sdm429-mtp.dtsi"
 
 / {
 	model = "MTP";
 	qcom,board-id = <8 2>;
-	qcom,msm-id = <354 0x0>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm429-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sdm429-qrd-overlay.dts
index 8abccb7..8e12295 100644
--- a/arch/arm64/boot/dts/qcom/sdm429-qrd-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm429-qrd-overlay.dts
@@ -14,13 +14,9 @@
 /dts-v1/;
 /plugin/;
 
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/clock/msm-clocks-8953.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
 #include "sdm429-qrd.dtsi"
 
 / {
 	model = "QRD";
 	qcom,board-id = <0xb 3>;
-	qcom,msm-id = <354 0x0>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm429.dtsi b/arch/arm64/boot/dts/qcom/sdm429.dtsi
index 19df054..b52ee2d 100644
--- a/arch/arm64/boot/dts/qcom/sdm429.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm429.dtsi
@@ -146,6 +146,16 @@
 };
 
 &soc {
+	devfreq_spdm_cpu {
+		status = "disabled";
+	};
+
+	devfreq_spdm_gov {
+		status = "disabled";
+	};
+};
+
+&soc {
 	/delete-node/ qcom,cpu-clock-8939@b111050;
 	clock_cpu: qcom,cpu-clock-8939@b111050 {
 		compatible = "qcom,cpu-clock-sdm429";
diff --git a/arch/arm64/boot/dts/qcom/sdm439-audio.dtsi b/arch/arm64/boot/dts/qcom/sdm439-audio.dtsi
index dba56a4..a6b2371 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-audio.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-audio.dtsi
@@ -25,6 +25,65 @@
 		qcom,msm-vdd-wsa-switch-voltage = <1800000>;
 		qcom,msm-vdd-wsa-switch-current = <10000>;
 	};
+
+	clock_audio_native: audio_ext_clk_native {
+		status = "disabled";
+		compatible = "qcom,audio-ref-clk";
+		#clock-cells = <1>;
+		qcom,codec-mclk-clk-freq = <11289600>;
+		qcom,audio-ref-clk-gpio = <&tlmm 66 0>;
+		qcom,lpass-mclk-id = "pri_mclk";
+		pinctrl-names = "sleep", "active";
+		pinctrl-0 = <&cdc_mclk2_sleep>;
+		pinctrl-1 = <&cdc_mclk2_active>;
+	};
+};
+
+
+&clock_audio {
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&tasha_mclk_default>;
+	pinctrl-1 = <&tasha_mclk_default>;
+	qcom,audio-ref-clk-gpio = <&pm8953_gpios 1 0>;
+};
+
+&wcd9335 {
+	cdc-vdd-buck-supply = <&dbu1>;
+	qcom,cdc-vdd-buck-voltage = <1800000 1800000>;
+	qcom,cdc-vdd-buck-current = <650000>;
+
+	cdc-buck-sido-supply = <&dbu1>;
+	qcom,cdc-buck-sido-voltage = <1800000 1800000>;
+	qcom,cdc-buck-sido-current = <150000>;
+
+	cdc-vdd-tx-h-supply = <&dbu1>;
+	qcom,cdc-vdd-tx-h-voltage = <1800000 1800000>;
+	qcom,cdc-vdd-tx-h-current = <25000>;
+
+	cdc-vdd-rx-h-supply = <&dbu1>;
+	qcom,cdc-vdd-rx-h-voltage = <1800000 1800000>;
+	qcom,cdc-vdd-rx-h-current = <25000>;
+
+	cdc-vdd-px-supply = <&dbu1>;
+	qcom,cdc-vdd-px-voltage = <1800000 1800000>;
+	qcom,cdc-vdd-px-current = <10000>;
+
+	cdc-vdd-mic-bias-supply = <&pm8953_l13>;
+	qcom,cdc-vdd-mic-bias-voltage = <3075000 3075000>;
+	qcom,cdc-vdd-mic-bias-current = <15000>;
+};
+
+&pm8953_gpios {
+	tasha_mclk {
+		tasha_mclk_default: tasha_mclk_default {
+			pins = "gpio1";
+			function = "func1";
+			qcom,drive-strength = <2>;
+			power-source = <0>;
+			bias-disable;
+			output-low;
+		};
+	};
 };
 
 &pm8953_1 {
@@ -77,7 +136,7 @@
 		qcom,cdc-vdd-pa-current = <260000>;
 
 		cdc-vdd-mic-bias-supply = <&pm8953_l13>;
-		qcom,cdc-vdd-mic-bias-voltage = <3125000 3125000>;
+		qcom,cdc-vdd-mic-bias-voltage = <3075000 3075000>;
 		qcom,cdc-vdd-mic-bias-current = <5000>;
 
 		qcom,cdc-mclk-clk-rate = <9600000>;
diff --git a/arch/arm64/boot/dts/qcom/sdm439-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm439-camera-sensor-cdp.dtsi
index 5e2c740..eae8c56 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-camera-sensor-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-camera-sensor-cdp.dtsi
@@ -11,6 +11,8 @@
  * GNU General Public License for more details.
  */
 
+#include <dt-bindings/clock/msm-clocks-8952.h>
+
 &cci {
 	actuator0: qcom,actuator@0 {
 		cell-index = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm439-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm439-camera-sensor-mtp.dtsi
index 5e2c740..eae8c56 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-camera-sensor-mtp.dtsi
@@ -11,6 +11,8 @@
  * GNU General Public License for more details.
  */
 
+#include <dt-bindings/clock/msm-clocks-8952.h>
+
 &cci {
 	actuator0: qcom,actuator@0 {
 		cell-index = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm439-camera-sensor-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm439-camera-sensor-qrd.dtsi
index c2c9c79..ef0e977 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-camera-sensor-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-camera-sensor-qrd.dtsi
@@ -11,6 +11,8 @@
  * GNU General Public License for more details.
  */
 
+#include <dt-bindings/clock/msm-clocks-8952.h>
+
 &cci {
 	actuator0: qcom,actuator@0 {
 		cell-index = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm439-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm439-cdp-overlay.dts
index 6d6f99b..87239b9 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm439-cdp-overlay.dts
@@ -14,13 +14,9 @@
 /dts-v1/;
 /plugin/;
 
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/clock/msm-clocks-8953.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
 #include "sdm439-cdp.dtsi"
 
 / {
 	model = "CDP";
 	qcom,board-id = <1 2>;
-	qcom,msm-id = <353 0x0>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm439-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm439-cdp.dtsi
index fd66f4b..cef5534 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-cdp.dtsi
@@ -233,12 +233,25 @@
 
 &dsi_hx8399c_hd_vid {
 	/delete-property/ qcom,mdss-dsi-panel-timings;
-	qcom,mdss-dsi-panel-timings-phy-12nm = [08 06 0a 02 00 04 02 08];
+	qcom,mdss-dsi-panel-timings-phy-12nm = [09 06 0a 02 00 05 02 08];
 	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
 	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
 	qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
 	qcom,mdss-dsi-bl-pmic-bank-select = <0>;
 	qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
+	qcom,esd-check-enabled;
+	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+	qcom,mdss-dsi-panel-status-command-state = "dsi_lp_mode";
+	qcom,mdss-dsi-panel-status-value = <0x9d 0x9d 0x9d 0x9d>;
+	qcom,mdss-dsi-panel-on-check-value = <0x9d 0x9d 0x9d 0x9d>;
+	qcom,mdss-dsi-panel-status-read-length = <4>;
+	qcom,mdss-dsi-panel-max-error-count = <3>;
+	qcom,mdss-dsi-min-refresh-rate = <48>;
+	qcom,mdss-dsi-max-refresh-rate = <60>;
+	qcom,mdss-dsi-pan-enable-dynamic-fps;
+	qcom,mdss-dsi-pan-fps-update =
+		"dfps_immediate_porch_mode_vfp";
 };
 
 &dsi_nt35695b_truly_fhd_cmd {
@@ -555,3 +568,27 @@
 	qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
 	/delete-node/ qcom,mdss-dsi-display-timings;
 };
+
+&dsi_truly_1080_vid {
+	/delete-property/ qcom,mdss-dsi-panel-timings;
+	qcom,mdss-dsi-panel-timings-phy-12nm = [17 0a 0f 06 02 08 06 0e];
+	qcom,mdss-dsi-t-clk-post = <0x02>;
+	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
+	qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
+	qcom,mdss-dsi-bl-pmic-bank-select = <0>;
+	qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+};
+
+&dsi_truly_1080_cmd {
+	/delete-property/ qcom,mdss-dsi-panel-timings;
+	qcom,mdss-dsi-panel-timings-phy-12nm = [17 0a 0f 06 02 08 06 0e];
+	qcom,mdss-dsi-t-clk-post = <0x02>;
+	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
+	qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
+	qcom,mdss-dsi-bl-pmic-bank-select = <0>;
+	qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm439-ext-audio-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm439-ext-audio-mtp.dtsi
new file mode 100644
index 0000000..a74fb75
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm439-ext-audio-mtp.dtsi
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&int_codec {
+	status = "disabled";
+};
+
+&wsa881x_i2c_f {
+	status = "disabled";
+};
+
+&wsa881x_i2c_45 {
+	status = "disabled";
+};
+
+&cdc_pri_mi2s_gpios {
+	status = "disabled";
+};
+
+&wsa881x_analog_vi_gpio {
+	status = "disabled";
+};
+
+&wsa881x_analog_clk_gpio {
+	status = "disabled";
+};
+
+&wsa881x_analog_reset_gpio {
+	status = "disabled";
+};
+
+&slim_msm {
+	status = "okay";
+};
+
+&dai_slim {
+	status = "okay";
+};
+
+&wcd9xxx_intc {
+	status = "okay";
+};
+
+&clock_audio {
+	status = "okay";
+};
+
+&wcd9335 {
+	status = "okay";
+};
+
+&cdc_us_euro_sw {
+	status = "okay";
+};
+
+&cdc_quin_mi2s_gpios {
+	status = "okay";
+};
+
+&wcd_rst_gpio {
+	status = "okay";
+};
+
+&ext_codec {
+	status = "okay";
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm439-external-codec-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm439-external-codec-mtp-overlay.dts
new file mode 100644
index 0000000..37741b2
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm439-external-codec-mtp-overlay.dts
@@ -0,0 +1,24 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+/plugin/;
+
+#include "sdm439-mtp.dtsi"
+#include "sdm439-external-codec.dtsi"
+
+/ {
+	model = "MTP";
+	qcom,board-id = <8 3>;
+	qcom,msm-id = <353 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm439-external-codec-mtp.dts b/arch/arm64/boot/dts/qcom/sdm439-external-codec-mtp.dts
new file mode 100644
index 0000000..e2a86bb
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm439-external-codec-mtp.dts
@@ -0,0 +1,25 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm439.dtsi"
+#include "sdm439-mtp.dtsi"
+#include "sdm439-external-codec.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM439 Audio Codec MTP";
+	compatible = "qcom,sdm439-mtp", "qcom,sdm439", "qcom,mtp";
+	qcom,board-id = <8 3>;
+	qcom,pmic-id = <0x010016 0x25 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm439-external-codec.dtsi b/arch/arm64/boot/dts/qcom/sdm439-external-codec.dtsi
new file mode 100644
index 0000000..83864ef
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm439-external-codec.dtsi
@@ -0,0 +1,12 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "sdm439-ext-audio-mtp.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm439-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm439-mtp-overlay.dts
index df8a0d7..a7c5f4e 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm439-mtp-overlay.dts
@@ -14,13 +14,9 @@
 /dts-v1/;
 /plugin/;
 
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/clock/msm-clocks-8953.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
 #include "sdm439-mtp.dtsi"
 
 / {
 	model = "MTP";
 	qcom,board-id = <8 1>;
-	qcom,msm-id = <353 0x0>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm439-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm439-mtp.dtsi
index 29e0d72..913d079 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-mtp.dtsi
@@ -208,10 +208,441 @@
 
 &dsi_hx8399c_hd_vid {
 	/delete-property/ qcom,mdss-dsi-panel-timings;
-	qcom,mdss-dsi-panel-timings-phy-12nm = [08 06 0a 02 00 04 02 08];
+	qcom,mdss-dsi-panel-timings-phy-12nm = [09 06 0a 02 00 05 02 08];
 	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
 	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
 	qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
 	qcom,mdss-dsi-bl-pmic-bank-select = <0>;
 	qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
+	qcom,esd-check-enabled;
+	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+	qcom,mdss-dsi-panel-status-command-state = "dsi_lp_mode";
+	qcom,mdss-dsi-panel-status-value = <0x9d 0x9d 0x9d 0x9d>;
+	qcom,mdss-dsi-panel-on-check-value = <0x9d 0x9d 0x9d 0x9d>;
+	qcom,mdss-dsi-panel-status-read-length = <4>;
+	qcom,mdss-dsi-panel-max-error-count = <3>;
+	qcom,mdss-dsi-min-refresh-rate = <48>;
+	qcom,mdss-dsi-max-refresh-rate = <60>;
+	qcom,mdss-dsi-pan-enable-dynamic-fps;
+	qcom,mdss-dsi-pan-fps-update =
+		"dfps_immediate_porch_mode_vfp";
+};
+
+&dsi_nt35695b_truly_fhd_cmd {
+	qcom,mdss-dsi-panel-width = <1080>;
+	qcom,mdss-dsi-panel-height = <1920>;
+	qcom,mdss-dsi-h-front-porch = <120>;
+	qcom,mdss-dsi-h-back-porch = <60>;
+	qcom,mdss-dsi-h-pulse-width = <12>;
+	qcom,mdss-dsi-h-sync-skew = <0>;
+	qcom,mdss-dsi-v-back-porch = <2>;
+	qcom,mdss-dsi-v-front-porch = <12>;
+	qcom,mdss-dsi-v-pulse-width = <2>;
+	qcom,mdss-dsi-h-sync-pulse = <0>;
+	qcom,mdss-dsi-h-left-border = <0>;
+	qcom,mdss-dsi-h-right-border = <0>;
+	qcom,mdss-dsi-v-top-border = <0>;
+	qcom,mdss-dsi-v-bottom-border = <0>;
+	qcom,mdss-dsi-panel-framerate = <60>;
+	qcom,mdss-dsi-on-command =
+		[15 01 00 00 10 00 02 ff 20
+		15 01 00 00 00 00 02 fb 01
+		15 01 00 00 00 00 02 00 01
+		15 01 00 00 00 00 02 01 55
+		15 01 00 00 00 00 02 02 45
+		15 01 00 00 00 00 02 03 55
+		15 01 00 00 00 00 02 05 50
+		15 01 00 00 00 00 02 06 a8
+		15 01 00 00 00 00 02 07 ad
+		15 01 00 00 00 00 02 08 0c
+		15 01 00 00 00 00 02 0b aa
+		15 01 00 00 00 00 02 0c aa
+		15 01 00 00 00 00 02 0e b0
+		15 01 00 00 00 00 02 0f b3
+		15 01 00 00 00 00 02 11 28
+		15 01 00 00 00 00 02 12 10
+		15 01 00 00 00 00 02 13 01
+		15 01 00 00 00 00 02 14 4a
+		15 01 00 00 00 00 02 15 12
+		15 01 00 00 00 00 02 16 12
+		15 01 00 00 00 00 02 30 01
+		15 01 00 00 00 00 02 72 11
+		15 01 00 00 00 00 02 58 82
+		15 01 00 00 00 00 02 59 00
+		15 01 00 00 00 00 02 5a 02
+		15 01 00 00 00 00 02 5b 00
+		15 01 00 00 00 00 02 5c 82
+		15 01 00 00 00 00 02 5d 80
+		15 01 00 00 00 00 02 5e 02
+		15 01 00 00 00 00 02 5f 00
+		15 01 00 00 00 00 02 ff 24
+		15 01 00 00 00 00 02 fb 01
+		15 01 00 00 00 00 02 00 01
+		15 01 00 00 00 00 02 01 0b
+		15 01 00 00 00 00 02 02 0c
+		15 01 00 00 00 00 02 03 89
+		15 01 00 00 00 00 02 04 8a
+		15 01 00 00 00 00 02 05 0f
+		15 01 00 00 00 00 02 06 10
+		15 01 00 00 00 00 02 07 10
+		15 01 00 00 00 00 02 08 1c
+		15 01 00 00 00 00 02 09 00
+		15 01 00 00 00 00 02 0a 00
+		15 01 00 00 00 00 02 0b 00
+		15 01 00 00 00 00 02 0c 00
+		15 01 00 00 00 00 02 0d 13
+		15 01 00 00 00 00 02 0e 15
+		15 01 00 00 00 00 02 0f 17
+		15 01 00 00 00 00 02 10 01
+		15 01 00 00 00 00 02 11 0b
+		15 01 00 00 00 00 02 12 0c
+		15 01 00 00 00 00 02 13 89
+		15 01 00 00 00 00 02 14 8a
+		15 01 00 00 00 00 02 15 0f
+		15 01 00 00 00 00 02 16 10
+		15 01 00 00 00 00 02 17 10
+		15 01 00 00 00 00 02 18 1c
+		15 01 00 00 00 00 02 19 00
+		15 01 00 00 00 00 02 1a 00
+		15 01 00 00 00 00 02 1b 00
+		15 01 00 00 00 00 02 1c 00
+		15 01 00 00 00 00 02 1d 13
+		15 01 00 00 00 00 02 1e 15
+		15 01 00 00 00 00 02 1f 17
+		15 01 00 00 00 00 02 20 00
+		15 01 00 00 00 00 02 21 01
+		15 01 00 00 00 00 02 22 00
+		15 01 00 00 00 00 02 23 40
+		15 01 00 00 00 00 02 24 40
+		15 01 00 00 00 00 02 25 6d
+		15 01 00 00 00 00 02 26 40
+		15 01 00 00 00 00 02 27 40
+		15 01 00 00 00 00 02 29 d8
+		15 01 00 00 00 00 02 2a 2a
+		15 01 00 00 00 00 02 4b 03
+		15 01 00 00 00 00 02 4c 11
+		15 01 00 00 00 00 02 4d 10
+		15 01 00 00 00 00 02 4e 01
+		15 01 00 00 00 00 02 4f 01
+		15 01 00 00 00 00 02 50 10
+		15 01 00 00 00 00 02 51 00
+		15 01 00 00 00 00 02 52 80
+		15 01 00 00 00 00 02 53 00
+		15 01 00 00 00 00 02 54 07
+		15 01 00 00 00 00 02 55 25
+		15 01 00 00 00 00 02 56 00
+		15 01 00 00 00 00 02 58 07
+		15 01 00 00 00 00 02 5b 43
+		15 01 00 00 00 00 02 5c 00
+		15 01 00 00 00 00 02 5f 73
+		15 01 00 00 00 00 02 60 73
+		15 01 00 00 00 00 02 63 22
+		15 01 00 00 00 00 02 64 00
+		15 01 00 00 00 00 02 67 08
+		15 01 00 00 00 00 02 68 04
+		15 01 00 00 00 00 02 7a 80
+		15 01 00 00 00 00 02 7b 91
+		15 01 00 00 00 00 02 7c d8
+		15 01 00 00 00 00 02 7d 60
+		15 01 00 00 00 00 02 93 06
+		15 01 00 00 00 00 02 94 06
+		15 01 00 00 00 00 02 8a 00
+		15 01 00 00 00 00 02 9b 0f
+		15 01 00 00 00 00 02 b3 c0
+		15 01 00 00 00 00 02 b4 00
+		15 01 00 00 00 00 02 b5 00
+		15 01 00 00 00 00 02 b6 21
+		15 01 00 00 00 00 02 b7 22
+		15 01 00 00 00 00 02 b8 07
+		15 01 00 00 00 00 02 b9 07
+		15 01 00 00 00 00 02 ba 22
+		15 01 00 00 00 00 02 bd 20
+		15 01 00 00 00 00 02 be 07
+		15 01 00 00 00 00 02 bf 07
+		15 01 00 00 00 00 02 c1 6d
+		15 01 00 00 00 00 02 c4 24
+		15 01 00 00 00 00 02 e3 00
+		15 01 00 00 00 00 02 ec 00
+		15 01 00 00 00 00 02 ff 10
+		15 01 00 00 00 00 02 bb 10
+		15 01 00 00 00 00 02 35 00
+		05 01 00 00 78 00 02 11 00
+		05 01 00 00 78 00 02 29 00];
+	qcom,mdss-dsi-off-command = [05 01 00 00 14
+		00 02 28 00 05 01 00 00 78 00 02 10 00];
+	qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+	qcom,mdss-dsi-off-command-state = "dsi_lp_mode";
+	qcom,mdss-dsi-panel-timings-phy-12nm = [17 0a 0f 06 03 08 06 0e];
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
+	qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
+	qcom,mdss-dsi-bl-pmic-bank-select = <0>;
+	qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
+	qcom,ulps-enabled;
+	qcom,esd-check-enabled;
+	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+	qcom,mdss-dsi-panel-status-command-state = "dsi_lp_mode";
+	qcom,mdss-dsi-panel-status-value = <0x9c>;
+	qcom,mdss-dsi-panel-on-check-value = <0x9c>;
+	qcom,mdss-dsi-panel-status-read-length = <1>;
+	qcom,mdss-dsi-panel-max-error-count = <3>;
+	/delete-node/ qcom,mdss-dsi-display-timings;
+};
+
+&dsi_nt35695b_truly_fhd_video {
+	qcom,mdss-dsi-panel-width = <1080>;
+	qcom,mdss-dsi-panel-height = <1920>;
+	qcom,mdss-dsi-h-front-porch = <120>;
+	qcom,mdss-dsi-h-back-porch = <60>;
+	qcom,mdss-dsi-h-pulse-width = <12>;
+	qcom,mdss-dsi-h-sync-skew = <0>;
+	qcom,mdss-dsi-h-sync-pulse = <0>;
+	qcom,mdss-dsi-v-back-porch = <2>;
+	qcom,mdss-dsi-v-front-porch = <12>;
+	qcom,mdss-dsi-v-pulse-width = <2>;
+	qcom,mdss-dsi-h-left-border = <0>;
+	qcom,mdss-dsi-h-right-border = <0>;
+	qcom,mdss-dsi-v-top-border = <0>;
+	qcom,mdss-dsi-v-bottom-border = <0>;
+	qcom,mdss-dsi-panel-framerate = <60>;
+	qcom,mdss-dsi-on-command =
+		[15 01 00 00 10 00 02 ff 20
+		15 01 00 00 00 00 02 fb 01
+		15 01 00 00 00 00 02 00 01
+		15 01 00 00 00 00 02 01 55
+		15 01 00 00 00 00 02 02 45
+		15 01 00 00 00 00 02 03 55
+		15 01 00 00 00 00 02 05 50
+		15 01 00 00 00 00 02 06 a8
+		15 01 00 00 00 00 02 07 ad
+		15 01 00 00 00 00 02 08 0c
+		15 01 00 00 00 00 02 0b aa
+		15 01 00 00 00 00 02 0c aa
+		15 01 00 00 00 00 02 0e b0
+		15 01 00 00 00 00 02 0f b3
+		15 01 00 00 00 00 02 11 28
+		15 01 00 00 00 00 02 12 10
+		15 01 00 00 00 00 02 13 01
+		15 01 00 00 00 00 02 14 4a
+		15 01 00 00 00 00 02 15 12
+		15 01 00 00 00 00 02 16 12
+		15 01 00 00 00 00 02 30 01
+		15 01 00 00 00 00 02 72 11
+		15 01 00 00 00 00 02 58 82
+		15 01 00 00 00 00 02 59 00
+		15 01 00 00 00 00 02 5a 02
+		15 01 00 00 00 00 02 5b 00
+		15 01 00 00 00 00 02 5c 82
+		15 01 00 00 00 00 02 5d 80
+		15 01 00 00 00 00 02 5e 02
+		15 01 00 00 00 00 02 5f 00
+		15 01 00 00 00 00 02 ff 24
+		15 01 00 00 00 00 02 fb 01
+		15 01 00 00 00 00 02 00 01
+		15 01 00 00 00 00 02 01 0b
+		15 01 00 00 00 00 02 02 0c
+		15 01 00 00 00 00 02 03 89
+		15 01 00 00 00 00 02 04 8a
+		15 01 00 00 00 00 02 05 0f
+		15 01 00 00 00 00 02 06 10
+		15 01 00 00 00 00 02 07 10
+		15 01 00 00 00 00 02 08 1c
+		15 01 00 00 00 00 02 09 00
+		15 01 00 00 00 00 02 0a 00
+		15 01 00 00 00 00 02 0b 00
+		15 01 00 00 00 00 02 0c 00
+		15 01 00 00 00 00 02 0d 13
+		15 01 00 00 00 00 02 0e 15
+		15 01 00 00 00 00 02 0f 17
+		15 01 00 00 00 00 02 10 01
+		15 01 00 00 00 00 02 11 0b
+		15 01 00 00 00 00 02 12 0c
+		15 01 00 00 00 00 02 13 89
+		15 01 00 00 00 00 02 14 8a
+		15 01 00 00 00 00 02 15 0f
+		15 01 00 00 00 00 02 16 10
+		15 01 00 00 00 00 02 17 10
+		15 01 00 00 00 00 02 18 1c
+		15 01 00 00 00 00 02 19 00
+		15 01 00 00 00 00 02 1a 00
+		15 01 00 00 00 00 02 1b 00
+		15 01 00 00 00 00 02 1c 00
+		15 01 00 00 00 00 02 1d 13
+		15 01 00 00 00 00 02 1e 15
+		15 01 00 00 00 00 02 1f 17
+		15 01 00 00 00 00 02 20 00
+		15 01 00 00 00 00 02 21 01
+		15 01 00 00 00 00 02 22 00
+		15 01 00 00 00 00 02 23 40
+		15 01 00 00 00 00 02 24 40
+		15 01 00 00 00 00 02 25 6d
+		15 01 00 00 00 00 02 26 40
+		15 01 00 00 00 00 02 27 40
+		15 01 00 00 00 00 02 29 d8
+		15 01 00 00 00 00 02 2a 2a
+		15 01 00 00 00 00 02 4b 03
+		15 01 00 00 00 00 02 4c 11
+		15 01 00 00 00 00 02 4d 10
+		15 01 00 00 00 00 02 4e 01
+		15 01 00 00 00 00 02 4f 01
+		15 01 00 00 00 00 02 50 10
+		15 01 00 00 00 00 02 51 00
+		15 01 00 00 00 00 02 52 80
+		15 01 00 00 00 00 02 53 00
+		15 01 00 00 00 00 02 54 07
+		15 01 00 00 00 00 02 55 25
+		15 01 00 00 00 00 02 56 00
+		15 01 00 00 00 00 02 58 07
+		15 01 00 00 00 00 02 5b 43
+		15 01 00 00 00 00 02 5c 00
+		15 01 00 00 00 00 02 5f 73
+		15 01 00 00 00 00 02 60 73
+		15 01 00 00 00 00 02 63 22
+		15 01 00 00 00 00 02 64 00
+		15 01 00 00 00 00 02 67 08
+		15 01 00 00 00 00 02 68 04
+		15 01 00 00 00 00 02 7a 80
+		15 01 00 00 00 00 02 7b 91
+		15 01 00 00 00 00 02 7c d8
+		15 01 00 00 00 00 02 7d 60
+		15 01 00 00 00 00 02 93 06
+		15 01 00 00 00 00 02 94 06
+		15 01 00 00 00 00 02 8a 00
+		15 01 00 00 00 00 02 9b 0f
+		15 01 00 00 00 00 02 b3 c0
+		15 01 00 00 00 00 02 b4 00
+		15 01 00 00 00 00 02 b5 00
+		15 01 00 00 00 00 02 b6 21
+		15 01 00 00 00 00 02 b7 22
+		15 01 00 00 00 00 02 b8 07
+		15 01 00 00 00 00 02 b9 07
+		15 01 00 00 00 00 02 ba 22
+		15 01 00 00 00 00 02 bd 20
+		15 01 00 00 00 00 02 be 07
+		15 01 00 00 00 00 02 bf 07
+		15 01 00 00 00 00 02 c1 6d
+		15 01 00 00 00 00 02 c4 24
+		15 01 00 00 00 00 02 e3 00
+		15 01 00 00 00 00 02 ec 00
+		15 01 00 00 00 00 02 ff 10
+		15 01 00 00 00 00 02 bb 03
+		05 01 00 00 78 00 02 11 00
+		05 01 00 00 78 00 02 29 00];
+	qcom,mdss-dsi-off-command = [05 01 00 00
+		14 00 02 28 00 05 01 00 00 78 00
+		02 10 00];
+	qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+	qcom,mdss-dsi-off-command-state = "dsi_lp_mode";
+	qcom,mdss-dsi-panel-timings-phy-12nm = [17 0a 0f 06 03 08 06 0e];
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
+	qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
+	qcom,mdss-dsi-bl-pmic-bank-select = <0>;
+	qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
+	/delete-node/ qcom,mdss-dsi-display-timings;
+};
+
+&dsi_truly_1080_vid {
+	/delete-property/ qcom,mdss-dsi-panel-timings;
+	qcom,mdss-dsi-panel-timings-phy-12nm = [17 0a 0f 06 02 08 06 0e];
+	qcom,mdss-dsi-t-clk-post = <0x02>;
+	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
+	qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
+	qcom,mdss-dsi-bl-pmic-bank-select = <0>;
+	qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+};
+
+&dsi_truly_1080_cmd {
+	/delete-property/ qcom,mdss-dsi-panel-timings;
+	qcom,mdss-dsi-panel-timings-phy-12nm = [17 0a 0f 06 02 08 06 0e];
+	qcom,mdss-dsi-t-clk-post = <0x02>;
+	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
+	qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
+	qcom,mdss-dsi-bl-pmic-bank-select = <0>;
+	qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+};
+
+&i2c_2 {
+#include "smb1355.dtsi"
+};
+
+&pmi632_gpios {
+	smb_en {
+		smb_en_default: smb_en_default {
+			pins = "gpio2";
+			function = "func1";
+			output-enable;
+		};
+	};
+
+	pmi632_sense {
+		/* GPIO 7 and 8 are external-sense pins for PMI632 */
+		pmi632_sense_default: pmi632_sense_default {
+			pins = "gpio7", "gpio8";
+			bias-high-impedance;	/* disable the GPIO */
+			bias-disable;		/* no-pull */
+		};
+	};
+
+	pmi632_ctm {
+		/* Disable GPIO1 for h/w base mitigation */
+		pmi632_ctm_default: pmi632_ctm_default {
+			pins = "gpio1";
+			bias-high-impedance;	/* disable the GPIO */
+			bias-disable;		/* no-pull */
+		};
+	};
+};
+
+&tlmm {
+	smb_int_default: smb_int_default {
+		mux {
+			pins = "gpio61";
+			function = "gpio";
+		};
+		config {
+			pins = "gpio61";
+			drive-strength = <2>;
+			bias-pull-up;
+			input-enable;
+		};
+	};
+};
+
+&smb1355_0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&smb_int_default
+		&smb_en_default &pmi632_sense_default &pmi632_ctm_default>;
+	interrupt-parent = <&tlmm>;
+	interrupts = <61 IRQ_TYPE_LEVEL_LOW>;
+	smb1355_charger_0: qcom,smb1355-charger@1000 {
+		status = "ok";
+		/delete-property/ io-channels;
+		/delete-property/ io-channels-names;
+		qcom,parallel-mode = <1>;
+		qcom,disable-ctm;
+		qcom,hw-die-temp-mitigation;
+	};
+};
+
+&smb1355_1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&smb_int_default
+		&smb_en_default &pmi632_sense_default &pmi632_ctm_default>;
+	interrupt-parent = <&tlmm>;
+	interrupts = <61 IRQ_TYPE_LEVEL_LOW>;
+	smb1355_charger_1: qcom,smb1355-charger@1000 {
+		status = "ok";
+		/delete-property/ io-channels;
+		/delete-property/ io-channels-names;
+		qcom,parallel-mode = <1>;
+		qcom,disable-ctm;
+		qcom,hw-die-temp-mitigation;
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm439-pm8953.dtsi b/arch/arm64/boot/dts/qcom/sdm439-pm8953.dtsi
index d34c34a..615489e 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-pm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-pm8953.dtsi
@@ -271,6 +271,21 @@
 			};
 		};
 	};
+
+	pa-therm0 {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&pm8953_adc_tm 0x36>;
+		thermal-governor = "user_space";
+
+		trips {
+			active-config0 {
+				temperature = <65000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+	};
 };
 
 &pm8953_vadc {
diff --git a/arch/arm64/boot/dts/qcom/sdm439-pmi632.dtsi b/arch/arm64/boot/dts/qcom/sdm439-pmi632.dtsi
index bc2ba9f..9c4120c 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-pmi632.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-pmi632.dtsi
@@ -43,6 +43,27 @@
 	qcom,battery-data = <&mtp_batterydata>;
 };
 
+&pmi632_vadc {
+	pinctrl-names = "default";
+	pinctrl-0 = <&quiet_therm_default &smb_therm_default>;
+};
+
+&pmi632_gpios {
+	quiet_therm {
+		quiet_therm_default: quiet_therm_default {
+			pins = "gpio3";
+			bias-high-impedance;
+		};
+	};
+
+	smb_therm {
+		smb_therm_default: smb_therm_default {
+			pins = "gpio4";
+			bias-high-impedance;
+		};
+	};
+};
+
 &pm8953_typec {
 	status = "disabled";
 };
@@ -113,75 +134,168 @@
 		thermal-governor = "step_wise";
 
 		trips {
-			batt_trip1: batt-trip1 {
-				temperature = <41000>;
-				hysteresis = <2000>;
-				type = "passive";
-			};
-			cpus_trip: cpus-trip {
+			quiet_modem_439_trip1: quiet-modem-trip0 {
 				temperature = <44000>;
-				hysteresis = <0>;
-				type = "passive";
-			};
-			batt_trip2: batt-trip2 {
-				temperature = <45000>;
 				hysteresis = <4000>;
 				type = "passive";
 			};
-			gpu_trip: gpu-trip {
+			quiet_modem_439_trip2: quiet-modem-trip1 {
+				temperature = <46000>;
+				hysteresis = <4000>;
+				type = "passive";
+			};
+			quiet_cpus_439_trip: quiet-cpus-trip {
+				temperature = <48000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+			quiet_modem_439_trip3: quiet-modem-trip2 {
+				temperature = <48000>;
+				hysteresis = <2000>;
+				type = "passive";
+			};
+			quiet_batt_439_trip1: quiet-batt-trip1 {
+				temperature = <48000>;
+				hysteresis = <4000>;
+				type = "passive";
+			};
+			quiet_gpu_439_trip: quiet-gpu-trip {
 				temperature = <50000>;
 				hysteresis = <0>;
 				type = "passive";
 			};
+			quiet_batt_439_trip2: quiet-batt-trip2 {
+				temperature = <50000>;
+				hysteresis = <2000>;
+				type = "passive";
+			};
+			quiet_batt_439_trip3: quiet-batt-trip3 {
+				temperature = <52000>;
+				hysteresis = <2000>;
+				type = "passive";
+			};
+
+			quiet_batt_439_trip4: quiet-batt-trip4 {
+				temperature = <54000>;
+				hysteresis = <2000>;
+				type = "passive";
+			};
+			quiet_modem_439_trip4: quiet-modem-trip3 {
+				temperature = <55000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+			quiet_batt_439_trip5: quiet-batt-trip5 {
+				temperature = <56000>;
+				hysteresis = <2000>;
+				type = "passive";
+			};
 		};
 
 		cooling-maps {
 			skin_cpu0 {
-				trip = <&cpus_trip>;
-				/* throttle from fmax to 1094400KHz */
-				cooling-device = <&CPU0 THERMAL_NO_LIMIT 5>;
+				trip = <&quiet_cpus_439_trip>;
+				/* throttle from fmax to 1497600KHz */
+				cooling-device = <&CPU0 THERMAL_NO_LIMIT
+							(THERMAL_MAX_LIMIT-3)>;
 			};
 			skin_cpu1 {
-				trip = <&cpus_trip>;
-				cooling-device = <&CPU1 THERMAL_NO_LIMIT 5>;
+				trip = <&quiet_cpus_439_trip>;
+				cooling-device = <&CPU1 THERMAL_NO_LIMIT
+							(THERMAL_MAX_LIMIT-3)>;
 			};
 			skin_cpu2 {
-				trip = <&cpus_trip>;
-				cooling-device = <&CPU2 THERMAL_NO_LIMIT 5>;
+				trip = <&quiet_cpus_439_trip>;
+				cooling-device = <&CPU2 THERMAL_NO_LIMIT
+							(THERMAL_MAX_LIMIT-3)>;
 			};
 			skin_cpu3 {
-				trip = <&cpus_trip>;
-				cooling-device = <&CPU3 THERMAL_NO_LIMIT 5>;
+				trip = <&quiet_cpus_439_trip>;
+				cooling-device = <&CPU3 THERMAL_NO_LIMIT
+							(THERMAL_MAX_LIMIT-3)>;
 			};
 			skin_cpu4 {
-				trip = <&cpus_trip>;
-				cooling-device = <&CPU4 THERMAL_NO_LIMIT 3>;
+				trip = <&quiet_cpus_439_trip>;
+				/* throttle from fmax to 1171200KHz */
+				cooling-device = <&CPU4 THERMAL_NO_LIMIT
+							(THERMAL_MAX_LIMIT-3)>;
 			};
 			skin_cpu5 {
-				trip = <&cpus_trip>;
-				cooling-device = <&CPU5 THERMAL_NO_LIMIT 3>;
+				trip = <&quiet_cpus_439_trip>;
+				cooling-device = <&CPU5 THERMAL_NO_LIMIT
+							(THERMAL_MAX_LIMIT-3)>;
 			};
 			skin_cpu6 {
-				trip = <&cpus_trip>;
-				cooling-device = <&CPU6 THERMAL_NO_LIMIT 3>;
+				trip = <&quiet_cpus_439_trip>;
+				cooling-device = <&CPU6 THERMAL_NO_LIMIT
+							(THERMAL_MAX_LIMIT-3)>;
 			};
 			skin_cpu7 {
-				trip = <&cpus_trip>;
-				cooling-device = <&CPU7 THERMAL_NO_LIMIT 3>;
+				trip = <&quiet_cpus_439_trip>;
+				cooling-device = <&CPU7 THERMAL_NO_LIMIT
+							(THERMAL_MAX_LIMIT-3)>;
 			};
 			skin_gpu {
-				trip = <&gpu_trip>;
-				/* throttle from fmax to 375000000Hz */
-				cooling-device = <&msm_gpu THERMAL_NO_LIMIT 2>;
+				trip = <&quiet_gpu_439_trip>;
+				/* throttle from fmax to 510000000Hz */
+				cooling-device = <&msm_gpu THERMAL_NO_LIMIT
+							(THERMAL_MAX_LIMIT-2)>;
+			};
+			modem_proc_lvl1 {
+				trip = <&quiet_modem_439_trip1>;
+				cooling-device = <&modem_proc 1 1>;
+			};
+			modem_proc_lvl2 {
+				trip = <&quiet_modem_439_trip4>;
+				cooling-device = <&modem_proc 3 3>;
+			};
+			modem_lvl1 {
+				trip = <&quiet_modem_439_trip2>;
+				cooling-device = <&modem_pa 1 1>;
+			};
+			modem_lvl2 {
+				trip = <&quiet_modem_439_trip3>;
+				cooling-device = <&modem_pa 2 2>;
+			};
+			modem_lvl3 {
+				trip = <&quiet_modem_439_trip4>;
+				cooling-device = <&modem_pa 3 3>;
 			};
 			battery_lvl1 {
-				trip = <&batt_trip1>;
-				cooling-device = <&pmi632_charger 4 4>;
+				trip = <&quiet_batt_439_trip1>;
+				cooling-device = <&pmi632_charger 1 1>;
 			};
 			battery_lvl2 {
-				trip = <&batt_trip2>;
+				trip = <&quiet_batt_439_trip2>;
+				cooling-device = <&pmi632_charger 2 2>;
+			};
+			battery_lvl3 {
+				trip = <&quiet_batt_439_trip3>;
+				cooling-device = <&pmi632_charger 3 3>;
+			};
+			battery_lvl4 {
+				trip = <&quiet_batt_439_trip4>;
+				cooling-device = <&pmi632_charger 4 4>;
+			};
+			battery_lvl5 {
+				trip = <&quiet_batt_439_trip5>;
 				cooling-device = <&pmi632_charger 5 5>;
 			};
 		};
 	};
+
+	quiet-therm-adc {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&pmi632_adc_tm 0x53>;
+		thermal-governor = "user_space";
+
+		trips {
+			active-config0 {
+				temperature = <65000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm439-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sdm439-qrd-overlay.dts
index ac059c4d..46a7856 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-qrd-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm439-qrd-overlay.dts
@@ -14,13 +14,9 @@
 /dts-v1/;
 /plugin/;
 
- #include <dt-bindings/gpio/gpio.h>
- #include <dt-bindings/clock/msm-clocks-8953.h>
- #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include "sdm439-qrd.dtsi"
 
 / {
 	model = "QRD";
 	qcom,board-id = <0xb 2>;
-	qcom,msm-id = <353 0x0>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm439-qrd.dts b/arch/arm64/boot/dts/qcom/sdm439-qrd.dts
index b8a9f2b..4a2fbff 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-qrd.dts
+++ b/arch/arm64/boot/dts/qcom/sdm439-qrd.dts
@@ -22,10 +22,3 @@
 	qcom,board-id = <0xb 2>;
 	qcom,pmic-id = <0x010016 0x25 0x0 0x0>;
 };
-
-&pmi632_vadc {
-	chan@4a {
-		qcom,scale-function = <22>;
-	};
-};
-
diff --git a/arch/arm64/boot/dts/qcom/sdm439-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm439-qrd.dtsi
index bfe7bfa..a3c5b1f 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-qrd.dtsi
@@ -51,6 +51,11 @@
 	qcom,msm-mbhc-hphl-swh = <1>;
 	qcom,msm-mbhc-gnd-swh = <0>;
 	qcom,msm-hs-micbias-type = "external";
+	/delete-property/ qcom,quin-mi2s-gpios;
+};
+
+&cdc_quin_mi2s_gpios {
+	status = "disabled";
 };
 
 &wsa881x_i2c_f {
@@ -110,7 +115,7 @@
 	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
 	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
 
-	cd-gpios = <&tlmm 67 0x1>;
+	cd-gpios = <&tlmm 67 0x0>;
 
 	qcom,clk-rates = <400000 20000000 25000000 50000000 100000000
 								200000000>;
@@ -136,6 +141,24 @@
 			gpio-key,wakeup;
 		};
 	};
+
+	fpc1020 {
+		compatible = "fpc,fpc1020";
+		interrupt-parent = <&tlmm>;
+		interrupts = <48 0>;
+		fpc,gpio_rst = <&tlmm 124 0x0>;
+		fpc,gpio_irq = <&tlmm 48 0>;
+		vcc_spi-supply = <&pm8953_l5>;
+		vdd_io-supply  = <&pm8953_l5>;
+		vdd_ana-supply = <&pm8953_l5>;
+		fpc,enable-on-boot;
+		pinctrl-names = "fpc1020_reset_reset",
+				"fpc1020_reset_active",
+				"fpc1020_irq_active";
+		pinctrl-0 = <&fpc_reset_low>;
+		pinctrl-1 = <&fpc_reset_high>;
+		pinctrl-2 = <&fpc_int_low>;
+	};
 };
 
 &tlmm {
@@ -300,10 +323,108 @@
 
 &dsi_hx8399c_hd_vid {
 	/delete-property/ qcom,mdss-dsi-panel-timings;
-	qcom,mdss-dsi-panel-timings-phy-12nm = [08 06 0a 02 00 04 02 08];
+	qcom,mdss-dsi-panel-timings-phy-12nm = [09 06 0a 02 00 05 02 08];
 	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
 	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
 	qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
 	qcom,mdss-dsi-bl-pmic-bank-select = <0>;
 	qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
+	qcom,esd-check-enabled;
+	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+	qcom,mdss-dsi-panel-status-command-state = "dsi_lp_mode";
+	qcom,mdss-dsi-panel-status-value = <0x9d 0x9d 0x9d 0x9d>;
+	qcom,mdss-dsi-panel-on-check-value = <0x9d 0x9d 0x9d 0x9d>;
+	qcom,mdss-dsi-panel-status-read-length = <4>;
+	qcom,mdss-dsi-panel-max-error-count = <3>;
+	qcom,mdss-dsi-min-refresh-rate = <48>;
+	qcom,mdss-dsi-max-refresh-rate = <60>;
+	qcom,mdss-dsi-pan-enable-dynamic-fps;
+	qcom,mdss-dsi-pan-fps-update =
+		"dfps_immediate_porch_mode_vfp";
+};
+
+&i2c_2 {
+#include "smb1355.dtsi"
+};
+
+&pmi632_vadc {
+	chan@4a {
+		qcom,scale-function = <22>;
+	};
+};
+
+&pmi632_gpios {
+	smb_en {
+		smb_en_default: smb_en_default {
+			pins = "gpio2";
+			function = "func1";
+			output-enable;
+		};
+	};
+
+	pmi632_sense {
+		/* GPIO 7 and 8 are external-sense pins for PMI632 */
+		pmi632_sense_default: pmi632_sense_default {
+			pins = "gpio7", "gpio8";
+			bias-high-impedance;	/* disable the GPIO */
+			bias-disable;		/* no-pull */
+		};
+	};
+
+	pmi632_ctm {
+		/* Disable GPIO1 for h/w base mitigation */
+		pmi632_ctm_default: pmi632_ctm_default {
+			pins = "gpio1";
+			bias-high-impedance;	/* disable the GPIO */
+			bias-disable;		/* no-pull */
+		};
+	};
+};
+
+&tlmm {
+	smb_int_default: smb_int_default {
+		mux {
+			pins = "gpio61";
+			function = "gpio";
+		};
+		config {
+			pins = "gpio61";
+			drive-strength = <2>;
+			bias-pull-up;
+			input-enable;
+		};
+	};
+};
+
+&smb1355_0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&smb_int_default
+		&smb_en_default &pmi632_sense_default &pmi632_ctm_default>;
+	interrupt-parent = <&tlmm>;
+	interrupts = <61 IRQ_TYPE_LEVEL_LOW>;
+	smb1355_charger_0: qcom,smb1355-charger@1000 {
+		status = "ok";
+		/delete-property/ io-channels;
+		/delete-property/ io-channels-names;
+		qcom,parallel-mode = <1>;
+		qcom,disable-ctm;
+		qcom,hw-die-temp-mitigation;
+	};
+};
+
+&smb1355_1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&smb_int_default
+		&smb_en_default &pmi632_sense_default &pmi632_ctm_default>;
+	interrupt-parent = <&tlmm>;
+	interrupts = <61 IRQ_TYPE_LEVEL_LOW>;
+	smb1355_charger_1: qcom,smb1355-charger@1000 {
+		status = "ok";
+		/delete-property/ io-channels;
+		/delete-property/ io-channels-names;
+		qcom,parallel-mode = <1>;
+		qcom,disable-ctm;
+		qcom,hw-die-temp-mitigation;
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm439-rcm-overlay.dts b/arch/arm64/boot/dts/qcom/sdm439-rcm-overlay.dts
new file mode 100644
index 0000000..be0de06
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm439-rcm-overlay.dts
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include "sdm439-rcm.dtsi"
+
+/ {
+	model = "RCM";
+	qcom,board-id = <21 1>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm439-rcm.dts b/arch/arm64/boot/dts/qcom/sdm439-rcm.dts
new file mode 100644
index 0000000..71d02a0
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm439-rcm.dts
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sdm439.dtsi"
+#include "sdm439-rcm.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM439 RCM";
+	compatible = "qcom,sdm439-cdp", "qcom,sdm439", "qcom,cdp";
+	qcom,board-id = <21 1>;
+	qcom,pmic-id = <0x010016 0x25 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm439-rcm.dtsi b/arch/arm64/boot/dts/qcom/sdm439-rcm.dtsi
new file mode 100644
index 0000000..4ba4c00
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm439-rcm.dtsi
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdm439-cdp.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm439-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm439-regulator.dtsi
index f325925..414e8fe 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-regulator.dtsi
@@ -67,7 +67,7 @@
 		pm8953_cx_cdev: regulator-cx-cdev {
 			compatible = "qcom,regulator-cooling-device";
 			regulator-cdev-supply = <&pm8953_s2_floor_level>;
-			regulator-levels = <RPM_SMD_REGULATOR_LEVEL_NOM
+			regulator-levels = <RPM_SMD_REGULATOR_LEVEL_NOM_PLUS
 					RPM_SMD_REGULATOR_LEVEL_RETENTION>;
 			#cooling-cells = <2>;
 		};
@@ -378,12 +378,12 @@
 			reg = <0x2000 0x100>;
 			regulator-name = "pm8953_s5";
 			regulator-min-microvolt = <490000>;
-			regulator-max-microvolt = <910000>;
+			regulator-max-microvolt = <960000>;
 
 			pm8953_s5_limit: avs-limit-regulator {
 				regulator-name = "pm8953_s5_avs_limit";
 				regulator-min-microvolt = <490000>;
-				regulator-max-microvolt = <910000>;
+				regulator-max-microvolt = <960000>;
 			};
 		};
 	};
@@ -421,7 +421,7 @@
 		regulator-max-microvolt = <5>;
 
 		qcom,cpr-fuse-corners = <3>;
-		qcom,cpr-voltage-ceiling = <760000 795000 910000>;
+		qcom,cpr-voltage-ceiling = <810000 845000 960000>;
 		qcom,cpr-voltage-floor =   <700000 700000 790000>;
 		vdd-apc-supply = <&pm8953_s5>;
 		mem-acc-supply = <&apc_mem_acc_vreg>;
@@ -468,5 +468,33 @@
 		qcom,cpr-quot-adjust-scaling-factor-max = <0 1400 1400>;
 		qcom,cpr-voltage-scaling-factor-max = <0 2000 2000>;
 		qcom,cpr-scaled-init-voltage-as-ceiling;
+
+		qcom,cpr-fuse-version-map =
+			/* <Speed-bin pvs-version cpr-rev ... ... ...> */
+			<(-1)    (-1)   ( 0)   (-1)    (-1)    (-1)>,
+			<(-1)    (-1)   ( 1)   (-1)    (-1)    (-1)>,
+			<(-1)    (-1)   (-1)   (-1)    (-1)    (-1)>;
+
+		qcom,cpr-quotient-adjustment =
+			<66      77      66>, /* SVSP/NOM/TUR:30/35/30 mV */
+			<(-74) (-57)  (-30)>, /* SVSP/NOM/TUR:-34/-26/-14 mV */
+			<0        0       0>;
+
+		qcom,cpr-floor-to-ceiling-max-range =
+			<50000 50000 50000 65000 65000>,
+			<50000 50000 50000 65000 65000>,
+			<50000 50000 50000 65000 65000>;
+
+		qcom,cpr-voltage-ceiling-override =
+			<(-1) (-1) 810000 845000 885000 960000 960000>;
+
+		qcom,cpr-enable;
+	};
+
+	dbu1: dbu1 {
+		compatible = "regulator-fixed";
+		regulator-name = "dbu1";
+		startup-delay-us = <0>;
+		enable-active-high;
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm439.dtsi b/arch/arm64/boot/dts/qcom/sdm439.dtsi
index 2df468f..be05b6e 100644
--- a/arch/arm64/boot/dts/qcom/sdm439.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439.dtsi
@@ -14,6 +14,7 @@
 #include "msm8937.dtsi"
 #include "sdm439-pm8953.dtsi"
 #include "sdm439-pmi632.dtsi"
+#include "sdm439-audio.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM439";
@@ -49,6 +50,7 @@
 		qcom,governor-per-policy;
 
 		qcom,cpufreq-table-0 =
+			 <  960000 >,
 			 < 1305600 >,
 			 < 1497600 >,
 			 < 1708800 >,
@@ -278,6 +280,7 @@
 
 	qcom,speed0-bin-v0-c1 =
 		<          0 0>,
+		<  960000000 1>,
 		< 1305600000 1>,
 		< 1497600000 2>,
 		< 1708800000 3>,
@@ -298,6 +301,7 @@
 
 	qcom,speed1-bin-v0-c1 =
 		<          0 0>,
+		<  960000000 1>,
 		< 1305600000 1>,
 		< 1497600000 2>,
 		< 1708800000 3>,
@@ -324,6 +328,16 @@
 	vdd_hf_pll-supply = <&pm8953_l7_ao>;
 };
 
+&soc {
+	devfreq_spdm_cpu {
+		status = "disabled";
+	};
+
+	devfreq_spdm_gov {
+		status = "disabled";
+	};
+};
+
 &clock_gcc_mdss {
 	compatible = "qcom,gcc-mdss-sdm439";
 	clocks = <&mdss_dsi0_pll clk_dsi0pll_pixel_clk_src>,
@@ -340,10 +354,10 @@
 	reg = <0x001a94400 0x400>,
 		<0x0184d074 0x8>;
 	reg-names = "pll_base", "gdsc_base";
-	/delete-property/ qcom,dsi-pll-ssc-en;
-	/delete-property/ qcom,dsi-pll-ssc-mode;
-	/delete-property/ qcom,ssc-frequency-hz;
-	/delete-property/ qcom,ssc-ppm;
+	qcom,dsi-pll-ssc-en;
+	qcom,dsi-pll-ssc-mode = "down-spread";
+	qcom,ssc-frequency-hz = <31500>;
+	qcom,ssc-ppm = <5000>;
 };
 
 &mdss_dsi1_pll {
@@ -351,10 +365,10 @@
 	reg = <0x001a96400 0x400>,
 		<0x0184d074 0x8>;
 	reg-names = "pll_base", "gdsc_base";
-	/delete-property/ qcom,dsi-pll-ssc-en;
-	/delete-property/ qcom,dsi-pll-ssc-mode;
-	/delete-property/ qcom,ssc-frequency-hz;
-	/delete-property/ qcom,ssc-ppm;
+	qcom,dsi-pll-ssc-en;
+	qcom,dsi-pll-ssc-mode = "down-spread";
+	qcom,ssc-frequency-hz = <31500>;
+	qcom,ssc-ppm = <5000>;
 };
 
 &mdss_dsi {
@@ -621,3 +635,17 @@
 		};
 	};
 };
+
+&mdss_mdp {
+	qcom,vbif-settings = <0xd0 0x20>;
+};
+
+&thermal_zones {
+	hexa-cpu-max-step {
+		trips {
+			cpu-trip {
+				temperature = <95000>;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm450-cdp-s2-overlay.dts b/arch/arm64/boot/dts/qcom/sdm450-cdp-s2-overlay.dts
index e12ad51..97c9389 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-cdp-s2-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-cdp-s2-overlay.dts
@@ -18,7 +18,6 @@
 
 / {
 	model = "CDP S2";
-	compatible = "qcom,cdp";
 	qcom,board-id = <1 2>;
 };
 
diff --git a/arch/arm64/boot/dts/qcom/sdm450-mtp-s3-overlay.dts b/arch/arm64/boot/dts/qcom/sdm450-mtp-s3-overlay.dts
index ae522a5..3dd7200 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-mtp-s3-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-mtp-s3-overlay.dts
@@ -18,7 +18,6 @@
 
 / {
 	model = "MTP S3";
-	compatible = "qcom,mtp";
 	qcom,board-id = <8 3>;
 };
 
diff --git a/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dts b/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dts
index b9aadc1..b73b49a 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dts
@@ -14,8 +14,8 @@
 /dts-v1/;
 
 #include "sdm450.dtsi"
-#include "sdm450-pmi632-mtp-s3.dtsi"
 #include "sdm450-pmi632.dtsi"
+#include "sdm450-pmi632-mtp-s3.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM450 + PMI632 MTP S3";
diff --git a/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dtsi b/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dtsi
index 64d9e64..5db626b 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dtsi
@@ -46,3 +46,82 @@
 	qcom,mdss-dsi-bl-pmic-bank-select = <0>;
 	qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
 };
+
+&i2c_2 {
+#include "smb1355.dtsi"
+};
+
+&pmi632_gpios {
+	smb_en {
+		smb_en_default: smb_en_default {
+			pins = "gpio2";
+			function = "func1";
+			output-enable;
+		};
+	};
+
+	pmi632_sense {
+		/* GPIO 7 and 8 are external-sense pins for PMI632 */
+		pmi632_sense_default: pmi632_sense_default {
+			pins = "gpio7", "gpio8";
+			bias-high-impedance;	/* disable the GPIO */
+			bias-disable;		/* no-pull */
+		};
+	};
+
+	pmi632_ctm {
+		/* Disable GPIO1 for h/w based mitigation */
+		pmi632_ctm_default: pmi632_ctm_default {
+			pins = "gpio1";
+			bias-high-impedance;	/* disable the GPIO */
+			bias-disable;		/* no-pull */
+		};
+	};
+};
+
+&tlmm {
+	smb_int_default: smb_int_default {
+		mux {
+			pins = "gpio59";
+			function = "gpio";
+		};
+		config {
+			pins = "gpio59";
+			drive-strength = <2>;
+			bias-pull-up;
+			input-enable;
+		};
+	};
+};
+
+&smb1355_0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&smb_int_default
+		&smb_en_default &pmi632_sense_default &pmi632_ctm_default>;
+	interrupt-parent = <&tlmm>;
+	interrupts = <59 IRQ_TYPE_LEVEL_LOW>;
+	smb1355_charger_0: qcom,smb1355-charger@1000 {
+		status = "ok";
+		/delete-property/ io-channels;
+		/delete-property/ io-channel-names;
+		qcom,parallel-mode = <1>;
+		qcom,disable-ctm;
+		qcom,hw-die-temp-mitigation;
+	};
+};
+
+&smb1355_1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&smb_int_default
+		&smb_en_default &pmi632_sense_default &pmi632_ctm_default>;
+	interrupt-parent = <&tlmm>;
+	interrupts = <59 IRQ_TYPE_LEVEL_LOW>;
+	smb1355_charger_1: qcom,smb1355-charger@1000 {
+		status = "ok";
+		/delete-property/ io-channels;
+		/delete-property/ io-channel-names;
+		qcom,parallel-mode = <1>;
+		qcom,disable-ctm;
+		qcom,hw-die-temp-mitigation;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm450-pmi632.dtsi b/arch/arm64/boot/dts/qcom/sdm450-pmi632.dtsi
index 5c127bc..6e39327 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-pmi632.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi632.dtsi
@@ -41,6 +41,27 @@
 	};
 };
 
+&pmi632_vadc {
+	pinctrl-names = "default";
+	pinctrl-0 = <&quiet_therm_default &smb_therm_default>;
+};
+
+&pmi632_gpios {
+	quiet_therm {
+		quiet_therm_default: quiet_therm_default {
+			pins = "gpio3";
+			bias-high-impedance;
+		};
+	};
+
+	smb_therm {
+		smb_therm_default: smb_therm_default {
+			pins = "gpio4";
+			bias-high-impedance;
+		};
+	};
+};
+
 &pmi632_qg {
 	qcom,battery-data = <&mtp_batterydata>;
 };
@@ -351,3 +372,24 @@
 		};
 	};
 };
+
+&tlmm {
+	pmx_mdss {
+		mdss_dsi_active: mdss_dsi_active {
+			mux {
+				pins = "gpio61";
+			};
+			config {
+				pins = "gpio61";
+			};
+		};
+		mdss_dsi_suspend: mdss_dsi_suspend {
+			mux {
+				pins = "gpio61";
+			};
+			config {
+				pins = "gpio61";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm450-qrd-sku4-overlay.dts b/arch/arm64/boot/dts/qcom/sdm450-qrd-sku4-overlay.dts
index 558c3c6..e8dca18 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-qrd-sku4-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-qrd-sku4-overlay.dts
@@ -18,7 +18,6 @@
 
 / {
 	model = "QRD SKU4";
-	compatible = "qcom,qrd";
 	qcom,board-id = <0xb 1>;
 };
 
diff --git a/arch/arm64/boot/dts/qcom/sdm450-qrd-sku4.dtsi b/arch/arm64/boot/dts/qcom/sdm450-qrd-sku4.dtsi
index 386bd71..40fdb91 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-qrd-sku4.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm450-qrd-sku4.dtsi
@@ -177,3 +177,81 @@
 	};
 };
 
+&i2c_2 {
+#include "smb1355.dtsi"
+};
+
+&pmi632_gpios {
+	smb_en {
+		smb_en_default: smb_en_default {
+			pins = "gpio2";
+			function = "func1";
+			output-enable;
+		};
+	};
+
+	pmi632_sense {
+		/* GPIO 7 and 8 are external-sense pins for PMI632 */
+		pmi632_sense_default: pmi632_sense_default {
+			pins = "gpio7", "gpio8";
+			bias-high-impedance;	/* disable the GPIO */
+			bias-disable;		/* no-pull */
+		};
+	};
+
+	pmi632_ctm {
+		/* Disable GPIO1 for h/w based mitigation */
+		pmi632_ctm_default: pmi632_ctm_default {
+			pins = "gpio1";
+			bias-high-impedance;	/* disable the GPIO */
+			bias-disable;		/* no-pull */
+		};
+	};
+};
+
+&tlmm {
+	smb_int_default: smb_int_default {
+		mux {
+			pins = "gpio59";
+			function = "gpio";
+		};
+		config {
+			pins = "gpio59";
+			drive-strength = <2>;
+			bias-pull-up;
+			input-enable;
+		};
+	};
+};
+
+&smb1355_0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&smb_int_default
+		&smb_en_default &pmi632_sense_default &pmi632_ctm_default>;
+	interrupt-parent = <&tlmm>;
+	interrupts = <59 IRQ_TYPE_LEVEL_LOW>;
+	smb1355_charger_0: qcom,smb1355-charger@1000 {
+		status = "ok";
+		/delete-property/ io-channels;
+		/delete-property/ io-channel-names;
+		qcom,parallel-mode = <1>;
+		qcom,disable-ctm;
+		qcom,hw-die-temp-mitigation;
+	};
+};
+
+&smb1355_1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&smb_int_default
+		&smb_en_default &pmi632_sense_default &pmi632_ctm_default>;
+	interrupt-parent = <&tlmm>;
+	interrupts = <59 IRQ_TYPE_LEVEL_LOW>;
+	smb1355_charger_1: qcom,smb1355-charger@1000 {
+		status = "ok";
+		/delete-property/ io-channels;
+		/delete-property/ io-channel-names;
+		qcom,parallel-mode = <1>;
+		qcom,disable-ctm;
+		qcom,hw-die-temp-mitigation;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm632-cdp-s2.dts b/arch/arm64/boot/dts/qcom/sdm632-cdp-s2.dts
index 2669d1f..9d6543f 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-cdp-s2.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-cdp-s2.dts
@@ -24,33 +24,3 @@
 	qcom,pmic-id = <0x010016 0x25 0x0 0x0>;
 };
 
-
-&soc {
-	gpio_keys {
-		/delete-node/home;
-	};
-};
-
-&tlmm {
-	tlmm_gpio_key {
-		gpio_key_active: gpio_key_active {
-			mux {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-
-			config {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-		};
-
-		gpio_key_suspend: gpio_key_suspend {
-			mux {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-
-			config {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-		};
-	};
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm632-cpu.dtsi b/arch/arm64/boot/dts/qcom/sdm632-cpu.dtsi
index de1bd1f..c37750a 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-cpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm632-cpu.dtsi
@@ -258,7 +258,7 @@
 			busy-cost-data = <
 				633600	722
 				902400	1287
-				1036800	1739
+				1094400	1739
 				1401600	2819
 				1555200	3532
 				1804800	5038
@@ -287,7 +287,7 @@
 			busy-cost-data = <
 				633600	68
 				902400	103
-				1036800	132
+				1094400	132
 				1401600	193
 				1555200	233
 				1804800	292
diff --git a/arch/arm64/boot/dts/qcom/sdm632-ext-audio-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm632-ext-audio-mtp.dtsi
new file mode 100644
index 0000000..b34e3d8
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm632-ext-audio-mtp.dtsi
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&int_codec {
+	status = "disabled";
+};
+
+&pmic_analog_codec {
+	status = "disabled";
+};
+
+&wsa881x_i2c_f {
+	status = "disabled";
+};
+
+&wsa881x_i2c_45 {
+	status = "disabled";
+};
+
+&cdc_pri_mi2s_gpios {
+	status = "disabled";
+};
+
+&wsa881x_analog_vi_gpio {
+	status = "disabled";
+};
+
+&wsa881x_analog_clk_gpio {
+	status = "disabled";
+};
+
+&wsa881x_analog_reset_gpio {
+	status = "disabled";
+};
+
+&cdc_comp_gpios {
+	status = "disabled";
+};
+
+&slim_msm {
+	status = "okay";
+};
+
+&dai_slim {
+	status = "okay";
+};
+
+&wcd9xxx_intc {
+	status = "okay";
+};
+
+&clock_audio {
+	status = "okay";
+};
+
+&wcd9335 {
+	status = "okay";
+
+	dc-vdd-buck-supply = <&dbu3>;
+	qcom,cdc-vdd-buck-voltage = <1800000 1800000>;
+	qcom,cdc-vdd-buck-current = <650000>;
+
+	cdc-buck-sido-supply = <&dbu3>;
+	qcom,cdc-buck-sido-voltage = <1800000 1800000>;
+	qcom,cdc-buck-sido-current = <150000>;
+
+	cdc-vdd-tx-h-supply = <&dbu3>;
+	qcom,cdc-vdd-tx-h-voltage = <1800000 1800000>;
+	qcom,cdc-vdd-tx-h-current = <25000>;
+
+	cdc-vdd-rx-h-supply = <&dbu3>;
+	qcom,cdc-vdd-rx-h-voltage = <1800000 1800000>;
+	qcom,cdc-vdd-rx-h-current = <25000>;
+
+	cdc-vdd-px-supply = <&dbu3>;
+	qcom,cdc-vdd-px-voltage = <1800000 1800000>;
+	qcom,cdc-vdd-px-current = <10000>;
+};
+
+&cdc_us_euro_sw {
+	status = "okay";
+};
+
+&cdc_quin_mi2s_gpios {
+	status = "okay";
+};
+
+&wcd_rst_gpio {
+	status = "okay";
+};
+
+&ext_codec {
+	qcom,model = "msm8953-tasha-snd-card";
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm632-ext-codec-cdp-s3-overlay.dts b/arch/arm64/boot/dts/qcom/sdm632-ext-codec-cdp-s3-overlay.dts
index eb5afbd..c3e0ba9 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-ext-codec-cdp-s3-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-ext-codec-cdp-s3-overlay.dts
@@ -18,7 +18,6 @@
 
 / {
 	model = "Ext Codec CDP S3";
-	compatible = "qcom,cdp";
 	qcom,board-id = <1 3>;
 };
 
diff --git a/arch/arm64/boot/dts/qcom/sdm632-ext-codec-cdp-s3.dts b/arch/arm64/boot/dts/qcom/sdm632-ext-codec-cdp-s3.dts
index 17ae9d1..60b149d 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-ext-codec-cdp-s3.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-ext-codec-cdp-s3.dts
@@ -24,34 +24,3 @@
 	qcom,pmic-id = <0x010016 0x25 0x0 0x0>;
 };
 
-
-&soc {
-	gpio_keys {
-		/delete-node/home;
-	};
-};
-
-&tlmm {
-	tlmm_gpio_key {
-		gpio_key_active: gpio_key_active {
-			mux {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-
-			config {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-		};
-
-		gpio_key_suspend: gpio_key_suspend {
-			mux {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-
-			config {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-		};
-	};
-};
-
diff --git a/arch/arm64/boot/dts/qcom/sdm632-ext-codec-mtp-s4-overlay.dts b/arch/arm64/boot/dts/qcom/sdm632-ext-codec-mtp-s4-overlay.dts
index 5656fd0..63b51ab 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-ext-codec-mtp-s4-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-ext-codec-mtp-s4-overlay.dts
@@ -18,7 +18,6 @@
 
 / {
 	model = "Ext Codec MTP S4";
-	compatible = "qcom,mtp";
 	qcom,board-id = <8 4>;
 };
 
diff --git a/arch/arm64/boot/dts/qcom/sdm632-ext-codec-mtp-s4.dtsi b/arch/arm64/boot/dts/qcom/sdm632-ext-codec-mtp-s4.dtsi
index d8326ff..fd1eb3d 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-ext-codec-mtp-s4.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm632-ext-codec-mtp-s4.dtsi
@@ -12,4 +12,5 @@
  */
 
 #include "sdm450-pmi632-mtp-s3.dtsi"
+#include "sdm632-ext-audio-mtp.dtsi"
 
diff --git a/arch/arm64/boot/dts/qcom/sdm632-mtp-s3.dts b/arch/arm64/boot/dts/qcom/sdm632-mtp-s3.dts
index 3662cf3..1dd1163 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-mtp-s3.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-mtp-s3.dts
@@ -14,8 +14,8 @@
 /dts-v1/;
 
 #include "sdm632.dtsi"
-#include "sdm450-pmi632-mtp-s3.dtsi"
 #include "sdm450-pmi632.dtsi"
+#include "sdm450-pmi632-mtp-s3.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM632 + PMI632 MTP S3";
diff --git a/arch/arm64/boot/dts/qcom/sdm632-pm8004-cdp-s2.dts b/arch/arm64/boot/dts/qcom/sdm632-pm8004-cdp-s2.dts
index 4d68901..e0e6b4b 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-pm8004-cdp-s2.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-pm8004-cdp-s2.dts
@@ -25,34 +25,3 @@
 	qcom,pmic-id = <0x010016 0x25 0xC 0x0>;
 };
 
-
-&soc {
-	gpio_keys {
-		/delete-node/home;
-	};
-};
-
-&tlmm {
-	tlmm_gpio_key {
-		gpio_key_active: gpio_key_active {
-			mux {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-
-			config {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-		};
-
-		gpio_key_suspend: gpio_key_suspend {
-			mux {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-
-			config {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-		};
-	};
-};
-
diff --git a/arch/arm64/boot/dts/qcom/sdm632-pm8004-ext-codec-cdp-s3.dts b/arch/arm64/boot/dts/qcom/sdm632-pm8004-ext-codec-cdp-s3.dts
index 6ca2940..413e85f 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-pm8004-ext-codec-cdp-s3.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-pm8004-ext-codec-cdp-s3.dts
@@ -25,34 +25,3 @@
 	qcom,pmic-id = <0x010016 0x25 0xC 0x0>;
 };
 
-
-&soc {
-	gpio_keys {
-		/delete-node/home;
-	};
-};
-
-&tlmm {
-	tlmm_gpio_key {
-		gpio_key_active: gpio_key_active {
-			mux {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-
-			config {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-		};
-
-		gpio_key_suspend: gpio_key_suspend {
-			mux {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-
-			config {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-		};
-	};
-};
-
diff --git a/arch/arm64/boot/dts/qcom/sdm632-pm8004-mtp-s3.dts b/arch/arm64/boot/dts/qcom/sdm632-pm8004-mtp-s3.dts
index d2a9cf1..aea6bff 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-pm8004-mtp-s3.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-pm8004-mtp-s3.dts
@@ -14,8 +14,8 @@
 /dts-v1/;
 
 #include "sdm632.dtsi"
-#include "sdm450-pmi632-mtp-s3.dtsi"
 #include "sdm450-pmi632.dtsi"
+#include "sdm450-pmi632-mtp-s3.dtsi"
 #include "sdm632-pm8004.dtsi"
 
 / {
diff --git a/arch/arm64/boot/dts/qcom/sdm632-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sdm632-qrd-overlay.dts
index e6217e5..4b7f6b2 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-qrd-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-qrd-overlay.dts
@@ -18,7 +18,6 @@
 
 / {
 	model = "QRD";
-	compatible = "qcom,qrd";
 	qcom,board-id = <0xb 3>;
 };
 
diff --git a/arch/arm64/boot/dts/qcom/sdm632-rcm.dts b/arch/arm64/boot/dts/qcom/sdm632-rcm.dts
index fe7ab38..68f0ea0 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-rcm.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-rcm.dts
@@ -24,32 +24,3 @@
 	qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
 };
 
-&soc {
-	gpio_keys {
-		/delete-node/home;
-	};
-};
-
-&tlmm {
-	tlmm_gpio_key {
-		gpio_key_active: gpio_key_active {
-			mux {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-
-			config {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-		};
-
-		gpio_key_suspend: gpio_key_suspend {
-			mux {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-
-			config {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-		};
-	};
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm632-rcm.dtsi b/arch/arm64/boot/dts/qcom/sdm632-rcm.dtsi
index 14ba3b4..aa20680 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-rcm.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm632-rcm.dtsi
@@ -13,3 +13,34 @@
 
 #include "sdm450-pmi632-cdp-s2.dtsi"
 
+&soc {
+	gpio_keys {
+		home {
+			status = "disabled";
+		};
+	};
+};
+
+&tlmm {
+	tlmm_gpio_key {
+		gpio_key_active {
+			mux {
+				pins = "gpio85", "gpio86", "gpio87";
+			};
+
+			config {
+				pins = "gpio85", "gpio86", "gpio87";
+			};
+		};
+
+		gpio_key_suspend {
+			mux {
+				pins = "gpio85", "gpio86", "gpio87";
+			};
+
+			config {
+				pins = "gpio85", "gpio86", "gpio87";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm632-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm632-regulator.dtsi
index a7580e0..33ac930 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm632-regulator.dtsi
@@ -114,6 +114,8 @@
 		qcom,cpr-count-repeat = <14>;
 		qcom,cpr-down-error-step-limit = <1>;
 		qcom,cpr-up-error-step-limit = <1>;
+		qcom,cpr-reset-step-quot-loop-en;
+		qcom,cpr-thread-has-always-vote-en;
 
 		qcom,apm-ctrl = <&apc_apm>;
 		qcom,apm-threshold-voltage = <875000>;
@@ -131,10 +133,13 @@
 			"APCS_ALIAS0_APM_CTLER_STATUS",
 			"APCS0_CPR_CORE_ADJ_MODE_REG";
 
+		qcom,cpr-enable;
+		qcom,cpr-hw-closed-loop;
+
 		thread@0 {
 			qcom,cpr-thread-id = <0>;
 			qcom,cpr-consecutive-up = <0>;
-			qcom,cpr-consecutive-down = <2>;
+			qcom,cpr-consecutive-down = <0>;
 			qcom,cpr-up-threshold = <2>;
 			qcom,cpr-down-threshold = <1>;
 
@@ -156,6 +161,10 @@
 					<500000  500000 500000 500000 500000
 					 500000  500000>;
 
+				qcom,cpr-floor-to-ceiling-max-range =
+					<50000  50000 50000 50000 50000
+					 50000  50000>;
+
 				qcom,mem-acc-voltage = <1 1 2 2 2 2 3>;
 
 				qcom,corner-frequencies =
@@ -176,13 +185,19 @@
 				qcom,allow-voltage-interpolation;
 				qcom,allow-quotient-interpolation;
 				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+				qcom,cpr-open-loop-voltage-fuse-adjustment =
+					< 0       0      0 10000>;
+
+				qcom,cpr-closed-loop-voltage-fuse-adjustment =
+					<(-10000) 0      0 10000>;
 			};
 		};
 
 		thread@1 {
 			qcom,cpr-thread-id = <1>;
 			qcom,cpr-consecutive-up = <0>;
-			qcom,cpr-consecutive-down = <2>;
+			qcom,cpr-consecutive-down = <0>;
 			qcom,cpr-up-threshold = <2>;
 			qcom,cpr-down-threshold = <1>;
 
@@ -204,6 +219,10 @@
 					<500000 500000 500000 500000 500000
 					 500000 500000>;
 
+				qcom,cpr-floor-to-ceiling-max-range =
+					<50000  50000 50000 50000 50000
+					 50000  50000>;
+
 				qcom,mem-acc-voltage = <1 1 2 2 2 2 3>;
 
 				qcom,corner-frequencies =
@@ -227,9 +246,9 @@
 
 				qcom,cpr-open-loop-voltage-fuse-adjustment =
 					/* Speed bin 0; CPR rev 0..7 */
-					< 30000      0      0     0>,
-					< 30000      0      0     0>,
-					<     0      0      0     0>,
+					< 30000      0  10000 20000>,
+					< 30000      0  10000 20000>,
+					<     0      0  10000 20000>,
 					<     0      0      0     0>,
 					<     0      0      0     0>,
 					<     0      0      0     0>,
@@ -247,9 +266,9 @@
 					<     0      0      0     0>,
 
 					/* Speed bin 2; CPR rev 0..7 */
-					< 30000      0      0     0>,
-					< 30000      0      0     0>,
-					<     0      0      0     0>,
+					< 30000      0  10000 20000>,
+					< 30000      0  10000 20000>,
+					<     0      0  10000 20000>,
 					<     0      0      0     0>,
 					<     0      0      0     0>,
 					<     0      0      0     0>,
@@ -287,9 +306,9 @@
 					<     0      0      0     0>,
 
 					/* Speed bin 6; CPR rev 0..7 */
-					< 30000      0      0     0>,
-					< 30000      0      0     0>,
-					<     0      0      0     0>,
+					< 30000      0  10000 20000>,
+					< 30000      0  10000 20000>,
+					<     0      0  10000 20000>,
 					<     0      0      0     0>,
 					<     0      0      0     0>,
 					<     0      0      0     0>,
@@ -310,7 +329,7 @@
 					/* Speed bin 0; CPR rev 0..7 */
 					< 30000      0      0     0>,
 					< 30000      0      0     0>,
-					<     0      0      0     0>,
+					<(-10000)    0      0     0>,
 					<     0      0      0     0>,
 					<     0      0      0     0>,
 					<     0      0      0     0>,
@@ -330,7 +349,7 @@
 					/* Speed bin 2; CPR rev 0..7 */
 					< 30000      0      0     0>,
 					< 30000      0      0     0>,
-					<     0      0      0     0>,
+					<(-10000)    0      0     0>,
 					<     0      0      0     0>,
 					<     0      0      0     0>,
 					<     0      0      0     0>,
@@ -370,7 +389,7 @@
 					/* Speed bin 6; CPR rev 0..7 */
 					< 30000      0      0     0>,
 					< 30000      0      0     0>,
-					<     0      0      0     0>,
+					<(-10000)    0      0     0>,
 					<     0      0      0     0>,
 					<     0      0      0     0>,
 					<     0      0      0     0>,
@@ -432,4 +451,11 @@
 		mem-acc-supply = <&gfx_mem_acc>;
 		qcom,mem-acc-corner-map = <1 1 1 2 2 2 2>;
 	};
+
+	dbu3: dbu3 {
+		compatible = "regulator-fixed";
+		regulator-name = "dbu3";
+		startup-delay-us = <0>;
+		enable-active-high;
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm632.dtsi b/arch/arm64/boot/dts/qcom/sdm632.dtsi
index b54e831..ee2476e 100644
--- a/arch/arm64/boot/dts/qcom/sdm632.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm632.dtsi
@@ -38,8 +38,29 @@
 	compatible = "qcom,cc-debug-sdm632";
 };
 
+&soc {
+	devfreq_spdm_cpu {
+		status = "disabled";
+	};
+
+	devfreq_spdm_gov {
+		status = "disabled";
+	};
+};
+
 &clock_gcc_gfx {
 	compatible = "qcom,gcc-gfx-sdm632";
+	qcom,gfxfreq-corner =
+		 <         0   0 >,
+		 < 133330000   1 >,  /* Min SVS   */
+		 < 216000000   2 >,  /* Low SVS   */
+		 < 320000000   3 >,  /* SVS       */
+		 < 400000000   4 >,  /* SVS Plus  */
+		 < 510000000   5 >,  /* NOM       */
+		 < 560000000   6 >,  /* Nom Plus  */
+		 < 650000000   7 >,  /* Turbo     */
+		 < 700000000   7 >,  /* Turbo     */
+		 < 725000000   7 >;  /* Turbo     */
 };
 
 &thermal_zones {
@@ -994,3 +1015,97 @@
 };
 
 #include "sdm632-coresight.dtsi"
+
+/* GPU Overrides */
+&msm_gpu {
+
+	qcom,ca-target-pwrlevel = <4>;
+	qcom,initial-pwrlevel = <5>;
+	/delete-node/qcom,gpu-pwrlevels;
+
+	/* Power levels */
+	qcom,gpu-pwrlevels {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		compatible = "qcom,gpu-pwrlevels";
+		/* TURBO LD0 */
+		qcom,gpu-pwrlevel@0 {
+			reg = <0>;
+			qcom,gpu-freq = <725000000>;
+			qcom,bus-freq = <10>;
+			qcom,bus-min = <10>;
+			qcom,bus-max = <10>;
+		};
+
+		/* TURBO */
+		qcom,gpu-pwrlevel@1 {
+			reg = <1>;
+			qcom,gpu-freq = <650000000>;
+			qcom,bus-freq = <10>;
+			qcom,bus-min = <10>;
+			qcom,bus-max = <10>;
+		};
+
+		/* NOM+ */
+		qcom,gpu-pwrlevel@2 {
+			reg = <2>;
+			qcom,gpu-freq = <560000000>;
+			qcom,bus-freq = <10>;
+			qcom,bus-min = <8>;
+			qcom,bus-max = <10>;
+		};
+
+		/* NOM */
+		qcom,gpu-pwrlevel@3 {
+			reg = <3>;
+			qcom,gpu-freq = <510000000>;
+			qcom,bus-freq = <9>;
+			qcom,bus-min = <6>;
+			qcom,bus-max = <10>;
+		};
+
+		/* SVS+ */
+		qcom,gpu-pwrlevel@4 {
+			reg = <4>;
+			qcom,gpu-freq = <400000000>;
+			qcom,bus-freq = <7>;
+			qcom,bus-min = <5>;
+			qcom,bus-max = <8>;
+		};
+
+		/* SVS */
+		qcom,gpu-pwrlevel@5 {
+			reg = <5>;
+			qcom,gpu-freq = <320000000>;
+			qcom,bus-freq = <4>;
+			qcom,bus-min = <2>;
+			qcom,bus-max = <6>;
+		};
+
+		/* Low SVS */
+		qcom,gpu-pwrlevel@6 {
+			reg = <6>;
+			qcom,gpu-freq = <216000000>;
+			qcom,bus-freq = <3>;
+			qcom,bus-min = <2>;
+			qcom,bus-max = <4>;
+		};
+
+		qcom,gpu-pwrlevel@7 {
+			reg = <7>;
+			qcom,gpu-freq = <133300000>;
+			qcom,bus-freq = <3>;
+			qcom,bus-min = <1>;
+			qcom,bus-max = <4>;
+		};
+		/* XO */
+		qcom,gpu-pwrlevel@8 {
+			reg = <8>;
+			qcom,gpu-freq = <19200000>;
+			qcom,bus-freq = <0>;
+			qcom,bus-min = <0>;
+			qcom,bus-max = <0>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-qrd.dtsi
index c8f7ac0..78047bd 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-qrd.dtsi
@@ -11,6 +11,8 @@
  * GNU General Public License for more details.
  */
 
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+
 &soc {
 	led_flash_rear: qcom,camera-flash@0 {
 		cell-index = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi
index 9402294..348ba6f 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi
@@ -411,6 +411,7 @@
 		qcom,cpas-hw-ver = <0x170110>; /* Titan v170 v1.1.0 */
 		nvmem-cells = <&minor_rev>;
 		nvmem-cell-names = "minor_rev";
+		camnoc-axi-min-ib-bw = <3000000000>;
 		regulator-names = "camss-vdd";
 		camss-vdd-supply = <&titan_top_gdsc>;
 		clock-names = "gcc_ahb_clk",
diff --git a/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
index da4d27d..95fb25a 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
@@ -38,6 +38,8 @@
 	qcom,vddp-ref-clk-supply = <&pm660_l1>;
 	qcom,vddp-ref-clk-max-microamp = <100>;
 
+	force-ufshc-probe;
+
 	status = "ok";
 };
 
@@ -296,6 +298,17 @@
 	qcom,platform-te-gpio = <&tlmm 10 0>;
 };
 
+&dsi_hx8399_truly_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+	qcom,panel-mode-gpio = <&tlmm 76 0>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+};
+
 &dsi_dual_nt35597_truly_video_display {
 	qcom,dsi-display-active;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
index 7764837..7162257 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
@@ -14,7 +14,10 @@
 #include "sdm670-pmic-overlay.dtsi"
 #include "sdm670-sde-display.dtsi"
 #include "sdm670-camera-sensor-mtp.dtsi"
+
+&qupv3_se10_i2c {
 #include "smb1355.dtsi"
+};
 
 &ufsphy_mem {
 	compatible = "qcom,ufs-phy-qmp-v3";
@@ -355,6 +358,17 @@
 	qcom,platform-te-gpio = <&tlmm 10 0>;
 };
 
+&dsi_hx8399_truly_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+	qcom,panel-mode-gpio = <&tlmm 76 0>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+};
+
 &dsi_dual_nt35597_truly_video_display {
 	qcom,dsi-display-active;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
index bdcd039..8ed821a 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
@@ -134,7 +134,9 @@
 				#size-cells = <0>;
 				qcom,psci-mode-shift = <0>;
 				qcom,psci-mode-mask = <0xf>;
-				qcom,disable-prediction;
+				qcom,ref-stddev = <100>;
+				qcom,tmr-add = <100>;
+				qcom,ref-premature-cnt = <3>;
 				qcom,cpu = <&CPU6 &CPU7>;
 
 				qcom,pm-cpu-level@0 { /* C1 */
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi
index 27be1fd..f63d442 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi
@@ -10,6 +10,8 @@
  * GNU General Public License for more details.
  */
 
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
 &pm660_0{
 	pm660_charger: qcom,qpnp-smb2 {
 		compatible = "qcom,qpnp-smb2";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-qrd-overlay.dts
index 36d485e..67b5ebe 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qrd-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd-overlay.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -19,7 +19,7 @@
 #include <dt-bindings/clock/qcom,rpmh.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 
-#include "sdm670-qrd.dtsi"
+#include "sdm670-qrd-sku1.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L QRD";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd-sku1.dtsi b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku1.dtsi
new file mode 100644
index 0000000..2c1cde6
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku1.dtsi
@@ -0,0 +1,14 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdm670-qrd.dtsi"
+#include "sdm670-audio-overlay.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2-overlay.dts
index 73d1909..d5edb36 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2-overlay.dts
@@ -13,13 +13,7 @@
 /dts-v1/;
 /plugin/;
 
-#include <dt-bindings/clock/qcom,gcc-sdm845.h>
-#include <dt-bindings/clock/qcom,camcc-sdm845.h>
-#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
-#include <dt-bindings/clock/qcom,rpmh.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-
-#include "sdm670-qrd.dtsi"
+#include "sdm670-qrd-sku2.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L QRD SKU2";
@@ -30,22 +24,3 @@
 		       <0x0001001b 0x0102001a 0x0 0x0>,
 		       <0x0001001b 0x0201011a 0x0 0x0>;
 };
-
-&dsi_dual_nt36850_truly_cmd_display {
-	/delete-property/ qcom,dsi-display-active;
-};
-
-&dsi_hx8399_truly_cmd {
-	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
-	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
-	qcom,mdss-dsi-bl-min-level = <1>;
-	qcom,mdss-dsi-bl-max-level = <4095>;
-	qcom,panel-mode-gpio = <&tlmm 76 0>;
-	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
-	qcom,platform-reset-gpio = <&tlmm 75 0>;
-	qcom,platform-te-gpio = <&tlmm 10 0>;
-};
-
-&dsi_hx8399_truly_cmd_display {
-	qcom,dsi-display-active;
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2.dts b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2.dts
index 680bc17..9f871c5 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,7 @@
 /dts-v1/;
 
 #include "sdm670.dtsi"
-#include "sdm670-qrd.dtsi"
+#include "sdm670-qrd-sku2.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L QRD SKU2";
@@ -24,22 +24,3 @@
 		       <0x0001001b 0x0102001a 0x0 0x0>,
 		       <0x0001001b 0x0201011a 0x0 0x0>;
 };
-
-&dsi_dual_nt36850_truly_cmd_display {
-	/delete-property/ qcom,dsi-display-active;
-};
-
-&dsi_hx8399_truly_cmd {
-	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
-	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
-	qcom,mdss-dsi-bl-min-level = <1>;
-	qcom,mdss-dsi-bl-max-level = <4095>;
-	qcom,panel-mode-gpio = <&tlmm 76 0>;
-	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
-	qcom,platform-reset-gpio = <&tlmm 75 0>;
-	qcom,platform-te-gpio = <&tlmm 10 0>;
-};
-
-&dsi_hx8399_truly_cmd_display {
-	qcom,dsi-display-active;
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2.dtsi b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2.dtsi
new file mode 100644
index 0000000..cdb652e
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2.dtsi
@@ -0,0 +1,32 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "sdm670-qrd.dtsi"
+#include "sdm670-audio-overlay.dtsi"
+
+&dsi_dual_nt36850_truly_cmd_display {
+	/delete-property/ qcom,dsi-display-active;
+};
+
+&dsi_hx8399_truly_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,panel-mode-gpio = <&tlmm 76 0>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+};
+
+&dsi_hx8399_truly_cmd_display {
+	qcom,dsi-display-active;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd.dts b/arch/arm64/boot/dts/qcom/sdm670-qrd.dts
index c22afa4..318939f 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qrd.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,7 @@
 /dts-v1/;
 
 #include "sdm670.dtsi"
-#include "sdm670-qrd.dtsi"
+#include "sdm670-qrd-sku1.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L QRD";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
index 43f1465..5ff2c32 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
@@ -10,13 +10,16 @@
  * GNU General Public License for more details.
  */
 
+#include <dt-bindings/clock/qcom,rpmh.h>
 #include <dt-bindings/gpio/gpio.h>
 #include "sdm670-camera-sensor-qrd.dtsi"
 #include "sdm670-pmic-overlay.dtsi"
-#include "sdm670-audio-overlay.dtsi"
-#include "smb1355.dtsi"
 #include "sdm670-sde-display.dtsi"
 
+&qupv3_se10_i2c {
+#include "smb1355.dtsi"
+};
+
 &qupv3_se9_2uart {
 	status = "disabled";
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi
index 9d3f37d..3efadcb 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi
@@ -133,7 +133,8 @@
 		pm660l_s3_level: regulator-pm660l-s3-level {
 			regulator-name = "pm660l_s3_level";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
-			regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+			regulator-min-microvolt
+				= <RPMH_REGULATOR_LEVEL_RETENTION>;
 			regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
 			qcom,min-dropout-voltage-level = <(-1)>;
 		};
@@ -141,7 +142,8 @@
 		pm660l_s3_level_ao: regulator-pm660l-s3-level-ao {
 			regulator-name = "pm660l_s3_level_ao";
 			qcom,set = <RPMH_REGULATOR_SET_ACTIVE>;
-			regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+			regulator-min-microvolt
+				= <RPMH_REGULATOR_LEVEL_RETENTION>;
 			regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
 			qcom,min-dropout-voltage-level = <(-1)>;
 		};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
index 48deca6..395e88c 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
@@ -468,7 +468,7 @@
 
 	dsi_hx8399_truly_cmd_display: qcom,dsi-display@16 {
 		compatible = "qcom,dsi-display";
-		label = "dsi_hx8399_truly_cmd_display";
+		label = "dsi_hx8399_truly_fhd_video_display";
 		qcom,display-type = "primary";
 
 		qcom,dsi-ctrl = <&mdss_dsi0>;
@@ -858,6 +858,7 @@
 	qcom,mdss-dsi-panel-status-value = <0x9c>;
 	qcom,mdss-dsi-panel-on-check-value = <0x9c>;
 	qcom,mdss-dsi-panel-status-read-length = <1>;
+	qcom,mdss-dsi-panel-cmds-only-by-right;
 	qcom,mdss-dsi-display-timings {
 		timing@0{
 			qcom,mdss-dsi-panel-phy-timings = [00 1f 08 08 24 23 08
diff --git a/arch/arm64/boot/dts/qcom/sdm670-vidc.dtsi b/arch/arm64/boot/dts/qcom/sdm670-vidc.dtsi
index a0b4a22..f5e9489 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-vidc.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-vidc.dtsi
@@ -129,15 +129,19 @@
 
 		/* Clocks */
 		clock-names = "core_clk", "iface_clk", "bus_clk",
-			"core0_clk", "core0_bus_clk";
+			"core0_clk", "core0_bus_clk", "core1_clk",
+			"core1_bus_clk";
 		clocks = <&clock_videocc VIDEO_CC_VENUS_CTL_CORE_CLK>,
 			<&clock_videocc VIDEO_CC_VENUS_AHB_CLK>,
 			<&clock_videocc VIDEO_CC_VENUS_CTL_AXI_CLK>,
 			<&clock_videocc VIDEO_CC_VCODEC0_CORE_CLK>,
-			<&clock_videocc VIDEO_CC_VCODEC0_AXI_CLK>;
+			<&clock_videocc VIDEO_CC_VCODEC0_AXI_CLK>,
+			<&clock_videocc VIDEO_CC_VCODEC1_CORE_CLK>,
+			<&clock_videocc VIDEO_CC_VCODEC1_AXI_CLK>;
 		qcom,proxy-clock-names = "core_clk", "iface_clk",
-			"bus_clk", "core0_clk", "core0_bus_clk";
-		qcom,clock-configs = <0x1 0x0 0x0 0x1 0x0>;
+			"bus_clk", "core0_clk", "core0_bus_clk",
+			"core1_clk", "core1_bus_clk";
+		qcom,clock-configs = <0x1 0x0 0x0 0x1 0x0 0x4 0x4>;
 		qcom,allowed-clock-rates = <100000000 200000000 330000000
 			364700000>;
 
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index 4b39207..575e448 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -730,6 +730,7 @@
 		      <0x17a60000 0x100000>;    /* GICR * 8 */
 		interrupts = <1 9 4>;
 		interrupt-parent = <&intc>;
+		ignored-save-restore-irqs = <38>;
 	};
 
 	pdc: interrupt-controller@b220000{
@@ -1866,6 +1867,7 @@
 		ufs-qcom-crypto = <&ufs_ice>;
 
 		lanes-per-direction = <1>;
+		spm-level = <5>;
 		dev-ref-clk-freq = <0>; /* 19.2 MHz */
 
 		clock-names =
@@ -1879,7 +1881,7 @@
 			"rx_lane0_sync_clk";
 		clocks =
 			<&clock_gcc GCC_UFS_PHY_AXI_HW_CTL_CLK>,
-			<&clock_gcc GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK>,
+			<&clock_gcc UFS_PHY_AXI_UFS_VOTE_CLK>,
 			<&clock_gcc GCC_UFS_PHY_AHB_CLK>,
 			<&clock_gcc GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK>,
 			<&clock_gcc GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK>,
@@ -2216,6 +2218,7 @@
 		status = "ok";
 		memory-region = <&pil_modem_mem>;
 		qcom,mem-protect-id = <0xF>;
+		qcom,complete-ramdump;
 
 		/* GPIO inputs from mss */
 		qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_1_in 0 0>;
@@ -2382,7 +2385,7 @@
 		clocks = <&clock_gcc GCC_SDCC1_AHB_CLK>,
 			<&clock_gcc GCC_SDCC1_APPS_CLK>,
 			<&clock_gcc GCC_SDCC1_ICE_CORE_CLK>,
-			<&clock_gcc GCC_AGGRE_UFS_PHY_AXI_CLK>;
+			<&clock_gcc UFS_PHY_AXI_EMMC_VOTE_CLK>;
 		clock-names = "iface_clk", "core_clk", "ice_core_clk",
 			"bus_aggr_clk";
 
@@ -2966,6 +2969,7 @@
 };
 
 &gpu_cx_gdsc {
+	parent-supply = <&pm660l_s3_level>;
 	status = "ok";
 };
 
diff --git a/arch/arm64/boot/dts/qcom/sdm710-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sdm710-qrd-overlay.dts
index 803616d..08c3433 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-qrd-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-qrd-overlay.dts
@@ -13,13 +13,7 @@
 /dts-v1/;
 /plugin/;
 
-#include <dt-bindings/clock/qcom,gcc-sdm845.h>
-#include <dt-bindings/clock/qcom,camcc-sdm845.h>
-#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
-#include <dt-bindings/clock/qcom,rpmh.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-
-#include "sdm670-qrd.dtsi"
+#include "sdm670-qrd-sku1.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM710 PM660 + PM660L QRD";
diff --git a/arch/arm64/boot/dts/qcom/sdm710-qrd-sku2-overlay.dts b/arch/arm64/boot/dts/qcom/sdm710-qrd-sku2-overlay.dts
index ab3ce4d..91891ba 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-qrd-sku2-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-qrd-sku2-overlay.dts
@@ -13,13 +13,7 @@
 /dts-v1/;
 /plugin/;
 
-#include <dt-bindings/clock/qcom,gcc-sdm845.h>
-#include <dt-bindings/clock/qcom,camcc-sdm845.h>
-#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
-#include <dt-bindings/clock/qcom,rpmh.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-
-#include "sdm670-qrd.dtsi"
+#include "sdm670-qrd-sku2.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM710 PM660 + PM660L QRD SKU2";
@@ -30,22 +24,3 @@
 		       <0x0001001b 0x0102001a 0x0 0x0>,
 		       <0x0001001b 0x0201011a 0x0 0x0>;
 };
-
-&dsi_dual_nt36850_truly_cmd_display {
-	/delete-property/ qcom,dsi-display-active;
-};
-
-&dsi_hx8399_truly_cmd {
-	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
-	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
-	qcom,mdss-dsi-bl-min-level = <1>;
-	qcom,mdss-dsi-bl-max-level = <4095>;
-	qcom,panel-mode-gpio = <&tlmm 76 0>;
-	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
-	qcom,platform-reset-gpio = <&tlmm 75 0>;
-	qcom,platform-te-gpio = <&tlmm 10 0>;
-};
-
-&dsi_hx8399_truly_cmd_display {
-	qcom,dsi-display-active;
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm710-qrd-sku2.dts b/arch/arm64/boot/dts/qcom/sdm710-qrd-sku2.dts
index 76b2862..f674893 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-qrd-sku2.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-qrd-sku2.dts
@@ -14,7 +14,7 @@
 /dts-v1/;
 
 #include "sdm710.dtsi"
-#include "sdm670-qrd.dtsi"
+#include "sdm670-qrd-sku2.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM710 PM660 + PM660L QRD SKU2";
@@ -24,22 +24,3 @@
 		       <0x0001001b 0x0102001a 0x0 0x0>,
 		       <0x0001001b 0x0201011a 0x0 0x0>;
 };
-
-&dsi_dual_nt36850_truly_cmd_display {
-	/delete-property/ qcom,dsi-display-active;
-};
-
-&dsi_hx8399_truly_cmd {
-	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
-	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
-	qcom,mdss-dsi-bl-min-level = <1>;
-	qcom,mdss-dsi-bl-max-level = <4095>;
-	qcom,panel-mode-gpio = <&tlmm 76 0>;
-	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
-	qcom,platform-reset-gpio = <&tlmm 75 0>;
-	qcom,platform-te-gpio = <&tlmm 10 0>;
-};
-
-&dsi_hx8399_truly_cmd_display {
-	qcom,dsi-display-active;
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm710-qrd.dts b/arch/arm64/boot/dts/qcom/sdm710-qrd.dts
index e3cb7cc..4eb414f 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-qrd.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-qrd.dts
@@ -15,6 +15,7 @@
 
 #include "sdm710.dtsi"
 #include "sdm670-qrd.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM710 PM660 + PM660L QRD";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-670-usb-common.dtsi b/arch/arm64/boot/dts/qcom/sdm845-670-usb-common.dtsi
index 9903d19..9b88356 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-670-usb-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-670-usb-common.dtsi
@@ -36,6 +36,7 @@
 		qcom,dwc-usb3-msm-tx-fifo-size = <21288>;
 		qcom,num-gsi-evt-buffs = <0x3>;
 		qcom,use-pdc-interrupts;
+		qcom,pm-qos-latency = <44>;
 		extcon = <0>, <0>, <&eud>, <0>, <0>;
 
 		clocks = <&clock_gcc GCC_USB30_PRIM_MASTER_CLK>,
@@ -128,6 +129,7 @@
 		vdd-supply = <&pm8998_l1>;
 		vdda18-supply = <&pm8998_l12>;
 		vdda33-supply = <&pm8998_l24>;
+		qcom,override-bias-ctrl2;
 		qcom,vdd-voltage-level = <0 880000 880000>;
 		qcom,qusb-phy-reg-offset =
 			<0x240 /* QUSB2PHY_PORT_TUNE1 */
@@ -402,6 +404,7 @@
 			snps,hird-threshold = /bits/ 8 <0x10>;
 			snps,usb3_lpm_capable;
 			usb-core-id = <1>;
+			dr_mode = "host";
 		};
 	};
 
@@ -416,6 +419,7 @@
 		vdd-supply = <&pm8998_l1>;
 		vdda18-supply = <&pm8998_l12>;
 		vdda33-supply = <&pm8998_l24>;
+		qcom,override-bias-ctrl2;
 		qcom,vdd-voltage-level = <0 880000 880000>;
 		qcom,qusb-phy-reg-offset =
 			<0x240 /* QUSB2PHY_PORT_TUNE1 */
@@ -426,7 +430,8 @@
 			 0x254 /* QUSB2PHY_TEST1 */
 			 0x198 /* PLL_BIAS_CONTROL_2 */
 			 0x228 /* QUSB2PHY_SQ_CTRL1 */
-			 0x22c>; /* QUSB2PHY_SQ_CTRL2 */
+			 0x22c /* QUSB2PHY_SQ_CTRL2 */
+			 0x27c>; /* QUSB2PHY_DEBUG_CTRL1 */
 
 		qcom,qusb-phy-init-seq =
 			/* <value reg_offset> */
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-svr.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-svr.dtsi
index d387f93..aa068e5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-svr.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-svr.dtsi
@@ -220,7 +220,7 @@
 				 &cam_sensor_rear2_suspend>;
 		gpios = <&tlmm 15 0>,
 			<&tlmm 9 0>,
-			<&tlmm 8 0>;
+			<&tlmm 7 0>;
 		gpio-reset = <1>;
 		gpio-vana = <2>;
 		gpio-req-tbl-num = <0 1 2>;
@@ -261,7 +261,7 @@
 				 &cam_sensor_front_suspend>;
 		gpios = <&tlmm 14 0>,
 			<&tlmm 28 0>,
-			<&tlmm 8 0>;
+			<&tlmm 7 0>;
 		gpio-reset = <1>;
 		gpio-vana = <2>;
 		gpio-req-tbl-num = <0 1 2>;
@@ -441,37 +441,39 @@
 		sensor-position-roll = <270>;
 		sensor-position-pitch = <0>;
 		sensor-position-yaw = <0>;
-		led-flash-src = <&led_flash_iris>;
-		cam_vio-supply = <&pm8998_lvs1>;
-		cam_vana-supply = <&pmi8998_bob>;
-		cam_vdig-supply = <&camera_ldo>;
+		cam_vio-supply = <&pm8998_l9>;
+		cam_vana-supply = <&pm8998_l16>;
+		cam_vdig-supply = <&pm8998_l10>;
 		cam_clk-supply = <&titan_top_gdsc>;
 		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
 			"cam_clk";
 		rgltr-cntrl-support;
-		rgltr-min-voltage = <0 3312000 1050000 0>;
-		rgltr-max-voltage = <0 3600000 1050000 0>;
+		rgltr-min-voltage = <1800000 3312000 1800000 0>;
+		rgltr-max-voltage = <1800000 3312000 1800000 0>;
 		rgltr-load-current = <0 80000 105000 0>;
 		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
-		pinctrl-0 = <&cam_sensor_mclk3_active
+		pinctrl-0 = <&cam_sensor_mclk2_active
 				 &cam_sensor_iris_active>;
-		pinctrl-1 = <&cam_sensor_mclk3_suspend
+		pinctrl-1 = <&cam_sensor_mclk2_suspend
 				 &cam_sensor_iris_suspend>;
-		gpios = <&tlmm 16 0>,
-			<&tlmm 9 0>,
-			<&tlmm 8 0>;
+		gpios = <&tlmm 15 0>,
+			<&tlmm 21 0>,
+			<&tlmm 122 0>,
+			<&tlmm 59 0>;
 		gpio-reset = <1>;
 		gpio-vana = <2>;
-		gpio-req-tbl-num = <0 1 2>;
-		gpio-req-tbl-flags = <1 0 0>;
+		gpio-vdig = <3>;
+		gpio-req-tbl-num = <0 1 2 3>;
+		gpio-req-tbl-flags = <1 0 0 0>;
 		gpio-req-tbl-label = "CAMIF_MCLK3",
 					"CAM_RESET3",
-					"CAM_VANA1";
+					"CAM_VANA3",
+					"CAM_VDIG3";
 		sensor-mode = <0>;
 		cci-master = <1>;
 		status = "ok";
-		clocks = <&clock_camcc CAM_CC_MCLK3_CLK>;
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
 		clock-names = "cam_clk";
 		clock-cntl-level = "turbo";
 		clock-rates = <24000000>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
index fd6a0c7..2e2de74 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
@@ -381,6 +381,7 @@
 		interrupt-names = "cpas_camnoc";
 		interrupts = <0 459 0>;
 		qcom,cpas-hw-ver = <0x170100>; /* Titan v170 v1.0.0 */
+		camnoc-axi-min-ib-bw = <3000000000>;
 		regulator-names = "camss-vdd";
 		camss-vdd-supply = <&titan_top_gdsc>;
 		clock-names = "gcc_ahb_clk",
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi b/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
index d6be6d4..0a02bfb 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
@@ -265,6 +265,10 @@
 	/delete-property/ vdd_mx-supply;
 };
 
+&gpu_cx_gdsc {
+	/delete-property/ parent-supply;
+};
+
 &gpu_gx_gdsc {
 	/delete-property/ parent-supply;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index 349c4c0..274a862 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -14,7 +14,10 @@
 #include "sdm845-pmic-overlay.dtsi"
 #include "sdm845-pinctrl-overlay.dtsi"
 #include "sdm845-camera-sensor-mtp.dtsi"
+
+&qupv3_se10_i2c {
 #include "smb1355.dtsi"
+};
 
 &vendor {
 	bluetooth: bt_wcn3990 {
@@ -338,22 +341,13 @@
 };
 
 &usb1 {
-	status = "okay";
 	extcon = <&extcon_usb1>;
 };
 
-&qusb_phy1 {
-	status = "okay";
-};
-
 &ext_5v_boost {
 	status = "ok";
 };
 
-&usb_qmp_phy {
-	status = "okay";
-};
-
 &pm8998_vadc {
 	chan@83 {
 		label = "vph_pwr";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
index 6034b6d..f5a979c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -13,9 +13,12 @@
 #include "sdm845-pmic-overlay.dtsi"
 #include "sdm845-pinctrl-overlay.dtsi"
 #include "sdm845-camera-sensor-qrd.dtsi"
-#include "smb1355.dtsi"
 #include <dt-bindings/gpio/gpio.h>
 
+&qupv3_se10_i2c {
+#include "smb1355.dtsi"
+};
+
 &vendor {
 	bluetooth: bt_wcn3990 {
 		compatible = "qca,wcn3990";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
index a5c6ab5..b2b0000 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
@@ -18,7 +18,10 @@
 
 #include "sdm845-pmic-overlay.dtsi"
 #include "sdm845-pinctrl-overlay.dtsi"
+
+&qupv3_se10_i2c {
 #include "smb1355.dtsi"
+};
 
 &vendor {
 	bluetooth: bt_wcn3990 {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
index ec8665b..34beda4 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
@@ -142,7 +142,8 @@
 		pm8998_s9_level: regulator-s9-level {
 			regulator-name = "pm8998_s9_level";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
-			regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+			regulator-min-microvolt
+				= <RPMH_REGULATOR_LEVEL_RETENTION>;
 			regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
 			qcom,min-dropout-voltage-level = <(-1)>;
 		};
@@ -150,7 +151,8 @@
 		pm8998_s9_level_ao: regulator-s9-level-ao {
 			regulator-name = "pm8998_s9_level_ao";
 			qcom,set = <RPMH_REGULATOR_SET_ACTIVE>;
-			regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+			regulator-min-microvolt
+				= <RPMH_REGULATOR_LEVEL_RETENTION>;
 			regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
 			qcom,min-dropout-voltage-level = <(-1)>;
 		};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index bfcebf6..6132722 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -258,33 +258,13 @@
 
 		/* data and reg bus scale settings */
 		qcom,sde-data-bus {
-			qcom,msm-bus,name = "mdss_sde_mnoc";
+			qcom,msm-bus,name = "mdss_sde";
 			qcom,msm-bus,num-cases = <3>;
 			qcom,msm-bus,num-paths = <2>;
 			qcom,msm-bus,vectors-KBps =
-			    <22 773 0 0>, <23 773 0 0>,
-			    <22 773 0 6400000>, <23 773 0 6400000>,
-			    <22 773 0 6400000>, <23 773 0 6400000>;
-		};
-
-		qcom,sde-llcc-bus {
-			qcom,msm-bus,name = "mdss_sde_llcc";
-			qcom,msm-bus,num-cases = <3>;
-			qcom,msm-bus,num-paths = <1>;
-			qcom,msm-bus,vectors-KBps =
-			    <132 770 0 0>,
-			    <132 770 0 6400000>,
-			    <132 770 0 6400000>;
-		};
-
-		qcom,sde-ebi-bus {
-			qcom,msm-bus,name = "mdss_sde_ebi";
-			qcom,msm-bus,num-cases = <3>;
-			qcom,msm-bus,num-paths = <1>;
-			qcom,msm-bus,vectors-KBps =
-			    <129 512 0 0>,
-			    <129 512 0 6400000>,
-			    <129 512 0 6400000>;
+				<22 512 0 0>, <23 512 0 0>,
+				<22 512 0 6400000>, <23 512 0 6400000>,
+				<22 512 0 6400000>, <23 512 0 6400000>;
 		};
 
 		qcom,sde-reg-bus {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
index 85419c8..97cb981 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
@@ -302,6 +302,7 @@
 		interrupt-names = "cpas_camnoc";
 		interrupts = <0 459 0>;
 		qcom,cpas-hw-ver = <0x170110>; /* Titan v170 v1.1.0 */
+		camnoc-axi-min-ib-bw = <3000000000>;
 		regulator-names = "camss-vdd";
 		camss-vdd-supply = <&titan_top_gdsc>;
 		clock-names = "gcc_ahb_clk",
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
index ba76273..229d06b 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
@@ -51,7 +51,7 @@
 			compatible = "qcom,memshare-peripheral";
 			qcom,peripheral-size = <0x500000>;
 			qcom,client-id = <1>;
-			qcom,allocate-boot-time;
+			qcom,allocate-on-request;
 			label = "modem";
 		};
 	};
@@ -470,6 +470,7 @@
 			2784000 35000
 			2803200 40000
 			2841600 50000
+			2956800 60000
 		>;
 		idle-cost-data = <
 			100 80 60 40
@@ -537,6 +538,7 @@
 			2784000 165
 			2803200 170
 			2841600 180
+			2956800 190
 		>;
 		idle-cost-data = <
 			4 3 2 1
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index e9a913f..19eeb7b 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -776,6 +776,7 @@
 		      <0x17a60000 0x100000>;    /* GICR * 8 */
 		interrupts = <1 9 4>;
 		interrupt-parent = <&intc>;
+		ignored-save-restore-irqs = <38>;
 	};
 
 	pdc: interrupt-controller@b220000{
@@ -2974,7 +2975,6 @@
 				 <DCC_READ 0x011360b0 1 0>,
 				 <DCC_READ 0x0113e030 2 0>,
 				 <DCC_READ 0x01141000 1 0>,
-				 <DCC_READ 0x01142028 1 0>,
 				 <DCC_READ 0x01148058 4 0>,
 				 <DCC_READ 0x01160410 3 0>,
 				 <DCC_READ 0x011604a0 1 0>,
@@ -2985,7 +2985,6 @@
 				 <DCC_READ 0x011b6044 4 0>,
 				 <DCC_READ 0x011be030 2 0>,
 				 <DCC_READ 0x011c1000 1 0>,
-				 <DCC_READ 0x011c2028 1 0>,
 				 <DCC_READ 0x011c8058 4 0>,
 				 <DCC_READ 0x011e0410 3 0>,
 				 <DCC_READ 0x011e04a0 1 0>,
@@ -2997,7 +2996,6 @@
 				 <DCC_READ 0x012360B0 1 0>,
 				 <DCC_READ 0x0123E030 2 0>,
 				 <DCC_READ 0x01241000 1 0>,
-				 <DCC_READ 0x01242028 1 0>,
 				 <DCC_READ 0x01248058 4 0>,
 				 <DCC_READ 0x01260410 3 0>,
 				 <DCC_READ 0x012604a0 1 0>,
@@ -3010,7 +3008,6 @@
 				 <DCC_READ 0x012b60b0 1 0>,
 				 <DCC_READ 0x012be030 2 0>,
 				 <DCC_READ 0x012c1000 1 0>,
-				 <DCC_READ 0x012c2028 1 0>,
 				 <DCC_READ 0x012c8058 4 0>,
 				 <DCC_READ 0x012e0410 3 0>,
 				 <DCC_READ 0x012e04a0 1 0>,
@@ -3019,7 +3016,6 @@
 				 <DCC_READ 0x012e6418 1 0>,
 				 <DCC_READ 0x01380900 8 0>,
 				 <DCC_READ 0x01380d00 5 0>,
-				 <DCC_READ 0x01350110 4 0>,
 				 <DCC_READ 0x01430280 1 0>,
 				 <DCC_READ 0x01430288 1 0>,
 				 <DCC_READ 0x0143028c 7 0>,
@@ -3028,7 +3024,6 @@
 				 <DCC_READ 0x011360b0 1 0>,
 				 <DCC_READ 0x0113e030 2 0>,
 				 <DCC_READ 0x01141000 1 0>,
-				 <DCC_READ 0x01142028 1 0>,
 				 <DCC_READ 0x01148058 4 0>,
 				 <DCC_READ 0x01160410 3 0>,
 				 <DCC_READ 0x011604a0 1 0>,
@@ -3039,7 +3034,6 @@
 				 <DCC_READ 0x011b6044 4 0>,
 				 <DCC_READ 0x011be030 2 0>,
 				 <DCC_READ 0x011c1000 1 0>,
-				 <DCC_READ 0x011c2028 1 0>,
 				 <DCC_READ 0x011c8058 4 0>,
 				 <DCC_READ 0x011e0410 3 0>,
 				 <DCC_READ 0x011e04a0 1 0>,
@@ -3051,7 +3045,6 @@
 				 <DCC_READ 0x012360b0 1 0>,
 				 <DCC_READ 0x0123e030 2 0>,
 				 <DCC_READ 0x01241000 1 0>,
-				 <DCC_READ 0x01242028 1 0>,
 				 <DCC_READ 0x01248058 4 0>,
 				 <DCC_READ 0x01260410 3 0>,
 				 <DCC_READ 0x012604a0 1 0>,
@@ -3064,7 +3057,6 @@
 				 <DCC_READ 0x012b60b0 1 0>,
 				 <DCC_READ 0x012be030 2 0>,
 				 <DCC_READ 0x012C1000 1 0>,
-				 <DCC_READ 0x012C2028 1 0>,
 				 <DCC_READ 0x012C8058 4 0>,
 				 <DCC_READ 0x012e0410 3 0>,
 				 <DCC_READ 0x012e04a0 1 0>,
@@ -3073,7 +3065,6 @@
 				 <DCC_READ 0x012e6418 1 0>,
 				 <DCC_READ 0x01380900 8 0>,
 				 <DCC_READ 0x01380d00 5 0>,
-				 <DCC_READ 0x01350110 4 0>,
 				 <DCC_READ 0x01430280 1 0>,
 				 <DCC_READ 0x01430288 1 0>,
 				 <DCC_READ 0x0143028c 7 0>,
@@ -4069,6 +4060,7 @@
 };
 
 &gpu_cx_gdsc {
+	parent-supply = <&pm8998_s9_level>;
 	status = "ok";
 };
 
diff --git a/arch/arm64/boot/dts/qcom/smb1355.dtsi b/arch/arm64/boot/dts/qcom/smb1355.dtsi
index 3412b25d..5939440 100644
--- a/arch/arm64/boot/dts/qcom/smb1355.dtsi
+++ b/arch/arm64/boot/dts/qcom/smb1355.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -12,98 +12,96 @@
 
 #include <dt-bindings/interrupt-controller/irq.h>
 
-&qupv3_se10_i2c {
-	smb1355_0: qcom,smb1355@8 {
-		compatible = "qcom,i2c-pmic";
-		reg = <0x8>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupt-parent = <&spmi_bus>;
-		interrupts = <0x0 0xd1 0x0 IRQ_TYPE_LEVEL_LOW>;
-		interrupt_names = "smb1355_0";
-		interrupt-controller;
-		#interrupt-cells = <3>;
-		qcom,periph-map = <0x10 0x12 0x13 0x16>;
+smb1355_0: qcom,smb1355@8 {
+	compatible = "qcom,i2c-pmic";
+	reg = <0x8>;
+	#address-cells = <1>;
+	#size-cells = <0>;
+	interrupt-parent = <&spmi_bus>;
+	interrupts = <0x0 0xd1 0x0 IRQ_TYPE_LEVEL_LOW>;
+	interrupt_names = "smb1355_0";
+	interrupt-controller;
+	#interrupt-cells = <3>;
+	qcom,periph-map = <0x10 0x12 0x13 0x16>;
 
-		smb1355_revid_0: qcom,revid@100 {
-			compatible = "qcom,qpnp-revid";
-			reg = <0x100 0x100>;
-		};
-
-		smb1355_charger_0: qcom,smb1355-charger@1000 {
-			compatible = "qcom,smb1355";
-			qcom,pmic-revid = <&smb1355_revid_0>;
-			reg = <0x1000 0x700>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-			interrupt-parent = <&smb1355_0>;
-			status = "disabled";
-
-			io-channels = <&pmi8998_rradc 2>,
-				      <&pmi8998_rradc 12>;
-			io-channel-names = "charger_temp",
-					   "charger_temp_max";
-
-			qcom,chgr@1000 {
-				reg = <0x1000 0x100>;
-				interrupts = <0x10 0x1 IRQ_TYPE_EDGE_RISING>;
-				interrupt-names = "chg-state-change";
-			};
-
-			qcom,chgr-misc@1600 {
-				reg = <0x1600 0x100>;
-				interrupts = <0x16 0x1 IRQ_TYPE_EDGE_RISING>,
-					     <0x16 0x6 IRQ_TYPE_EDGE_RISING>;
-				interrupt-names = "wdog-bark",
-						  "temperature-change";
-			};
-		};
+	smb1355_revid_0: qcom,revid@100 {
+		compatible = "qcom,qpnp-revid";
+		reg = <0x100 0x100>;
 	};
 
-	smb1355_1: qcom,smb1355@c {
-		compatible = "qcom,i2c-pmic";
-		reg = <0xc>;
+	smb1355_charger_0: qcom,smb1355-charger@1000 {
+		compatible = "qcom,smb1355";
+		qcom,pmic-revid = <&smb1355_revid_0>;
+		reg = <0x1000 0x700>;
 		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupt-parent = <&spmi_bus>;
-		interrupts = <0x0 0xd1 0x0 IRQ_TYPE_LEVEL_LOW>;
-		interrupt_names = "smb1355_1";
-		interrupt-controller;
-		#interrupt-cells = <3>;
-		qcom,periph-map = <0x10 0x12 0x13 0x16>;
+		#size-cells = <1>;
+		interrupt-parent = <&smb1355_0>;
+		status = "disabled";
 
-		smb1355_revid_1: qcom,revid@100 {
-			compatible = "qcom,qpnp-revid";
-			reg = <0x100 0x100>;
+		io-channels = <&pmi8998_rradc 2>,
+			      <&pmi8998_rradc 12>;
+		io-channel-names = "charger_temp",
+				   "charger_temp_max";
+
+		qcom,chgr@1000 {
+			reg = <0x1000 0x100>;
+			interrupts = <0x10 0x1 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "chg-state-change";
 		};
 
-		smb1355_charger_1: qcom,smb1355-charger@1000 {
-			compatible = "qcom,smb1355";
-			qcom,pmic-revid = <&smb1355_revid_1>;
-			reg = <0x1000 0x700>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-			interrupt-parent = <&smb1355_1>;
-			status = "disabled";
+		qcom,chgr-misc@1600 {
+			reg = <0x1600 0x100>;
+			interrupts = <0x16 0x1 IRQ_TYPE_EDGE_RISING>,
+				     <0x16 0x6 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "wdog-bark",
+					  "temperature-change";
+		};
+	};
+};
 
-			io-channels = <&pmi8998_rradc 2>,
-				      <&pmi8998_rradc 12>;
-			io-channel-names = "charger_temp",
-					   "charger_temp_max";
+smb1355_1: qcom,smb1355@c {
+	compatible = "qcom,i2c-pmic";
+	reg = <0xc>;
+	#address-cells = <1>;
+	#size-cells = <0>;
+	interrupt-parent = <&spmi_bus>;
+	interrupts = <0x0 0xd1 0x0 IRQ_TYPE_LEVEL_LOW>;
+	interrupt_names = "smb1355_1";
+	interrupt-controller;
+	#interrupt-cells = <3>;
+	qcom,periph-map = <0x10 0x12 0x13 0x16>;
 
-			qcom,chgr@1000 {
-				reg = <0x1000 0x100>;
-				interrupts = <0x10 0x1 IRQ_TYPE_EDGE_RISING>;
-				interrupt-names = "chg-state-change";
-			};
+	smb1355_revid_1: qcom,revid@100 {
+		compatible = "qcom,qpnp-revid";
+		reg = <0x100 0x100>;
+	};
 
-			qcom,chgr-misc@1600 {
-				reg = <0x1600 0x100>;
-				interrupts = <0x16 0x1 IRQ_TYPE_EDGE_RISING>,
-					     <0x16 0x6 IRQ_TYPE_EDGE_RISING>;
-				interrupt-names = "wdog-bark",
-						  "temperature-change";
-			};
+	smb1355_charger_1: qcom,smb1355-charger@1000 {
+		compatible = "qcom,smb1355";
+		qcom,pmic-revid = <&smb1355_revid_1>;
+		reg = <0x1000 0x700>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		interrupt-parent = <&smb1355_1>;
+		status = "disabled";
+
+		io-channels = <&pmi8998_rradc 2>,
+			      <&pmi8998_rradc 12>;
+		io-channel-names = "charger_temp",
+				   "charger_temp_max";
+
+		qcom,chgr@1000 {
+			reg = <0x1000 0x100>;
+			interrupts = <0x10 0x1 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "chg-state-change";
+		};
+
+		qcom,chgr-misc@1600 {
+			reg = <0x1600 0x100>;
+			interrupts = <0x16 0x1 IRQ_TYPE_EDGE_RISING>,
+				     <0x16 0x6 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "wdog-bark",
+					  "temperature-change";
 		};
 	};
 };
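
The hunk above drops the &qupv3_se10_i2c wrapper, so smb1355.dtsi now defines only the two charger nodes and leaves the choice of I2C bus to the file that includes it. A minimal sketch of a consumer is shown below; the bus label and the #include-inside-the-node pattern are illustrative assumptions, not taken from this patch:

	/* board/platform .dtsi -- place the chargers on whichever bus wires them up */
	&qupv3_se10_i2c {
	#include "smb1355.dtsi"
	};

Keeping the charger description bus-agnostic presumably lets boards that route the SMB1355 slave chargers through a different serial engine reuse the same file unchanged.
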
diff --git a/arch/arm64/boot/dts/qcom/smb1390.dtsi b/arch/arm64/boot/dts/qcom/smb1390.dtsi
new file mode 100644
index 0000000..92ac103
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/smb1390.dtsi
@@ -0,0 +1,61 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+
+smb1390: qcom,smb1390@10 {
+	compatible = "qcom,i2c-pmic";
+	reg = <0x10>;
+	#address-cells = <1>;
+	#size-cells = <0>;
+	interrupt-parent = <&spmi_bus>;
+	interrupts = <0x0 0xd1 0x0 IRQ_TYPE_LEVEL_LOW>;
+	interrupt-names = "smb1390";
+	interrupt-controller;
+	#interrupt-cells = <3>;
+	qcom,periph-map = <0x10>;
+
+	smb1390_revid: qcom,revid {
+		compatible = "qcom,qpnp-revid";
+		reg = <0x100>;
+	};
+
+	smb1390_charger: qcom,charge_pump {
+		compatible = "qcom,smb1390-charger";
+		qcom,pmic-revid = <&smb1390_revid>;
+		interrupt-parent = <&smb1390>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&smb1390_die_temp_default>;
+		qcom,smb-vadc = <&pm8998_vadc>;
+		qcom,channel-num = <0x14>;
+		status = "disabled";
+
+		qcom,core {
+			interrupts = <0x10 0x0 IRQ_TYPE_EDGE_RISING>,
+				     <0x10 0x1 IRQ_TYPE_EDGE_RISING>,
+				     <0x10 0x2 IRQ_TYPE_EDGE_RISING>,
+				     <0x10 0x3 IRQ_TYPE_EDGE_RISING>,
+				     <0x10 0x4 IRQ_TYPE_EDGE_RISING>,
+				     <0x10 0x5 IRQ_TYPE_EDGE_RISING>,
+				     <0x10 0x6 IRQ_TYPE_EDGE_RISING>,
+				     <0x10 0x7 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "switcher-off-window",
+					  "switcher-off-fault",
+					  "tsd-fault",
+					  "irev-fault",
+					  "vph-ov-hard",
+					  "vph-ov-soft",
+					  "ilim",
+					  "temp-alarm";
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sxr1120-lc-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sxr1120-lc-cdp-overlay.dts
new file mode 100644
index 0000000..c3aa763
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sxr1120-lc-cdp-overlay.dts
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sxr1120-lc-cdp.dtsi"
+#include "qcs605-lc-audio-overlay.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SXR1120 LC Groot+PM8005 CDP";
+	compatible = "qcom,sxr1120-cdp", "qcom,sxr1120", "qcom,cdp";
+	qcom,msm-id = <370 0x0>;
+	qcom,board-id = <1 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sxr1120-lc-cdp.dts b/arch/arm64/boot/dts/qcom/sxr1120-lc-cdp.dts
new file mode 100644
index 0000000..6237873
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sxr1120-lc-cdp.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sxr1120-lc.dtsi"
+#include "sxr1120-lc-cdp.dtsi"
+#include "qcs605-lc-audio-overlay.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SXR1120 LC Groot+PM8005 CDP";
+	compatible = "qcom,sxr1120-cdp", "qcom,sxr1120", "qcom,cdp";
+	qcom,board-id = <1 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sxr1120-lc-cdp.dtsi b/arch/arm64/boot/dts/qcom/sxr1120-lc-cdp.dtsi
new file mode 100644
index 0000000..d4d42c5
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sxr1120-lc-cdp.dtsi
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "qcs605-lc-cdp.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-cdp-overlay.dts
new file mode 100644
index 0000000..e90f3b4
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-cdp-overlay.dts
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sxr1120-lc-cdp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SXR1120 LC Groot+PM8005 Ext. Audio Codec CDP";
+	compatible = "qcom,sxr1120-cdp", "qcom,sxr1120", "qcom,cdp";
+	qcom,msm-id = <370 0x0>;
+	qcom,board-id = <1 1>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-cdp.dts b/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-cdp.dts
new file mode 100644
index 0000000..76c424d
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-cdp.dts
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sxr1120-lc.dtsi"
+#include "sxr1120-lc-cdp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SXR1120 LC Groot+PM8005 Ext. Audio Codec CDP";
+	compatible = "qcom,sxr1120-cdp", "qcom,sxr1120", "qcom,cdp";
+	qcom,board-id = <1 1>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-mtp-overlay.dts
new file mode 100644
index 0000000..946298f
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-mtp-overlay.dts
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sxr1120-lc-mtp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SXR1120 LC Groot+PM8005 Ext. Audio Codec MTP";
+	compatible = "qcom,sxr1120-mtp", "qcom,sxr1120", "qcom,mtp";
+	qcom,msm-id = <370 0x0>;
+	qcom,board-id = <8 1>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-mtp.dts b/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-mtp.dts
new file mode 100644
index 0000000..e53bbe3
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-mtp.dts
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sxr1120-lc.dtsi"
+#include "sxr1120-lc-mtp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SXR1120 LC Groot+PM8005 Ext. Audio Codec MTP";
+	compatible = "qcom,sxr1120-mtp", "qcom,sxr1120", "qcom,mtp";
+	qcom,board-id = <8 1>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sxr1120-lc-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sxr1120-lc-mtp-overlay.dts
new file mode 100644
index 0000000..8af46ef
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sxr1120-lc-mtp-overlay.dts
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sxr1120-lc-mtp.dtsi"
+#include "qcs605-lc-audio-overlay.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SXR1120 LC Groot+PM8005 MTP";
+	compatible = "qcom,sxr1120-mtp", "qcom,sxr1120", "qcom,mtp";
+	qcom,msm-id = <370 0x0>;
+	qcom,board-id = <8 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sxr1120-lc-mtp.dts b/arch/arm64/boot/dts/qcom/sxr1120-lc-mtp.dts
new file mode 100644
index 0000000..ffcdeda
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sxr1120-lc-mtp.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sxr1120-lc.dtsi"
+#include "sxr1120-lc-mtp.dtsi"
+#include "qcs605-lc-audio-overlay.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SXR1120 LC Groot+PM8005 MTP";
+	compatible = "qcom,sxr1120-mtp", "qcom,sxr1120", "qcom,mtp";
+	qcom,board-id = <8 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sxr1120-lc-mtp.dtsi b/arch/arm64/boot/dts/qcom/sxr1120-lc-mtp.dtsi
new file mode 100644
index 0000000..270aa0e
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sxr1120-lc-mtp.dtsi
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "qcs605-lc-mtp.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sxr1120-lc.dts b/arch/arm64/boot/dts/qcom/sxr1120-lc.dts
new file mode 100644
index 0000000..5967388
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sxr1120-lc.dts
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sxr1120-lc.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SXR1120 LC SoC";
+	compatible = "qcom,sxr1120";
+	qcom,board-id = <0 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sxr1120-lc.dtsi b/arch/arm64/boot/dts/qcom/sxr1120-lc.dtsi
new file mode 100644
index 0000000..1413e2c
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sxr1120-lc.dtsi
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "qcs605-lc.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SXR1120 SoC";
+	compatible = "qcom,sxr1120";
+	qcom,msm-id = <370 0x0>;
+};
diff --git a/arch/arm64/configs/msm8937-perf_defconfig b/arch/arm64/configs/msm8937-perf_defconfig
index 72a8fb4..be3fae7 100644
--- a/arch/arm64/configs/msm8937-perf_defconfig
+++ b/arch/arm64/configs/msm8937-perf_defconfig
@@ -103,6 +103,7 @@
 CONFIG_IP_ROUTE_VERBOSE=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
 CONFIG_INET_IPCOMP=y
@@ -114,6 +115,7 @@
 CONFIG_INET6_ESP=y
 CONFIG_INET6_IPCOMP=y
 CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_SUBTREES=y
 CONFIG_NETFILTER=y
@@ -224,6 +226,8 @@
 CONFIG_RMNET_DATA_FC=y
 CONFIG_RMNET_DATA_DEBUG_PKT=y
 CONFIG_BT=y
+# CONFIG_BT_BREDR is not set
+# CONFIG_BT_LE is not set
 CONFIG_MSM_BT_POWER=y
 CONFIG_CFG80211=y
 CONFIG_CFG80211_INTERNAL_REGDB=y
@@ -242,7 +246,7 @@
 CONFIG_HDCP_QSEECOM=y
 CONFIG_QSEECOM=y
 CONFIG_UID_SYS_STATS=y
-CONFIG_MEMORY_STATE_TIME=y
+CONFIG_FPR_FPC=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_SG=y
@@ -254,10 +258,8 @@
 CONFIG_SCSI_UFSHCD_PLATFORM=y
 CONFIG_SCSI_UFS_QCOM=y
 CONFIG_SCSI_UFS_QCOM_ICE=y
-CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
-CONFIG_DM_DEBUG=y
 CONFIG_DM_CRYPT=y
 CONFIG_DM_REQ_CRYPT=y
 CONFIG_DM_UEVENT=y
@@ -309,6 +311,10 @@
 # CONFIG_INPUT_MOUSE is not set
 CONFIG_INPUT_JOYSTICK=y
 CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v26=y
+CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_HBTP_INPUT=y
 CONFIG_INPUT_QPNP_POWER_ON=y
@@ -334,19 +340,17 @@
 CONFIG_SPI_SPIDEV=y
 CONFIG_SLIMBUS_MSM_NGD=y
 CONFIG_SPMI=y
-CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
 CONFIG_PINCTRL_MSM8937=y
 CONFIG_PINCTRL_MSM8917=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_GPIO_QPNP_PIN=y
-CONFIG_GPIO_QPNP_PIN_DEBUG=y
 CONFIG_POWER_RESET_QCOM=y
 CONFIG_QCOM_DLOAD_MODE=y
-CONFIG_POWER_RESET_SYSCON=y
 CONFIG_QPNP_FG=y
 CONFIG_SMB135X_CHARGER=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
 CONFIG_SMB1351_USB_CHARGER=y
 CONFIG_QPNP_SMB5=y
 CONFIG_QPNP_SMBCHARGER=y
@@ -368,14 +372,13 @@
 CONFIG_REGULATOR_COOLING_DEVICE=y
 CONFIG_QTI_BCL_PMIC5=y
 CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_SYSCON=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_CPR=y
-CONFIG_REGULATOR_CPR4_APSS=y
-CONFIG_REGULATOR_CPRH_KBSS=y
 CONFIG_REGULATOR_MEM_ACC=y
-CONFIG_REGULATOR_MSM_GFX_LDO=y
 CONFIG_REGULATOR_QPNP_LABIBB=y
 CONFIG_REGULATOR_QPNP_LCDB=y
 CONFIG_REGULATOR_QPNP=y
@@ -388,9 +391,7 @@
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
 CONFIG_V4L_PLATFORM_DRIVERS=y
 CONFIG_MSM_CAMERA=y
-CONFIG_MSM_CAMERA_DEBUG=y
 CONFIG_MSMB_CAMERA=y
-CONFIG_MSMB_CAMERA_DEBUG=y
 CONFIG_MSM_CAMERA_SENSOR=y
 CONFIG_MSM_CPP=y
 CONFIG_MSM_CCI=y
@@ -414,7 +415,6 @@
 CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
 CONFIG_MSMB_JPEG=y
 CONFIG_MSM_FD=y
-CONFIG_MSM_JPEGDMA=y
 CONFIG_MSM_VIDC_3X_V4L2=y
 CONFIG_MSM_VIDC_3X_GOVERNORS=y
 CONFIG_MSM_SDE_ROTATOR=y
@@ -428,7 +428,7 @@
 CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y
 CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_DYNAMIC_MINORS=y
@@ -489,11 +489,12 @@
 CONFIG_USB_CONFIGFS_F_QDSS=y
 CONFIG_MMC=y
 CONFIG_MMC_PERF_PROFILING=y
+# CONFIG_PWRSEQ_EMMC is not set
+# CONFIG_PWRSEQ_SIMPLE is not set
 CONFIG_MMC_PARANOID_SD_INIT=y
 CONFIG_MMC_CLKGATE=y
 CONFIG_MMC_BLOCK_MINORS=32
 CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
-CONFIG_MMC_TEST=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_MSM=y
@@ -559,7 +560,6 @@
 CONFIG_MSM_PIL=y
 CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_MSM_PIL_MSS_QDSP6V5=y
-CONFIG_ICNSS=y
 CONFIG_MSM_PERFORMANCE=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_AVTIMER=y
@@ -572,12 +572,14 @@
 CONFIG_WCNSS_CORE=y
 CONFIG_WCNSS_CORE_PRONTO=y
 CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
+CONFIG_BIG_CLUSTER_MIN_FREQ_ADJUST=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
 CONFIG_DEVFREQ_SIMPLE_DEV=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_SPDM_SCM=y
 CONFIG_DEVFREQ_SPDM=y
+CONFIG_IIO=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
 CONFIG_PWM_QTI_LPG=y
@@ -589,6 +591,8 @@
 CONFIG_MSM_TZ_LOG=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
@@ -596,8 +600,6 @@
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_ECRYPT_FS=y
-CONFIG_ECRYPT_FS_MESSAGING=y
 CONFIG_SDCARD_FS=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_NLS_CODEPAGE_437=y
diff --git a/arch/arm64/configs/msm8937_defconfig b/arch/arm64/configs/msm8937_defconfig
index fc7dc6c..ebcc83e 100644
--- a/arch/arm64/configs/msm8937_defconfig
+++ b/arch/arm64/configs/msm8937_defconfig
@@ -107,6 +107,7 @@
 CONFIG_IP_ROUTE_VERBOSE=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
 CONFIG_INET_IPCOMP=y
@@ -118,6 +119,7 @@
 CONFIG_INET6_ESP=y
 CONFIG_INET6_IPCOMP=y
 CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_SUBTREES=y
 CONFIG_NETFILTER=y
@@ -230,6 +232,8 @@
 CONFIG_RMNET_DATA_FC=y
 CONFIG_RMNET_DATA_DEBUG_PKT=y
 CONFIG_BT=y
+# CONFIG_BT_BREDR is not set
+# CONFIG_BT_LE is not set
 CONFIG_MSM_BT_POWER=y
 CONFIG_CFG80211=y
 CONFIG_CFG80211_INTERNAL_REGDB=y
@@ -248,7 +252,7 @@
 CONFIG_HDCP_QSEECOM=y
 CONFIG_QSEECOM=y
 CONFIG_UID_SYS_STATS=y
-CONFIG_MEMORY_STATE_TIME=y
+CONFIG_FPR_FPC=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_SG=y
@@ -260,10 +264,8 @@
 CONFIG_SCSI_UFSHCD_PLATFORM=y
 CONFIG_SCSI_UFS_QCOM=y
 CONFIG_SCSI_UFS_QCOM_ICE=y
-CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
-CONFIG_DM_DEBUG=y
 CONFIG_DM_CRYPT=y
 CONFIG_DM_REQ_CRYPT=y
 CONFIG_DM_UEVENT=y
@@ -315,6 +317,10 @@
 # CONFIG_INPUT_MOUSE is not set
 CONFIG_INPUT_JOYSTICK=y
 CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v26=y
+CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_HBTP_INPUT=y
 CONFIG_INPUT_QPNP_POWER_ON=y
@@ -342,19 +348,17 @@
 CONFIG_SPI_SPIDEV=y
 CONFIG_SLIMBUS_MSM_NGD=y
 CONFIG_SPMI=y
-CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
 CONFIG_PINCTRL_MSM8937=y
 CONFIG_PINCTRL_MSM8917=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_GPIO_QPNP_PIN=y
-CONFIG_GPIO_QPNP_PIN_DEBUG=y
 CONFIG_POWER_RESET_QCOM=y
 CONFIG_QCOM_DLOAD_MODE=y
-CONFIG_POWER_RESET_SYSCON=y
 CONFIG_QPNP_FG=y
 CONFIG_SMB135X_CHARGER=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
 CONFIG_SMB1351_USB_CHARGER=y
 CONFIG_QPNP_SMB5=y
 CONFIG_QPNP_SMBCHARGER=y
@@ -376,14 +380,13 @@
 CONFIG_REGULATOR_COOLING_DEVICE=y
 CONFIG_QTI_BCL_PMIC5=y
 CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_SYSCON=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_CPR=y
-CONFIG_REGULATOR_CPR4_APSS=y
-CONFIG_REGULATOR_CPRH_KBSS=y
 CONFIG_REGULATOR_MEM_ACC=y
-CONFIG_REGULATOR_MSM_GFX_LDO=y
 CONFIG_REGULATOR_QPNP_LABIBB=y
 CONFIG_REGULATOR_QPNP_LCDB=y
 CONFIG_REGULATOR_QPNP=y
@@ -422,7 +425,6 @@
 CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
 CONFIG_MSMB_JPEG=y
 CONFIG_MSM_FD=y
-CONFIG_MSM_JPEGDMA=y
 CONFIG_MSM_VIDC_3X_V4L2=y
 CONFIG_MSM_VIDC_3X_GOVERNORS=y
 CONFIG_MSM_SDE_ROTATOR=y
@@ -437,7 +439,7 @@
 CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y
 CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_DYNAMIC_MINORS=y
@@ -498,12 +500,13 @@
 CONFIG_USB_CONFIGFS_F_QDSS=y
 CONFIG_MMC=y
 CONFIG_MMC_PERF_PROFILING=y
+# CONFIG_PWRSEQ_EMMC is not set
+# CONFIG_PWRSEQ_SIMPLE is not set
 CONFIG_MMC_RING_BUFFER=y
 CONFIG_MMC_PARANOID_SD_INIT=y
 CONFIG_MMC_CLKGATE=y
 CONFIG_MMC_BLOCK_MINORS=32
 CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
-CONFIG_MMC_TEST=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_MSM=y
@@ -556,7 +559,6 @@
 CONFIG_QCOM_SCM=y
 CONFIG_MSM_BOOT_STATS=y
 CONFIG_MSM_CORE_HANG_DETECT=y
-CONFIG_MSM_GLADIATOR_HANG_DETECT=y
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QCOM_MEMORY_DUMP_V2=y
 CONFIG_MSM_DEBUG_LAR_UNLOCK=y
@@ -577,7 +579,6 @@
 CONFIG_MSM_PIL=y
 CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_MSM_PIL_MSS_QDSP6V5=y
-CONFIG_ICNSS=y
 CONFIG_MSM_PERFORMANCE=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_AVTIMER=y
@@ -590,12 +591,14 @@
 CONFIG_WCNSS_CORE=y
 CONFIG_WCNSS_CORE_PRONTO=y
 CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
+CONFIG_BIG_CLUSTER_MIN_FREQ_ADJUST=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
 CONFIG_DEVFREQ_SIMPLE_DEV=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_SPDM_SCM=y
 CONFIG_DEVFREQ_SPDM=y
+CONFIG_IIO=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
 CONFIG_PWM_QTI_LPG=y
@@ -607,6 +610,8 @@
 CONFIG_MSM_TZ_LOG=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
@@ -615,8 +620,6 @@
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_ECRYPT_FS=y
-CONFIG_ECRYPT_FS_MESSAGING=y
 CONFIG_SDCARD_FS=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_NLS_CODEPAGE_437=y
@@ -637,7 +640,6 @@
 CONFIG_DEBUG_OBJECTS_WORK=y
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
 CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
-CONFIG_SLUB_DEBUG_ON=y
 CONFIG_DEBUG_KMEMLEAK=y
 CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000
 CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
diff --git a/arch/arm64/configs/msm8953-perf_defconfig b/arch/arm64/configs/msm8953-perf_defconfig
index 7d64ca7..fca320e 100644
--- a/arch/arm64/configs/msm8953-perf_defconfig
+++ b/arch/arm64/configs/msm8953-perf_defconfig
@@ -18,6 +18,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
 CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_SCHEDTUNE=y
 CONFIG_RT_GROUP_SCHED=y
@@ -67,6 +68,7 @@
 CONFIG_PROCESS_RECLAIM=y
 CONFIG_SECCOMP=y
 CONFIG_HARDEN_BRANCH_PREDICTOR=y
+CONFIG_PSCI_BP_HARDENING=y
 CONFIG_ARMV8_DEPRECATED=y
 CONFIG_SWP_EMULATION=y
 CONFIG_CP15_BARRIER_EMULATION=y
@@ -102,6 +104,7 @@
 CONFIG_IP_ROUTE_VERBOSE=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
 CONFIG_INET_IPCOMP=y
@@ -113,6 +116,7 @@
 CONFIG_INET6_ESP=y
 CONFIG_INET6_IPCOMP=y
 CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_SUBTREES=y
 CONFIG_NETFILTER=y
@@ -341,9 +345,9 @@
 CONFIG_GPIO_QPNP_PIN=y
 CONFIG_POWER_RESET_QCOM=y
 CONFIG_QCOM_DLOAD_MODE=y
-CONFIG_POWER_RESET_SYSCON=y
 CONFIG_QPNP_FG=y
 CONFIG_SMB135X_CHARGER=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
 CONFIG_SMB1351_USB_CHARGER=y
 CONFIG_QPNP_SMB5=y
 CONFIG_QPNP_SMBCHARGER=y
@@ -365,7 +369,9 @@
 CONFIG_REGULATOR_COOLING_DEVICE=y
 CONFIG_QTI_BCL_PMIC5=y
 CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_SYSCON=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_CPR=y
@@ -540,6 +546,7 @@
 CONFIG_MSM_RMNET_BAM=y
 CONFIG_MSM_MDSS_PLL=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_MSM_TIMER_LEAP=y
 CONFIG_MAILBOX=y
 CONFIG_ARM_SMMU=y
 CONFIG_QCOM_LAZY_MAPPING=y
@@ -578,12 +585,14 @@
 CONFIG_WCNSS_CORE=y
 CONFIG_WCNSS_CORE_PRONTO=y
 CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
+CONFIG_BIG_CLUSTER_MIN_FREQ_ADJUST=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
 CONFIG_DEVFREQ_SIMPLE_DEV=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_SPDM_SCM=y
 CONFIG_DEVFREQ_SPDM=y
+CONFIG_IIO=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
 CONFIG_PWM_QTI_LPG=y
@@ -595,6 +604,8 @@
 CONFIG_MSM_TZ_LOG=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
diff --git a/arch/arm64/configs/msm8953_defconfig b/arch/arm64/configs/msm8953_defconfig
index de329bd..a3547be 100644
--- a/arch/arm64/configs/msm8953_defconfig
+++ b/arch/arm64/configs/msm8953_defconfig
@@ -19,6 +19,7 @@
 CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
 CONFIG_CGROUP_DEBUG=y
 CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_SCHEDTUNE=y
 CONFIG_RT_GROUP_SCHED=y
@@ -70,6 +71,7 @@
 CONFIG_PROCESS_RECLAIM=y
 CONFIG_SECCOMP=y
 CONFIG_HARDEN_BRANCH_PREDICTOR=y
+CONFIG_PSCI_BP_HARDENING=y
 CONFIG_ARMV8_DEPRECATED=y
 CONFIG_SWP_EMULATION=y
 CONFIG_CP15_BARRIER_EMULATION=y
@@ -106,6 +108,7 @@
 CONFIG_IP_ROUTE_VERBOSE=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
 CONFIG_INET_IPCOMP=y
@@ -117,6 +120,7 @@
 CONFIG_INET6_ESP=y
 CONFIG_INET6_IPCOMP=y
 CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_SUBTREES=y
 CONFIG_NETFILTER=y
@@ -350,9 +354,9 @@
 CONFIG_GPIO_QPNP_PIN=y
 CONFIG_POWER_RESET_QCOM=y
 CONFIG_QCOM_DLOAD_MODE=y
-CONFIG_POWER_RESET_SYSCON=y
 CONFIG_QPNP_FG=y
 CONFIG_SMB135X_CHARGER=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
 CONFIG_SMB1351_USB_CHARGER=y
 CONFIG_QPNP_SMB5=y
 CONFIG_QPNP_SMBCHARGER=y
@@ -374,7 +378,9 @@
 CONFIG_REGULATOR_COOLING_DEVICE=y
 CONFIG_QTI_BCL_PMIC5=y
 CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_SYSCON=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_CPR=y
@@ -551,6 +557,7 @@
 CONFIG_MSM_RMNET_BAM=y
 CONFIG_MSM_MDSS_PLL=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_MSM_TIMER_LEAP=y
 CONFIG_MAILBOX=y
 CONFIG_ARM_SMMU=y
 CONFIG_QCOM_LAZY_MAPPING=y
@@ -564,7 +571,6 @@
 CONFIG_QCOM_SCM=y
 CONFIG_MSM_BOOT_STATS=y
 CONFIG_MSM_CORE_HANG_DETECT=y
-CONFIG_MSM_GLADIATOR_HANG_DETECT=y
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QCOM_MEMORY_DUMP_V2=y
 CONFIG_MSM_DEBUG_LAR_UNLOCK=y
@@ -598,12 +604,14 @@
 CONFIG_WCNSS_CORE=y
 CONFIG_WCNSS_CORE_PRONTO=y
 CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
+CONFIG_BIG_CLUSTER_MIN_FREQ_ADJUST=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
 CONFIG_DEVFREQ_SIMPLE_DEV=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_SPDM_SCM=y
 CONFIG_DEVFREQ_SPDM=y
+CONFIG_IIO=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
 CONFIG_PWM_QTI_LPG=y
@@ -615,6 +623,8 @@
 CONFIG_MSM_TZ_LOG=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
diff --git a/arch/arm64/configs/sdm670-perf_defconfig b/arch/arm64/configs/sdm670-perf_defconfig
index a411a88..b231503 100644
--- a/arch/arm64/configs/sdm670-perf_defconfig
+++ b/arch/arm64/configs/sdm670-perf_defconfig
@@ -67,6 +67,7 @@
 CONFIG_PROCESS_RECLAIM=y
 CONFIG_SECCOMP=y
 CONFIG_HARDEN_BRANCH_PREDICTOR=y
+CONFIG_PSCI_BP_HARDENING=y
 CONFIG_ARMV8_DEPRECATED=y
 CONFIG_SWP_EMULATION=y
 CONFIG_CP15_BARRIER_EMULATION=y
@@ -102,9 +103,11 @@
 CONFIG_IP_ROUTE_VERBOSE=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
 CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
 CONFIG_INET_DIAG_DESTROY=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
@@ -113,6 +116,7 @@
 CONFIG_INET6_ESP=y
 CONFIG_INET6_IPCOMP=y
 CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_SUBTREES=y
 CONFIG_NETFILTER=y
@@ -287,6 +291,7 @@
 CONFIG_PPPOPNS=y
 CONFIG_PPP_ASYNC=y
 CONFIG_PPP_SYNC_TTY=y
+CONFIG_USB_LAN78XX=y
 CONFIG_USB_USBNET=y
 CONFIG_WCNSS_MEM_PRE_ALLOC=y
 CONFIG_CLD_LL_CORE=y
@@ -300,7 +305,6 @@
 CONFIG_INPUT_QPNP_POWER_ON=y
 CONFIG_INPUT_UINPUT=y
 # CONFIG_SERIO_SERPORT is not set
-# CONFIG_VT is not set
 # CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVMEM is not set
 # CONFIG_DEVKMEM is not set
@@ -575,9 +579,7 @@
 CONFIG_QCOM_QFPROM=y
 CONFIG_SENSORS_SSC=y
 CONFIG_MSM_TZ_LOG=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
 CONFIG_EXT4_ENCRYPTION=y
 CONFIG_QUOTA=y
@@ -617,6 +619,7 @@
 CONFIG_FORTIFY_SOURCE=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
 CONFIG_CRYPTO_XCBC=y
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_TWOFISH=y
diff --git a/arch/arm64/configs/sdm670_defconfig b/arch/arm64/configs/sdm670_defconfig
index 3e8d865..d049436 100644
--- a/arch/arm64/configs/sdm670_defconfig
+++ b/arch/arm64/configs/sdm670_defconfig
@@ -72,6 +72,7 @@
 CONFIG_PROCESS_RECLAIM=y
 CONFIG_SECCOMP=y
 CONFIG_HARDEN_BRANCH_PREDICTOR=y
+CONFIG_PSCI_BP_HARDENING=y
 CONFIG_ARMV8_DEPRECATED=y
 CONFIG_SWP_EMULATION=y
 CONFIG_CP15_BARRIER_EMULATION=y
@@ -107,9 +108,11 @@
 CONFIG_IP_ROUTE_VERBOSE=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
 CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
 CONFIG_INET_DIAG_DESTROY=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
@@ -118,6 +121,7 @@
 CONFIG_INET6_ESP=y
 CONFIG_INET6_IPCOMP=y
 CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_SUBTREES=y
 CONFIG_NETFILTER=y
@@ -293,6 +297,7 @@
 CONFIG_PPPOPNS=y
 CONFIG_PPP_ASYNC=y
 CONFIG_PPP_SYNC_TTY=y
+CONFIG_USB_LAN78XX=y
 CONFIG_USB_USBNET=y
 CONFIG_WCNSS_MEM_PRE_ALLOC=y
 CONFIG_CLD_LL_CORE=y
@@ -307,8 +312,9 @@
 CONFIG_INPUT_QPNP_POWER_ON=y
 CONFIG_INPUT_UINPUT=y
 # CONFIG_SERIO_SERPORT is not set
-# CONFIG_VT is not set
 # CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_MSM_GENI=y
 CONFIG_SERIAL_MSM_GENI_CONSOLE=y
 CONFIG_DIAG_CHAR=y
@@ -593,9 +599,7 @@
 CONFIG_QCOM_QFPROM=y
 CONFIG_SENSORS_SSC=y
 CONFIG_MSM_TZ_LOG=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
 CONFIG_EXT4_ENCRYPTION=y
 CONFIG_QUOTA=y
@@ -677,6 +681,7 @@
 CONFIG_FORTIFY_SOURCE=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
 CONFIG_CRYPTO_XCBC=y
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_TWOFISH=y
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index a1fd1dc..e35e571 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -65,6 +65,7 @@
 CONFIG_BALANCE_ANON_FILE_RECLAIM=y
 CONFIG_SECCOMP=y
 CONFIG_HARDEN_BRANCH_PREDICTOR=y
+CONFIG_PSCI_BP_HARDENING=y
 CONFIG_ARMV8_DEPRECATED=y
 CONFIG_SWP_EMULATION=y
 CONFIG_CP15_BARRIER_EMULATION=y
@@ -82,6 +83,7 @@
 # CONFIG_PM_WAKELOCKS_GC is not set
 CONFIG_CPU_IDLE=y
 CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
@@ -101,9 +103,11 @@
 CONFIG_IP_ROUTE_VERBOSE=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
 CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
 CONFIG_INET_DIAG_DESTROY=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
@@ -112,6 +116,7 @@
 CONFIG_INET6_ESP=y
 CONFIG_INET6_IPCOMP=y
 CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_SUBTREES=y
 CONFIG_NETFILTER=y
@@ -548,6 +553,7 @@
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
 CONFIG_QSEE_IPC_IRQ_BRIDGE=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_ARM_MEMLAT_MON=y
@@ -571,8 +577,11 @@
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
 CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
 CONFIG_F2FS_FS=y
 CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_FS_ENCRYPTION=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
@@ -604,6 +613,7 @@
 CONFIG_CORESIGHT_EVENT=y
 CONFIG_CORESIGHT_HWEVENT=y
 CONFIG_CORESIGHT_DUMMY=y
+CONFIG_PFK=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_HARDENED_USERCOPY=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 04608cd..ab983e0 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -69,6 +69,7 @@
 CONFIG_BALANCE_ANON_FILE_RECLAIM=y
 CONFIG_SECCOMP=y
 CONFIG_HARDEN_BRANCH_PREDICTOR=y
+CONFIG_PSCI_BP_HARDENING=y
 CONFIG_ARMV8_DEPRECATED=y
 CONFIG_SWP_EMULATION=y
 CONFIG_CP15_BARRIER_EMULATION=y
@@ -85,6 +86,7 @@
 CONFIG_PM_DEBUG=y
 CONFIG_CPU_IDLE=y
 CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
@@ -104,9 +106,11 @@
 CONFIG_IP_ROUTE_VERBOSE=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
 CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
 CONFIG_INET_DIAG_DESTROY=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
@@ -115,6 +119,7 @@
 CONFIG_INET6_ESP=y
 CONFIG_INET6_IPCOMP=y
 CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_SUBTREES=y
 CONFIG_NETFILTER=y
@@ -465,7 +470,6 @@
 CONFIG_EDAC=y
 CONFIG_EDAC_MM_EDAC=y
 CONFIG_EDAC_KRYO3XX_ARM64=y
-CONFIG_EDAC_KRYO3XX_ARM64_PANIC_ON_CE=y
 CONFIG_EDAC_KRYO3XX_ARM64_PANIC_ON_UE=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_QPNP=y
@@ -567,6 +571,7 @@
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
 CONFIG_MSM_REMOTEQDSS=y
 CONFIG_QSEE_IPC_IRQ_BRIDGE=y
 CONFIG_QCOM_BIMC_BWMON=y
@@ -592,8 +597,11 @@
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
 CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
 CONFIG_F2FS_FS=y
 CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_FS_ENCRYPTION=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
@@ -669,6 +677,7 @@
 CONFIG_CORESIGHT_TGU=y
 CONFIG_CORESIGHT_HWEVENT=y
 CONFIG_CORESIGHT_DUMMY=y
+CONFIG_PFK=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_HARDENED_USERCOPY=y
diff --git a/arch/arm64/crypto/sha256-core.S b/arch/arm64/crypto/sha256-core.S
new file mode 100644
index 0000000..3ce82cc
--- /dev/null
+++ b/arch/arm64/crypto/sha256-core.S
@@ -0,0 +1,2061 @@
+// Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
+//
+// Licensed under the OpenSSL license (the "License").  You may not use
+// this file except in compliance with the License.  You can obtain a copy
+// in the file LICENSE in the source distribution or at
+// https://www.openssl.org/source/license.html
+
+// ====================================================================
+// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+// project. The module is, however, dual licensed under OpenSSL and
+// CRYPTOGAMS licenses depending on where you obtain it. For further
+// details see http://www.openssl.org/~appro/cryptogams/.
+//
+// Permission to use under GPLv2 terms is granted.
+// ====================================================================
+//
+// SHA256/512 for ARMv8.
+//
+// Performance in cycles per processed byte and improvement coefficient
+// over code generated with "default" compiler:
+//
+//		SHA256-hw	SHA256(*)	SHA512
+// Apple A7	1.97		10.5 (+33%)	6.73 (-1%(**))
+// Cortex-A53	2.38		15.5 (+115%)	10.0 (+150%(***))
+// Cortex-A57	2.31		11.6 (+86%)	7.51 (+260%(***))
+// Denver	2.01		10.5 (+26%)	6.70 (+8%)
+// X-Gene			20.0 (+100%)	12.8 (+300%(***))
+// Mongoose	2.36		13.0 (+50%)	8.36 (+33%)
+//
+// (*)	Software SHA256 results are of lesser relevance, presented
+//	mostly for informational purposes.
+// (**)	The result is a trade-off: it's possible to improve it by
+//	10% (or by 1 cycle per round), but at the cost of 20% loss
+//	on Cortex-A53 (or by 4 cycles per round).
+// (***)	Super-impressive coefficients over gcc-generated code are an
+//	indication of some compiler "pathology", most notably code
+//	generated with -mgeneral-regs-only is significantly faster
+//	and the gap is only 40-90%.
+//
+// October 2016.
+//
+// Originally it was reckoned that it makes no sense to implement NEON
+// version of SHA256 for 64-bit processors. This is because performance
+// improvement on most wide-spread Cortex-A5x processors was observed
+// to be marginal, same on Cortex-A53 and ~10% on A57. But then it was
+// observed that 32-bit NEON SHA256 performs significantly better than
+// 64-bit scalar version on *some* of the more recent processors. As
+// result 64-bit NEON version of SHA256 was added to provide best
+// all-round performance. For example it executes ~30% faster on X-Gene
+// and Mongoose. [For reference, NEON version of SHA512 is bound to
+// deliver much less improvement, likely *negative* on Cortex-A5x.
+// Which is why NEON support is limited to SHA256.]
+
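+// For readers of the scalar loop below: each of the 64 SHA-256 rounds
+// (FIPS 180-4) computes
+//	T1 = h + Sigma1(e) + Ch(e,f,g) + K[i] + W[i]
+//	T2 = Sigma0(a) + Maj(a,b,c)
+//	h=g; g=f; f=e; e=d+T1; d=c; c=b; b=a; a=T1+T2;
+// where
+//	Ch(e,f,g)  = (e & f) ^ (~e & g)
+//	Maj(a,b,c) = (a & b) ^ (a & c) ^ (b & c)
+//	Sigma0(a)  = ror(a,2)  ^ ror(a,13) ^ ror(a,22)
+//	Sigma1(e)  = ror(e,6)  ^ ror(e,11) ^ ror(e,25)
+// and, for i >= 16, the message schedule is
+//	W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16]
+//	sigma0(x) = ror(x,7)  ^ ror(x,18) ^ (x >> 3)
+//	sigma1(x) = ror(x,17) ^ ror(x,19) ^ (x >> 10)
+// The register comments (Ch, Maj, Sigma0/1, sigma0/1) refer to these terms.
+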
+#ifndef	__KERNEL__
+# include "arm_arch.h"
+#endif
+
+.text
+
+.extern	OPENSSL_armcap_P
+.globl	sha256_block_data_order
+.type	sha256_block_data_order,%function
+.align	6
+sha256_block_data_order:
+#ifndef	__KERNEL__
+# ifdef	__ILP32__
+	ldrsw	x16,.LOPENSSL_armcap_P
+# else
+	ldr	x16,.LOPENSSL_armcap_P
+# endif
+	adr	x17,.LOPENSSL_armcap_P
+	add	x16,x16,x17
+	ldr	w16,[x16]
+	tst	w16,#ARMV8_SHA256
+	b.ne	.Lv8_entry
+	tst	w16,#ARMV7_NEON
+	b.ne	.Lneon_entry
+#endif
+	stp	x29,x30,[sp,#-128]!
+	add	x29,sp,#0
+
+	stp	x19,x20,[sp,#16]
+	stp	x21,x22,[sp,#32]
+	stp	x23,x24,[sp,#48]
+	stp	x25,x26,[sp,#64]
+	stp	x27,x28,[sp,#80]
+	sub	sp,sp,#4*4
+
+	ldp	w20,w21,[x0]				// load context
+	ldp	w22,w23,[x0,#2*4]
+	ldp	w24,w25,[x0,#4*4]
+	add	x2,x1,x2,lsl#6	// end of input
+	ldp	w26,w27,[x0,#6*4]
+	adr	x30,.LK256
+	stp	x0,x2,[x29,#96]
+
+.Loop:
+	ldp	w3,w4,[x1],#2*4
+	ldr	w19,[x30],#4			// *K++
+	eor	w28,w21,w22				// magic seed
+	str	x1,[x29,#112]
+#ifndef	__AARCH64EB__
+	rev	w3,w3			// 0
+#endif
+	ror	w16,w24,#6
+	add	w27,w27,w19			// h+=K[i]
+	eor	w6,w24,w24,ror#14
+	and	w17,w25,w24
+	bic	w19,w26,w24
+	add	w27,w27,w3			// h+=X[i]
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w20,w21			// a^b, b^c in next round
+	eor	w16,w16,w6,ror#11	// Sigma1(e)
+	ror	w6,w20,#2
+	add	w27,w27,w17			// h+=Ch(e,f,g)
+	eor	w17,w20,w20,ror#9
+	add	w27,w27,w16			// h+=Sigma1(e)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	add	w23,w23,w27			// d+=h
+	eor	w28,w28,w21			// Maj(a,b,c)
+	eor	w17,w6,w17,ror#13	// Sigma0(a)
+	add	w27,w27,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	//add	w27,w27,w17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	w4,w4			// 1
+#endif
+	ldp	w5,w6,[x1],#2*4
+	add	w27,w27,w17			// h+=Sigma0(a)
+	ror	w16,w23,#6
+	add	w26,w26,w28			// h+=K[i]
+	eor	w7,w23,w23,ror#14
+	and	w17,w24,w23
+	bic	w28,w25,w23
+	add	w26,w26,w4			// h+=X[i]
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w27,w20			// a^b, b^c in next round
+	eor	w16,w16,w7,ror#11	// Sigma1(e)
+	ror	w7,w27,#2
+	add	w26,w26,w17			// h+=Ch(e,f,g)
+	eor	w17,w27,w27,ror#9
+	add	w26,w26,w16			// h+=Sigma1(e)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	add	w22,w22,w26			// d+=h
+	eor	w19,w19,w20			// Maj(a,b,c)
+	eor	w17,w7,w17,ror#13	// Sigma0(a)
+	add	w26,w26,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	//add	w26,w26,w17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	w5,w5			// 2
+#endif
+	add	w26,w26,w17			// h+=Sigma0(a)
+	ror	w16,w22,#6
+	add	w25,w25,w19			// h+=K[i]
+	eor	w8,w22,w22,ror#14
+	and	w17,w23,w22
+	bic	w19,w24,w22
+	add	w25,w25,w5			// h+=X[i]
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w26,w27			// a^b, b^c in next round
+	eor	w16,w16,w8,ror#11	// Sigma1(e)
+	ror	w8,w26,#2
+	add	w25,w25,w17			// h+=Ch(e,f,g)
+	eor	w17,w26,w26,ror#9
+	add	w25,w25,w16			// h+=Sigma1(e)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	add	w21,w21,w25			// d+=h
+	eor	w28,w28,w27			// Maj(a,b,c)
+	eor	w17,w8,w17,ror#13	// Sigma0(a)
+	add	w25,w25,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	//add	w25,w25,w17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	w6,w6			// 3
+#endif
+	ldp	w7,w8,[x1],#2*4
+	add	w25,w25,w17			// h+=Sigma0(a)
+	ror	w16,w21,#6
+	add	w24,w24,w28			// h+=K[i]
+	eor	w9,w21,w21,ror#14
+	and	w17,w22,w21
+	bic	w28,w23,w21
+	add	w24,w24,w6			// h+=X[i]
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w25,w26			// a^b, b^c in next round
+	eor	w16,w16,w9,ror#11	// Sigma1(e)
+	ror	w9,w25,#2
+	add	w24,w24,w17			// h+=Ch(e,f,g)
+	eor	w17,w25,w25,ror#9
+	add	w24,w24,w16			// h+=Sigma1(e)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	add	w20,w20,w24			// d+=h
+	eor	w19,w19,w26			// Maj(a,b,c)
+	eor	w17,w9,w17,ror#13	// Sigma0(a)
+	add	w24,w24,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	//add	w24,w24,w17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	w7,w7			// 4
+#endif
+	add	w24,w24,w17			// h+=Sigma0(a)
+	ror	w16,w20,#6
+	add	w23,w23,w19			// h+=K[i]
+	eor	w10,w20,w20,ror#14
+	and	w17,w21,w20
+	bic	w19,w22,w20
+	add	w23,w23,w7			// h+=X[i]
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w24,w25			// a^b, b^c in next round
+	eor	w16,w16,w10,ror#11	// Sigma1(e)
+	ror	w10,w24,#2
+	add	w23,w23,w17			// h+=Ch(e,f,g)
+	eor	w17,w24,w24,ror#9
+	add	w23,w23,w16			// h+=Sigma1(e)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	add	w27,w27,w23			// d+=h
+	eor	w28,w28,w25			// Maj(a,b,c)
+	eor	w17,w10,w17,ror#13	// Sigma0(a)
+	add	w23,w23,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	//add	w23,w23,w17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	w8,w8			// 5
+#endif
+	ldp	w9,w10,[x1],#2*4
+	add	w23,w23,w17			// h+=Sigma0(a)
+	ror	w16,w27,#6
+	add	w22,w22,w28			// h+=K[i]
+	eor	w11,w27,w27,ror#14
+	and	w17,w20,w27
+	bic	w28,w21,w27
+	add	w22,w22,w8			// h+=X[i]
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w23,w24			// a^b, b^c in next round
+	eor	w16,w16,w11,ror#11	// Sigma1(e)
+	ror	w11,w23,#2
+	add	w22,w22,w17			// h+=Ch(e,f,g)
+	eor	w17,w23,w23,ror#9
+	add	w22,w22,w16			// h+=Sigma1(e)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	add	w26,w26,w22			// d+=h
+	eor	w19,w19,w24			// Maj(a,b,c)
+	eor	w17,w11,w17,ror#13	// Sigma0(a)
+	add	w22,w22,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	//add	w22,w22,w17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	w9,w9			// 6
+#endif
+	add	w22,w22,w17			// h+=Sigma0(a)
+	ror	w16,w26,#6
+	add	w21,w21,w19			// h+=K[i]
+	eor	w12,w26,w26,ror#14
+	and	w17,w27,w26
+	bic	w19,w20,w26
+	add	w21,w21,w9			// h+=X[i]
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w22,w23			// a^b, b^c in next round
+	eor	w16,w16,w12,ror#11	// Sigma1(e)
+	ror	w12,w22,#2
+	add	w21,w21,w17			// h+=Ch(e,f,g)
+	eor	w17,w22,w22,ror#9
+	add	w21,w21,w16			// h+=Sigma1(e)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	add	w25,w25,w21			// d+=h
+	eor	w28,w28,w23			// Maj(a,b,c)
+	eor	w17,w12,w17,ror#13	// Sigma0(a)
+	add	w21,w21,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	//add	w21,w21,w17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	w10,w10			// 7
+#endif
+	ldp	w11,w12,[x1],#2*4
+	add	w21,w21,w17			// h+=Sigma0(a)
+	ror	w16,w25,#6
+	add	w20,w20,w28			// h+=K[i]
+	eor	w13,w25,w25,ror#14
+	and	w17,w26,w25
+	bic	w28,w27,w25
+	add	w20,w20,w10			// h+=X[i]
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w21,w22			// a^b, b^c in next round
+	eor	w16,w16,w13,ror#11	// Sigma1(e)
+	ror	w13,w21,#2
+	add	w20,w20,w17			// h+=Ch(e,f,g)
+	eor	w17,w21,w21,ror#9
+	add	w20,w20,w16			// h+=Sigma1(e)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	add	w24,w24,w20			// d+=h
+	eor	w19,w19,w22			// Maj(a,b,c)
+	eor	w17,w13,w17,ror#13	// Sigma0(a)
+	add	w20,w20,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	//add	w20,w20,w17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	w11,w11			// 8
+#endif
+	add	w20,w20,w17			// h+=Sigma0(a)
+	ror	w16,w24,#6
+	add	w27,w27,w19			// h+=K[i]
+	eor	w14,w24,w24,ror#14
+	and	w17,w25,w24
+	bic	w19,w26,w24
+	add	w27,w27,w11			// h+=X[i]
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w20,w21			// a^b, b^c in next round
+	eor	w16,w16,w14,ror#11	// Sigma1(e)
+	ror	w14,w20,#2
+	add	w27,w27,w17			// h+=Ch(e,f,g)
+	eor	w17,w20,w20,ror#9
+	add	w27,w27,w16			// h+=Sigma1(e)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	add	w23,w23,w27			// d+=h
+	eor	w28,w28,w21			// Maj(a,b,c)
+	eor	w17,w14,w17,ror#13	// Sigma0(a)
+	add	w27,w27,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	//add	w27,w27,w17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	w12,w12			// 9
+#endif
+	ldp	w13,w14,[x1],#2*4
+	add	w27,w27,w17			// h+=Sigma0(a)
+	ror	w16,w23,#6
+	add	w26,w26,w28			// h+=K[i]
+	eor	w15,w23,w23,ror#14
+	and	w17,w24,w23
+	bic	w28,w25,w23
+	add	w26,w26,w12			// h+=X[i]
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w27,w20			// a^b, b^c in next round
+	eor	w16,w16,w15,ror#11	// Sigma1(e)
+	ror	w15,w27,#2
+	add	w26,w26,w17			// h+=Ch(e,f,g)
+	eor	w17,w27,w27,ror#9
+	add	w26,w26,w16			// h+=Sigma1(e)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	add	w22,w22,w26			// d+=h
+	eor	w19,w19,w20			// Maj(a,b,c)
+	eor	w17,w15,w17,ror#13	// Sigma0(a)
+	add	w26,w26,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	//add	w26,w26,w17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	w13,w13			// 10
+#endif
+	add	w26,w26,w17			// h+=Sigma0(a)
+	ror	w16,w22,#6
+	add	w25,w25,w19			// h+=K[i]
+	eor	w0,w22,w22,ror#14
+	and	w17,w23,w22
+	bic	w19,w24,w22
+	add	w25,w25,w13			// h+=X[i]
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w26,w27			// a^b, b^c in next round
+	eor	w16,w16,w0,ror#11	// Sigma1(e)
+	ror	w0,w26,#2
+	add	w25,w25,w17			// h+=Ch(e,f,g)
+	eor	w17,w26,w26,ror#9
+	add	w25,w25,w16			// h+=Sigma1(e)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	add	w21,w21,w25			// d+=h
+	eor	w28,w28,w27			// Maj(a,b,c)
+	eor	w17,w0,w17,ror#13	// Sigma0(a)
+	add	w25,w25,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	//add	w25,w25,w17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	w14,w14			// 11
+#endif
+	ldp	w15,w0,[x1],#2*4
+	add	w25,w25,w17			// h+=Sigma0(a)
+	str	w6,[sp,#12]
+	ror	w16,w21,#6
+	add	w24,w24,w28			// h+=K[i]
+	eor	w6,w21,w21,ror#14
+	and	w17,w22,w21
+	bic	w28,w23,w21
+	add	w24,w24,w14			// h+=X[i]
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w25,w26			// a^b, b^c in next round
+	eor	w16,w16,w6,ror#11	// Sigma1(e)
+	ror	w6,w25,#2
+	add	w24,w24,w17			// h+=Ch(e,f,g)
+	eor	w17,w25,w25,ror#9
+	add	w24,w24,w16			// h+=Sigma1(e)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	add	w20,w20,w24			// d+=h
+	eor	w19,w19,w26			// Maj(a,b,c)
+	eor	w17,w6,w17,ror#13	// Sigma0(a)
+	add	w24,w24,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	//add	w24,w24,w17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	w15,w15			// 12
+#endif
+	add	w24,w24,w17			// h+=Sigma0(a)
+	str	w7,[sp,#0]
+	ror	w16,w20,#6
+	add	w23,w23,w19			// h+=K[i]
+	eor	w7,w20,w20,ror#14
+	and	w17,w21,w20
+	bic	w19,w22,w20
+	add	w23,w23,w15			// h+=X[i]
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w24,w25			// a^b, b^c in next round
+	eor	w16,w16,w7,ror#11	// Sigma1(e)
+	ror	w7,w24,#2
+	add	w23,w23,w17			// h+=Ch(e,f,g)
+	eor	w17,w24,w24,ror#9
+	add	w23,w23,w16			// h+=Sigma1(e)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	add	w27,w27,w23			// d+=h
+	eor	w28,w28,w25			// Maj(a,b,c)
+	eor	w17,w7,w17,ror#13	// Sigma0(a)
+	add	w23,w23,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	//add	w23,w23,w17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	w0,w0			// 13
+#endif
+	ldp	w1,w2,[x1]
+	add	w23,w23,w17			// h+=Sigma0(a)
+	str	w8,[sp,#4]
+	ror	w16,w27,#6
+	add	w22,w22,w28			// h+=K[i]
+	eor	w8,w27,w27,ror#14
+	and	w17,w20,w27
+	bic	w28,w21,w27
+	add	w22,w22,w0			// h+=X[i]
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w23,w24			// a^b, b^c in next round
+	eor	w16,w16,w8,ror#11	// Sigma1(e)
+	ror	w8,w23,#2
+	add	w22,w22,w17			// h+=Ch(e,f,g)
+	eor	w17,w23,w23,ror#9
+	add	w22,w22,w16			// h+=Sigma1(e)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	add	w26,w26,w22			// d+=h
+	eor	w19,w19,w24			// Maj(a,b,c)
+	eor	w17,w8,w17,ror#13	// Sigma0(a)
+	add	w22,w22,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	//add	w22,w22,w17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	w1,w1			// 14
+#endif
+	ldr	w6,[sp,#12]
+	add	w22,w22,w17			// h+=Sigma0(a)
+	str	w9,[sp,#8]
+	ror	w16,w26,#6
+	add	w21,w21,w19			// h+=K[i]
+	eor	w9,w26,w26,ror#14
+	and	w17,w27,w26
+	bic	w19,w20,w26
+	add	w21,w21,w1			// h+=X[i]
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w22,w23			// a^b, b^c in next round
+	eor	w16,w16,w9,ror#11	// Sigma1(e)
+	ror	w9,w22,#2
+	add	w21,w21,w17			// h+=Ch(e,f,g)
+	eor	w17,w22,w22,ror#9
+	add	w21,w21,w16			// h+=Sigma1(e)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	add	w25,w25,w21			// d+=h
+	eor	w28,w28,w23			// Maj(a,b,c)
+	eor	w17,w9,w17,ror#13	// Sigma0(a)
+	add	w21,w21,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	//add	w21,w21,w17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	w2,w2			// 15
+#endif
+	ldr	w7,[sp,#0]
+	add	w21,w21,w17			// h+=Sigma0(a)
+	str	w10,[sp,#12]
+	ror	w16,w25,#6
+	add	w20,w20,w28			// h+=K[i]
+	ror	w9,w4,#7
+	and	w17,w26,w25
+	ror	w8,w1,#17
+	bic	w28,w27,w25
+	ror	w10,w21,#2
+	add	w20,w20,w2			// h+=X[i]
+	eor	w16,w16,w25,ror#11
+	eor	w9,w9,w4,ror#18
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w21,w22			// a^b, b^c in next round
+	eor	w16,w16,w25,ror#25	// Sigma1(e)
+	eor	w10,w10,w21,ror#13
+	add	w20,w20,w17			// h+=Ch(e,f,g)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	eor	w8,w8,w1,ror#19
+	eor	w9,w9,w4,lsr#3	// sigma0(X[i+1])
+	add	w20,w20,w16			// h+=Sigma1(e)
+	eor	w19,w19,w22			// Maj(a,b,c)
+	eor	w17,w10,w21,ror#22	// Sigma0(a)
+	eor	w8,w8,w1,lsr#10	// sigma1(X[i+14])
+	add	w3,w3,w12
+	add	w24,w24,w20			// d+=h
+	add	w20,w20,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	add	w3,w3,w9
+	add	w20,w20,w17			// h+=Sigma0(a)
+	add	w3,w3,w8
+.Loop_16_xx:
+	ldr	w8,[sp,#4]
+	str	w11,[sp,#0]
+	ror	w16,w24,#6
+	add	w27,w27,w19			// h+=K[i]
+	ror	w10,w5,#7
+	and	w17,w25,w24
+	ror	w9,w2,#17
+	bic	w19,w26,w24
+	ror	w11,w20,#2
+	add	w27,w27,w3			// h+=X[i]
+	eor	w16,w16,w24,ror#11
+	eor	w10,w10,w5,ror#18
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w20,w21			// a^b, b^c in next round
+	eor	w16,w16,w24,ror#25	// Sigma1(e)
+	eor	w11,w11,w20,ror#13
+	add	w27,w27,w17			// h+=Ch(e,f,g)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	eor	w9,w9,w2,ror#19
+	eor	w10,w10,w5,lsr#3	// sigma0(X[i+1])
+	add	w27,w27,w16			// h+=Sigma1(e)
+	eor	w28,w28,w21			// Maj(a,b,c)
+	eor	w17,w11,w20,ror#22	// Sigma0(a)
+	eor	w9,w9,w2,lsr#10	// sigma1(X[i+14])
+	add	w4,w4,w13
+	add	w23,w23,w27			// d+=h
+	add	w27,w27,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	add	w4,w4,w10
+	add	w27,w27,w17			// h+=Sigma0(a)
+	add	w4,w4,w9
+	ldr	w9,[sp,#8]
+	str	w12,[sp,#4]
+	ror	w16,w23,#6
+	add	w26,w26,w28			// h+=K[i]
+	ror	w11,w6,#7
+	and	w17,w24,w23
+	ror	w10,w3,#17
+	bic	w28,w25,w23
+	ror	w12,w27,#2
+	add	w26,w26,w4			// h+=X[i]
+	eor	w16,w16,w23,ror#11
+	eor	w11,w11,w6,ror#18
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w27,w20			// a^b, b^c in next round
+	eor	w16,w16,w23,ror#25	// Sigma1(e)
+	eor	w12,w12,w27,ror#13
+	add	w26,w26,w17			// h+=Ch(e,f,g)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	eor	w10,w10,w3,ror#19
+	eor	w11,w11,w6,lsr#3	// sigma0(X[i+1])
+	add	w26,w26,w16			// h+=Sigma1(e)
+	eor	w19,w19,w20			// Maj(a,b,c)
+	eor	w17,w12,w27,ror#22	// Sigma0(a)
+	eor	w10,w10,w3,lsr#10	// sigma1(X[i+14])
+	add	w5,w5,w14
+	add	w22,w22,w26			// d+=h
+	add	w26,w26,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	add	w5,w5,w11
+	add	w26,w26,w17			// h+=Sigma0(a)
+	add	w5,w5,w10
+	ldr	w10,[sp,#12]
+	str	w13,[sp,#8]
+	ror	w16,w22,#6
+	add	w25,w25,w19			// h+=K[i]
+	ror	w12,w7,#7
+	and	w17,w23,w22
+	ror	w11,w4,#17
+	bic	w19,w24,w22
+	ror	w13,w26,#2
+	add	w25,w25,w5			// h+=X[i]
+	eor	w16,w16,w22,ror#11
+	eor	w12,w12,w7,ror#18
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w26,w27			// a^b, b^c in next round
+	eor	w16,w16,w22,ror#25	// Sigma1(e)
+	eor	w13,w13,w26,ror#13
+	add	w25,w25,w17			// h+=Ch(e,f,g)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	eor	w11,w11,w4,ror#19
+	eor	w12,w12,w7,lsr#3	// sigma0(X[i+1])
+	add	w25,w25,w16			// h+=Sigma1(e)
+	eor	w28,w28,w27			// Maj(a,b,c)
+	eor	w17,w13,w26,ror#22	// Sigma0(a)
+	eor	w11,w11,w4,lsr#10	// sigma1(X[i+14])
+	add	w6,w6,w15
+	add	w21,w21,w25			// d+=h
+	add	w25,w25,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	add	w6,w6,w12
+	add	w25,w25,w17			// h+=Sigma0(a)
+	add	w6,w6,w11
+	ldr	w11,[sp,#0]
+	str	w14,[sp,#12]
+	ror	w16,w21,#6
+	add	w24,w24,w28			// h+=K[i]
+	ror	w13,w8,#7
+	and	w17,w22,w21
+	ror	w12,w5,#17
+	bic	w28,w23,w21
+	ror	w14,w25,#2
+	add	w24,w24,w6			// h+=X[i]
+	eor	w16,w16,w21,ror#11
+	eor	w13,w13,w8,ror#18
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w25,w26			// a^b, b^c in next round
+	eor	w16,w16,w21,ror#25	// Sigma1(e)
+	eor	w14,w14,w25,ror#13
+	add	w24,w24,w17			// h+=Ch(e,f,g)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	eor	w12,w12,w5,ror#19
+	eor	w13,w13,w8,lsr#3	// sigma0(X[i+1])
+	add	w24,w24,w16			// h+=Sigma1(e)
+	eor	w19,w19,w26			// Maj(a,b,c)
+	eor	w17,w14,w25,ror#22	// Sigma0(a)
+	eor	w12,w12,w5,lsr#10	// sigma1(X[i+14])
+	add	w7,w7,w0
+	add	w20,w20,w24			// d+=h
+	add	w24,w24,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	add	w7,w7,w13
+	add	w24,w24,w17			// h+=Sigma0(a)
+	add	w7,w7,w12
+	ldr	w12,[sp,#4]
+	str	w15,[sp,#0]
+	ror	w16,w20,#6
+	add	w23,w23,w19			// h+=K[i]
+	ror	w14,w9,#7
+	and	w17,w21,w20
+	ror	w13,w6,#17
+	bic	w19,w22,w20
+	ror	w15,w24,#2
+	add	w23,w23,w7			// h+=X[i]
+	eor	w16,w16,w20,ror#11
+	eor	w14,w14,w9,ror#18
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w24,w25			// a^b, b^c in next round
+	eor	w16,w16,w20,ror#25	// Sigma1(e)
+	eor	w15,w15,w24,ror#13
+	add	w23,w23,w17			// h+=Ch(e,f,g)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	eor	w13,w13,w6,ror#19
+	eor	w14,w14,w9,lsr#3	// sigma0(X[i+1])
+	add	w23,w23,w16			// h+=Sigma1(e)
+	eor	w28,w28,w25			// Maj(a,b,c)
+	eor	w17,w15,w24,ror#22	// Sigma0(a)
+	eor	w13,w13,w6,lsr#10	// sigma1(X[i+14])
+	add	w8,w8,w1
+	add	w27,w27,w23			// d+=h
+	add	w23,w23,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	add	w8,w8,w14
+	add	w23,w23,w17			// h+=Sigma0(a)
+	add	w8,w8,w13
+	ldr	w13,[sp,#8]
+	str	w0,[sp,#4]
+	ror	w16,w27,#6
+	add	w22,w22,w28			// h+=K[i]
+	ror	w15,w10,#7
+	and	w17,w20,w27
+	ror	w14,w7,#17
+	bic	w28,w21,w27
+	ror	w0,w23,#2
+	add	w22,w22,w8			// h+=X[i]
+	eor	w16,w16,w27,ror#11
+	eor	w15,w15,w10,ror#18
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w23,w24			// a^b, b^c in next round
+	eor	w16,w16,w27,ror#25	// Sigma1(e)
+	eor	w0,w0,w23,ror#13
+	add	w22,w22,w17			// h+=Ch(e,f,g)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	eor	w14,w14,w7,ror#19
+	eor	w15,w15,w10,lsr#3	// sigma0(X[i+1])
+	add	w22,w22,w16			// h+=Sigma1(e)
+	eor	w19,w19,w24			// Maj(a,b,c)
+	eor	w17,w0,w23,ror#22	// Sigma0(a)
+	eor	w14,w14,w7,lsr#10	// sigma1(X[i+14])
+	add	w9,w9,w2
+	add	w26,w26,w22			// d+=h
+	add	w22,w22,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	add	w9,w9,w15
+	add	w22,w22,w17			// h+=Sigma0(a)
+	add	w9,w9,w14
+	ldr	w14,[sp,#12]
+	str	w1,[sp,#8]
+	ror	w16,w26,#6
+	add	w21,w21,w19			// h+=K[i]
+	ror	w0,w11,#7
+	and	w17,w27,w26
+	ror	w15,w8,#17
+	bic	w19,w20,w26
+	ror	w1,w22,#2
+	add	w21,w21,w9			// h+=X[i]
+	eor	w16,w16,w26,ror#11
+	eor	w0,w0,w11,ror#18
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w22,w23			// a^b, b^c in next round
+	eor	w16,w16,w26,ror#25	// Sigma1(e)
+	eor	w1,w1,w22,ror#13
+	add	w21,w21,w17			// h+=Ch(e,f,g)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	eor	w15,w15,w8,ror#19
+	eor	w0,w0,w11,lsr#3	// sigma0(X[i+1])
+	add	w21,w21,w16			// h+=Sigma1(e)
+	eor	w28,w28,w23			// Maj(a,b,c)
+	eor	w17,w1,w22,ror#22	// Sigma0(a)
+	eor	w15,w15,w8,lsr#10	// sigma1(X[i+14])
+	add	w10,w10,w3
+	add	w25,w25,w21			// d+=h
+	add	w21,w21,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	add	w10,w10,w0
+	add	w21,w21,w17			// h+=Sigma0(a)
+	add	w10,w10,w15
+	ldr	w15,[sp,#0]
+	str	w2,[sp,#12]
+	ror	w16,w25,#6
+	add	w20,w20,w28			// h+=K[i]
+	ror	w1,w12,#7
+	and	w17,w26,w25
+	ror	w0,w9,#17
+	bic	w28,w27,w25
+	ror	w2,w21,#2
+	add	w20,w20,w10			// h+=X[i]
+	eor	w16,w16,w25,ror#11
+	eor	w1,w1,w12,ror#18
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w21,w22			// a^b, b^c in next round
+	eor	w16,w16,w25,ror#25	// Sigma1(e)
+	eor	w2,w2,w21,ror#13
+	add	w20,w20,w17			// h+=Ch(e,f,g)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	eor	w0,w0,w9,ror#19
+	eor	w1,w1,w12,lsr#3	// sigma0(X[i+1])
+	add	w20,w20,w16			// h+=Sigma1(e)
+	eor	w19,w19,w22			// Maj(a,b,c)
+	eor	w17,w2,w21,ror#22	// Sigma0(a)
+	eor	w0,w0,w9,lsr#10	// sigma1(X[i+14])
+	add	w11,w11,w4
+	add	w24,w24,w20			// d+=h
+	add	w20,w20,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	add	w11,w11,w1
+	add	w20,w20,w17			// h+=Sigma0(a)
+	add	w11,w11,w0
+	ldr	w0,[sp,#4]
+	str	w3,[sp,#0]
+	ror	w16,w24,#6
+	add	w27,w27,w19			// h+=K[i]
+	ror	w2,w13,#7
+	and	w17,w25,w24
+	ror	w1,w10,#17
+	bic	w19,w26,w24
+	ror	w3,w20,#2
+	add	w27,w27,w11			// h+=X[i]
+	eor	w16,w16,w24,ror#11
+	eor	w2,w2,w13,ror#18
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w20,w21			// a^b, b^c in next round
+	eor	w16,w16,w24,ror#25	// Sigma1(e)
+	eor	w3,w3,w20,ror#13
+	add	w27,w27,w17			// h+=Ch(e,f,g)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	eor	w1,w1,w10,ror#19
+	eor	w2,w2,w13,lsr#3	// sigma0(X[i+1])
+	add	w27,w27,w16			// h+=Sigma1(e)
+	eor	w28,w28,w21			// Maj(a,b,c)
+	eor	w17,w3,w20,ror#22	// Sigma0(a)
+	eor	w1,w1,w10,lsr#10	// sigma1(X[i+14])
+	add	w12,w12,w5
+	add	w23,w23,w27			// d+=h
+	add	w27,w27,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	add	w12,w12,w2
+	add	w27,w27,w17			// h+=Sigma0(a)
+	add	w12,w12,w1
+	ldr	w1,[sp,#8]
+	str	w4,[sp,#4]
+	ror	w16,w23,#6
+	add	w26,w26,w28			// h+=K[i]
+	ror	w3,w14,#7
+	and	w17,w24,w23
+	ror	w2,w11,#17
+	bic	w28,w25,w23
+	ror	w4,w27,#2
+	add	w26,w26,w12			// h+=X[i]
+	eor	w16,w16,w23,ror#11
+	eor	w3,w3,w14,ror#18
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w27,w20			// a^b, b^c in next round
+	eor	w16,w16,w23,ror#25	// Sigma1(e)
+	eor	w4,w4,w27,ror#13
+	add	w26,w26,w17			// h+=Ch(e,f,g)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	eor	w2,w2,w11,ror#19
+	eor	w3,w3,w14,lsr#3	// sigma0(X[i+1])
+	add	w26,w26,w16			// h+=Sigma1(e)
+	eor	w19,w19,w20			// Maj(a,b,c)
+	eor	w17,w4,w27,ror#22	// Sigma0(a)
+	eor	w2,w2,w11,lsr#10	// sigma1(X[i+14])
+	add	w13,w13,w6
+	add	w22,w22,w26			// d+=h
+	add	w26,w26,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	add	w13,w13,w3
+	add	w26,w26,w17			// h+=Sigma0(a)
+	add	w13,w13,w2
+	ldr	w2,[sp,#12]
+	str	w5,[sp,#8]
+	ror	w16,w22,#6
+	add	w25,w25,w19			// h+=K[i]
+	ror	w4,w15,#7
+	and	w17,w23,w22
+	ror	w3,w12,#17
+	bic	w19,w24,w22
+	ror	w5,w26,#2
+	add	w25,w25,w13			// h+=X[i]
+	eor	w16,w16,w22,ror#11
+	eor	w4,w4,w15,ror#18
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w26,w27			// a^b, b^c in next round
+	eor	w16,w16,w22,ror#25	// Sigma1(e)
+	eor	w5,w5,w26,ror#13
+	add	w25,w25,w17			// h+=Ch(e,f,g)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	eor	w3,w3,w12,ror#19
+	eor	w4,w4,w15,lsr#3	// sigma0(X[i+1])
+	add	w25,w25,w16			// h+=Sigma1(e)
+	eor	w28,w28,w27			// Maj(a,b,c)
+	eor	w17,w5,w26,ror#22	// Sigma0(a)
+	eor	w3,w3,w12,lsr#10	// sigma1(X[i+14])
+	add	w14,w14,w7
+	add	w21,w21,w25			// d+=h
+	add	w25,w25,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	add	w14,w14,w4
+	add	w25,w25,w17			// h+=Sigma0(a)
+	add	w14,w14,w3
+	ldr	w3,[sp,#0]
+	str	w6,[sp,#12]
+	ror	w16,w21,#6
+	add	w24,w24,w28			// h+=K[i]
+	ror	w5,w0,#7
+	and	w17,w22,w21
+	ror	w4,w13,#17
+	bic	w28,w23,w21
+	ror	w6,w25,#2
+	add	w24,w24,w14			// h+=X[i]
+	eor	w16,w16,w21,ror#11
+	eor	w5,w5,w0,ror#18
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w25,w26			// a^b, b^c in next round
+	eor	w16,w16,w21,ror#25	// Sigma1(e)
+	eor	w6,w6,w25,ror#13
+	add	w24,w24,w17			// h+=Ch(e,f,g)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	eor	w4,w4,w13,ror#19
+	eor	w5,w5,w0,lsr#3	// sigma0(X[i+1])
+	add	w24,w24,w16			// h+=Sigma1(e)
+	eor	w19,w19,w26			// Maj(a,b,c)
+	eor	w17,w6,w25,ror#22	// Sigma0(a)
+	eor	w4,w4,w13,lsr#10	// sigma1(X[i+14])
+	add	w15,w15,w8
+	add	w20,w20,w24			// d+=h
+	add	w24,w24,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	add	w15,w15,w5
+	add	w24,w24,w17			// h+=Sigma0(a)
+	add	w15,w15,w4
+	ldr	w4,[sp,#4]
+	str	w7,[sp,#0]
+	ror	w16,w20,#6
+	add	w23,w23,w19			// h+=K[i]
+	ror	w6,w1,#7
+	and	w17,w21,w20
+	ror	w5,w14,#17
+	bic	w19,w22,w20
+	ror	w7,w24,#2
+	add	w23,w23,w15			// h+=X[i]
+	eor	w16,w16,w20,ror#11
+	eor	w6,w6,w1,ror#18
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w24,w25			// a^b, b^c in next round
+	eor	w16,w16,w20,ror#25	// Sigma1(e)
+	eor	w7,w7,w24,ror#13
+	add	w23,w23,w17			// h+=Ch(e,f,g)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	eor	w5,w5,w14,ror#19
+	eor	w6,w6,w1,lsr#3	// sigma0(X[i+1])
+	add	w23,w23,w16			// h+=Sigma1(e)
+	eor	w28,w28,w25			// Maj(a,b,c)
+	eor	w17,w7,w24,ror#22	// Sigma0(a)
+	eor	w5,w5,w14,lsr#10	// sigma1(X[i+14])
+	add	w0,w0,w9
+	add	w27,w27,w23			// d+=h
+	add	w23,w23,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	add	w0,w0,w6
+	add	w23,w23,w17			// h+=Sigma0(a)
+	add	w0,w0,w5
+	ldr	w5,[sp,#8]
+	str	w8,[sp,#4]
+	ror	w16,w27,#6
+	add	w22,w22,w28			// h+=K[i]
+	ror	w7,w2,#7
+	and	w17,w20,w27
+	ror	w6,w15,#17
+	bic	w28,w21,w27
+	ror	w8,w23,#2
+	add	w22,w22,w0			// h+=X[i]
+	eor	w16,w16,w27,ror#11
+	eor	w7,w7,w2,ror#18
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w23,w24			// a^b, b^c in next round
+	eor	w16,w16,w27,ror#25	// Sigma1(e)
+	eor	w8,w8,w23,ror#13
+	add	w22,w22,w17			// h+=Ch(e,f,g)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	eor	w6,w6,w15,ror#19
+	eor	w7,w7,w2,lsr#3	// sigma0(X[i+1])
+	add	w22,w22,w16			// h+=Sigma1(e)
+	eor	w19,w19,w24			// Maj(a,b,c)
+	eor	w17,w8,w23,ror#22	// Sigma0(a)
+	eor	w6,w6,w15,lsr#10	// sigma1(X[i+14])
+	add	w1,w1,w10
+	add	w26,w26,w22			// d+=h
+	add	w22,w22,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	add	w1,w1,w7
+	add	w22,w22,w17			// h+=Sigma0(a)
+	add	w1,w1,w6
+	ldr	w6,[sp,#12]
+	str	w9,[sp,#8]
+	ror	w16,w26,#6
+	add	w21,w21,w19			// h+=K[i]
+	ror	w8,w3,#7
+	and	w17,w27,w26
+	ror	w7,w0,#17
+	bic	w19,w20,w26
+	ror	w9,w22,#2
+	add	w21,w21,w1			// h+=X[i]
+	eor	w16,w16,w26,ror#11
+	eor	w8,w8,w3,ror#18
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w22,w23			// a^b, b^c in next round
+	eor	w16,w16,w26,ror#25	// Sigma1(e)
+	eor	w9,w9,w22,ror#13
+	add	w21,w21,w17			// h+=Ch(e,f,g)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	eor	w7,w7,w0,ror#19
+	eor	w8,w8,w3,lsr#3	// sigma0(X[i+1])
+	add	w21,w21,w16			// h+=Sigma1(e)
+	eor	w28,w28,w23			// Maj(a,b,c)
+	eor	w17,w9,w22,ror#22	// Sigma0(a)
+	eor	w7,w7,w0,lsr#10	// sigma1(X[i+14])
+	add	w2,w2,w11
+	add	w25,w25,w21			// d+=h
+	add	w21,w21,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	add	w2,w2,w8
+	add	w21,w21,w17			// h+=Sigma0(a)
+	add	w2,w2,w7
+	ldr	w7,[sp,#0]
+	str	w10,[sp,#12]
+	ror	w16,w25,#6
+	add	w20,w20,w28			// h+=K[i]
+	ror	w9,w4,#7
+	and	w17,w26,w25
+	ror	w8,w1,#17
+	bic	w28,w27,w25
+	ror	w10,w21,#2
+	add	w20,w20,w2			// h+=X[i]
+	eor	w16,w16,w25,ror#11
+	eor	w9,w9,w4,ror#18
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w21,w22			// a^b, b^c in next round
+	eor	w16,w16,w25,ror#25	// Sigma1(e)
+	eor	w10,w10,w21,ror#13
+	add	w20,w20,w17			// h+=Ch(e,f,g)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	eor	w8,w8,w1,ror#19
+	eor	w9,w9,w4,lsr#3	// sigma0(X[i+1])
+	add	w20,w20,w16			// h+=Sigma1(e)
+	eor	w19,w19,w22			// Maj(a,b,c)
+	eor	w17,w10,w21,ror#22	// Sigma0(a)
+	eor	w8,w8,w1,lsr#10	// sigma1(X[i+14])
+	add	w3,w3,w12
+	add	w24,w24,w20			// d+=h
+	add	w20,w20,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	add	w3,w3,w9
+	add	w20,w20,w17			// h+=Sigma0(a)
+	add	w3,w3,w8
+	cbnz	w19,.Loop_16_xx
+
+	ldp	x0,x2,[x29,#96]
+	ldr	x1,[x29,#112]
+	sub	x30,x30,#260		// rewind
+
+	ldp	w3,w4,[x0]
+	ldp	w5,w6,[x0,#2*4]
+	add	x1,x1,#14*4			// advance input pointer
+	ldp	w7,w8,[x0,#4*4]
+	add	w20,w20,w3
+	ldp	w9,w10,[x0,#6*4]
+	add	w21,w21,w4
+	add	w22,w22,w5
+	add	w23,w23,w6
+	stp	w20,w21,[x0]
+	add	w24,w24,w7
+	add	w25,w25,w8
+	stp	w22,w23,[x0,#2*4]
+	add	w26,w26,w9
+	add	w27,w27,w10
+	cmp	x1,x2
+	stp	w24,w25,[x0,#4*4]
+	stp	w26,w27,[x0,#6*4]
+	b.ne	.Loop
+
+	ldp	x19,x20,[x29,#16]
+	add	sp,sp,#4*4
+	ldp	x21,x22,[x29,#32]
+	ldp	x23,x24,[x29,#48]
+	ldp	x25,x26,[x29,#64]
+	ldp	x27,x28,[x29,#80]
+	ldp	x29,x30,[sp],#128
+	ret
+.size	sha256_block_data_order,.-sha256_block_data_order
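+
+// For reference, the scalar rounds above compute the standard SHA-256
+// per-round functions on 32-bit words (informal pseudocode):
+//	Ch(e,f,g)  = (e AND f) XOR ((NOT e) AND g)
+//	Maj(a,b,c) = (a AND b) XOR (a AND c) XOR (b AND c)
+//	Sigma0(a)  = ROR(a,2)  XOR ROR(a,13) XOR ROR(a,22)
+//	Sigma1(e)  = ROR(e,6)  XOR ROR(e,11) XOR ROR(e,25)
+//	sigma0(x)  = ROR(x,7)  XOR ROR(x,18) XOR SHR(x,3)
+//	sigma1(x)  = ROR(x,17) XOR ROR(x,19) XOR SHR(x,10)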
+
+.align	6
+.type	.LK256,%object
+.LK256:
+	.long	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+	.long	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+	.long	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+	.long	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+	.long	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+	.long	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+	.long	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+	.long	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+	.long	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+	.long	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+	.long	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+	.long	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+	.long	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+	.long	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+	.long	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+	.long	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+	.long	0	//terminator
+.size	.LK256,.-.LK256
+#ifndef	__KERNEL__
+.align	3
+.LOPENSSL_armcap_P:
+# ifdef	__ILP32__
+	.long	OPENSSL_armcap_P-.
+# else
+	.quad	OPENSSL_armcap_P-.
+# endif
+#endif
+.asciz	"SHA256 block transform for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
+.align	2
+#ifndef	__KERNEL__
+.type	sha256_block_armv8,%function
+.align	6
+sha256_block_armv8:
+.Lv8_entry:
+	stp		x29,x30,[sp,#-16]!
+	add		x29,sp,#0
+
+	ld1		{v0.4s,v1.4s},[x0]
+	adr		x3,.LK256
+
+.Loop_hw:
+	ld1		{v4.16b-v7.16b},[x1],#64
+	sub		x2,x2,#1
+	ld1		{v16.4s},[x3],#16
+	rev32		v4.16b,v4.16b
+	rev32		v5.16b,v5.16b
+	rev32		v6.16b,v6.16b
+	rev32		v7.16b,v7.16b
+	orr		v18.16b,v0.16b,v0.16b		// offload
+	orr		v19.16b,v1.16b,v1.16b
+	ld1		{v17.4s},[x3],#16
+	add		v16.4s,v16.4s,v4.4s
+	.inst	0x5e2828a4	//sha256su0 v4.16b,v5.16b
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e104020	//sha256h v0.16b,v1.16b,v16.4s
+	.inst	0x5e105041	//sha256h2 v1.16b,v2.16b,v16.4s
+	.inst	0x5e0760c4	//sha256su1 v4.16b,v6.16b,v7.16b
+	ld1		{v16.4s},[x3],#16
+	add		v17.4s,v17.4s,v5.4s
+	.inst	0x5e2828c5	//sha256su0 v5.16b,v6.16b
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e114020	//sha256h v0.16b,v1.16b,v17.4s
+	.inst	0x5e115041	//sha256h2 v1.16b,v2.16b,v17.4s
+	.inst	0x5e0460e5	//sha256su1 v5.16b,v7.16b,v4.16b
+	ld1		{v17.4s},[x3],#16
+	add		v16.4s,v16.4s,v6.4s
+	.inst	0x5e2828e6	//sha256su0 v6.16b,v7.16b
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e104020	//sha256h v0.16b,v1.16b,v16.4s
+	.inst	0x5e105041	//sha256h2 v1.16b,v2.16b,v16.4s
+	.inst	0x5e056086	//sha256su1 v6.16b,v4.16b,v5.16b
+	ld1		{v16.4s},[x3],#16
+	add		v17.4s,v17.4s,v7.4s
+	.inst	0x5e282887	//sha256su0 v7.16b,v4.16b
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e114020	//sha256h v0.16b,v1.16b,v17.4s
+	.inst	0x5e115041	//sha256h2 v1.16b,v2.16b,v17.4s
+	.inst	0x5e0660a7	//sha256su1 v7.16b,v5.16b,v6.16b
+	ld1		{v17.4s},[x3],#16
+	add		v16.4s,v16.4s,v4.4s
+	.inst	0x5e2828a4	//sha256su0 v4.16b,v5.16b
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e104020	//sha256h v0.16b,v1.16b,v16.4s
+	.inst	0x5e105041	//sha256h2 v1.16b,v2.16b,v16.4s
+	.inst	0x5e0760c4	//sha256su1 v4.16b,v6.16b,v7.16b
+	ld1		{v16.4s},[x3],#16
+	add		v17.4s,v17.4s,v5.4s
+	.inst	0x5e2828c5	//sha256su0 v5.16b,v6.16b
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e114020	//sha256h v0.16b,v1.16b,v17.4s
+	.inst	0x5e115041	//sha256h2 v1.16b,v2.16b,v17.4s
+	.inst	0x5e0460e5	//sha256su1 v5.16b,v7.16b,v4.16b
+	ld1		{v17.4s},[x3],#16
+	add		v16.4s,v16.4s,v6.4s
+	.inst	0x5e2828e6	//sha256su0 v6.16b,v7.16b
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e104020	//sha256h v0.16b,v1.16b,v16.4s
+	.inst	0x5e105041	//sha256h2 v1.16b,v2.16b,v16.4s
+	.inst	0x5e056086	//sha256su1 v6.16b,v4.16b,v5.16b
+	ld1		{v16.4s},[x3],#16
+	add		v17.4s,v17.4s,v7.4s
+	.inst	0x5e282887	//sha256su0 v7.16b,v4.16b
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e114020	//sha256h v0.16b,v1.16b,v17.4s
+	.inst	0x5e115041	//sha256h2 v1.16b,v2.16b,v17.4s
+	.inst	0x5e0660a7	//sha256su1 v7.16b,v5.16b,v6.16b
+	ld1		{v17.4s},[x3],#16
+	add		v16.4s,v16.4s,v4.4s
+	.inst	0x5e2828a4	//sha256su0 v4.16b,v5.16b
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e104020	//sha256h v0.16b,v1.16b,v16.4s
+	.inst	0x5e105041	//sha256h2 v1.16b,v2.16b,v16.4s
+	.inst	0x5e0760c4	//sha256su1 v4.16b,v6.16b,v7.16b
+	ld1		{v16.4s},[x3],#16
+	add		v17.4s,v17.4s,v5.4s
+	.inst	0x5e2828c5	//sha256su0 v5.16b,v6.16b
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e114020	//sha256h v0.16b,v1.16b,v17.4s
+	.inst	0x5e115041	//sha256h2 v1.16b,v2.16b,v17.4s
+	.inst	0x5e0460e5	//sha256su1 v5.16b,v7.16b,v4.16b
+	ld1		{v17.4s},[x3],#16
+	add		v16.4s,v16.4s,v6.4s
+	.inst	0x5e2828e6	//sha256su0 v6.16b,v7.16b
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e104020	//sha256h v0.16b,v1.16b,v16.4s
+	.inst	0x5e105041	//sha256h2 v1.16b,v2.16b,v16.4s
+	.inst	0x5e056086	//sha256su1 v6.16b,v4.16b,v5.16b
+	ld1		{v16.4s},[x3],#16
+	add		v17.4s,v17.4s,v7.4s
+	.inst	0x5e282887	//sha256su0 v7.16b,v4.16b
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e114020	//sha256h v0.16b,v1.16b,v17.4s
+	.inst	0x5e115041	//sha256h2 v1.16b,v2.16b,v17.4s
+	.inst	0x5e0660a7	//sha256su1 v7.16b,v5.16b,v6.16b
+	ld1		{v17.4s},[x3],#16
+	add		v16.4s,v16.4s,v4.4s
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e104020	//sha256h v0.16b,v1.16b,v16.4s
+	.inst	0x5e105041	//sha256h2 v1.16b,v2.16b,v16.4s
+
+	ld1		{v16.4s},[x3],#16
+	add		v17.4s,v17.4s,v5.4s
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e114020	//sha256h v0.16b,v1.16b,v17.4s
+	.inst	0x5e115041	//sha256h2 v1.16b,v2.16b,v17.4s
+
+	ld1		{v17.4s},[x3]
+	add		v16.4s,v16.4s,v6.4s
+	sub		x3,x3,#64*4-16	// rewind
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e104020	//sha256h v0.16b,v1.16b,v16.4s
+	.inst	0x5e105041	//sha256h2 v1.16b,v2.16b,v16.4s
+
+	add		v17.4s,v17.4s,v7.4s
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e114020	//sha256h v0.16b,v1.16b,v17.4s
+	.inst	0x5e115041	//sha256h2 v1.16b,v2.16b,v17.4s
+
+	add		v0.4s,v0.4s,v18.4s
+	add		v1.4s,v1.4s,v19.4s
+
+	cbnz		x2,.Loop_hw
+
+	st1		{v0.4s,v1.4s},[x0]
+
+	ldr		x29,[sp],#16
+	ret
+.size	sha256_block_armv8,.-sha256_block_armv8
+#endif
+#ifdef	__KERNEL__
+.globl	sha256_block_neon
+#endif
+.type	sha256_block_neon,%function
+.align	4
+sha256_block_neon:
+.Lneon_entry:
+	stp	x29, x30, [sp, #-16]!
+	mov	x29, sp
+	sub	sp,sp,#16*4
+
+	adr	x16,.LK256
+	add	x2,x1,x2,lsl#6	// len to point at the end of inp
+
+	ld1	{v0.16b},[x1], #16
+	ld1	{v1.16b},[x1], #16
+	ld1	{v2.16b},[x1], #16
+	ld1	{v3.16b},[x1], #16
+	ld1	{v4.4s},[x16], #16
+	ld1	{v5.4s},[x16], #16
+	ld1	{v6.4s},[x16], #16
+	ld1	{v7.4s},[x16], #16
+	rev32	v0.16b,v0.16b		// yes, even on
+	rev32	v1.16b,v1.16b		// big-endian
+	rev32	v2.16b,v2.16b
+	rev32	v3.16b,v3.16b
+	mov	x17,sp
+	add	v4.4s,v4.4s,v0.4s
+	add	v5.4s,v5.4s,v1.4s
+	add	v6.4s,v6.4s,v2.4s
+	st1	{v4.4s-v5.4s},[x17], #32
+	add	v7.4s,v7.4s,v3.4s
+	st1	{v6.4s-v7.4s},[x17]
+	sub	x17,x17,#32
+
+	ldp	w3,w4,[x0]
+	ldp	w5,w6,[x0,#8]
+	ldp	w7,w8,[x0,#16]
+	ldp	w9,w10,[x0,#24]
+	ldr	w12,[sp,#0]
+	mov	w13,wzr
+	eor	w14,w4,w5
+	mov	w15,wzr
+	b	.L_00_48
+
+.align	4
+.L_00_48:
+	ext	v4.16b,v0.16b,v1.16b,#4
+	add	w10,w10,w12
+	add	w3,w3,w15
+	and	w12,w8,w7
+	bic	w15,w9,w7
+	ext	v7.16b,v2.16b,v3.16b,#4
+	eor	w11,w7,w7,ror#5
+	add	w3,w3,w13
+	mov	d19,v3.d[1]
+	orr	w12,w12,w15
+	eor	w11,w11,w7,ror#19
+	ushr	v6.4s,v4.4s,#7
+	eor	w15,w3,w3,ror#11
+	ushr	v5.4s,v4.4s,#3
+	add	w10,w10,w12
+	add	v0.4s,v0.4s,v7.4s
+	ror	w11,w11,#6
+	sli	v6.4s,v4.4s,#25
+	eor	w13,w3,w4
+	eor	w15,w15,w3,ror#20
+	ushr	v7.4s,v4.4s,#18
+	add	w10,w10,w11
+	ldr	w12,[sp,#4]
+	and	w14,w14,w13
+	eor	v5.16b,v5.16b,v6.16b
+	ror	w15,w15,#2
+	add	w6,w6,w10
+	sli	v7.4s,v4.4s,#14
+	eor	w14,w14,w4
+	ushr	v16.4s,v19.4s,#17
+	add	w9,w9,w12
+	add	w10,w10,w15
+	and	w12,w7,w6
+	eor	v5.16b,v5.16b,v7.16b
+	bic	w15,w8,w6
+	eor	w11,w6,w6,ror#5
+	sli	v16.4s,v19.4s,#15
+	add	w10,w10,w14
+	orr	w12,w12,w15
+	ushr	v17.4s,v19.4s,#10
+	eor	w11,w11,w6,ror#19
+	eor	w15,w10,w10,ror#11
+	ushr	v7.4s,v19.4s,#19
+	add	w9,w9,w12
+	ror	w11,w11,#6
+	add	v0.4s,v0.4s,v5.4s
+	eor	w14,w10,w3
+	eor	w15,w15,w10,ror#20
+	sli	v7.4s,v19.4s,#13
+	add	w9,w9,w11
+	ldr	w12,[sp,#8]
+	and	w13,w13,w14
+	eor	v17.16b,v17.16b,v16.16b
+	ror	w15,w15,#2
+	add	w5,w5,w9
+	eor	w13,w13,w3
+	eor	v17.16b,v17.16b,v7.16b
+	add	w8,w8,w12
+	add	w9,w9,w15
+	and	w12,w6,w5
+	add	v0.4s,v0.4s,v17.4s
+	bic	w15,w7,w5
+	eor	w11,w5,w5,ror#5
+	add	w9,w9,w13
+	ushr	v18.4s,v0.4s,#17
+	orr	w12,w12,w15
+	ushr	v19.4s,v0.4s,#10
+	eor	w11,w11,w5,ror#19
+	eor	w15,w9,w9,ror#11
+	sli	v18.4s,v0.4s,#15
+	add	w8,w8,w12
+	ushr	v17.4s,v0.4s,#19
+	ror	w11,w11,#6
+	eor	w13,w9,w10
+	eor	v19.16b,v19.16b,v18.16b
+	eor	w15,w15,w9,ror#20
+	add	w8,w8,w11
+	sli	v17.4s,v0.4s,#13
+	ldr	w12,[sp,#12]
+	and	w14,w14,w13
+	ror	w15,w15,#2
+	ld1	{v4.4s},[x16], #16
+	add	w4,w4,w8
+	eor	v19.16b,v19.16b,v17.16b
+	eor	w14,w14,w10
+	eor	v17.16b,v17.16b,v17.16b
+	add	w7,w7,w12
+	add	w8,w8,w15
+	and	w12,w5,w4
+	mov	v17.d[1],v19.d[0]
+	bic	w15,w6,w4
+	eor	w11,w4,w4,ror#5
+	add	w8,w8,w14
+	add	v0.4s,v0.4s,v17.4s
+	orr	w12,w12,w15
+	eor	w11,w11,w4,ror#19
+	eor	w15,w8,w8,ror#11
+	add	v4.4s,v4.4s,v0.4s
+	add	w7,w7,w12
+	ror	w11,w11,#6
+	eor	w14,w8,w9
+	eor	w15,w15,w8,ror#20
+	add	w7,w7,w11
+	ldr	w12,[sp,#16]
+	and	w13,w13,w14
+	ror	w15,w15,#2
+	add	w3,w3,w7
+	eor	w13,w13,w9
+	st1	{v4.4s},[x17], #16
+	ext	v4.16b,v1.16b,v2.16b,#4
+	add	w6,w6,w12
+	add	w7,w7,w15
+	and	w12,w4,w3
+	bic	w15,w5,w3
+	ext	v7.16b,v3.16b,v0.16b,#4
+	eor	w11,w3,w3,ror#5
+	add	w7,w7,w13
+	mov	d19,v0.d[1]
+	orr	w12,w12,w15
+	eor	w11,w11,w3,ror#19
+	ushr	v6.4s,v4.4s,#7
+	eor	w15,w7,w7,ror#11
+	ushr	v5.4s,v4.4s,#3
+	add	w6,w6,w12
+	add	v1.4s,v1.4s,v7.4s
+	ror	w11,w11,#6
+	sli	v6.4s,v4.4s,#25
+	eor	w13,w7,w8
+	eor	w15,w15,w7,ror#20
+	ushr	v7.4s,v4.4s,#18
+	add	w6,w6,w11
+	ldr	w12,[sp,#20]
+	and	w14,w14,w13
+	eor	v5.16b,v5.16b,v6.16b
+	ror	w15,w15,#2
+	add	w10,w10,w6
+	sli	v7.4s,v4.4s,#14
+	eor	w14,w14,w8
+	ushr	v16.4s,v19.4s,#17
+	add	w5,w5,w12
+	add	w6,w6,w15
+	and	w12,w3,w10
+	eor	v5.16b,v5.16b,v7.16b
+	bic	w15,w4,w10
+	eor	w11,w10,w10,ror#5
+	sli	v16.4s,v19.4s,#15
+	add	w6,w6,w14
+	orr	w12,w12,w15
+	ushr	v17.4s,v19.4s,#10
+	eor	w11,w11,w10,ror#19
+	eor	w15,w6,w6,ror#11
+	ushr	v7.4s,v19.4s,#19
+	add	w5,w5,w12
+	ror	w11,w11,#6
+	add	v1.4s,v1.4s,v5.4s
+	eor	w14,w6,w7
+	eor	w15,w15,w6,ror#20
+	sli	v7.4s,v19.4s,#13
+	add	w5,w5,w11
+	ldr	w12,[sp,#24]
+	and	w13,w13,w14
+	eor	v17.16b,v17.16b,v16.16b
+	ror	w15,w15,#2
+	add	w9,w9,w5
+	eor	w13,w13,w7
+	eor	v17.16b,v17.16b,v7.16b
+	add	w4,w4,w12
+	add	w5,w5,w15
+	and	w12,w10,w9
+	add	v1.4s,v1.4s,v17.4s
+	bic	w15,w3,w9
+	eor	w11,w9,w9,ror#5
+	add	w5,w5,w13
+	ushr	v18.4s,v1.4s,#17
+	orr	w12,w12,w15
+	ushr	v19.4s,v1.4s,#10
+	eor	w11,w11,w9,ror#19
+	eor	w15,w5,w5,ror#11
+	sli	v18.4s,v1.4s,#15
+	add	w4,w4,w12
+	ushr	v17.4s,v1.4s,#19
+	ror	w11,w11,#6
+	eor	w13,w5,w6
+	eor	v19.16b,v19.16b,v18.16b
+	eor	w15,w15,w5,ror#20
+	add	w4,w4,w11
+	sli	v17.4s,v1.4s,#13
+	ldr	w12,[sp,#28]
+	and	w14,w14,w13
+	ror	w15,w15,#2
+	ld1	{v4.4s},[x16], #16
+	add	w8,w8,w4
+	eor	v19.16b,v19.16b,v17.16b
+	eor	w14,w14,w6
+	eor	v17.16b,v17.16b,v17.16b
+	add	w3,w3,w12
+	add	w4,w4,w15
+	and	w12,w9,w8
+	mov	v17.d[1],v19.d[0]
+	bic	w15,w10,w8
+	eor	w11,w8,w8,ror#5
+	add	w4,w4,w14
+	add	v1.4s,v1.4s,v17.4s
+	orr	w12,w12,w15
+	eor	w11,w11,w8,ror#19
+	eor	w15,w4,w4,ror#11
+	add	v4.4s,v4.4s,v1.4s
+	add	w3,w3,w12
+	ror	w11,w11,#6
+	eor	w14,w4,w5
+	eor	w15,w15,w4,ror#20
+	add	w3,w3,w11
+	ldr	w12,[sp,#32]
+	and	w13,w13,w14
+	ror	w15,w15,#2
+	add	w7,w7,w3
+	eor	w13,w13,w5
+	st1	{v4.4s},[x17], #16
+	ext	v4.16b,v2.16b,v3.16b,#4
+	add	w10,w10,w12
+	add	w3,w3,w15
+	and	w12,w8,w7
+	bic	w15,w9,w7
+	ext	v7.16b,v0.16b,v1.16b,#4
+	eor	w11,w7,w7,ror#5
+	add	w3,w3,w13
+	mov	d19,v1.d[1]
+	orr	w12,w12,w15
+	eor	w11,w11,w7,ror#19
+	ushr	v6.4s,v4.4s,#7
+	eor	w15,w3,w3,ror#11
+	ushr	v5.4s,v4.4s,#3
+	add	w10,w10,w12
+	add	v2.4s,v2.4s,v7.4s
+	ror	w11,w11,#6
+	sli	v6.4s,v4.4s,#25
+	eor	w13,w3,w4
+	eor	w15,w15,w3,ror#20
+	ushr	v7.4s,v4.4s,#18
+	add	w10,w10,w11
+	ldr	w12,[sp,#36]
+	and	w14,w14,w13
+	eor	v5.16b,v5.16b,v6.16b
+	ror	w15,w15,#2
+	add	w6,w6,w10
+	sli	v7.4s,v4.4s,#14
+	eor	w14,w14,w4
+	ushr	v16.4s,v19.4s,#17
+	add	w9,w9,w12
+	add	w10,w10,w15
+	and	w12,w7,w6
+	eor	v5.16b,v5.16b,v7.16b
+	bic	w15,w8,w6
+	eor	w11,w6,w6,ror#5
+	sli	v16.4s,v19.4s,#15
+	add	w10,w10,w14
+	orr	w12,w12,w15
+	ushr	v17.4s,v19.4s,#10
+	eor	w11,w11,w6,ror#19
+	eor	w15,w10,w10,ror#11
+	ushr	v7.4s,v19.4s,#19
+	add	w9,w9,w12
+	ror	w11,w11,#6
+	add	v2.4s,v2.4s,v5.4s
+	eor	w14,w10,w3
+	eor	w15,w15,w10,ror#20
+	sli	v7.4s,v19.4s,#13
+	add	w9,w9,w11
+	ldr	w12,[sp,#40]
+	and	w13,w13,w14
+	eor	v17.16b,v17.16b,v16.16b
+	ror	w15,w15,#2
+	add	w5,w5,w9
+	eor	w13,w13,w3
+	eor	v17.16b,v17.16b,v7.16b
+	add	w8,w8,w12
+	add	w9,w9,w15
+	and	w12,w6,w5
+	add	v2.4s,v2.4s,v17.4s
+	bic	w15,w7,w5
+	eor	w11,w5,w5,ror#5
+	add	w9,w9,w13
+	ushr	v18.4s,v2.4s,#17
+	orr	w12,w12,w15
+	ushr	v19.4s,v2.4s,#10
+	eor	w11,w11,w5,ror#19
+	eor	w15,w9,w9,ror#11
+	sli	v18.4s,v2.4s,#15
+	add	w8,w8,w12
+	ushr	v17.4s,v2.4s,#19
+	ror	w11,w11,#6
+	eor	w13,w9,w10
+	eor	v19.16b,v19.16b,v18.16b
+	eor	w15,w15,w9,ror#20
+	add	w8,w8,w11
+	sli	v17.4s,v2.4s,#13
+	ldr	w12,[sp,#44]
+	and	w14,w14,w13
+	ror	w15,w15,#2
+	ld1	{v4.4s},[x16], #16
+	add	w4,w4,w8
+	eor	v19.16b,v19.16b,v17.16b
+	eor	w14,w14,w10
+	eor	v17.16b,v17.16b,v17.16b
+	add	w7,w7,w12
+	add	w8,w8,w15
+	and	w12,w5,w4
+	mov	v17.d[1],v19.d[0]
+	bic	w15,w6,w4
+	eor	w11,w4,w4,ror#5
+	add	w8,w8,w14
+	add	v2.4s,v2.4s,v17.4s
+	orr	w12,w12,w15
+	eor	w11,w11,w4,ror#19
+	eor	w15,w8,w8,ror#11
+	add	v4.4s,v4.4s,v2.4s
+	add	w7,w7,w12
+	ror	w11,w11,#6
+	eor	w14,w8,w9
+	eor	w15,w15,w8,ror#20
+	add	w7,w7,w11
+	ldr	w12,[sp,#48]
+	and	w13,w13,w14
+	ror	w15,w15,#2
+	add	w3,w3,w7
+	eor	w13,w13,w9
+	st1	{v4.4s},[x17], #16
+	ext	v4.16b,v3.16b,v0.16b,#4
+	add	w6,w6,w12
+	add	w7,w7,w15
+	and	w12,w4,w3
+	bic	w15,w5,w3
+	ext	v7.16b,v1.16b,v2.16b,#4
+	eor	w11,w3,w3,ror#5
+	add	w7,w7,w13
+	mov	d19,v2.d[1]
+	orr	w12,w12,w15
+	eor	w11,w11,w3,ror#19
+	ushr	v6.4s,v4.4s,#7
+	eor	w15,w7,w7,ror#11
+	ushr	v5.4s,v4.4s,#3
+	add	w6,w6,w12
+	add	v3.4s,v3.4s,v7.4s
+	ror	w11,w11,#6
+	sli	v6.4s,v4.4s,#25
+	eor	w13,w7,w8
+	eor	w15,w15,w7,ror#20
+	ushr	v7.4s,v4.4s,#18
+	add	w6,w6,w11
+	ldr	w12,[sp,#52]
+	and	w14,w14,w13
+	eor	v5.16b,v5.16b,v6.16b
+	ror	w15,w15,#2
+	add	w10,w10,w6
+	sli	v7.4s,v4.4s,#14
+	eor	w14,w14,w8
+	ushr	v16.4s,v19.4s,#17
+	add	w5,w5,w12
+	add	w6,w6,w15
+	and	w12,w3,w10
+	eor	v5.16b,v5.16b,v7.16b
+	bic	w15,w4,w10
+	eor	w11,w10,w10,ror#5
+	sli	v16.4s,v19.4s,#15
+	add	w6,w6,w14
+	orr	w12,w12,w15
+	ushr	v17.4s,v19.4s,#10
+	eor	w11,w11,w10,ror#19
+	eor	w15,w6,w6,ror#11
+	ushr	v7.4s,v19.4s,#19
+	add	w5,w5,w12
+	ror	w11,w11,#6
+	add	v3.4s,v3.4s,v5.4s
+	eor	w14,w6,w7
+	eor	w15,w15,w6,ror#20
+	sli	v7.4s,v19.4s,#13
+	add	w5,w5,w11
+	ldr	w12,[sp,#56]
+	and	w13,w13,w14
+	eor	v17.16b,v17.16b,v16.16b
+	ror	w15,w15,#2
+	add	w9,w9,w5
+	eor	w13,w13,w7
+	eor	v17.16b,v17.16b,v7.16b
+	add	w4,w4,w12
+	add	w5,w5,w15
+	and	w12,w10,w9
+	add	v3.4s,v3.4s,v17.4s
+	bic	w15,w3,w9
+	eor	w11,w9,w9,ror#5
+	add	w5,w5,w13
+	ushr	v18.4s,v3.4s,#17
+	orr	w12,w12,w15
+	ushr	v19.4s,v3.4s,#10
+	eor	w11,w11,w9,ror#19
+	eor	w15,w5,w5,ror#11
+	sli	v18.4s,v3.4s,#15
+	add	w4,w4,w12
+	ushr	v17.4s,v3.4s,#19
+	ror	w11,w11,#6
+	eor	w13,w5,w6
+	eor	v19.16b,v19.16b,v18.16b
+	eor	w15,w15,w5,ror#20
+	add	w4,w4,w11
+	sli	v17.4s,v3.4s,#13
+	ldr	w12,[sp,#60]
+	and	w14,w14,w13
+	ror	w15,w15,#2
+	ld1	{v4.4s},[x16], #16
+	add	w8,w8,w4
+	eor	v19.16b,v19.16b,v17.16b
+	eor	w14,w14,w6
+	eor	v17.16b,v17.16b,v17.16b
+	add	w3,w3,w12
+	add	w4,w4,w15
+	and	w12,w9,w8
+	mov	v17.d[1],v19.d[0]
+	bic	w15,w10,w8
+	eor	w11,w8,w8,ror#5
+	add	w4,w4,w14
+	add	v3.4s,v3.4s,v17.4s
+	orr	w12,w12,w15
+	eor	w11,w11,w8,ror#19
+	eor	w15,w4,w4,ror#11
+	add	v4.4s,v4.4s,v3.4s
+	add	w3,w3,w12
+	ror	w11,w11,#6
+	eor	w14,w4,w5
+	eor	w15,w15,w4,ror#20
+	add	w3,w3,w11
+	ldr	w12,[x16]
+	and	w13,w13,w14
+	ror	w15,w15,#2
+	add	w7,w7,w3
+	eor	w13,w13,w5
+	st1	{v4.4s},[x17], #16
+	cmp	w12,#0				// check for K256 terminator
+	ldr	w12,[sp,#0]
+	sub	x17,x17,#64
+	bne	.L_00_48
+
+	sub	x16,x16,#256		// rewind x16
+	cmp	x1,x2
+	mov	x17, #64
+	csel	x17, x17, xzr, eq
+	sub	x1,x1,x17			// avoid SEGV
+	mov	x17,sp
+	add	w10,w10,w12
+	add	w3,w3,w15
+	and	w12,w8,w7
+	ld1	{v0.16b},[x1],#16
+	bic	w15,w9,w7
+	eor	w11,w7,w7,ror#5
+	ld1	{v4.4s},[x16],#16
+	add	w3,w3,w13
+	orr	w12,w12,w15
+	eor	w11,w11,w7,ror#19
+	eor	w15,w3,w3,ror#11
+	rev32	v0.16b,v0.16b
+	add	w10,w10,w12
+	ror	w11,w11,#6
+	eor	w13,w3,w4
+	eor	w15,w15,w3,ror#20
+	add	v4.4s,v4.4s,v0.4s
+	add	w10,w10,w11
+	ldr	w12,[sp,#4]
+	and	w14,w14,w13
+	ror	w15,w15,#2
+	add	w6,w6,w10
+	eor	w14,w14,w4
+	add	w9,w9,w12
+	add	w10,w10,w15
+	and	w12,w7,w6
+	bic	w15,w8,w6
+	eor	w11,w6,w6,ror#5
+	add	w10,w10,w14
+	orr	w12,w12,w15
+	eor	w11,w11,w6,ror#19
+	eor	w15,w10,w10,ror#11
+	add	w9,w9,w12
+	ror	w11,w11,#6
+	eor	w14,w10,w3
+	eor	w15,w15,w10,ror#20
+	add	w9,w9,w11
+	ldr	w12,[sp,#8]
+	and	w13,w13,w14
+	ror	w15,w15,#2
+	add	w5,w5,w9
+	eor	w13,w13,w3
+	add	w8,w8,w12
+	add	w9,w9,w15
+	and	w12,w6,w5
+	bic	w15,w7,w5
+	eor	w11,w5,w5,ror#5
+	add	w9,w9,w13
+	orr	w12,w12,w15
+	eor	w11,w11,w5,ror#19
+	eor	w15,w9,w9,ror#11
+	add	w8,w8,w12
+	ror	w11,w11,#6
+	eor	w13,w9,w10
+	eor	w15,w15,w9,ror#20
+	add	w8,w8,w11
+	ldr	w12,[sp,#12]
+	and	w14,w14,w13
+	ror	w15,w15,#2
+	add	w4,w4,w8
+	eor	w14,w14,w10
+	add	w7,w7,w12
+	add	w8,w8,w15
+	and	w12,w5,w4
+	bic	w15,w6,w4
+	eor	w11,w4,w4,ror#5
+	add	w8,w8,w14
+	orr	w12,w12,w15
+	eor	w11,w11,w4,ror#19
+	eor	w15,w8,w8,ror#11
+	add	w7,w7,w12
+	ror	w11,w11,#6
+	eor	w14,w8,w9
+	eor	w15,w15,w8,ror#20
+	add	w7,w7,w11
+	ldr	w12,[sp,#16]
+	and	w13,w13,w14
+	ror	w15,w15,#2
+	add	w3,w3,w7
+	eor	w13,w13,w9
+	st1	{v4.4s},[x17], #16
+	add	w6,w6,w12
+	add	w7,w7,w15
+	and	w12,w4,w3
+	ld1	{v1.16b},[x1],#16
+	bic	w15,w5,w3
+	eor	w11,w3,w3,ror#5
+	ld1	{v4.4s},[x16],#16
+	add	w7,w7,w13
+	orr	w12,w12,w15
+	eor	w11,w11,w3,ror#19
+	eor	w15,w7,w7,ror#11
+	rev32	v1.16b,v1.16b
+	add	w6,w6,w12
+	ror	w11,w11,#6
+	eor	w13,w7,w8
+	eor	w15,w15,w7,ror#20
+	add	v4.4s,v4.4s,v1.4s
+	add	w6,w6,w11
+	ldr	w12,[sp,#20]
+	and	w14,w14,w13
+	ror	w15,w15,#2
+	add	w10,w10,w6
+	eor	w14,w14,w8
+	add	w5,w5,w12
+	add	w6,w6,w15
+	and	w12,w3,w10
+	bic	w15,w4,w10
+	eor	w11,w10,w10,ror#5
+	add	w6,w6,w14
+	orr	w12,w12,w15
+	eor	w11,w11,w10,ror#19
+	eor	w15,w6,w6,ror#11
+	add	w5,w5,w12
+	ror	w11,w11,#6
+	eor	w14,w6,w7
+	eor	w15,w15,w6,ror#20
+	add	w5,w5,w11
+	ldr	w12,[sp,#24]
+	and	w13,w13,w14
+	ror	w15,w15,#2
+	add	w9,w9,w5
+	eor	w13,w13,w7
+	add	w4,w4,w12
+	add	w5,w5,w15
+	and	w12,w10,w9
+	bic	w15,w3,w9
+	eor	w11,w9,w9,ror#5
+	add	w5,w5,w13
+	orr	w12,w12,w15
+	eor	w11,w11,w9,ror#19
+	eor	w15,w5,w5,ror#11
+	add	w4,w4,w12
+	ror	w11,w11,#6
+	eor	w13,w5,w6
+	eor	w15,w15,w5,ror#20
+	add	w4,w4,w11
+	ldr	w12,[sp,#28]
+	and	w14,w14,w13
+	ror	w15,w15,#2
+	add	w8,w8,w4
+	eor	w14,w14,w6
+	add	w3,w3,w12
+	add	w4,w4,w15
+	and	w12,w9,w8
+	bic	w15,w10,w8
+	eor	w11,w8,w8,ror#5
+	add	w4,w4,w14
+	orr	w12,w12,w15
+	eor	w11,w11,w8,ror#19
+	eor	w15,w4,w4,ror#11
+	add	w3,w3,w12
+	ror	w11,w11,#6
+	eor	w14,w4,w5
+	eor	w15,w15,w4,ror#20
+	add	w3,w3,w11
+	ldr	w12,[sp,#32]
+	and	w13,w13,w14
+	ror	w15,w15,#2
+	add	w7,w7,w3
+	eor	w13,w13,w5
+	st1	{v4.4s},[x17], #16
+	add	w10,w10,w12
+	add	w3,w3,w15
+	and	w12,w8,w7
+	ld1	{v2.16b},[x1],#16
+	bic	w15,w9,w7
+	eor	w11,w7,w7,ror#5
+	ld1	{v4.4s},[x16],#16
+	add	w3,w3,w13
+	orr	w12,w12,w15
+	eor	w11,w11,w7,ror#19
+	eor	w15,w3,w3,ror#11
+	rev32	v2.16b,v2.16b
+	add	w10,w10,w12
+	ror	w11,w11,#6
+	eor	w13,w3,w4
+	eor	w15,w15,w3,ror#20
+	add	v4.4s,v4.4s,v2.4s
+	add	w10,w10,w11
+	ldr	w12,[sp,#36]
+	and	w14,w14,w13
+	ror	w15,w15,#2
+	add	w6,w6,w10
+	eor	w14,w14,w4
+	add	w9,w9,w12
+	add	w10,w10,w15
+	and	w12,w7,w6
+	bic	w15,w8,w6
+	eor	w11,w6,w6,ror#5
+	add	w10,w10,w14
+	orr	w12,w12,w15
+	eor	w11,w11,w6,ror#19
+	eor	w15,w10,w10,ror#11
+	add	w9,w9,w12
+	ror	w11,w11,#6
+	eor	w14,w10,w3
+	eor	w15,w15,w10,ror#20
+	add	w9,w9,w11
+	ldr	w12,[sp,#40]
+	and	w13,w13,w14
+	ror	w15,w15,#2
+	add	w5,w5,w9
+	eor	w13,w13,w3
+	add	w8,w8,w12
+	add	w9,w9,w15
+	and	w12,w6,w5
+	bic	w15,w7,w5
+	eor	w11,w5,w5,ror#5
+	add	w9,w9,w13
+	orr	w12,w12,w15
+	eor	w11,w11,w5,ror#19
+	eor	w15,w9,w9,ror#11
+	add	w8,w8,w12
+	ror	w11,w11,#6
+	eor	w13,w9,w10
+	eor	w15,w15,w9,ror#20
+	add	w8,w8,w11
+	ldr	w12,[sp,#44]
+	and	w14,w14,w13
+	ror	w15,w15,#2
+	add	w4,w4,w8
+	eor	w14,w14,w10
+	add	w7,w7,w12
+	add	w8,w8,w15
+	and	w12,w5,w4
+	bic	w15,w6,w4
+	eor	w11,w4,w4,ror#5
+	add	w8,w8,w14
+	orr	w12,w12,w15
+	eor	w11,w11,w4,ror#19
+	eor	w15,w8,w8,ror#11
+	add	w7,w7,w12
+	ror	w11,w11,#6
+	eor	w14,w8,w9
+	eor	w15,w15,w8,ror#20
+	add	w7,w7,w11
+	ldr	w12,[sp,#48]
+	and	w13,w13,w14
+	ror	w15,w15,#2
+	add	w3,w3,w7
+	eor	w13,w13,w9
+	st1	{v4.4s},[x17], #16
+	add	w6,w6,w12
+	add	w7,w7,w15
+	and	w12,w4,w3
+	ld1	{v3.16b},[x1],#16
+	bic	w15,w5,w3
+	eor	w11,w3,w3,ror#5
+	ld1	{v4.4s},[x16],#16
+	add	w7,w7,w13
+	orr	w12,w12,w15
+	eor	w11,w11,w3,ror#19
+	eor	w15,w7,w7,ror#11
+	rev32	v3.16b,v3.16b
+	add	w6,w6,w12
+	ror	w11,w11,#6
+	eor	w13,w7,w8
+	eor	w15,w15,w7,ror#20
+	add	v4.4s,v4.4s,v3.4s
+	add	w6,w6,w11
+	ldr	w12,[sp,#52]
+	and	w14,w14,w13
+	ror	w15,w15,#2
+	add	w10,w10,w6
+	eor	w14,w14,w8
+	add	w5,w5,w12
+	add	w6,w6,w15
+	and	w12,w3,w10
+	bic	w15,w4,w10
+	eor	w11,w10,w10,ror#5
+	add	w6,w6,w14
+	orr	w12,w12,w15
+	eor	w11,w11,w10,ror#19
+	eor	w15,w6,w6,ror#11
+	add	w5,w5,w12
+	ror	w11,w11,#6
+	eor	w14,w6,w7
+	eor	w15,w15,w6,ror#20
+	add	w5,w5,w11
+	ldr	w12,[sp,#56]
+	and	w13,w13,w14
+	ror	w15,w15,#2
+	add	w9,w9,w5
+	eor	w13,w13,w7
+	add	w4,w4,w12
+	add	w5,w5,w15
+	and	w12,w10,w9
+	bic	w15,w3,w9
+	eor	w11,w9,w9,ror#5
+	add	w5,w5,w13
+	orr	w12,w12,w15
+	eor	w11,w11,w9,ror#19
+	eor	w15,w5,w5,ror#11
+	add	w4,w4,w12
+	ror	w11,w11,#6
+	eor	w13,w5,w6
+	eor	w15,w15,w5,ror#20
+	add	w4,w4,w11
+	ldr	w12,[sp,#60]
+	and	w14,w14,w13
+	ror	w15,w15,#2
+	add	w8,w8,w4
+	eor	w14,w14,w6
+	add	w3,w3,w12
+	add	w4,w4,w15
+	and	w12,w9,w8
+	bic	w15,w10,w8
+	eor	w11,w8,w8,ror#5
+	add	w4,w4,w14
+	orr	w12,w12,w15
+	eor	w11,w11,w8,ror#19
+	eor	w15,w4,w4,ror#11
+	add	w3,w3,w12
+	ror	w11,w11,#6
+	eor	w14,w4,w5
+	eor	w15,w15,w4,ror#20
+	add	w3,w3,w11
+	and	w13,w13,w14
+	ror	w15,w15,#2
+	add	w7,w7,w3
+	eor	w13,w13,w5
+	st1	{v4.4s},[x17], #16
+	add	w3,w3,w15			// h+=Sigma0(a) from the past
+	ldp	w11,w12,[x0,#0]
+	add	w3,w3,w13			// h+=Maj(a,b,c) from the past
+	ldp	w13,w14,[x0,#8]
+	add	w3,w3,w11			// accumulate
+	add	w4,w4,w12
+	ldp	w11,w12,[x0,#16]
+	add	w5,w5,w13
+	add	w6,w6,w14
+	ldp	w13,w14,[x0,#24]
+	add	w7,w7,w11
+	add	w8,w8,w12
+	 ldr	w12,[sp,#0]
+	stp	w3,w4,[x0,#0]
+	add	w9,w9,w13
+	 mov	w13,wzr
+	stp	w5,w6,[x0,#8]
+	add	w10,w10,w14
+	stp	w7,w8,[x0,#16]
+	 eor	w14,w4,w5
+	stp	w9,w10,[x0,#24]
+	 mov	w15,wzr
+	 mov	x17,sp
+	b.ne	.L_00_48
+
+	ldr	x29,[x29]
+	add	sp,sp,#16*4+16
+	ret
+.size	sha256_block_neon,.-sha256_block_neon
+#ifndef	__KERNEL__
+.comm	OPENSSL_armcap_P,4,4
+#endif
diff --git a/arch/arm64/crypto/sha512-core.S b/arch/arm64/crypto/sha512-core.S
new file mode 100644
index 0000000..bd0f59f
--- /dev/null
+++ b/arch/arm64/crypto/sha512-core.S
@@ -0,0 +1,1085 @@
+// Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
+//
+// Licensed under the OpenSSL license (the "License").  You may not use
+// this file except in compliance with the License.  You can obtain a copy
+// in the file LICENSE in the source distribution or at
+// https://www.openssl.org/source/license.html
+
+// ====================================================================
+// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+// project. The module is, however, dual licensed under OpenSSL and
+// CRYPTOGAMS licenses depending on where you obtain it. For further
+// details see http://www.openssl.org/~appro/cryptogams/.
+//
+// Permission to use under GPLv2 terms is granted.
+// ====================================================================
+//
+// SHA256/512 for ARMv8.
+//
+// Performance in cycles per processed byte and improvement coefficient
+// over code generated with "default" compiler:
+//
+//		SHA256-hw	SHA256(*)	SHA512
+// Apple A7	1.97		10.5 (+33%)	6.73 (-1%(**))
+// Cortex-A53	2.38		15.5 (+115%)	10.0 (+150%(***))
+// Cortex-A57	2.31		11.6 (+86%)	7.51 (+260%(***))
+// Denver	2.01		10.5 (+26%)	6.70 (+8%)
+// X-Gene			20.0 (+100%)	12.8 (+300%(***))
+// Mongoose	2.36		13.0 (+50%)	8.36 (+33%)
+//
+// (*)	Software SHA256 results are of lesser relevance, presented
+//	mostly for informational purposes.
+// (**)	The result is a trade-off: it's possible to improve it by
+//	10% (or by 1 cycle per round), but at the cost of a 20% loss
+//	on Cortex-A53 (or 4 cycles per round).
+// (***)	Super-impressive coefficients over gcc-generated code are
+//	an indication of some compiler "pathology", most notably code
+//	generated with -mgeneral-regs-only is significantly faster
+//	and the gap is only 40-90%.
+//
+// October 2016.
+//
+// Originally it was reckoned that it makes no sense to implement a NEON
+// version of SHA256 for 64-bit processors. This is because the performance
+// improvement on the most widespread Cortex-A5x processors was observed
+// to be marginal, same on Cortex-A53 and ~10% on A57. But then it was
+// observed that 32-bit NEON SHA256 performs significantly better than
+// the 64-bit scalar version on *some* of the more recent processors. As a
+// result, a 64-bit NEON version of SHA256 was added to provide the best
+// all-round performance. For example, it executes ~30% faster on X-Gene
+// and Mongoose. [For reference, a NEON version of SHA512 is bound to
+// deliver much less improvement, likely a *negative* one on Cortex-A5x,
+// which is why NEON support is limited to SHA256.]
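+//
+// The scalar SHA-512 rounds below compute the standard per-round
+// functions on 64-bit words (informal pseudocode):
+//	Ch(e,f,g)  = (e AND f) XOR ((NOT e) AND g)
+//	Maj(a,b,c) = (a AND b) XOR (a AND c) XOR (b AND c)
+//	Sigma0(a)  = ROR(a,28) XOR ROR(a,34) XOR ROR(a,39)
+//	Sigma1(e)  = ROR(e,14) XOR ROR(e,18) XOR ROR(e,41)
+//	sigma0(x)  = ROR(x,1)  XOR ROR(x,8)  XOR SHR(x,7)
+//	sigma1(x)  = ROR(x,19) XOR ROR(x,61) XOR SHR(x,6)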
+
+#ifndef	__KERNEL__
+# include "arm_arch.h"
+#endif
+
+.text
+
+.extern	OPENSSL_armcap_P
+.globl	sha512_block_data_order
+.type	sha512_block_data_order,%function
+.align	6
+sha512_block_data_order:
+	stp	x29,x30,[sp,#-128]!
+	add	x29,sp,#0
+
+	stp	x19,x20,[sp,#16]
+	stp	x21,x22,[sp,#32]
+	stp	x23,x24,[sp,#48]
+	stp	x25,x26,[sp,#64]
+	stp	x27,x28,[sp,#80]
+	sub	sp,sp,#4*8
+
+	ldp	x20,x21,[x0]				// load context
+	ldp	x22,x23,[x0,#2*8]
+	ldp	x24,x25,[x0,#4*8]
+	add	x2,x1,x2,lsl#7	// end of input
+	ldp	x26,x27,[x0,#6*8]
+	adr	x30,.LK512
+	stp	x0,x2,[x29,#96]
+
+.Loop:
+	ldp	x3,x4,[x1],#2*8
+	ldr	x19,[x30],#8			// *K++
+	eor	x28,x21,x22				// magic seed
+	str	x1,[x29,#112]
+#ifndef	__AARCH64EB__
+	rev	x3,x3			// 0
+#endif
+	ror	x16,x24,#14
+	add	x27,x27,x19			// h+=K[i]
+	eor	x6,x24,x24,ror#23
+	and	x17,x25,x24
+	bic	x19,x26,x24
+	add	x27,x27,x3			// h+=X[i]
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x20,x21			// a^b, b^c in next round
+	eor	x16,x16,x6,ror#18	// Sigma1(e)
+	ror	x6,x20,#28
+	add	x27,x27,x17			// h+=Ch(e,f,g)
+	eor	x17,x20,x20,ror#5
+	add	x27,x27,x16			// h+=Sigma1(e)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	add	x23,x23,x27			// d+=h
+	eor	x28,x28,x21			// Maj(a,b,c)
+	eor	x17,x6,x17,ror#34	// Sigma0(a)
+	add	x27,x27,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	//add	x27,x27,x17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	x4,x4			// 1
+#endif
+	ldp	x5,x6,[x1],#2*8
+	add	x27,x27,x17			// h+=Sigma0(a)
+	ror	x16,x23,#14
+	add	x26,x26,x28			// h+=K[i]
+	eor	x7,x23,x23,ror#23
+	and	x17,x24,x23
+	bic	x28,x25,x23
+	add	x26,x26,x4			// h+=X[i]
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x27,x20			// a^b, b^c in next round
+	eor	x16,x16,x7,ror#18	// Sigma1(e)
+	ror	x7,x27,#28
+	add	x26,x26,x17			// h+=Ch(e,f,g)
+	eor	x17,x27,x27,ror#5
+	add	x26,x26,x16			// h+=Sigma1(e)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	add	x22,x22,x26			// d+=h
+	eor	x19,x19,x20			// Maj(a,b,c)
+	eor	x17,x7,x17,ror#34	// Sigma0(a)
+	add	x26,x26,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	//add	x26,x26,x17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	x5,x5			// 2
+#endif
+	add	x26,x26,x17			// h+=Sigma0(a)
+	ror	x16,x22,#14
+	add	x25,x25,x19			// h+=K[i]
+	eor	x8,x22,x22,ror#23
+	and	x17,x23,x22
+	bic	x19,x24,x22
+	add	x25,x25,x5			// h+=X[i]
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x26,x27			// a^b, b^c in next round
+	eor	x16,x16,x8,ror#18	// Sigma1(e)
+	ror	x8,x26,#28
+	add	x25,x25,x17			// h+=Ch(e,f,g)
+	eor	x17,x26,x26,ror#5
+	add	x25,x25,x16			// h+=Sigma1(e)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	add	x21,x21,x25			// d+=h
+	eor	x28,x28,x27			// Maj(a,b,c)
+	eor	x17,x8,x17,ror#34	// Sigma0(a)
+	add	x25,x25,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	//add	x25,x25,x17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	x6,x6			// 3
+#endif
+	ldp	x7,x8,[x1],#2*8
+	add	x25,x25,x17			// h+=Sigma0(a)
+	ror	x16,x21,#14
+	add	x24,x24,x28			// h+=K[i]
+	eor	x9,x21,x21,ror#23
+	and	x17,x22,x21
+	bic	x28,x23,x21
+	add	x24,x24,x6			// h+=X[i]
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x25,x26			// a^b, b^c in next round
+	eor	x16,x16,x9,ror#18	// Sigma1(e)
+	ror	x9,x25,#28
+	add	x24,x24,x17			// h+=Ch(e,f,g)
+	eor	x17,x25,x25,ror#5
+	add	x24,x24,x16			// h+=Sigma1(e)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	add	x20,x20,x24			// d+=h
+	eor	x19,x19,x26			// Maj(a,b,c)
+	eor	x17,x9,x17,ror#34	// Sigma0(a)
+	add	x24,x24,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	//add	x24,x24,x17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	x7,x7			// 4
+#endif
+	add	x24,x24,x17			// h+=Sigma0(a)
+	ror	x16,x20,#14
+	add	x23,x23,x19			// h+=K[i]
+	eor	x10,x20,x20,ror#23
+	and	x17,x21,x20
+	bic	x19,x22,x20
+	add	x23,x23,x7			// h+=X[i]
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x24,x25			// a^b, b^c in next round
+	eor	x16,x16,x10,ror#18	// Sigma1(e)
+	ror	x10,x24,#28
+	add	x23,x23,x17			// h+=Ch(e,f,g)
+	eor	x17,x24,x24,ror#5
+	add	x23,x23,x16			// h+=Sigma1(e)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	add	x27,x27,x23			// d+=h
+	eor	x28,x28,x25			// Maj(a,b,c)
+	eor	x17,x10,x17,ror#34	// Sigma0(a)
+	add	x23,x23,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	//add	x23,x23,x17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	x8,x8			// 5
+#endif
+	ldp	x9,x10,[x1],#2*8
+	add	x23,x23,x17			// h+=Sigma0(a)
+	ror	x16,x27,#14
+	add	x22,x22,x28			// h+=K[i]
+	eor	x11,x27,x27,ror#23
+	and	x17,x20,x27
+	bic	x28,x21,x27
+	add	x22,x22,x8			// h+=X[i]
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x23,x24			// a^b, b^c in next round
+	eor	x16,x16,x11,ror#18	// Sigma1(e)
+	ror	x11,x23,#28
+	add	x22,x22,x17			// h+=Ch(e,f,g)
+	eor	x17,x23,x23,ror#5
+	add	x22,x22,x16			// h+=Sigma1(e)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	add	x26,x26,x22			// d+=h
+	eor	x19,x19,x24			// Maj(a,b,c)
+	eor	x17,x11,x17,ror#34	// Sigma0(a)
+	add	x22,x22,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	//add	x22,x22,x17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	x9,x9			// 6
+#endif
+	add	x22,x22,x17			// h+=Sigma0(a)
+	ror	x16,x26,#14
+	add	x21,x21,x19			// h+=K[i]
+	eor	x12,x26,x26,ror#23
+	and	x17,x27,x26
+	bic	x19,x20,x26
+	add	x21,x21,x9			// h+=X[i]
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x22,x23			// a^b, b^c in next round
+	eor	x16,x16,x12,ror#18	// Sigma1(e)
+	ror	x12,x22,#28
+	add	x21,x21,x17			// h+=Ch(e,f,g)
+	eor	x17,x22,x22,ror#5
+	add	x21,x21,x16			// h+=Sigma1(e)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	add	x25,x25,x21			// d+=h
+	eor	x28,x28,x23			// Maj(a,b,c)
+	eor	x17,x12,x17,ror#34	// Sigma0(a)
+	add	x21,x21,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	//add	x21,x21,x17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	x10,x10			// 7
+#endif
+	ldp	x11,x12,[x1],#2*8
+	add	x21,x21,x17			// h+=Sigma0(a)
+	ror	x16,x25,#14
+	add	x20,x20,x28			// h+=K[i]
+	eor	x13,x25,x25,ror#23
+	and	x17,x26,x25
+	bic	x28,x27,x25
+	add	x20,x20,x10			// h+=X[i]
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x21,x22			// a^b, b^c in next round
+	eor	x16,x16,x13,ror#18	// Sigma1(e)
+	ror	x13,x21,#28
+	add	x20,x20,x17			// h+=Ch(e,f,g)
+	eor	x17,x21,x21,ror#5
+	add	x20,x20,x16			// h+=Sigma1(e)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	add	x24,x24,x20			// d+=h
+	eor	x19,x19,x22			// Maj(a,b,c)
+	eor	x17,x13,x17,ror#34	// Sigma0(a)
+	add	x20,x20,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	//add	x20,x20,x17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	x11,x11			// 8
+#endif
+	add	x20,x20,x17			// h+=Sigma0(a)
+	ror	x16,x24,#14
+	add	x27,x27,x19			// h+=K[i]
+	eor	x14,x24,x24,ror#23
+	and	x17,x25,x24
+	bic	x19,x26,x24
+	add	x27,x27,x11			// h+=X[i]
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x20,x21			// a^b, b^c in next round
+	eor	x16,x16,x14,ror#18	// Sigma1(e)
+	ror	x14,x20,#28
+	add	x27,x27,x17			// h+=Ch(e,f,g)
+	eor	x17,x20,x20,ror#5
+	add	x27,x27,x16			// h+=Sigma1(e)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	add	x23,x23,x27			// d+=h
+	eor	x28,x28,x21			// Maj(a,b,c)
+	eor	x17,x14,x17,ror#34	// Sigma0(a)
+	add	x27,x27,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	//add	x27,x27,x17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	x12,x12			// 9
+#endif
+	ldp	x13,x14,[x1],#2*8
+	add	x27,x27,x17			// h+=Sigma0(a)
+	ror	x16,x23,#14
+	add	x26,x26,x28			// h+=K[i]
+	eor	x15,x23,x23,ror#23
+	and	x17,x24,x23
+	bic	x28,x25,x23
+	add	x26,x26,x12			// h+=X[i]
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x27,x20			// a^b, b^c in next round
+	eor	x16,x16,x15,ror#18	// Sigma1(e)
+	ror	x15,x27,#28
+	add	x26,x26,x17			// h+=Ch(e,f,g)
+	eor	x17,x27,x27,ror#5
+	add	x26,x26,x16			// h+=Sigma1(e)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	add	x22,x22,x26			// d+=h
+	eor	x19,x19,x20			// Maj(a,b,c)
+	eor	x17,x15,x17,ror#34	// Sigma0(a)
+	add	x26,x26,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	//add	x26,x26,x17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	x13,x13			// 10
+#endif
+	add	x26,x26,x17			// h+=Sigma0(a)
+	ror	x16,x22,#14
+	add	x25,x25,x19			// h+=K[i]
+	eor	x0,x22,x22,ror#23
+	and	x17,x23,x22
+	bic	x19,x24,x22
+	add	x25,x25,x13			// h+=X[i]
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x26,x27			// a^b, b^c in next round
+	eor	x16,x16,x0,ror#18	// Sigma1(e)
+	ror	x0,x26,#28
+	add	x25,x25,x17			// h+=Ch(e,f,g)
+	eor	x17,x26,x26,ror#5
+	add	x25,x25,x16			// h+=Sigma1(e)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	add	x21,x21,x25			// d+=h
+	eor	x28,x28,x27			// Maj(a,b,c)
+	eor	x17,x0,x17,ror#34	// Sigma0(a)
+	add	x25,x25,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	//add	x25,x25,x17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	x14,x14			// 11
+#endif
+	ldp	x15,x0,[x1],#2*8
+	add	x25,x25,x17			// h+=Sigma0(a)
+	str	x6,[sp,#24]
+	ror	x16,x21,#14
+	add	x24,x24,x28			// h+=K[i]
+	eor	x6,x21,x21,ror#23
+	and	x17,x22,x21
+	bic	x28,x23,x21
+	add	x24,x24,x14			// h+=X[i]
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x25,x26			// a^b, b^c in next round
+	eor	x16,x16,x6,ror#18	// Sigma1(e)
+	ror	x6,x25,#28
+	add	x24,x24,x17			// h+=Ch(e,f,g)
+	eor	x17,x25,x25,ror#5
+	add	x24,x24,x16			// h+=Sigma1(e)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	add	x20,x20,x24			// d+=h
+	eor	x19,x19,x26			// Maj(a,b,c)
+	eor	x17,x6,x17,ror#34	// Sigma0(a)
+	add	x24,x24,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	//add	x24,x24,x17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	x15,x15			// 12
+#endif
+	add	x24,x24,x17			// h+=Sigma0(a)
+	str	x7,[sp,#0]
+	ror	x16,x20,#14
+	add	x23,x23,x19			// h+=K[i]
+	eor	x7,x20,x20,ror#23
+	and	x17,x21,x20
+	bic	x19,x22,x20
+	add	x23,x23,x15			// h+=X[i]
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x24,x25			// a^b, b^c in next round
+	eor	x16,x16,x7,ror#18	// Sigma1(e)
+	ror	x7,x24,#28
+	add	x23,x23,x17			// h+=Ch(e,f,g)
+	eor	x17,x24,x24,ror#5
+	add	x23,x23,x16			// h+=Sigma1(e)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	add	x27,x27,x23			// d+=h
+	eor	x28,x28,x25			// Maj(a,b,c)
+	eor	x17,x7,x17,ror#34	// Sigma0(a)
+	add	x23,x23,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	//add	x23,x23,x17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	x0,x0			// 13
+#endif
+	ldp	x1,x2,[x1]
+	add	x23,x23,x17			// h+=Sigma0(a)
+	str	x8,[sp,#8]
+	ror	x16,x27,#14
+	add	x22,x22,x28			// h+=K[i]
+	eor	x8,x27,x27,ror#23
+	and	x17,x20,x27
+	bic	x28,x21,x27
+	add	x22,x22,x0			// h+=X[i]
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x23,x24			// a^b, b^c in next round
+	eor	x16,x16,x8,ror#18	// Sigma1(e)
+	ror	x8,x23,#28
+	add	x22,x22,x17			// h+=Ch(e,f,g)
+	eor	x17,x23,x23,ror#5
+	add	x22,x22,x16			// h+=Sigma1(e)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	add	x26,x26,x22			// d+=h
+	eor	x19,x19,x24			// Maj(a,b,c)
+	eor	x17,x8,x17,ror#34	// Sigma0(a)
+	add	x22,x22,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	//add	x22,x22,x17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	x1,x1			// 14
+#endif
+	ldr	x6,[sp,#24]
+	add	x22,x22,x17			// h+=Sigma0(a)
+	str	x9,[sp,#16]
+	ror	x16,x26,#14
+	add	x21,x21,x19			// h+=K[i]
+	eor	x9,x26,x26,ror#23
+	and	x17,x27,x26
+	bic	x19,x20,x26
+	add	x21,x21,x1			// h+=X[i]
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x22,x23			// a^b, b^c in next round
+	eor	x16,x16,x9,ror#18	// Sigma1(e)
+	ror	x9,x22,#28
+	add	x21,x21,x17			// h+=Ch(e,f,g)
+	eor	x17,x22,x22,ror#5
+	add	x21,x21,x16			// h+=Sigma1(e)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	add	x25,x25,x21			// d+=h
+	eor	x28,x28,x23			// Maj(a,b,c)
+	eor	x17,x9,x17,ror#34	// Sigma0(a)
+	add	x21,x21,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	//add	x21,x21,x17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	x2,x2			// 15
+#endif
+	ldr	x7,[sp,#0]
+	add	x21,x21,x17			// h+=Sigma0(a)
+	str	x10,[sp,#24]
+	ror	x16,x25,#14
+	add	x20,x20,x28			// h+=K[i]
+	ror	x9,x4,#1
+	and	x17,x26,x25
+	ror	x8,x1,#19
+	bic	x28,x27,x25
+	ror	x10,x21,#28
+	add	x20,x20,x2			// h+=X[i]
+	eor	x16,x16,x25,ror#18
+	eor	x9,x9,x4,ror#8
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x21,x22			// a^b, b^c in next round
+	eor	x16,x16,x25,ror#41	// Sigma1(e)
+	eor	x10,x10,x21,ror#34
+	add	x20,x20,x17			// h+=Ch(e,f,g)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	eor	x8,x8,x1,ror#61
+	eor	x9,x9,x4,lsr#7	// sigma0(X[i+1])
+	add	x20,x20,x16			// h+=Sigma1(e)
+	eor	x19,x19,x22			// Maj(a,b,c)
+	eor	x17,x10,x21,ror#39	// Sigma0(a)
+	eor	x8,x8,x1,lsr#6	// sigma1(X[i+14])
+	add	x3,x3,x12
+	add	x24,x24,x20			// d+=h
+	add	x20,x20,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	add	x3,x3,x9
+	add	x20,x20,x17			// h+=Sigma0(a)
+	add	x3,x3,x8
+.Loop_16_xx:
+	ldr	x8,[sp,#8]
+	str	x11,[sp,#0]
+	ror	x16,x24,#14
+	add	x27,x27,x19			// h+=K[i]
+	ror	x10,x5,#1
+	and	x17,x25,x24
+	ror	x9,x2,#19
+	bic	x19,x26,x24
+	ror	x11,x20,#28
+	add	x27,x27,x3			// h+=X[i]
+	eor	x16,x16,x24,ror#18
+	eor	x10,x10,x5,ror#8
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x20,x21			// a^b, b^c in next round
+	eor	x16,x16,x24,ror#41	// Sigma1(e)
+	eor	x11,x11,x20,ror#34
+	add	x27,x27,x17			// h+=Ch(e,f,g)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	eor	x9,x9,x2,ror#61
+	eor	x10,x10,x5,lsr#7	// sigma0(X[i+1])
+	add	x27,x27,x16			// h+=Sigma1(e)
+	eor	x28,x28,x21			// Maj(a,b,c)
+	eor	x17,x11,x20,ror#39	// Sigma0(a)
+	eor	x9,x9,x2,lsr#6	// sigma1(X[i+14])
+	add	x4,x4,x13
+	add	x23,x23,x27			// d+=h
+	add	x27,x27,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	add	x4,x4,x10
+	add	x27,x27,x17			// h+=Sigma0(a)
+	add	x4,x4,x9
+	ldr	x9,[sp,#16]
+	str	x12,[sp,#8]
+	ror	x16,x23,#14
+	add	x26,x26,x28			// h+=K[i]
+	ror	x11,x6,#1
+	and	x17,x24,x23
+	ror	x10,x3,#19
+	bic	x28,x25,x23
+	ror	x12,x27,#28
+	add	x26,x26,x4			// h+=X[i]
+	eor	x16,x16,x23,ror#18
+	eor	x11,x11,x6,ror#8
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x27,x20			// a^b, b^c in next round
+	eor	x16,x16,x23,ror#41	// Sigma1(e)
+	eor	x12,x12,x27,ror#34
+	add	x26,x26,x17			// h+=Ch(e,f,g)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	eor	x10,x10,x3,ror#61
+	eor	x11,x11,x6,lsr#7	// sigma0(X[i+1])
+	add	x26,x26,x16			// h+=Sigma1(e)
+	eor	x19,x19,x20			// Maj(a,b,c)
+	eor	x17,x12,x27,ror#39	// Sigma0(a)
+	eor	x10,x10,x3,lsr#6	// sigma1(X[i+14])
+	add	x5,x5,x14
+	add	x22,x22,x26			// d+=h
+	add	x26,x26,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	add	x5,x5,x11
+	add	x26,x26,x17			// h+=Sigma0(a)
+	add	x5,x5,x10
+	ldr	x10,[sp,#24]
+	str	x13,[sp,#16]
+	ror	x16,x22,#14
+	add	x25,x25,x19			// h+=K[i]
+	ror	x12,x7,#1
+	and	x17,x23,x22
+	ror	x11,x4,#19
+	bic	x19,x24,x22
+	ror	x13,x26,#28
+	add	x25,x25,x5			// h+=X[i]
+	eor	x16,x16,x22,ror#18
+	eor	x12,x12,x7,ror#8
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x26,x27			// a^b, b^c in next round
+	eor	x16,x16,x22,ror#41	// Sigma1(e)
+	eor	x13,x13,x26,ror#34
+	add	x25,x25,x17			// h+=Ch(e,f,g)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	eor	x11,x11,x4,ror#61
+	eor	x12,x12,x7,lsr#7	// sigma0(X[i+1])
+	add	x25,x25,x16			// h+=Sigma1(e)
+	eor	x28,x28,x27			// Maj(a,b,c)
+	eor	x17,x13,x26,ror#39	// Sigma0(a)
+	eor	x11,x11,x4,lsr#6	// sigma1(X[i+14])
+	add	x6,x6,x15
+	add	x21,x21,x25			// d+=h
+	add	x25,x25,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	add	x6,x6,x12
+	add	x25,x25,x17			// h+=Sigma0(a)
+	add	x6,x6,x11
+	ldr	x11,[sp,#0]
+	str	x14,[sp,#24]
+	ror	x16,x21,#14
+	add	x24,x24,x28			// h+=K[i]
+	ror	x13,x8,#1
+	and	x17,x22,x21
+	ror	x12,x5,#19
+	bic	x28,x23,x21
+	ror	x14,x25,#28
+	add	x24,x24,x6			// h+=X[i]
+	eor	x16,x16,x21,ror#18
+	eor	x13,x13,x8,ror#8
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x25,x26			// a^b, b^c in next round
+	eor	x16,x16,x21,ror#41	// Sigma1(e)
+	eor	x14,x14,x25,ror#34
+	add	x24,x24,x17			// h+=Ch(e,f,g)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	eor	x12,x12,x5,ror#61
+	eor	x13,x13,x8,lsr#7	// sigma0(X[i+1])
+	add	x24,x24,x16			// h+=Sigma1(e)
+	eor	x19,x19,x26			// Maj(a,b,c)
+	eor	x17,x14,x25,ror#39	// Sigma0(a)
+	eor	x12,x12,x5,lsr#6	// sigma1(X[i+14])
+	add	x7,x7,x0
+	add	x20,x20,x24			// d+=h
+	add	x24,x24,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	add	x7,x7,x13
+	add	x24,x24,x17			// h+=Sigma0(a)
+	add	x7,x7,x12
+	ldr	x12,[sp,#8]
+	str	x15,[sp,#0]
+	ror	x16,x20,#14
+	add	x23,x23,x19			// h+=K[i]
+	ror	x14,x9,#1
+	and	x17,x21,x20
+	ror	x13,x6,#19
+	bic	x19,x22,x20
+	ror	x15,x24,#28
+	add	x23,x23,x7			// h+=X[i]
+	eor	x16,x16,x20,ror#18
+	eor	x14,x14,x9,ror#8
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x24,x25			// a^b, b^c in next round
+	eor	x16,x16,x20,ror#41	// Sigma1(e)
+	eor	x15,x15,x24,ror#34
+	add	x23,x23,x17			// h+=Ch(e,f,g)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	eor	x13,x13,x6,ror#61
+	eor	x14,x14,x9,lsr#7	// sigma0(X[i+1])
+	add	x23,x23,x16			// h+=Sigma1(e)
+	eor	x28,x28,x25			// Maj(a,b,c)
+	eor	x17,x15,x24,ror#39	// Sigma0(a)
+	eor	x13,x13,x6,lsr#6	// sigma1(X[i+14])
+	add	x8,x8,x1
+	add	x27,x27,x23			// d+=h
+	add	x23,x23,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	add	x8,x8,x14
+	add	x23,x23,x17			// h+=Sigma0(a)
+	add	x8,x8,x13
+	ldr	x13,[sp,#16]
+	str	x0,[sp,#8]
+	ror	x16,x27,#14
+	add	x22,x22,x28			// h+=K[i]
+	ror	x15,x10,#1
+	and	x17,x20,x27
+	ror	x14,x7,#19
+	bic	x28,x21,x27
+	ror	x0,x23,#28
+	add	x22,x22,x8			// h+=X[i]
+	eor	x16,x16,x27,ror#18
+	eor	x15,x15,x10,ror#8
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x23,x24			// a^b, b^c in next round
+	eor	x16,x16,x27,ror#41	// Sigma1(e)
+	eor	x0,x0,x23,ror#34
+	add	x22,x22,x17			// h+=Ch(e,f,g)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	eor	x14,x14,x7,ror#61
+	eor	x15,x15,x10,lsr#7	// sigma0(X[i+1])
+	add	x22,x22,x16			// h+=Sigma1(e)
+	eor	x19,x19,x24			// Maj(a,b,c)
+	eor	x17,x0,x23,ror#39	// Sigma0(a)
+	eor	x14,x14,x7,lsr#6	// sigma1(X[i+14])
+	add	x9,x9,x2
+	add	x26,x26,x22			// d+=h
+	add	x22,x22,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	add	x9,x9,x15
+	add	x22,x22,x17			// h+=Sigma0(a)
+	add	x9,x9,x14
+	ldr	x14,[sp,#24]
+	str	x1,[sp,#16]
+	ror	x16,x26,#14
+	add	x21,x21,x19			// h+=K[i]
+	ror	x0,x11,#1
+	and	x17,x27,x26
+	ror	x15,x8,#19
+	bic	x19,x20,x26
+	ror	x1,x22,#28
+	add	x21,x21,x9			// h+=X[i]
+	eor	x16,x16,x26,ror#18
+	eor	x0,x0,x11,ror#8
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x22,x23			// a^b, b^c in next round
+	eor	x16,x16,x26,ror#41	// Sigma1(e)
+	eor	x1,x1,x22,ror#34
+	add	x21,x21,x17			// h+=Ch(e,f,g)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	eor	x15,x15,x8,ror#61
+	eor	x0,x0,x11,lsr#7	// sigma0(X[i+1])
+	add	x21,x21,x16			// h+=Sigma1(e)
+	eor	x28,x28,x23			// Maj(a,b,c)
+	eor	x17,x1,x22,ror#39	// Sigma0(a)
+	eor	x15,x15,x8,lsr#6	// sigma1(X[i+14])
+	add	x10,x10,x3
+	add	x25,x25,x21			// d+=h
+	add	x21,x21,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	add	x10,x10,x0
+	add	x21,x21,x17			// h+=Sigma0(a)
+	add	x10,x10,x15
+	ldr	x15,[sp,#0]
+	str	x2,[sp,#24]
+	ror	x16,x25,#14
+	add	x20,x20,x28			// h+=K[i]
+	ror	x1,x12,#1
+	and	x17,x26,x25
+	ror	x0,x9,#19
+	bic	x28,x27,x25
+	ror	x2,x21,#28
+	add	x20,x20,x10			// h+=X[i]
+	eor	x16,x16,x25,ror#18
+	eor	x1,x1,x12,ror#8
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x21,x22			// a^b, b^c in next round
+	eor	x16,x16,x25,ror#41	// Sigma1(e)
+	eor	x2,x2,x21,ror#34
+	add	x20,x20,x17			// h+=Ch(e,f,g)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	eor	x0,x0,x9,ror#61
+	eor	x1,x1,x12,lsr#7	// sigma0(X[i+1])
+	add	x20,x20,x16			// h+=Sigma1(e)
+	eor	x19,x19,x22			// Maj(a,b,c)
+	eor	x17,x2,x21,ror#39	// Sigma0(a)
+	eor	x0,x0,x9,lsr#6	// sigma1(X[i+14])
+	add	x11,x11,x4
+	add	x24,x24,x20			// d+=h
+	add	x20,x20,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	add	x11,x11,x1
+	add	x20,x20,x17			// h+=Sigma0(a)
+	add	x11,x11,x0
+	ldr	x0,[sp,#8]
+	str	x3,[sp,#0]
+	ror	x16,x24,#14
+	add	x27,x27,x19			// h+=K[i]
+	ror	x2,x13,#1
+	and	x17,x25,x24
+	ror	x1,x10,#19
+	bic	x19,x26,x24
+	ror	x3,x20,#28
+	add	x27,x27,x11			// h+=X[i]
+	eor	x16,x16,x24,ror#18
+	eor	x2,x2,x13,ror#8
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x20,x21			// a^b, b^c in next round
+	eor	x16,x16,x24,ror#41	// Sigma1(e)
+	eor	x3,x3,x20,ror#34
+	add	x27,x27,x17			// h+=Ch(e,f,g)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	eor	x1,x1,x10,ror#61
+	eor	x2,x2,x13,lsr#7	// sigma0(X[i+1])
+	add	x27,x27,x16			// h+=Sigma1(e)
+	eor	x28,x28,x21			// Maj(a,b,c)
+	eor	x17,x3,x20,ror#39	// Sigma0(a)
+	eor	x1,x1,x10,lsr#6	// sigma1(X[i+14])
+	add	x12,x12,x5
+	add	x23,x23,x27			// d+=h
+	add	x27,x27,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	add	x12,x12,x2
+	add	x27,x27,x17			// h+=Sigma0(a)
+	add	x12,x12,x1
+	ldr	x1,[sp,#16]
+	str	x4,[sp,#8]
+	ror	x16,x23,#14
+	add	x26,x26,x28			// h+=K[i]
+	ror	x3,x14,#1
+	and	x17,x24,x23
+	ror	x2,x11,#19
+	bic	x28,x25,x23
+	ror	x4,x27,#28
+	add	x26,x26,x12			// h+=X[i]
+	eor	x16,x16,x23,ror#18
+	eor	x3,x3,x14,ror#8
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x27,x20			// a^b, b^c in next round
+	eor	x16,x16,x23,ror#41	// Sigma1(e)
+	eor	x4,x4,x27,ror#34
+	add	x26,x26,x17			// h+=Ch(e,f,g)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	eor	x2,x2,x11,ror#61
+	eor	x3,x3,x14,lsr#7	// sigma0(X[i+1])
+	add	x26,x26,x16			// h+=Sigma1(e)
+	eor	x19,x19,x20			// Maj(a,b,c)
+	eor	x17,x4,x27,ror#39	// Sigma0(a)
+	eor	x2,x2,x11,lsr#6	// sigma1(X[i+14])
+	add	x13,x13,x6
+	add	x22,x22,x26			// d+=h
+	add	x26,x26,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	add	x13,x13,x3
+	add	x26,x26,x17			// h+=Sigma0(a)
+	add	x13,x13,x2
+	ldr	x2,[sp,#24]
+	str	x5,[sp,#16]
+	ror	x16,x22,#14
+	add	x25,x25,x19			// h+=K[i]
+	ror	x4,x15,#1
+	and	x17,x23,x22
+	ror	x3,x12,#19
+	bic	x19,x24,x22
+	ror	x5,x26,#28
+	add	x25,x25,x13			// h+=X[i]
+	eor	x16,x16,x22,ror#18
+	eor	x4,x4,x15,ror#8
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x26,x27			// a^b, b^c in next round
+	eor	x16,x16,x22,ror#41	// Sigma1(e)
+	eor	x5,x5,x26,ror#34
+	add	x25,x25,x17			// h+=Ch(e,f,g)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	eor	x3,x3,x12,ror#61
+	eor	x4,x4,x15,lsr#7	// sigma0(X[i+1])
+	add	x25,x25,x16			// h+=Sigma1(e)
+	eor	x28,x28,x27			// Maj(a,b,c)
+	eor	x17,x5,x26,ror#39	// Sigma0(a)
+	eor	x3,x3,x12,lsr#6	// sigma1(X[i+14])
+	add	x14,x14,x7
+	add	x21,x21,x25			// d+=h
+	add	x25,x25,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	add	x14,x14,x4
+	add	x25,x25,x17			// h+=Sigma0(a)
+	add	x14,x14,x3
+	ldr	x3,[sp,#0]
+	str	x6,[sp,#24]
+	ror	x16,x21,#14
+	add	x24,x24,x28			// h+=K[i]
+	ror	x5,x0,#1
+	and	x17,x22,x21
+	ror	x4,x13,#19
+	bic	x28,x23,x21
+	ror	x6,x25,#28
+	add	x24,x24,x14			// h+=X[i]
+	eor	x16,x16,x21,ror#18
+	eor	x5,x5,x0,ror#8
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x25,x26			// a^b, b^c in next round
+	eor	x16,x16,x21,ror#41	// Sigma1(e)
+	eor	x6,x6,x25,ror#34
+	add	x24,x24,x17			// h+=Ch(e,f,g)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	eor	x4,x4,x13,ror#61
+	eor	x5,x5,x0,lsr#7	// sigma0(X[i+1])
+	add	x24,x24,x16			// h+=Sigma1(e)
+	eor	x19,x19,x26			// Maj(a,b,c)
+	eor	x17,x6,x25,ror#39	// Sigma0(a)
+	eor	x4,x4,x13,lsr#6	// sigma1(X[i+14])
+	add	x15,x15,x8
+	add	x20,x20,x24			// d+=h
+	add	x24,x24,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	add	x15,x15,x5
+	add	x24,x24,x17			// h+=Sigma0(a)
+	add	x15,x15,x4
+	ldr	x4,[sp,#8]
+	str	x7,[sp,#0]
+	ror	x16,x20,#14
+	add	x23,x23,x19			// h+=K[i]
+	ror	x6,x1,#1
+	and	x17,x21,x20
+	ror	x5,x14,#19
+	bic	x19,x22,x20
+	ror	x7,x24,#28
+	add	x23,x23,x15			// h+=X[i]
+	eor	x16,x16,x20,ror#18
+	eor	x6,x6,x1,ror#8
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x24,x25			// a^b, b^c in next round
+	eor	x16,x16,x20,ror#41	// Sigma1(e)
+	eor	x7,x7,x24,ror#34
+	add	x23,x23,x17			// h+=Ch(e,f,g)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	eor	x5,x5,x14,ror#61
+	eor	x6,x6,x1,lsr#7	// sigma0(X[i+1])
+	add	x23,x23,x16			// h+=Sigma1(e)
+	eor	x28,x28,x25			// Maj(a,b,c)
+	eor	x17,x7,x24,ror#39	// Sigma0(a)
+	eor	x5,x5,x14,lsr#6	// sigma1(X[i+14])
+	add	x0,x0,x9
+	add	x27,x27,x23			// d+=h
+	add	x23,x23,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	add	x0,x0,x6
+	add	x23,x23,x17			// h+=Sigma0(a)
+	add	x0,x0,x5
+	ldr	x5,[sp,#16]
+	str	x8,[sp,#8]
+	ror	x16,x27,#14
+	add	x22,x22,x28			// h+=K[i]
+	ror	x7,x2,#1
+	and	x17,x20,x27
+	ror	x6,x15,#19
+	bic	x28,x21,x27
+	ror	x8,x23,#28
+	add	x22,x22,x0			// h+=X[i]
+	eor	x16,x16,x27,ror#18
+	eor	x7,x7,x2,ror#8
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x23,x24			// a^b, b^c in next round
+	eor	x16,x16,x27,ror#41	// Sigma1(e)
+	eor	x8,x8,x23,ror#34
+	add	x22,x22,x17			// h+=Ch(e,f,g)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	eor	x6,x6,x15,ror#61
+	eor	x7,x7,x2,lsr#7	// sigma0(X[i+1])
+	add	x22,x22,x16			// h+=Sigma1(e)
+	eor	x19,x19,x24			// Maj(a,b,c)
+	eor	x17,x8,x23,ror#39	// Sigma0(a)
+	eor	x6,x6,x15,lsr#6	// sigma1(X[i+14])
+	add	x1,x1,x10
+	add	x26,x26,x22			// d+=h
+	add	x22,x22,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	add	x1,x1,x7
+	add	x22,x22,x17			// h+=Sigma0(a)
+	add	x1,x1,x6
+	ldr	x6,[sp,#24]
+	str	x9,[sp,#16]
+	ror	x16,x26,#14
+	add	x21,x21,x19			// h+=K[i]
+	ror	x8,x3,#1
+	and	x17,x27,x26
+	ror	x7,x0,#19
+	bic	x19,x20,x26
+	ror	x9,x22,#28
+	add	x21,x21,x1			// h+=X[i]
+	eor	x16,x16,x26,ror#18
+	eor	x8,x8,x3,ror#8
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x22,x23			// a^b, b^c in next round
+	eor	x16,x16,x26,ror#41	// Sigma1(e)
+	eor	x9,x9,x22,ror#34
+	add	x21,x21,x17			// h+=Ch(e,f,g)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	eor	x7,x7,x0,ror#61
+	eor	x8,x8,x3,lsr#7	// sigma0(X[i+1])
+	add	x21,x21,x16			// h+=Sigma1(e)
+	eor	x28,x28,x23			// Maj(a,b,c)
+	eor	x17,x9,x22,ror#39	// Sigma0(a)
+	eor	x7,x7,x0,lsr#6	// sigma1(X[i+14])
+	add	x2,x2,x11
+	add	x25,x25,x21			// d+=h
+	add	x21,x21,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	add	x2,x2,x8
+	add	x21,x21,x17			// h+=Sigma0(a)
+	add	x2,x2,x7
+	ldr	x7,[sp,#0]
+	str	x10,[sp,#24]
+	ror	x16,x25,#14
+	add	x20,x20,x28			// h+=K[i]
+	ror	x9,x4,#1
+	and	x17,x26,x25
+	ror	x8,x1,#19
+	bic	x28,x27,x25
+	ror	x10,x21,#28
+	add	x20,x20,x2			// h+=X[i]
+	eor	x16,x16,x25,ror#18
+	eor	x9,x9,x4,ror#8
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x21,x22			// a^b, b^c in next round
+	eor	x16,x16,x25,ror#41	// Sigma1(e)
+	eor	x10,x10,x21,ror#34
+	add	x20,x20,x17			// h+=Ch(e,f,g)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	eor	x8,x8,x1,ror#61
+	eor	x9,x9,x4,lsr#7	// sigma0(X[i+1])
+	add	x20,x20,x16			// h+=Sigma1(e)
+	eor	x19,x19,x22			// Maj(a,b,c)
+	eor	x17,x10,x21,ror#39	// Sigma0(a)
+	eor	x8,x8,x1,lsr#6	// sigma1(X[i+14])
+	add	x3,x3,x12
+	add	x24,x24,x20			// d+=h
+	add	x20,x20,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	add	x3,x3,x9
+	add	x20,x20,x17			// h+=Sigma0(a)
+	add	x3,x3,x8
+	cbnz	x19,.Loop_16_xx
+
+	ldp	x0,x2,[x29,#96]
+	ldr	x1,[x29,#112]
+	sub	x30,x30,#648		// rewind
+
+	ldp	x3,x4,[x0]
+	ldp	x5,x6,[x0,#2*8]
+	add	x1,x1,#14*8			// advance input pointer
+	ldp	x7,x8,[x0,#4*8]
+	add	x20,x20,x3
+	ldp	x9,x10,[x0,#6*8]
+	add	x21,x21,x4
+	add	x22,x22,x5
+	add	x23,x23,x6
+	stp	x20,x21,[x0]
+	add	x24,x24,x7
+	add	x25,x25,x8
+	stp	x22,x23,[x0,#2*8]
+	add	x26,x26,x9
+	add	x27,x27,x10
+	cmp	x1,x2
+	stp	x24,x25,[x0,#4*8]
+	stp	x26,x27,[x0,#6*8]
+	b.ne	.Loop
+
+	ldp	x19,x20,[x29,#16]
+	add	sp,sp,#4*8
+	ldp	x21,x22,[x29,#32]
+	ldp	x23,x24,[x29,#48]
+	ldp	x25,x26,[x29,#64]
+	ldp	x27,x28,[x29,#80]
+	ldp	x29,x30,[sp],#128
+	ret
+.size	sha512_block_data_order,.-sha512_block_data_order
+
+.align	6
+.type	.LK512,%object
+.LK512:
+	.quad	0x428a2f98d728ae22,0x7137449123ef65cd
+	.quad	0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
+	.quad	0x3956c25bf348b538,0x59f111f1b605d019
+	.quad	0x923f82a4af194f9b,0xab1c5ed5da6d8118
+	.quad	0xd807aa98a3030242,0x12835b0145706fbe
+	.quad	0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
+	.quad	0x72be5d74f27b896f,0x80deb1fe3b1696b1
+	.quad	0x9bdc06a725c71235,0xc19bf174cf692694
+	.quad	0xe49b69c19ef14ad2,0xefbe4786384f25e3
+	.quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
+	.quad	0x2de92c6f592b0275,0x4a7484aa6ea6e483
+	.quad	0x5cb0a9dcbd41fbd4,0x76f988da831153b5
+	.quad	0x983e5152ee66dfab,0xa831c66d2db43210
+	.quad	0xb00327c898fb213f,0xbf597fc7beef0ee4
+	.quad	0xc6e00bf33da88fc2,0xd5a79147930aa725
+	.quad	0x06ca6351e003826f,0x142929670a0e6e70
+	.quad	0x27b70a8546d22ffc,0x2e1b21385c26c926
+	.quad	0x4d2c6dfc5ac42aed,0x53380d139d95b3df
+	.quad	0x650a73548baf63de,0x766a0abb3c77b2a8
+	.quad	0x81c2c92e47edaee6,0x92722c851482353b
+	.quad	0xa2bfe8a14cf10364,0xa81a664bbc423001
+	.quad	0xc24b8b70d0f89791,0xc76c51a30654be30
+	.quad	0xd192e819d6ef5218,0xd69906245565a910
+	.quad	0xf40e35855771202a,0x106aa07032bbd1b8
+	.quad	0x19a4c116b8d2d0c8,0x1e376c085141ab53
+	.quad	0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
+	.quad	0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
+	.quad	0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
+	.quad	0x748f82ee5defb2fc,0x78a5636f43172f60
+	.quad	0x84c87814a1f0ab72,0x8cc702081a6439ec
+	.quad	0x90befffa23631e28,0xa4506cebde82bde9
+	.quad	0xbef9a3f7b2c67915,0xc67178f2e372532b
+	.quad	0xca273eceea26619c,0xd186b8c721c0c207
+	.quad	0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
+	.quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
+	.quad	0x113f9804bef90dae,0x1b710b35131c471b
+	.quad	0x28db77f523047d84,0x32caab7b40c72493
+	.quad	0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
+	.quad	0x4cc5d4becb3e42b6,0x597f299cfc657e2a
+	.quad	0x5fcb6fab3ad6faec,0x6c44198c4a475817
+	.quad	0	// terminator
+.size	.LK512,.-.LK512
+#ifndef	__KERNEL__
+.align	3
+.LOPENSSL_armcap_P:
+# ifdef	__ILP32__
+	.long	OPENSSL_armcap_P-.
+# else
+	.quad	OPENSSL_armcap_P-.
+# endif
+#endif
+.asciz	"SHA512 block transform for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
+.align	2
+#ifndef	__KERNEL__
+.comm	OPENSSL_armcap_P,4,4
+#endif
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 28196b1..fd38814 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -27,6 +27,7 @@
 generic-y += poll.h
 generic-y += preempt.h
 generic-y += resource.h
+generic-y += qrwlock.h
 generic-y += rwsem.h
 generic-y += segment.h
 generic-y += sembuf.h
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index c4cc771..f8b5c48 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -228,6 +228,7 @@
 }
 
 #define gic_read_typer(c)		readq_relaxed_no_log(c)
+#define gic_read_irouter(c)		readq_relaxed_no_log(c)
 #define gic_write_irouter(v, c)		writeq_relaxed_no_log(v, c)
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index eaa5bbe..f95235a 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -148,8 +148,17 @@
 
 static inline u64 arch_counter_get_cntvct(void)
 {
+	u64 cval;
 	isb();
-	return arch_timer_reg_read_stable(cntvct_el0);
+#if IS_ENABLED(CONFIG_MSM_TIMER_LEAP)
+#define L32_BITS       0x00000000FFFFFFFF
+	do {
+		cval = arch_timer_reg_read_stable(cntvct_el0);
+	} while ((cval & L32_BITS) == L32_BITS);
+#else
+	cval = arch_timer_reg_read_stable(cntvct_el0);
+#endif
+	return cval;
 }
 
 static inline int arch_timer_arch_init(void)
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 88a4d1e..c7749d8 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -109,6 +109,24 @@
 	.endm
 
 /*
+ * Value prediction barrier
+ */
+	.macro	csdb
+	hint	#20
+	.endm
+
+/*
+ * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
+ * of bounds.
+ */
+	.macro	mask_nospec64, idx, limit, tmp
+	sub	\tmp, \idx, \limit
+	bic	\tmp, \tmp, \idx
+	and	\idx, \idx, \tmp, asr #63
+	csdb
+	.endm
+
+/*
  * NOP sequence
  */
 	.macro	nops, num
@@ -492,4 +510,8 @@
 .Ldone\@:
 	.endm
 
+	.macro	pte_to_phys, phys, pte
+	and	\phys, \pte, #(((1 << (48 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
+	.endm
+
 #endif	/* __ASM_ASSEMBLER_H */
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 0fe7e43..0b0755c 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -31,6 +31,8 @@
 #define dmb(opt)	asm volatile("dmb " #opt : : : "memory")
 #define dsb(opt)	asm volatile("dsb " #opt : : : "memory")
 
+#define csdb()		asm volatile("hint #20" : : : "memory")
+
 #define mb()		dsb(sy)
 #define rmb()		dsb(ld)
 #define wmb()		dsb(st)
@@ -38,6 +40,27 @@
 #define dma_rmb()	dmb(oshld)
 #define dma_wmb()	dmb(oshst)
 
+/*
+ * Generate a mask for array_index_nospec() that is ~0UL when 0 <= idx < sz
+ * and 0 otherwise.
+ */
+#define array_index_mask_nospec array_index_mask_nospec
+static inline unsigned long array_index_mask_nospec(unsigned long idx,
+						    unsigned long sz)
+{
+	unsigned long mask;
+
+	asm volatile(
+	"	cmp	%1, %2\n"
+	"	sbc	%0, xzr, xzr\n"
+	: "=r" (mask)
+	: "r" (idx), "Ir" (sz)
+	: "cc");
+
+	csdb();
+	return mask;
+}
+
 #define __smp_mb()	dmb(ish)
 #define __smp_rmb()	dmb(ishld)
 #define __smp_wmb()	dmb(ishst)
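For reference, the mask built above is normally consumed through array_index_nospec() from <linux/nospec.h>. A minimal sketch of that pattern follows (table_read(), table, nr and idx are illustrative names, not part of this patch):

#include <linux/nospec.h>
#include <linux/types.h>

/* Clamp a user-controlled index under misspeculation before using it. */
static u32 table_read(const u32 *table, unsigned long nr, unsigned long idx)
{
	if (idx >= nr)			/* architectural bounds check */
		return 0;
	/* expands to idx & array_index_mask_nospec(idx, nr): forced to 0 if mispredicted */
	idx = array_index_nospec(idx, nr);
	return table[idx];
}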
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 847bfe6..9a6b81a 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -92,6 +92,7 @@
 
 #define CAVIUM_CPU_PART_THUNDERX	0x0A1
 #define CAVIUM_CPU_PART_THUNDERX_81XX	0x0A2
+#define CAVIUM_CPU_PART_THUNDERX2	0x0AF
 
 #define BRCM_CPU_PART_VULCAN		0x516
 
@@ -107,6 +108,8 @@
 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
 #define MIDR_KRYO2XX_GOLD \
 	MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, ARM_CPU_PART_KRYO2XX_GOLD)
+#define MIDR_CAVIUM_THUNDERX2 MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX2)
+#define MIDR_BRCM_VULCAN MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_VULCAN)
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/arm64/include/asm/dma-contiguous.h b/arch/arm64/include/asm/dma-contiguous.h
index f7e2c32..50f70a8 100644
--- a/arch/arm64/include/asm/dma-contiguous.h
+++ b/arch/arm64/include/asm/dma-contiguous.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013,2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013,2017-2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -15,7 +15,6 @@
 #define _ASM_DMA_CONTIGUOUS_H
 
 #ifdef __KERNEL__
-#ifdef CONFIG_DMA_CMA
 
 #include <linux/types.h>
 
@@ -23,5 +22,3 @@
 
 #endif
 #endif
-
-#endif
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 85c4a89..a891bb6 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -48,20 +48,10 @@
 } while (0)
 
 static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, ret, tmp;
 
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
-		return -EFAULT;
-
 	pagefault_disable();
 
 	switch (op) {
@@ -91,30 +81,24 @@
 
 	pagefault_enable();
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
-		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
-		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
-		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
-		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
-		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
-		default: ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
+
 	return ret;
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
 			      u32 oldval, u32 newval)
 {
 	int ret = 0;
 	u32 val, tmp;
+	u32 __user *uaddr;
 
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+	if (!access_ok(VERIFY_WRITE, _uaddr, sizeof(u32)))
 		return -EFAULT;
 
+	uaddr = __uaccess_mask_ptr(_uaddr);
 	uaccess_enable();
 	asm volatile("// futex_atomic_cmpxchg_inatomic\n"
 "	prfm	pstl1strm, %2\n"
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 5c0c57e..388a0ca 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -167,9 +167,9 @@
 #define readq_relaxed_no_log(c)	({ u64 __v = le64_to_cpu((__force __le64)__raw_readq_no_log(c)); __v; })
 
 #define writeb_relaxed_no_log(v, c)	((void)__raw_writeb_no_log((v), (c)))
-#define writew_relaxed_no_log(v, c)	((void)__raw_writew_no_log((__force u16)cpu_to_le32(v), (c)))
+#define writew_relaxed_no_log(v, c)	((void)__raw_writew_no_log((__force u16)cpu_to_le16(v), (c)))
 #define writel_relaxed_no_log(v, c)	((void)__raw_writel_no_log((__force u32)cpu_to_le32(v), (c)))
-#define writeq_relaxed_no_log(v, c)	((void)__raw_writeq_no_log((__force u64)cpu_to_le32(v), (c)))
+#define writeq_relaxed_no_log(v, c)	((void)__raw_writeq_no_log((__force u64)cpu_to_le64(v), (c)))
 
 /*
  * I/O memory access primitives. Reads are ordered relative to any
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 77a27af..7803343 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -78,16 +78,8 @@
 /*
  * Initial memory map attributes.
  */
-#define _SWAPPER_PTE_FLAGS	(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
-#define _SWAPPER_PMD_FLAGS	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
-
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-#define SWAPPER_PTE_FLAGS	(_SWAPPER_PTE_FLAGS | PTE_NG)
-#define SWAPPER_PMD_FLAGS	(_SWAPPER_PMD_FLAGS | PMD_SECT_NG)
-#else
-#define SWAPPER_PTE_FLAGS	_SWAPPER_PTE_FLAGS
-#define SWAPPER_PMD_FLAGS	_SWAPPER_PMD_FLAGS
-#endif
+#define SWAPPER_PTE_FLAGS	(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
+#define SWAPPER_PMD_FLAGS	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
 
 #if ARM64_SWAPPER_USES_SECTION_MAPS
 #define SWAPPER_MM_MMUFLAGS	(PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e505038..0a33ea3 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -73,6 +73,9 @@
 
 	/* Timer */
 	struct arch_timer_kvm	timer;
+
+	/* Mandated version of PSCI */
+	u32 psci_version;
 };
 
 #define KVM_NR_MEM_OBJS     40
@@ -393,4 +396,9 @@
 		  "PARange is %d bits, unsupported configuration!", parange);
 }
 
+static inline bool kvm_arm_harden_branch_predictor(void)
+{
+	return cpus_have_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
+}
+
 #endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 35ea9c1..24a8369 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -313,6 +313,22 @@
 	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
 }
 
+/*
+ * We are not in the kvm->srcu critical section most of the time, so we take
+ * the SRCU read lock here. Since we copy the data from the user page, we
+ * can immediately drop the lock again.
+ */
+static inline int kvm_read_guest_lock(struct kvm *kvm,
+				      gpa_t gpa, void *data, unsigned long len)
+{
+	int srcu_idx = srcu_read_lock(&kvm->srcu);
+	int ret = kvm_read_guest(kvm, gpa, data, len);
+
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+	return ret;
+}
+
 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 #include <asm/mmu.h>
 
@@ -325,7 +341,7 @@
 		vect = __bp_harden_hyp_vecs_start +
 		       data->hyp_vectors_slot * SZ_2K;
 
-		if (!has_vhe())
+		if (!cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN))
 			vect = lm_alias(vect);
 	}
 
diff --git a/arch/arm64/include/asm/kvm_psci.h b/arch/arm64/include/asm/kvm_psci.h
deleted file mode 100644
index bc39e55..0000000
--- a/arch/arm64/include/asm/kvm_psci.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (C) 2012,2013 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __ARM64_KVM_PSCI_H__
-#define __ARM64_KVM_PSCI_H__
-
-#define KVM_ARM_PSCI_0_1	1
-#define KVM_ARM_PSCI_0_2	2
-
-int kvm_psci_version(struct kvm_vcpu *vcpu);
-int kvm_psci_call(struct kvm_vcpu *vcpu);
-
-#endif /* __ARM64_KVM_PSCI_H__ */
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 8fe5ffc..708028e 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -61,12 +61,12 @@
  * KIMAGE_VADDR - the virtual address of the start of the kernel image
  * VA_BITS - the maximum number of bits for virtual addresses.
  * VA_START - the first kernel virtual address.
- * TASK_SIZE - the maximum size of a user space task.
- * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
  */
 #define VA_BITS			(CONFIG_ARM64_VA_BITS)
-#define VA_START		(UL(0xffffffffffffffff) << VA_BITS)
-#define PAGE_OFFSET		(UL(0xffffffffffffffff) << (VA_BITS - 1))
+#define VA_START		(UL(0xffffffffffffffff) - \
+	(UL(1) << VA_BITS) + 1)
+#define PAGE_OFFSET		(UL(0xffffffffffffffff) - \
+	(UL(1) << (VA_BITS - 1)) + 1)
 #define KIMAGE_VADDR		(MODULES_END)
 #define MODULES_END		(MODULES_VADDR + MODULES_VSIZE)
 #define MODULES_VADDR		(VA_START + KASAN_SHADOW_SIZE)
@@ -75,19 +75,6 @@
 #define PCI_IO_END		(VMEMMAP_START - SZ_2M)
 #define PCI_IO_START		(PCI_IO_END - PCI_IO_SIZE)
 #define FIXADDR_TOP		(PCI_IO_START - SZ_2M)
-#define TASK_SIZE_64		(UL(1) << VA_BITS)
-
-#ifdef CONFIG_COMPAT
-#define TASK_SIZE_32		UL(0x100000000)
-#define TASK_SIZE		(test_thread_flag(TIF_32BIT) ? \
-				TASK_SIZE_32 : TASK_SIZE_64)
-#define TASK_SIZE_OF(tsk)	(test_tsk_thread_flag(tsk, TIF_32BIT) ? \
-				TASK_SIZE_32 : TASK_SIZE_64)
-#else
-#define TASK_SIZE		TASK_SIZE_64
-#endif /* CONFIG_COMPAT */
-
-#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 4))
 
 #define KERNEL_START      _text
 #define KERNEL_END        _end
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index f543df3..24c780d 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -21,7 +21,7 @@
 
 #ifndef __ASSEMBLY__
 
-#include <asm/percpu.h>
+#include <linux/percpu.h>
 
 typedef struct {
 	atomic64_t	id;
@@ -55,7 +55,7 @@
 
 static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
 {
-	return raw_cpu_ptr(&bp_hardening_data);
+	return this_cpu_ptr(&bp_hardening_data);
 }
 
 static inline void arm64_apply_bp_hardening(void)
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 84b5283..f705d96 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -37,13 +37,11 @@
 #define _PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
 #define _PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
 
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-#define PROT_DEFAULT		(_PROT_DEFAULT | PTE_NG)
-#define PROT_SECT_DEFAULT	(_PROT_SECT_DEFAULT | PMD_SECT_NG)
-#else
-#define PROT_DEFAULT		_PROT_DEFAULT
-#define PROT_SECT_DEFAULT	_PROT_SECT_DEFAULT
-#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+#define PTE_MAYBE_NG		(arm64_kernel_unmapped_at_el0() ? PTE_NG : 0)
+#define PMD_MAYBE_NG		(arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0)
+
+#define PROT_DEFAULT		(_PROT_DEFAULT | PTE_MAYBE_NG)
+#define PROT_SECT_DEFAULT	(_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
 
 #define PROT_DEVICE_nGnRnE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
 #define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
@@ -55,22 +53,22 @@
 #define PROT_SECT_NORMAL	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
 #define PROT_SECT_NORMAL_EXEC	(PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
 
-#define _PAGE_DEFAULT		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
-#define _HYP_PAGE_DEFAULT	(_PAGE_DEFAULT & ~PTE_NG)
+#define _PAGE_DEFAULT		(_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
+#define _HYP_PAGE_DEFAULT	_PAGE_DEFAULT
 
-#define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
-#define PAGE_KERNEL_RO		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
-#define PAGE_KERNEL_ROX		__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
-#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
-#define PAGE_KERNEL_EXEC_CONT	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
+#define PAGE_KERNEL		__pgprot(PROT_NORMAL)
+#define PAGE_KERNEL_RO		__pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
+#define PAGE_KERNEL_ROX		__pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
+#define PAGE_KERNEL_EXEC	__pgprot(PROT_NORMAL & ~PTE_PXN)
+#define PAGE_KERNEL_EXEC_CONT	__pgprot((PROT_NORMAL & ~PTE_PXN) | PTE_CONT)
 
 #define PAGE_HYP		__pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
 #define PAGE_HYP_EXEC		__pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
 #define PAGE_HYP_RO		__pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
 #define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
 
-#define PAGE_S2			__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
-#define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
+#define PAGE_S2			__pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
+#define PAGE_S2_DEVICE		__pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
 
 #define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_NG | PTE_PXN | PTE_UXN)
 #define PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index a4af9f0..d8039a1 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -19,6 +19,13 @@
 #ifndef __ASM_PROCESSOR_H
 #define __ASM_PROCESSOR_H
 
+#define TASK_SIZE_64		(UL(1) << VA_BITS)
+
+#define KERNEL_DS	UL(-1)
+#define USER_DS		(TASK_SIZE_64 - 1)
+
+#ifndef __ASSEMBLY__
+
 /*
  * Default implementation of macro that returns current
  * instruction pointer ("program counter").
@@ -37,6 +44,22 @@
 #include <asm/ptrace.h>
 #include <asm/types.h>
 
+/*
+ * TASK_SIZE - the maximum size of a user space task.
+ * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
+ */
+#ifdef CONFIG_COMPAT
+#define TASK_SIZE_32		UL(0x100000000)
+#define TASK_SIZE		(test_thread_flag(TIF_32BIT) ? \
+				TASK_SIZE_32 : TASK_SIZE_64)
+#define TASK_SIZE_OF(tsk)	(test_tsk_thread_flag(tsk, TIF_32BIT) ? \
+				TASK_SIZE_32 : TASK_SIZE_64)
+#else
+#define TASK_SIZE		TASK_SIZE_64
+#endif /* CONFIG_COMPAT */
+
+#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 4))
+
 #define STACK_TOP_MAX		TASK_SIZE_64
 #ifdef CONFIG_COMPAT
 #define AARCH32_VECTORS_BASE	0xffff0000
@@ -195,4 +218,5 @@
 int cpu_enable_uao(void *__unused);
 int cpu_enable_cache_maint_trap(void *__unused);
 
+#endif /* __ASSEMBLY__ */
 #endif /* __ASM_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 55082cc..53c0f54 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -140,8 +140,8 @@
 	"	cbnz	%w1, 1f\n"
 	"	add	%w1, %w0, %3\n"
 	"	casa	%w0, %w1, %2\n"
-	"	and	%w1, %w1, #0xffff\n"
-	"	eor	%w1, %w1, %w0, lsr #16\n"
+	"	sub	%w1, %w1, %3\n"
+	"	eor	%w1, %w1, %w0\n"
 	"1:")
 	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
 	: "I" (1 << TICKET_SHIFT)
@@ -186,169 +186,7 @@
 }
 #define arch_spin_is_contended	arch_spin_is_contended
 
-/*
- * Write lock implementation.
- *
- * Write locks set bit 31. Unlocking, is done by writing 0 since the lock is
- * exclusively held.
- *
- * The memory barriers are implicit with the load-acquire and store-release
- * instructions.
- */
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
-	unsigned int tmp;
-
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-	"	sevl\n"
-	"1:	wfe\n"
-	"2:	ldaxr	%w0, %1\n"
-	"	cbnz	%w0, 1b\n"
-	"	stxr	%w0, %w2, %1\n"
-	"	cbnz	%w0, 2b\n"
-	__nops(1),
-	/* LSE atomics */
-	"1:	mov	%w0, wzr\n"
-	"2:	casa	%w0, %w2, %1\n"
-	"	cbz	%w0, 3f\n"
-	"	ldxr	%w0, %1\n"
-	"	cbz	%w0, 2b\n"
-	"	wfe\n"
-	"	b	1b\n"
-	"3:")
-	: "=&r" (tmp), "+Q" (rw->lock)
-	: "r" (0x80000000)
-	: "memory");
-}
-
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
-	unsigned int tmp;
-
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-	"1:	ldaxr	%w0, %1\n"
-	"	cbnz	%w0, 2f\n"
-	"	stxr	%w0, %w2, %1\n"
-	"	cbnz	%w0, 1b\n"
-	"2:",
-	/* LSE atomics */
-	"	mov	%w0, wzr\n"
-	"	casa	%w0, %w2, %1\n"
-	__nops(2))
-	: "=&r" (tmp), "+Q" (rw->lock)
-	: "r" (0x80000000)
-	: "memory");
-
-	return !tmp;
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
-	"	stlr	wzr, %0",
-	"	swpl	wzr, wzr, %0")
-	: "=Q" (rw->lock) :: "memory");
-}
-
-/* write_can_lock - would write_trylock() succeed? */
-#define arch_write_can_lock(x)		((x)->lock == 0)
-
-/*
- * Read lock implementation.
- *
- * It exclusively loads the lock value, increments it and stores the new value
- * back if positive and the CPU still exclusively owns the location. If the
- * value is negative, the lock is already held.
- *
- * During unlocking there may be multiple active read locks but no write lock.
- *
- * The memory barriers are implicit with the load-acquire and store-release
- * instructions.
- *
- * Note that in UNDEFINED cases, such as unlocking a lock twice, the LL/SC
- * and LSE implementations may exhibit different behaviour (although this
- * will have no effect on lockdep).
- */
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
-	unsigned int tmp, tmp2;
-
-	asm volatile(
-	"	sevl\n"
-	ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-	"1:	wfe\n"
-	"2:	ldaxr	%w0, %2\n"
-	"	add	%w0, %w0, #1\n"
-	"	tbnz	%w0, #31, 1b\n"
-	"	stxr	%w1, %w0, %2\n"
-	"	cbnz	%w1, 2b\n"
-	__nops(1),
-	/* LSE atomics */
-	"1:	wfe\n"
-	"2:	ldxr	%w0, %2\n"
-	"	adds	%w1, %w0, #1\n"
-	"	tbnz	%w1, #31, 1b\n"
-	"	casa	%w0, %w1, %2\n"
-	"	sbc	%w0, %w1, %w0\n"
-	"	cbnz	%w0, 2b")
-	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
-	:
-	: "cc", "memory");
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
-	unsigned int tmp, tmp2;
-
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-	"1:	ldxr	%w0, %2\n"
-	"	sub	%w0, %w0, #1\n"
-	"	stlxr	%w1, %w0, %2\n"
-	"	cbnz	%w1, 1b",
-	/* LSE atomics */
-	"	movn	%w0, #0\n"
-	"	staddl	%w0, %2\n"
-	__nops(2))
-	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
-	:
-	: "memory");
-}
-
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
-	unsigned int tmp, tmp2;
-
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-	"	mov	%w1, #1\n"
-	"1:	ldaxr	%w0, %2\n"
-	"	add	%w0, %w0, #1\n"
-	"	tbnz	%w0, #31, 2f\n"
-	"	stxr	%w1, %w0, %2\n"
-	"	cbnz	%w1, 1b\n"
-	"2:",
-	/* LSE atomics */
-	"	ldr	%w0, %2\n"
-	"	adds	%w1, %w0, #1\n"
-	"	tbnz	%w1, #31, 1f\n"
-	"	casa	%w0, %w1, %2\n"
-	"	sbc	%w1, %w1, %w0\n"
-	__nops(1)
-	"1:")
-	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
-	:
-	: "cc", "memory");
-
-	return !tmp2;
-}
-
-/* read_can_lock - would read_trylock() succeed? */
-#define arch_read_can_lock(x)		((x)->lock < 0x80000000)
+#include <asm/qrwlock.h>
 
 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h
index 55be59a..6b85601 100644
--- a/arch/arm64/include/asm/spinlock_types.h
+++ b/arch/arm64/include/asm/spinlock_types.h
@@ -36,10 +36,6 @@
 
 #define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 , 0 }
 
-typedef struct {
-	volatile unsigned int lock;
-} arch_rwlock_t;
-
-#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
+#include <asm-generic/qrwlock_types.h>
 
 #endif
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 801a16db..7d2a15a 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -23,7 +23,7 @@
 	unsigned long sp;
 	unsigned long pc;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	unsigned int graph;
+	int graph;
 #endif
 };
 
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 9311547..de21caa 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -35,6 +35,7 @@
 
 #include <asm/cpufeature.h>
 #include <asm/kernel-pgtable.h>
+#include <asm/processor.h>
 #include <asm/ptrace.h>
 #include <asm/errno.h>
 #include <asm/memory.h>
@@ -65,10 +66,7 @@
 
 extern int fixup_exception(struct pt_regs *regs);
 
-#define KERNEL_DS	(-1UL)
 #define get_ds()	(KERNEL_DS)
-
-#define USER_DS		TASK_SIZE_64
 #define get_fs()	(current_thread_info()->addr_limit)
 
 static inline void set_fs(mm_segment_t fs)
@@ -76,6 +74,13 @@
 	current_thread_info()->addr_limit = fs;
 
 	/*
+	 * Prevent a mispredicted conditional call to set_fs from forwarding
+	 * the wrong address limit to access_ok under speculation.
+	 */
+	dsb(nsh);
+	isb();
+
+	/*
 	 * Enable/disable UAO so that copy_to_user() etc can access
 	 * kernel memory with the unprivileged instructions.
 	 */
@@ -93,22 +98,32 @@
  * Returns 1 if the range is valid, 0 otherwise.
  *
  * This is equivalent to the following test:
- * (u65)addr + (u65)size <= current->addr_limit
- *
- * This needs 65-bit arithmetic.
+ * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
  */
-#define __range_ok(addr, size)						\
-({									\
-	unsigned long __addr = (unsigned long __force)(addr);		\
-	unsigned long flag, roksum;					\
-	__chk_user_ptr(addr);						\
-	asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls"		\
-		: "=&r" (flag), "=&r" (roksum)				\
-		: "1" (__addr), "Ir" (size),				\
-		  "r" (current_thread_info()->addr_limit)		\
-		: "cc");						\
-	flag;								\
-})
+static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
+{
+	unsigned long limit = current_thread_info()->addr_limit;
+
+	__chk_user_ptr(addr);
+	asm volatile(
+	// A + B <= C + 1 for all A,B,C, in four easy steps:
+	// 1: X = A + B; X' = X % 2^64
+	"	adds	%0, %0, %2\n"
+	// 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
+	"	csel	%1, xzr, %1, hi\n"
+	// 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
+	//    to compensate for the carry flag being set in step 4. For
+	//    X > 2^64, X' merely has to remain nonzero, which it does.
+	"	csinv	%0, %0, xzr, cc\n"
+	// 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
+	//    comes from the carry in being clear. Otherwise, we are
+	//    testing X' - C == 0, subject to the previous adjustments.
+	"	sbcs	xzr, %0, %1\n"
+	"	cset	%0, ls\n"
+	: "+r" (addr), "+r" (limit) : "Ir" (size) : "cc");
+
+	return addr;
+}
 
 /*
  * When dealing with data aborts, watchpoints, or instruction traps we may end
@@ -117,7 +132,7 @@
  */
 #define untagged_addr(addr)		sign_extend64(addr, 55)
 
-#define access_ok(type, addr, size)	__range_ok(addr, size)
+#define access_ok(type, addr, size)	__range_ok((unsigned long)(addr), size)
 #define user_addr_max			get_fs
 
 #define _ASM_EXTABLE(from, to)						\
@@ -236,6 +251,26 @@
 }
 
 /*
+ * Sanitise a uaccess pointer such that it becomes NULL if above the
+ * current addr_limit.
+ */
+#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
+static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
+{
+	void __user *safe_ptr;
+
+	asm volatile(
+	"	bics	xzr, %1, %2\n"
+	"	csel	%0, %1, xzr, eq\n"
+	: "=&r" (safe_ptr)
+	: "r" (ptr), "r" (current_thread_info()->addr_limit)
+	: "cc");
+
+	csdb();
+	return safe_ptr;
+}
+
+/*
  * The "__xxx" versions of the user access functions do not verify the address
  * space - it must have been done previously with a separate "access_ok()"
  * call.
@@ -277,7 +312,7 @@
 			       (err), ARM64_HAS_UAO);			\
 		break;							\
 	case 8:								\
-		__get_user_asm("ldr", "ldtr", "%",  __gu_val, (ptr),	\
+		__get_user_asm("ldr", "ldtr", "%x",  __gu_val, (ptr),	\
 			       (err), ARM64_HAS_UAO);			\
 		break;							\
 	default:							\
@@ -287,29 +322,34 @@
 	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
 } while (0)
 
-#define __get_user(x, ptr)						\
+#define __get_user_check(x, ptr, err)					\
 ({									\
-	int __gu_err = 0;						\
-	__get_user_err((x), (ptr), __gu_err);				\
-	__gu_err;							\
+	__typeof__(*(ptr)) __user *__p = (ptr);				\
+	might_fault();							\
+	if (access_ok(VERIFY_READ, __p, sizeof(*__p))) {		\
+		__p = uaccess_mask_ptr(__p);				\
+		__get_user_err((x), __p, (err));			\
+	} else {							\
+		(x) = 0; (err) = -EFAULT;				\
+	}								\
 })
 
 #define __get_user_error(x, ptr, err)					\
 ({									\
-	__get_user_err((x), (ptr), (err));				\
+	__get_user_check((x), (ptr), (err));				\
 	(void)0;							\
 })
 
+#define __get_user(x, ptr)						\
+({									\
+	int __gu_err = 0;						\
+	__get_user_check((x), (ptr), __gu_err);				\
+	__gu_err;							\
+})
+
 #define __get_user_unaligned __get_user
 
-#define get_user(x, ptr)						\
-({									\
-	__typeof__(*(ptr)) __user *__p = (ptr);				\
-	might_fault();							\
-	access_ok(VERIFY_READ, __p, sizeof(*__p)) ?			\
-		__get_user((x), __p) :					\
-		((x) = 0, -EFAULT);					\
-})
+#define get_user	__get_user
 
 #define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
 	asm volatile(							\
@@ -344,7 +384,7 @@
 			       (err), ARM64_HAS_UAO);			\
 		break;							\
 	case 8:								\
-		__put_user_asm("str", "sttr", "%", __pu_val, (ptr),	\
+		__put_user_asm("str", "sttr", "%x", __pu_val, (ptr),	\
 			       (err), ARM64_HAS_UAO);			\
 		break;							\
 	default:							\
@@ -353,47 +393,51 @@
 	uaccess_disable_not_uao();					\
 } while (0)
 
-#define __put_user(x, ptr)						\
+#define __put_user_check(x, ptr, err)					\
 ({									\
-	int __pu_err = 0;						\
-	__put_user_err((x), (ptr), __pu_err);				\
-	__pu_err;							\
+	__typeof__(*(ptr)) __user *__p = (ptr);				\
+	might_fault();							\
+	if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) {		\
+		__p = uaccess_mask_ptr(__p);				\
+		__put_user_err((x), __p, (err));			\
+	} else	{							\
+		(err) = -EFAULT;					\
+	}								\
 })
 
 #define __put_user_error(x, ptr, err)					\
 ({									\
-	__put_user_err((x), (ptr), (err));				\
+	__put_user_check((x), (ptr), (err));				\
 	(void)0;							\
 })
 
+#define __put_user(x, ptr)						\
+({									\
+	int __pu_err = 0;						\
+	__put_user_check((x), (ptr), __pu_err);				\
+	__pu_err;							\
+})
+
 #define __put_user_unaligned __put_user
 
-#define put_user(x, ptr)						\
-({									\
-	__typeof__(*(ptr)) __user *__p = (ptr);				\
-	might_fault();							\
-	access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ?			\
-		__put_user((x), __p) :					\
-		-EFAULT;						\
-})
+#define put_user	__put_user
 
 extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
 extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
+extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
 
 static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	kasan_check_write(to, n);
 	check_object_size(to, n, false);
-	return __arch_copy_from_user(to, from, n);
+	return __arch_copy_from_user(to, __uaccess_mask_ptr(from), n);
 }
 
 static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	kasan_check_read(from, n);
 	check_object_size(from, n, true);
-	return __arch_copy_to_user(to, from, n);
+	return __arch_copy_to_user(__uaccess_mask_ptr(to), from, n);
 }
 
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
@@ -421,22 +465,25 @@
 	return n;
 }
 
-static inline unsigned long __must_check copy_in_user(void __user *to, const void __user *from, unsigned long n)
+static inline unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
 	if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))
-		n = __copy_in_user(to, from, n);
+		n = __arch_copy_in_user(__uaccess_mask_ptr(to), __uaccess_mask_ptr(from), n);
 	return n;
 }
+#define copy_in_user __copy_in_user
 
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
 
-static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
+extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
+static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
 {
 	if (access_ok(VERIFY_WRITE, to, n))
-		n = __clear_user(to, n);
+		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
 	return n;
 }
+#define clear_user	__clear_user
 
 extern long strncpy_from_user(char *dest, const char __user *src, long count);
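As a plain-C reference for the asm above (models only, not kernel code): the rewritten __range_ok() evaluates addr + size <= addr_limit + 1 without 64-bit overflow, and __uaccess_mask_ptr() yields NULL for any pointer with bits set above the all-ones USER_DS limit, so a mispredicted access_ok() cannot feed a kernel address into the uaccess routines. range_ok_model() and mask_ptr_model() are hypothetical names used only for exposition:

#include <stddef.h>

/* 128-bit model of the overflow-safe bound check performed by __range_ok(). */
static int range_ok_model(unsigned long addr, unsigned long size,
			  unsigned long limit)
{
	unsigned __int128 end = (unsigned __int128)addr + size;

	return end <= (unsigned __int128)limit + 1;
}

/* Model of __uaccess_mask_ptr(): keep ptr only if it fits under the limit mask. */
static void *mask_ptr_model(void *ptr, unsigned long limit)
{
	return ((unsigned long)ptr & ~limit) ? NULL : ptr;
}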
 
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 3051f86..702de7a 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -195,6 +195,12 @@
 #define KVM_REG_ARM_TIMER_CNT		ARM64_SYS_REG(3, 3, 14, 3, 2)
 #define KVM_REG_ARM_TIMER_CVAL		ARM64_SYS_REG(3, 3, 14, 0, 2)
 
+/* KVM-as-firmware specific pseudo-registers */
+#define KVM_REG_ARM_FW			(0x0014 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_FW_REG(r)		(KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
+					 KVM_REG_ARM_FW | ((r) & 0xffff))
+#define KVM_REG_ARM_PSCI_VERSION	KVM_REG_ARM_FW_REG(0)
+
 /* Device Control API: ARM VGIC */
 #define KVM_DEV_ARM_VGIC_GRP_ADDR	0
 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS	1
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index dd918d0..ca1cf2d 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -38,8 +38,8 @@
 	/* user mem (segment) */
 EXPORT_SYMBOL(__arch_copy_from_user);
 EXPORT_SYMBOL(__arch_copy_to_user);
-EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__copy_in_user);
+EXPORT_SYMBOL(__arch_clear_user);
+EXPORT_SYMBOL(__arch_copy_in_user);
 
 	/* physical memory */
 EXPORT_SYMBOL(memstart_addr);
diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S
index dec95bd..746a203 100644
--- a/arch/arm64/kernel/bpi.S
+++ b/arch/arm64/kernel/bpi.S
@@ -17,6 +17,7 @@
  */
 
 #include <linux/linkage.h>
+#include <linux/arm-smccc.h>
 
 .macro ventry target
 	.rept 31
@@ -53,6 +54,7 @@
 	vectors __kvm_hyp_vector
 	.endr
 ENTRY(__bp_harden_hyp_vecs_end)
+
 ENTRY(__psci_hyp_bp_inval_start)
 	sub	sp, sp, #(8 * 18)
 	stp	x16, x17, [sp, #(16 * 0)]
@@ -77,3 +79,23 @@
 	ldp	x0, x1, [sp, #(16 * 8)]
 	add	sp, sp, #(8 * 18)
 ENTRY(__psci_hyp_bp_inval_end)
+
+.macro smccc_workaround_1 inst
+	sub	sp, sp, #(8 * 4)
+	stp	x2, x3, [sp, #(8 * 0)]
+	stp	x0, x1, [sp, #(8 * 2)]
+	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_1
+	\inst	#0
+	ldp	x2, x3, [sp, #(8 * 0)]
+	ldp	x0, x1, [sp, #(8 * 2)]
+	add	sp, sp, #(8 * 4)
+.endm
+
+ENTRY(__smccc_workaround_1_smc_start)
+	smccc_workaround_1	smc
+ENTRY(__smccc_workaround_1_smc_end)
+
+ENTRY(__smccc_workaround_1_hvc_start)
+	smccc_workaround_1	hvc
+ENTRY(__smccc_workaround_1_hvc_end)
+.#endif
diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S
index 65f42d2..f736a6f 100644
--- a/arch/arm64/kernel/cpu-reset.S
+++ b/arch/arm64/kernel/cpu-reset.S
@@ -16,7 +16,7 @@
 #include <asm/virt.h>
 
 .text
-.pushsection    .idmap.text, "ax"
+.pushsection    .idmap.text, "awx"
 
 /*
  * __cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) - Helper for
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 7c8e185..e7908c9 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -54,11 +54,15 @@
 
 #ifdef CONFIG_KVM
 extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[];
+extern char __smccc_workaround_1_smc_start[];
+extern char __smccc_workaround_1_smc_end[];
+extern char __smccc_workaround_1_hvc_start[];
+extern char __smccc_workaround_1_hvc_end[];
 
 static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
 				const char *hyp_vecs_end)
 {
-	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
+	void *dst = __bp_harden_hyp_vecs_start + slot * SZ_2K;
 	int i;
 
 	for (i = 0; i < SZ_2K; i += 0x80)
@@ -98,6 +102,10 @@
 #else
 #define __psci_hyp_bp_inval_start	NULL
 #define __psci_hyp_bp_inval_end		NULL
+#define __smccc_workaround_1_smc_start		NULL
+#define __smccc_workaround_1_smc_end		NULL
+#define __smccc_workaround_1_hvc_start		NULL
+#define __smccc_workaround_1_hvc_end		NULL
 
 static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
 				      const char *hyp_vecs_start,
@@ -124,8 +132,11 @@
 	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
 }
 
+#include <uapi/linux/psci.h>
+#include <linux/arm-smccc.h>
 #include <linux/psci.h>
 
+#ifdef CONFIG_PSCI_BP_HARDENING
 static int enable_psci_bp_hardening(void *data)
 {
 	const struct arm64_cpu_capabilities *entry = data;
@@ -135,6 +146,59 @@
 				       (bp_hardening_cb_t)psci_ops.get_version,
 				       __psci_hyp_bp_inval_start,
 				       __psci_hyp_bp_inval_end);
+	return 0;
+}
+#endif
+
+static void call_smc_arch_workaround_1(void)
+{
+	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+}
+
+static void call_hvc_arch_workaround_1(void)
+{
+	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+}
+
+static int enable_smccc_arch_workaround_1(void *data)
+{
+	const struct arm64_cpu_capabilities *entry = data;
+	bp_hardening_cb_t cb;
+	void *smccc_start, *smccc_end;
+	struct arm_smccc_res res;
+
+	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
+		return 0;
+
+	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
+		return 0;
+
+	switch (psci_ops.conduit) {
+	case PSCI_CONDUIT_HVC:
+		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+		if ((int)res.a0 < 0)
+			return 0;
+		cb = call_hvc_arch_workaround_1;
+		smccc_start = __smccc_workaround_1_hvc_start;
+		smccc_end = __smccc_workaround_1_hvc_end;
+		break;
+
+	case PSCI_CONDUIT_SMC:
+		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+		if ((int)res.a0 < 0)
+			return 0;
+		cb = call_smc_arch_workaround_1;
+		smccc_start = __smccc_workaround_1_smc_start;
+		smccc_end = __smccc_workaround_1_smc_end;
+		break;
+
+	default:
+		return 0;
+	}
+
+	install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
 
 	return 0;
 }
@@ -238,32 +302,50 @@
 	{
 		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
-		.enable = enable_psci_bp_hardening,
+		.enable = enable_smccc_arch_workaround_1,
 	},
 	{
 		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
-		.enable = enable_psci_bp_hardening,
+		.enable = enable_smccc_arch_workaround_1,
 	},
 	{
 		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
-		.enable = enable_psci_bp_hardening,
+		.enable = enable_smccc_arch_workaround_1,
 	},
 	{
 		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
-		.enable = enable_psci_bp_hardening,
+		.enable = enable_smccc_arch_workaround_1,
 	},
 	{
 		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 		MIDR_ALL_VERSIONS(MIDR_KRYO3G),
+#ifdef CONFIG_PSCI_BP_HARDENING
 		.enable = enable_psci_bp_hardening,
+#else
+		.enable = enable_smccc_arch_workaround_1,
+#endif
 	},
 	{
 		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 		MIDR_ALL_VERSIONS(MIDR_KRYO2XX_GOLD),
+#ifdef CONFIG_PSCI_BP_HARDENING
 		.enable = enable_psci_bp_hardening,
+#else
+		.enable = enable_smccc_arch_workaround_1,
+#endif
+	},
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+		.enable = enable_smccc_arch_workaround_1,
+	},
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
+		.enable = enable_smccc_arch_workaround_1,
 	},
 #endif
 	{
@@ -279,15 +361,18 @@
 {
 	const struct arm64_cpu_capabilities *caps = arm64_errata;
 
-	for (; caps->matches; caps++)
-		if (!cpus_have_cap(caps->capability) &&
-			caps->matches(caps, SCOPE_LOCAL_CPU)) {
+	for (; caps->matches; caps++) {
+		if (cpus_have_cap(caps->capability)) {
+			if (caps->enable)
+				caps->enable((void *)caps);
+		} else if (caps->matches(caps, SCOPE_LOCAL_CPU)) {
 			pr_crit("CPU%d: Requires work around for %s, not detected"
 					" at boot time\n",
 				smp_processor_id(),
 				caps->desc ? : "an erratum");
 			cpu_die_early();
 		}
+	}
 }
 
 void update_cpu_errata_workarounds(void)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 5cf4c64..675bf45 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -95,12 +95,13 @@
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
-	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
+	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 24, 0),
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0),
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0),
 	S_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
 	S_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
-	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
 	/* Linux doesn't care about the EL3 */
 	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64PFR0_EL3_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL2_SHIFT, 4, 0),
@@ -753,12 +754,23 @@
 static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 				int __unused)
 {
+	char const *str = "command line option";
 	u64 pfr0 = read_system_reg(SYS_ID_AA64PFR0_EL1);
 
-	/* Forced on command line? */
+	/*
+	 * For reasons that aren't entirely clear, enabling KPTI on Cavium
+	 * ThunderX leads to apparent I-cache corruption of kernel text, which
+	 * ends as well as you might imagine. Don't even try.
+	 */
+	if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
+		str = "ARM64_WORKAROUND_CAVIUM_27456";
+		__kpti_forced = -1;
+	}
+
+	/* Forced? */
 	if (__kpti_forced) {
-		pr_info_once("kernel page table isolation forced %s by command line option\n",
-			     __kpti_forced > 0 ? "ON" : "OFF");
+		pr_info_once("kernel page table isolation forced %s by %s\n",
+			     __kpti_forced > 0 ? "ON" : "OFF", str);
 		return __kpti_forced > 0;
 	}
 
@@ -766,11 +778,42 @@
 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
 		return true;
 
+	/* Don't force KPTI for CPUs that are not vulnerable */
+	switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) {
+	case MIDR_CAVIUM_THUNDERX2:
+	case MIDR_BRCM_VULCAN:
+		return false;
+	}
+
 	/* Defer to CPU feature registers */
 	return !cpuid_feature_extract_unsigned_field(pfr0,
 						     ID_AA64PFR0_CSV3_SHIFT);
 }
 
+static int __nocfi kpti_install_ng_mappings(void *__unused)
+{
+	typedef void (kpti_remap_fn)(int, int, phys_addr_t);
+	extern kpti_remap_fn idmap_kpti_install_ng_mappings;
+	kpti_remap_fn *remap_fn;
+
+	static bool kpti_applied = false;
+	int cpu = smp_processor_id();
+
+	if (kpti_applied)
+		return 0;
+
+	remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
+
+	cpu_install_idmap();
+	remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
+	cpu_uninstall_idmap();
+
+	if (!cpu)
+		kpti_applied = true;
+
+	return 0;
+}
+
 static int __init parse_kpti(char *str)
 {
 	bool enabled;
@@ -874,6 +917,7 @@
 		.capability = ARM64_UNMAP_KERNEL_AT_EL0,
 		.def_scope = SCOPE_SYSTEM,
 		.matches = unmap_kernel_at_el0,
+		.enable = kpti_install_ng_mappings,
 	},
 #endif
 	{},
@@ -969,6 +1013,25 @@
 			cap_set_elf_hwcap(hwcaps);
 }
 
+/*
+ * Check if the current CPU has a given feature capability.
+ * Should be called from non-preemptible context.
+ */
+static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
+			       unsigned int cap)
+{
+	const struct arm64_cpu_capabilities *caps;
+
+	if (WARN_ON(preemptible()))
+		return false;
+
+	for (caps = cap_array; caps->matches; caps++)
+		if (caps->capability == cap &&
+		    caps->matches(caps, SCOPE_LOCAL_CPU))
+			return true;
+	return false;
+}
+
 void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
 			    const char *info)
 {
@@ -1037,8 +1100,9 @@
 }
 
 static void
-verify_local_cpu_features(const struct arm64_cpu_capabilities *caps)
+verify_local_cpu_features(const struct arm64_cpu_capabilities *caps_list)
 {
+	const struct arm64_cpu_capabilities *caps = caps_list;
 	for (; caps->matches; caps++) {
 		if (!cpus_have_cap(caps->capability))
 			continue;
@@ -1046,7 +1110,7 @@
 		 * If the new CPU misses an advertised feature, we cannot proceed
 		 * further, park the cpu.
 		 */
-		if (!caps->matches(caps, SCOPE_LOCAL_CPU)) {
+		if (!__this_cpu_has_cap(caps_list, caps->capability)) {
 			pr_crit("CPU%d: missing feature: %s\n",
 					smp_processor_id(), caps->desc);
 			cpu_die_early();
@@ -1099,22 +1163,12 @@
 	enable_cpu_capabilities(arm64_features);
 }
 
-/*
- * Check if the current CPU has a given feature capability.
- * Should be called from non-preemptible context.
- */
+extern const struct arm64_cpu_capabilities arm64_errata[];
+
 bool this_cpu_has_cap(unsigned int cap)
 {
-	const struct arm64_cpu_capabilities *caps;
-
-	if (WARN_ON(preemptible()))
-		return false;
-
-	for (caps = arm64_features; caps->desc; caps++)
-		if (caps->capability == cap && caps->matches)
-			return caps->matches(caps, SCOPE_LOCAL_CPU);
-
-	return false;
+	return (__this_cpu_has_cap(arm64_features, cap) ||
+		__this_cpu_has_cap(arm64_errata, cap));
 }
 
 void __init setup_cpu_features(void)
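Editor's note: the cpufeature.c hunks above funnel both the feature and the errata tables through one table-driven matcher and hook the KPTI page-table rewrite in via the capability's enable callback. The following user-space sketch shows only the two-table lookup pattern; the structure layout, field names and demo tables are illustrative assumptions, not the kernel's definitions.

	#include <stdbool.h>
	#include <stdio.h>

	struct cpu_capability {
		unsigned int capability;
		bool (*matches)(const struct cpu_capability *cap);
	};

	static bool always_true(const struct cpu_capability *cap)
	{
		(void)cap;		/* scope argument elided in this sketch */
		return true;
	}

	static const struct cpu_capability demo_features[] = {
		{ .capability = 1, .matches = always_true },
		{ .matches = NULL },	/* terminator */
	};

	static const struct cpu_capability demo_errata[] = {
		{ .capability = 7, .matches = always_true },
		{ .matches = NULL },
	};

	/* Walk one caller-supplied table, as __this_cpu_has_cap() does above. */
	static bool table_has_cap(const struct cpu_capability *tbl, unsigned int cap)
	{
		for (; tbl->matches; tbl++)
			if (tbl->capability == cap && tbl->matches(tbl))
				return true;
		return false;
	}

	/* Public query checks both tables, mirroring this_cpu_has_cap(). */
	static bool demo_this_cpu_has_cap(unsigned int cap)
	{
		return table_has_cap(demo_features, cap) ||
		       table_has_cap(demo_errata, cap);
	}

	int main(void)
	{
		printf("cap 7 present: %d\n", demo_this_cpu_has_cap(7));
		printf("cap 9 present: %d\n", demo_this_cpu_has_cap(9));
		return 0;
	}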
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index f8ba35d..7613ed1 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -30,11 +30,13 @@
 #include <asm/irq.h>
 #include <asm/memory.h>
 #include <asm/mmu.h>
+#include <asm/processor.h>
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
 #include <asm/uaccess.h>
 #include <asm/asm-uaccess.h>
 #include <asm/unistd.h>
+#include <asm/kernel-pgtable.h>
 
 /*
  * Context tracking subsystem.  Used to instrument transitions
@@ -125,10 +127,10 @@
 	.else
 	add	x21, sp, #S_FRAME_SIZE
 	get_thread_info tsk
-	/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
+	/* Save the task's original addr_limit and set USER_DS */
 	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
 	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
-	mov	x20, #TASK_SIZE_64
+	mov	x20, #USER_DS
 	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
 	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
 	.endif /* \el == 0 */
@@ -668,8 +670,7 @@
 	 * Instruction abort handling
 	 */
 	mrs	x26, far_el1
-	// enable interrupts before calling the main handler
-	enable_dbg_and_irq
+	msr     daifclr, #(8 | 4 | 1)
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_off
 #endif
@@ -704,8 +705,10 @@
 	 * Stack or PC alignment exception handling
 	 */
 	mrs	x26, far_el1
-	// enable interrupts before calling the main handler
-	enable_dbg_and_irq
+	enable_dbg
+#ifdef CONFIG_TRACE_IRQFLAGS
+	bl	trace_hardirqs_off
+#endif
 	ct_user_exit
 	mov	x0, x26
 	mov	x1, x25
@@ -764,6 +767,11 @@
 #endif
 
 	ct_user_exit
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+	tbz	x22, #55, 1f
+	bl	do_el0_irq_bp_hardening
+1:
+#endif
 	irq_handler
 
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -876,6 +884,7 @@
 	b.ne	__sys_trace
 	cmp     scno, sc_nr                     // check upper syscall limit
 	b.hs	ni_sys
+	mask_nospec64 scno, sc_nr, x19	// enforce bounds for syscall number
 	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
 	blr	x16				// call sys_* routine
 	b	ret_fast_syscall
@@ -953,16 +962,9 @@
 	orr	\tmp, \tmp, #USER_ASID_FLAG
 	msr	ttbr1_el1, \tmp
 	/*
-	 * We avoid running the post_ttbr_update_workaround here because the
-	 * user and kernel ASIDs don't have conflicting mappings, so any
-	 * "blessing" as described in:
-	 *
-	 *   http://lkml.kernel.org/r/56BB848A.6060603@caviumnetworks.com
-	 *
-	 * will not hurt correctness. Whilst this may partially defeat the
-	 * point of using split ASIDs in the first place, it avoids
-	 * the hit of invalidating the entire I-cache on every return to
-	 * userspace.
+	 * We avoid running the post_ttbr_update_workaround here because
+	 * it's only needed by Cavium ThunderX, which requires KPTI to be
+	 * disabled.
 	 */
 	.endm
 
@@ -972,6 +974,11 @@
 	.if	\regsize == 64
 	msr	tpidrro_el0, x30	// Restored in kernel_ventry
 	.endif
+	/*
+	 * Defend against branch aliasing attacks by pushing a dummy
+	 * entry onto the return stack and using a RET instruction to
+	 * enter the full-fat kernel vectors.
+	 */
 	bl	2f
 	b	.
 2:
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index c186586..d479185 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -474,7 +474,7 @@
  * end early head section, begin head code that is also used for
  * hotplug and needs to have the same protections as the text region
  */
-	.section ".idmap.text","ax"
+	.section ".idmap.text","awx"
 
 ENTRY(kimage_vaddr)
 	.quad		_text - TEXT_OFFSET
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index c9a2ab4..f035ff6 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -32,11 +32,16 @@
 
 void *module_alloc(unsigned long size)
 {
+	gfp_t gfp_mask = GFP_KERNEL;
 	void *p;
 
+	/* Silence the initial allocation */
+	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
+		gfp_mask |= __GFP_NOWARN;
+
 	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
 				module_alloc_base + MODULES_VSIZE,
-				GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
+				gfp_mask, PAGE_KERNEL_EXEC, 0,
 				NUMA_NO_NODE, __builtin_return_address(0));
 
 	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index 409abc4..1b3eb67 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -175,8 +175,10 @@
 		return NULL;
 
 	root_ops = kzalloc_node(sizeof(*root_ops), GFP_KERNEL, node);
-	if (!root_ops)
+	if (!root_ops) {
+		kfree(ri);
 		return NULL;
+	}
 
 	ri->cfg = pci_acpi_setup_ecam_mapping(root);
 	if (!ri->cfg) {
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 52710f1..e0b731e 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -869,15 +869,23 @@
 {
 	unsigned long config_base = 0;
 
-	if (is_kernel_in_hyp_mode() &&
-	    attr->exclude_kernel != attr->exclude_hv)
-		return -EINVAL;
+	/*
+	 * If we're running in hyp mode, then we *are* the hypervisor.
+	 * Therefore we ignore exclude_hv in this configuration, since
+	 * there's no hypervisor to sample anyway. This is consistent
+	 * with other architectures (x86 and Power).
+	 */
+	if (is_kernel_in_hyp_mode()) {
+		if (!attr->exclude_kernel)
+			config_base |= ARMV8_PMU_INCLUDE_EL2;
+	} else {
+		if (attr->exclude_kernel)
+			config_base |= ARMV8_PMU_EXCLUDE_EL1;
+		if (!attr->exclude_hv)
+			config_base |= ARMV8_PMU_INCLUDE_EL2;
+	}
 	if (attr->exclude_user)
 		config_base |= ARMV8_PMU_EXCLUDE_EL0;
-	if (!is_kernel_in_hyp_mode() && attr->exclude_kernel)
-		config_base |= ARMV8_PMU_EXCLUDE_EL1;
-	if (!attr->exclude_hv)
-		config_base |= ARMV8_PMU_INCLUDE_EL2;
 
 	/*
 	 * Install the filter into config_base as this is used to
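Editor's note: the perf_event.c hunk changes the event filter so that, when the kernel itself runs at EL2 (VHE), exclude_hv is ignored and exclude_kernel alone decides whether EL2 is counted; otherwise EL1 and EL2 are controlled separately as before. A user-space sketch of just that decision follows; the bit values are placeholders standing in for the ARMV8_PMU_* macros.

	#include <stdbool.h>
	#include <stdio.h>

	#define DEMO_EXCLUDE_EL1	(1u << 31)	/* placeholder bit values */
	#define DEMO_EXCLUDE_EL0	(1u << 30)
	#define DEMO_INCLUDE_EL2	(1u << 27)

	static unsigned int filter_bits(bool in_hyp, bool excl_user,
					bool excl_kernel, bool excl_hv)
	{
		unsigned int config = 0;

		if (in_hyp) {
			/* We *are* the hypervisor: excl_hv is meaningless. */
			if (!excl_kernel)
				config |= DEMO_INCLUDE_EL2;
		} else {
			if (excl_kernel)
				config |= DEMO_EXCLUDE_EL1;
			if (!excl_hv)
				config |= DEMO_INCLUDE_EL2;
		}
		if (excl_user)
			config |= DEMO_EXCLUDE_EL0;

		return config;
	}

	int main(void)
	{
		printf("VHE, kernel excluded:  %#x\n", filter_bits(true, false, true, false));
		printf("non-VHE, hv excluded:  %#x\n", filter_bits(false, false, false, true));
		return 0;
	}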
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index ee0ea17..08ca9dc 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -213,7 +213,7 @@
 		for (j = 0; j < 8; j++) {
 			u32	data;
 			if (probe_kernel_address(p, data)) {
-				printk(" ********");
+				pr_cont(" ********");
 			} else {
 				pr_cont(" %08x", data);
 			}
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index df67652..5a4fbcc 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -95,7 +95,7 @@
 	ret
 ENDPROC(__cpu_suspend_enter)
 
-	.pushsection ".idmap.text", "ax"
+	.pushsection ".idmap.text", "awx"
 ENTRY(cpu_resume)
 	bl	el2_setup		// if in EL2 drop to EL1 cleanly
 	bl	__cpu_setup
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 623dd48..69d3266 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -602,16 +602,6 @@
 void (*__smp_cross_call)(const struct cpumask *, unsigned int);
 DEFINE_PER_CPU(bool, pending_ipi);
 
-void smp_cross_call_common(const struct cpumask *cpumask, unsigned int func)
-{
-	unsigned int cpu;
-
-	for_each_cpu(cpu, cpumask)
-		per_cpu(pending_ipi, cpu) = true;
-
-	__smp_cross_call(cpumask, func);
-}
-
 /*
  * Enumerate the possible CPU set from the device tree and build the
  * cpu logical map array containing MPIDR values related to logical
@@ -779,6 +769,17 @@
 	__smp_cross_call(target, ipinr);
 }
 
+static void smp_cross_call_common(const struct cpumask *cpumask,
+				  unsigned int func)
+{
+	unsigned int cpu;
+
+	for_each_cpu(cpu, cpumask)
+		per_cpu(pending_ipi, cpu) = true;
+
+	smp_cross_call(cpumask, func);
+}
+
 void show_ipi_list(struct seq_file *p, int prec)
 {
 	unsigned int cpu, i;
@@ -825,7 +826,8 @@
 void arch_irq_work_raise(void)
 {
 	if (__smp_cross_call)
-		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
+		smp_cross_call_common(cpumask_of(smp_processor_id()),
+				      IPI_IRQ_WORK);
 }
 #endif
 
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index f7ce3d2..4fa6d84 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -73,6 +73,11 @@
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	if (tsk->ret_stack &&
 			(frame->pc == (unsigned long)return_to_handler)) {
+		if (WARN_ON_ONCE(frame->graph == -1))
+			return -EINVAL;
+		if (frame->graph < -1)
+			frame->graph += FTRACE_NOTRACE_DEPTH;
+
 		/*
 		 * This is a case where function graph tracer has
 		 * modified a return address (LR) in a stack frame
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index 5977969..5d9076e 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -53,7 +53,7 @@
 	frame.sp = regs->sp;
 	frame.pc = regs->pc;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	frame.graph = -1; /* no task info */
+	frame.graph = current->curr_ret_stack;
 #endif
 	do {
 		int ret = unwind_frame(NULL, &frame);
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index c39872a..2bd7426 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -66,7 +66,16 @@
 	.macro	get_clock_shifted_nsec res, cycle_last, mult
 	/* Read the virtual counter. */
 	isb
+#if IS_ENABLED(CONFIG_MSM_TIMER_LEAP)
+#define LEAST_32BITS	0x00000000FFFFFFFF
+9999:
+	mrs x_tmp, cntvct_el0
+	and \res, x_tmp, #LEAST_32BITS
+	eor \res, \res, #LEAST_32BITS
+	cbz \res, 9999b
+#else
 	mrs	x_tmp, cntvct_el0
+#endif
 	/* Calculate cycle delta and convert to ns. */
 	sub	\res, x_tmp, \cycle_last
 	/* We can only guarantee 56 bits of precision. */
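Editor's note: the CONFIG_MSM_TIMER_LEAP branch above re-reads CNTVCT_EL0 whenever the low 32 bits come back as all ones, retrying until a clean value is seen; only then is the cycle delta computed. A C rendering of that retry loop, with a simulated counter read standing in for the mrs instruction:

	#include <stdint.h>
	#include <stdio.h>

	/* Simulated counter reads; the vDSO uses "mrs x_tmp, cntvct_el0". */
	static const uint64_t samples[] = { 0x2ffffffffULL, 0x300000000ULL };
	static unsigned int sample_idx;

	static uint64_t read_cntvct(void)
	{
		return samples[sample_idx++];
	}

	static uint64_t read_cntvct_no_leap(void)
	{
		uint64_t val;

		/* Retry while the low 32 bits read back as all ones. */
		do {
			val = read_cntvct();
		} while ((val & 0xffffffffULL) == 0xffffffffULL);

		return val;
	}

	int main(void)
	{
		printf("counter: %#llx\n", (unsigned long long)read_cntvct_no_leap());
		return 0;
	}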
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 3f9e157..d3e0a2f 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -25,6 +25,7 @@
 #include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
+#include <kvm/arm_psci.h>
 #include <asm/cputype.h>
 #include <asm/uaccess.h>
 #include <asm/kvm.h>
@@ -205,7 +206,7 @@
 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
 {
 	return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
-                + NUM_TIMER_REGS;
+		+ kvm_arm_get_fw_num_regs(vcpu)	+ NUM_TIMER_REGS;
 }
 
 /**
@@ -225,6 +226,11 @@
 		uindices++;
 	}
 
+	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
+	if (ret)
+		return ret;
+	uindices += kvm_arm_get_fw_num_regs(vcpu);
+
 	ret = copy_timer_indices(vcpu, uindices);
 	if (ret)
 		return ret;
@@ -243,6 +249,9 @@
 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
 		return get_core_reg(vcpu, reg);
 
+	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
+		return kvm_arm_get_fw_reg(vcpu, reg);
+
 	if (is_timer_reg(reg->id))
 		return get_timer_reg(vcpu, reg);
 
@@ -259,6 +268,9 @@
 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
 		return set_core_reg(vcpu, reg);
 
+	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
+		return kvm_arm_set_fw_reg(vcpu, reg);
+
 	if (is_timer_reg(reg->id))
 		return set_timer_reg(vcpu, reg);
 
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 2e6e9e9..efe43c5 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -22,12 +22,15 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 
+#include <kvm/arm_psci.h>
+
 #include <asm/esr.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_coproc.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_mmu.h>
-#include <asm/kvm_psci.h>
+#include <asm/debug-monitors.h>
+#include <asm/traps.h>
 
 #define CREATE_TRACE_POINTS
 #include "trace.h"
@@ -42,7 +45,7 @@
 			    kvm_vcpu_hvc_get_imm(vcpu));
 	vcpu->stat.hvc_exit_stat++;
 
-	ret = kvm_psci_call(vcpu);
+	ret = kvm_hvc_call_handler(vcpu);
 	if (ret < 0) {
 		vcpu_set_reg(vcpu, 0, ~0UL);
 		return 1;
@@ -53,7 +56,16 @@
 
 static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
+	/*
+	 * "If an SMC instruction executed at Non-secure EL1 is
+	 * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
+	 * Trap exception, not a Secure Monitor Call exception [...]"
+	 *
+	 * We need to advance the PC after the trap, as it would
+	 * otherwise return to the same address...
+	 */
 	vcpu_set_reg(vcpu, 0, ~0UL);
+	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 	return 1;
 }
 
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 4e92399..4e9d50c 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -15,6 +15,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/arm-smccc.h>
 #include <linux/linkage.h>
 
 #include <asm/alternative.h>
@@ -79,10 +80,11 @@
 	lsr	x0, x1, #ESR_ELx_EC_SHIFT
 
 	cmp	x0, #ESR_ELx_EC_HVC64
+	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
 	b.ne	el1_trap
 
-	mrs	x1, vttbr_el2		// If vttbr is valid, the 64bit guest
-	cbnz	x1, el1_trap		// called HVC
+	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
+	cbnz	x1, el1_hvc_guest	// called HVC
 
 	/* Here, we're pretty sure the host called HVC. */
 	ldp	x0, x1, [sp], #16
@@ -101,6 +103,20 @@
 
 2:	eret
 
+el1_hvc_guest:
+	/*
+	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
+	 * The workaround has already been applied on the host,
+	 * so let's quickly get back to the guest. We don't bother
+	 * restoring x1, as it can be clobbered anyway.
+	 */
+	ldr	x1, [sp]				// Guest's x0
+	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
+	cbnz	w1, el1_trap
+	mov	x0, x1
+	add	sp, sp, #16
+	eret
+
 el1_trap:
 	/*
 	 * x0: ESR_EC
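Editor's note: el1_hvc_guest checks whether the guest's x0 carries the ARM_SMCCC_ARCH_WORKAROUND_1 function ID and, if so, erets straight back without a full world switch. A C-level rendering of that single decision; the function ID value is assumed here for illustration.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* SMCCC ARCH_WORKAROUND_1 function ID; value assumed for this sketch. */
	#define ARM_SMCCC_ARCH_WORKAROUND_1	0x80008000u

	/*
	 * The host already applied the branch-predictor workaround on entry,
	 * so a matching HVC has nothing left to do: take the fast path back.
	 */
	static bool hvc_takes_fast_path(uint32_t guest_x0)
	{
		return guest_x0 == ARM_SMCCC_ARCH_WORKAROUND_1;
	}

	int main(void)
	{
		printf("workaround HVC -> fast path: %d\n",
		       hvc_takes_fast_path(ARM_SMCCC_ARCH_WORKAROUND_1));
		printf("other HVC      -> fast path: %d\n",
		       hvc_takes_fast_path(0x84000000u));
		return 0;
	}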
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 3eab6ac..849ee8a 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -19,6 +19,8 @@
 #include <linux/jump_label.h>
 #include <uapi/linux/psci.h>
 
+#include <kvm/arm_psci.h>
+
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_hyp.h>
@@ -311,14 +313,16 @@
 
 	if (exit_code == ARM_EXCEPTION_TRAP &&
 	    (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC64 ||
-	     kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC32) &&
-	    vcpu_get_reg(vcpu, 0) == PSCI_0_2_FN_PSCI_VERSION) {
-		u64 val = PSCI_RET_NOT_SUPPORTED;
-		if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
-			val = 2;
+	     kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC32)) {
+		u32 val = vcpu_get_reg(vcpu, 0);
 
-		vcpu_set_reg(vcpu, 0, val);
-		goto again;
+		if (val == PSCI_0_2_FN_PSCI_VERSION) {
+			val = kvm_psci_version(vcpu, kern_hyp_va(vcpu->kvm));
+			if (unlikely(val == KVM_ARM_PSCI_0_1))
+				val = PSCI_RET_NOT_SUPPORTED;
+			vcpu_set_reg(vcpu, 0, val);
+			goto again;
+		}
 	}
 
 	if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
@@ -417,6 +421,7 @@
 
 		vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
 		host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+		__timer_save_state(vcpu);
 		__deactivate_traps(vcpu);
 		__deactivate_vm(vcpu);
 		__sysreg_restore_host_state(host_ctxt);
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index 07c7ad9..b581e16 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -21,7 +21,7 @@
 
 	.text
 
-/* Prototype: int __clear_user(void *addr, size_t sz)
+/* Prototype: int __arch_clear_user(void *addr, size_t sz)
  * Purpose  : clear some user memory
  * Params   : addr - user memory address to clear
  *          : sz   - number of bytes to clear
@@ -29,7 +29,7 @@
  *
  * Alignment fixed up by hardware.
  */
-ENTRY(__clear_user)
+ENTRY(__arch_clear_user)
 	uaccess_enable_not_uao x2, x3, x4
 	mov	x2, x1			// save the size for fixup return
 	subs	x1, x1, #8
@@ -52,7 +52,7 @@
 5:	mov	x0, #0
 	uaccess_disable_not_uao x2, x3
 	ret
-ENDPROC(__clear_user)
+ENDPROC(__arch_clear_user)
 
 	.section .fixup,"ax"
 	.align	2
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index e8bfaf1..800779e 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -64,14 +64,14 @@
 	.endm
 
 end	.req	x5
-ENTRY(__copy_in_user)
+ENTRY(__arch_copy_in_user)
 	uaccess_enable_not_uao x3, x4, x5
 	add	end, x0, x2
 #include "copy_template.S"
 	uaccess_disable_not_uao x3, x4
 	mov	x0, #0
 	ret
-ENDPROC(__copy_in_user)
+ENDPROC(__arch_copy_in_user)
 
 	.section .fixup,"ax"
 	.align	2
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 2b8950e..c1d02d1 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -253,14 +253,12 @@
 #define VM_FAULT_BADMAP		0x010000
 #define VM_FAULT_BADACCESS	0x020000
 
-static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
+static int __do_page_fault(struct vm_area_struct *vma, unsigned long addr,
 			   unsigned int mm_flags, unsigned long vm_flags,
 			   struct task_struct *tsk)
 {
-	struct vm_area_struct *vma;
 	int fault;
 
-	vma = find_vma(mm, addr);
 	fault = VM_FAULT_BADMAP;
 	if (unlikely(!vma))
 		goto out;
@@ -318,6 +316,7 @@
 	int fault, sig, code;
 	unsigned long vm_flags = VM_READ | VM_WRITE;
 	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+	struct vm_area_struct *vma = NULL;
 
 	if (notify_page_fault(regs, esr))
 		return 0;
@@ -343,7 +342,7 @@
 		mm_flags |= FAULT_FLAG_WRITE;
 	}
 
-	if (addr < USER_DS && is_permission_fault(esr, regs)) {
+	if (addr < TASK_SIZE && is_permission_fault(esr, regs)) {
 		/* regs->orig_addr_limit may be 0 if we entered from EL0 */
 		if (regs->orig_addr_limit == KERNEL_DS)
 			die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
@@ -356,6 +355,14 @@
 	}
 
 	/*
+	 * let's try a speculative page fault without grabbing the
+	 * mmap_sem.
+	 */
+	fault = handle_speculative_fault(mm, addr, mm_flags, &vma);
+	if (fault != VM_FAULT_RETRY)
+		goto done;
+
+	/*
 	 * As per x86, we may deadlock here. However, since the kernel only
 	 * validly references user space from well defined areas of the code,
 	 * we can bug out early if this is from code which shouldn't.
@@ -377,19 +384,44 @@
 #endif
 	}
 
-	fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);
+	if (!vma || !can_reuse_spf_vma(vma, addr))
+		vma = find_vma(mm, addr);
 
-	/*
-	 * If we need to retry but a fatal signal is pending, handle the
-	 * signal first. We do not need to release the mmap_sem because it
-	 * would already be released in __lock_page_or_retry in mm/filemap.c.
-	 */
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
-		if (!user_mode(regs))
-			goto no_context;
-		return 0;
+	fault = __do_page_fault(vma, addr, mm_flags, vm_flags, tsk);
+	if (fault & VM_FAULT_RETRY) {
+		/*
+		 * If we need to retry but a fatal signal is pending, handle the
+		 * signal first. We do not need to release the mmap_sem because
+		 * it would already be released in __lock_page_or_retry in
+		 * mm/filemap.c.
+		 */
+
+		if (fatal_signal_pending(current)) {
+			if (!user_mode(regs))
+				goto no_context;
+			return 0;
+		}
+
+		/*
+		 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
+		 * starvation.
+		 */
+		if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
+			mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			mm_flags |= FAULT_FLAG_TRIED;
+
+			/*
+			 * Do not try to reuse this vma and fetch it
+			 * again since we will release the mmap_sem.
+			 */
+			vma = NULL;
+			goto retry;
+		}
 	}
 
+	up_read(&mm->mmap_sem);
+done:
+
 	/*
 	 * Major/minor page fault accounting is only done on the initial
 	 * attempt. If we go through a retry, it is extremely likely that the
@@ -407,19 +439,8 @@
 			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
 				      addr);
 		}
-		if (fault & VM_FAULT_RETRY) {
-			/*
-			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
-			 * starvation.
-			 */
-			mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
-			mm_flags |= FAULT_FLAG_TRIED;
-			goto retry;
-		}
 	}
 
-	up_read(&mm->mmap_sem);
-
 	/*
 	 * Handle the "normal" case first - VM_FAULT_MAJOR
 	 */
@@ -618,6 +639,12 @@
 	arm64_notify_die("", regs, &info, esr);
 }
 
+asmlinkage void __exception do_el0_irq_bp_hardening(void)
+{
+	/* PC has already been checked in entry.S */
+	arm64_apply_bp_hardening();
+}
+
 asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
 						   unsigned int esr,
 						   struct pt_regs *regs)
@@ -644,6 +671,12 @@
 	struct siginfo info;
 	struct task_struct *tsk = current;
 
+	if (user_mode(regs)) {
+		if (instruction_pointer(regs) > TASK_SIZE)
+			arm64_apply_bp_hardening();
+		local_irq_enable();
+	}
+
 	if (show_unhandled_signals && unhandled_signal(tsk, SIGBUS))
 		pr_info_ratelimited("%s[%d]: %s exception: pc=%p sp=%p\n",
 				    tsk->comm, task_pid_nr(tsk),
@@ -703,6 +736,9 @@
 	if (interrupts_enabled(regs))
 		trace_hardirqs_off();
 
+	if (user_mode(regs) && instruction_pointer(regs) > TASK_SIZE)
+		arm64_apply_bp_hardening();
+
 	if (!inf->fn(addr, esr, regs)) {
 		rv = 1;
 	} else {
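Editor's note: the restructured do_page_fault() above first tries handle_speculative_fault() without taking mmap_sem and only falls back to the locked vma walk when that returns VM_FAULT_RETRY; on a locked-path retry it clears FAULT_FLAG_ALLOW_RETRY and drops the cached vma before looping. A condensed user-space model of that control flow follows; all functions and flag values are simplified stand-ins, not the kernel's.

	#include <stdbool.h>
	#include <stdio.h>

	#define VM_FAULT_RETRY		0x01
	#define FAULT_FLAG_ALLOW_RETRY	0x02
	#define FAULT_FLAG_TRIED	0x04

	static int try_speculative(void)	{ return VM_FAULT_RETRY; }	/* miss */
	static int locked_fault(bool retried)	{ return retried ? 0 : VM_FAULT_RETRY; }

	static int demo_do_page_fault(void)
	{
		unsigned int flags = FAULT_FLAG_ALLOW_RETRY;
		int fault;

		fault = try_speculative();
		if (!(fault & VM_FAULT_RETRY))
			return fault;		/* handled without mmap_sem */

	retry:
		fault = locked_fault(flags & FAULT_FLAG_TRIED);
		if ((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY)) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			goto retry;		/* vma would be looked up again */
		}
		return fault;
	}

	int main(void)
	{
		printf("fault result: %d\n", demo_do_page_fault());
		return 0;
	}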
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 01c1717..caf75ab 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -18,6 +18,7 @@
 
 #include <linux/elf.h>
 #include <linux/fs.h>
+#include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
 #include <linux/export.h>
@@ -102,12 +103,18 @@
  */
 int valid_phys_addr_range(phys_addr_t addr, size_t size)
 {
-	if (addr < PHYS_OFFSET)
-		return 0;
-	if (addr + size > __pa(high_memory - 1) + 1)
-		return 0;
-
-	return 1;
+	/*
+	 * Check whether addr is covered by a memory region without the
+	 * MEMBLOCK_NOMAP attribute, and whether that region covers the
+	 * entire range. In theory, this could lead to false negatives
+	 * if the range is covered by distinct but adjacent memory regions
+	 * that only differ in other attributes. However, few of such
+	 * attributes have been defined, and it is debatable whether it
+	 * follows that /dev/mem read() calls should be able to traverse
+	 * such boundaries.
+	 */
+	return memblock_is_region_memory(addr, size) &&
+	       memblock_is_map_memory(addr);
 }
 
 /*
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index e7bf3c3..6e989eb 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -466,7 +466,7 @@
 {
 	extern char __entry_tramp_text_start[];
 
-	pgprot_t prot = PAGE_KERNEL_EXEC;
+	pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
 	phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);
 
 	/* The trampoline is always mapped and can therefore be global */
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 2e69a14..3a9af60 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -132,7 +132,7 @@
  *
  * x0: Address of context pointer
  */
-	.pushsection ".idmap.text", "ax"
+	.pushsection ".idmap.text", "awx"
 ENTRY(cpu_do_resume)
 	ldp	x2, x3, [x0]
 	ldp	x4, x5, [x0, #16]
@@ -197,7 +197,17 @@
 	b	post_ttbr_update_workaround	// Back to C code...
 ENDPROC(cpu_do_switch_mm)
 
-	.pushsection ".idmap.text", "ax"
+	.pushsection ".idmap.text", "awx"
+
+.macro	__idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
+	adrp	\tmp1, empty_zero_page
+	msr	ttbr1_el1, \tmp1
+	isb
+	tlbi	vmalle1
+	dsb	nsh
+	isb
+.endm
+
 /*
  * void idmap_cpu_replace_ttbr1(phys_addr_t new_pgd)
  *
@@ -208,13 +218,7 @@
 	mrs	x2, daif
 	msr	daifset, #0xf
 
-	adrp	x1, empty_zero_page
-	msr	ttbr1_el1, x1
-	isb
-
-	tlbi	vmalle1
-	dsb	nsh
-	isb
+	__idmap_cpu_set_reserved_ttbr1 x1, x3
 
 	msr	ttbr1_el1, x0
 	isb
@@ -225,13 +229,196 @@
 ENDPROC(idmap_cpu_replace_ttbr1)
 	.popsection
 
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+	.pushsection ".idmap.text", "awx"
+
+	.macro	__idmap_kpti_get_pgtable_ent, type
+	dc	cvac, cur_\()\type\()p		// Ensure any existing dirty
+	dmb	sy				// lines are written back before
+	ldr	\type, [cur_\()\type\()p]	// loading the entry
+	tbz	\type, #0, next_\()\type	// Skip invalid entries
+	.endm
+
+	.macro __idmap_kpti_put_pgtable_ent_ng, type
+	orr	\type, \type, #PTE_NG		// Same bit for blocks and pages
+	str	\type, [cur_\()\type\()p]	// Update the entry and ensure it
+	dc	civac, cur_\()\type\()p		// is visible to all CPUs.
+	.endm
+
+/*
+ * void __kpti_install_ng_mappings(int cpu, int num_cpus, phys_addr_t swapper)
+ *
+ * Called exactly once from stop_machine context by each CPU found during boot.
+ */
+__idmap_kpti_flag:
+	.long	1
+ENTRY(idmap_kpti_install_ng_mappings)
+	cpu		.req	w0
+	num_cpus	.req	w1
+	swapper_pa	.req	x2
+	swapper_ttb	.req	x3
+	flag_ptr	.req	x4
+	cur_pgdp	.req	x5
+	end_pgdp	.req	x6
+	pgd		.req	x7
+	cur_pudp	.req	x8
+	end_pudp	.req	x9
+	pud		.req	x10
+	cur_pmdp	.req	x11
+	end_pmdp	.req	x12
+	pmd		.req	x13
+	cur_ptep	.req	x14
+	end_ptep	.req	x15
+	pte		.req	x16
+
+	mrs	swapper_ttb, ttbr1_el1
+	adr	flag_ptr, __idmap_kpti_flag
+
+	cbnz	cpu, __idmap_kpti_secondary
+
+	/* We're the boot CPU. Wait for the others to catch up */
+	sevl
+1:	wfe
+	ldaxr	w18, [flag_ptr]
+	eor	w18, w18, num_cpus
+	cbnz	w18, 1b
+
+	/* We need to walk swapper, so turn off the MMU. */
+	mrs	x18, sctlr_el1
+	bic	x18, x18, #SCTLR_ELx_M
+	msr	sctlr_el1, x18
+	isb
+
+	/* Everybody is enjoying the idmap, so we can rewrite swapper. */
+	/* PGD */
+	mov	cur_pgdp, swapper_pa
+	add	end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8)
+do_pgd:	__idmap_kpti_get_pgtable_ent	pgd
+	tbnz	pgd, #1, walk_puds
+	__idmap_kpti_put_pgtable_ent_ng	pgd
+next_pgd:
+	add	cur_pgdp, cur_pgdp, #8
+	cmp	cur_pgdp, end_pgdp
+	b.ne	do_pgd
+
+	/* Publish the updated tables and nuke all the TLBs */
+	dsb	sy
+	tlbi	vmalle1is
+	dsb	ish
+	isb
+
+	/* We're done: fire up the MMU again */
+	mrs	x18, sctlr_el1
+	orr	x18, x18, #SCTLR_ELx_M
+	msr	sctlr_el1, x18
+	isb
+
+	/* Set the flag to zero to indicate that we're all done */
+	str	wzr, [flag_ptr]
+	ret
+
+	/* PUD */
+walk_puds:
+	.if CONFIG_PGTABLE_LEVELS > 3
+	pte_to_phys	cur_pudp, pgd
+	add	end_pudp, cur_pudp, #(PTRS_PER_PUD * 8)
+do_pud:	__idmap_kpti_get_pgtable_ent	pud
+	tbnz	pud, #1, walk_pmds
+	__idmap_kpti_put_pgtable_ent_ng	pud
+next_pud:
+	add	cur_pudp, cur_pudp, 8
+	cmp	cur_pudp, end_pudp
+	b.ne	do_pud
+	b	next_pgd
+	.else /* CONFIG_PGTABLE_LEVELS <= 3 */
+	mov	pud, pgd
+	b	walk_pmds
+next_pud:
+	b	next_pgd
+	.endif
+
+	/* PMD */
+walk_pmds:
+	.if CONFIG_PGTABLE_LEVELS > 2
+	pte_to_phys	cur_pmdp, pud
+	add	end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8)
+do_pmd:	__idmap_kpti_get_pgtable_ent	pmd
+	tbnz	pmd, #1, walk_ptes
+	__idmap_kpti_put_pgtable_ent_ng	pmd
+next_pmd:
+	add	cur_pmdp, cur_pmdp, #8
+	cmp	cur_pmdp, end_pmdp
+	b.ne	do_pmd
+	b	next_pud
+	.else /* CONFIG_PGTABLE_LEVELS <= 2 */
+	mov	pmd, pud
+	b	walk_ptes
+next_pmd:
+	b	next_pud
+	.endif
+
+	/* PTE */
+walk_ptes:
+	pte_to_phys	cur_ptep, pmd
+	add	end_ptep, cur_ptep, #(PTRS_PER_PTE * 8)
+do_pte:	__idmap_kpti_get_pgtable_ent	pte
+	__idmap_kpti_put_pgtable_ent_ng	pte
+next_pte:
+	add	cur_ptep, cur_ptep, #8
+	cmp	cur_ptep, end_ptep
+	b.ne	do_pte
+	b	next_pmd
+
+	/* Secondary CPUs end up here */
+__idmap_kpti_secondary:
+	/* Uninstall swapper before surgery begins */
+	__idmap_cpu_set_reserved_ttbr1 x18, x17
+
+	/* Increment the flag to let the boot CPU know we're ready */
+1:	ldxr	w18, [flag_ptr]
+	add	w18, w18, #1
+	stxr	w17, w18, [flag_ptr]
+	cbnz	w17, 1b
+
+	/* Wait for the boot CPU to finish messing around with swapper */
+	sevl
+1:	wfe
+	ldxr	w18, [flag_ptr]
+	cbnz	w18, 1b
+
+	/* All done, act like nothing happened */
+	msr	ttbr1_el1, swapper_ttb
+	isb
+	ret
+
+	.unreq	cpu
+	.unreq	num_cpus
+	.unreq	swapper_pa
+	.unreq	swapper_ttb
+	.unreq	flag_ptr
+	.unreq	cur_pgdp
+	.unreq	end_pgdp
+	.unreq	pgd
+	.unreq	cur_pudp
+	.unreq	end_pudp
+	.unreq	pud
+	.unreq	cur_pmdp
+	.unreq	end_pmdp
+	.unreq	pmd
+	.unreq	cur_ptep
+	.unreq	end_ptep
+	.unreq	pte
+ENDPROC(idmap_kpti_install_ng_mappings)
+	.popsection
+#endif
+
 /*
  *	__cpu_setup
  *
  *	Initialise the processor for turning the MMU on.  Return in x0 the
  *	value of the SCTLR_EL1 register.
  */
-	.pushsection ".idmap.text", "ax"
+	.pushsection ".idmap.text", "awx"
 ENTRY(__cpu_setup)
 	tlbi	vmalle1				// Invalidate local TLB
 	dsb	nsh
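Editor's note: idmap_kpti_install_ng_mappings above walks swapper's page tables with the MMU off and ORs the nG (not-global) bit into every valid entry, with the CPUs rendezvousing on a counter flag. A user-space sketch of the per-level rewrite; the bit positions and the single flat level are simplifying assumptions.

	#include <stdint.h>
	#include <stdio.h>

	#define DEMO_PTE_VALID	(1ull << 0)
	#define DEMO_PTE_NG	(1ull << 11)	/* assumed nG bit position */
	#define DEMO_PTRS	4

	/* OR the not-global bit into every valid entry of one table level. */
	static void set_ng(uint64_t *table, unsigned int nents)
	{
		for (unsigned int i = 0; i < nents; i++) {
			if (!(table[i] & DEMO_PTE_VALID))
				continue;	/* skip invalid entries */
			table[i] |= DEMO_PTE_NG;
		}
	}

	int main(void)
	{
		uint64_t ptes[DEMO_PTRS] = {
			0x1000 | DEMO_PTE_VALID, 0, 0x2000 | DEMO_PTE_VALID, 0
		};

		set_ng(ptes, DEMO_PTRS);
		for (int i = 0; i < DEMO_PTRS; i++)
			printf("pte[%d] = %#llx\n", i, (unsigned long long)ptes[i]);
		return 0;
	}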
diff --git a/arch/frv/include/asm/futex.h b/arch/frv/include/asm/futex.h
index 4bea27f..2702bd8 100644
--- a/arch/frv/include/asm/futex.h
+++ b/arch/frv/include/asm/futex.h
@@ -7,7 +7,8 @@
 #include <asm/errno.h>
 #include <asm/uaccess.h>
 
-extern int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr);
+extern int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+		u32 __user *uaddr);
 
 static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
diff --git a/arch/frv/include/asm/timex.h b/arch/frv/include/asm/timex.h
index a89bdde..139093f 100644
--- a/arch/frv/include/asm/timex.h
+++ b/arch/frv/include/asm/timex.h
@@ -16,5 +16,11 @@
 #define vxtime_lock()		do {} while (0)
 #define vxtime_unlock()		do {} while (0)
 
+/* This attribute is used in include/linux/jiffies.h alongside
+ * __cacheline_aligned_in_smp. It is assumed that __cacheline_aligned_in_smp
+ * for frv does not contain another section specification.
+ */
+#define __jiffy_arch_data	__attribute__((__section__(".data")))
+
 #endif
 
diff --git a/arch/frv/kernel/futex.c b/arch/frv/kernel/futex.c
index d155ca9..37f7b2b 100644
--- a/arch/frv/kernel/futex.c
+++ b/arch/frv/kernel/futex.c
@@ -186,20 +186,10 @@
 /*
  * do the futex operations
  */
-int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, ret;
 
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
-		return -EFAULT;
-
 	pagefault_disable();
 
 	switch (op) {
@@ -225,18 +215,9 @@
 
 	pagefault_enable();
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
-		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
-		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
-		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
-		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
-		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
-		default: ret = -ENOSYS; break;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
 
 	return ret;
 
-} /* end futex_atomic_op_inuser() */
+} /* end arch_futex_atomic_op_inuser() */
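Editor's note: the futex hunks across frv, hexagon, ia64, microblaze, mips, parisc and powerpc all follow the same shape: the arch hook now only performs the atomic operation and returns the old value through *oval, while operand decoding, the access_ok() check and the cmparg comparison move to a generic futex_atomic_op_inuser() wrapper. A kernel-style sketch of what that generic wrapper is expected to look like; the exact checks in kernel/futex.c may differ in detail.

	static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
	{
		unsigned int op  = (encoded_op & 0x70000000) >> 28;
		unsigned int cmp = (encoded_op & 0x0f000000) >> 24;
		int oparg  = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
		int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
		int oldval, ret;

		if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
			oparg = 1 << oparg;

		if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
			return -EFAULT;

		/* Arch hook does the atomic op and hands back the old value. */
		ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
		if (ret)
			return ret;

		/* Comparison is now done once here instead of in every arch. */
		switch (cmp) {
		case FUTEX_OP_CMP_EQ:	return oldval == cmparg;
		case FUTEX_OP_CMP_NE:	return oldval != cmparg;
		case FUTEX_OP_CMP_LT:	return oldval < cmparg;
		case FUTEX_OP_CMP_GE:	return oldval >= cmparg;
		case FUTEX_OP_CMP_LE:	return oldval <= cmparg;
		case FUTEX_OP_CMP_GT:	return oldval > cmparg;
		default:		return -ENOSYS;
		}
	}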
diff --git a/arch/hexagon/include/asm/futex.h b/arch/hexagon/include/asm/futex.h
index 7e597f8..c607b77 100644
--- a/arch/hexagon/include/asm/futex.h
+++ b/arch/hexagon/include/asm/futex.h
@@ -31,18 +31,9 @@
 
 
 static inline int
-futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, ret;
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
-		return -EFAULT;
 
 	pagefault_disable();
 
@@ -72,30 +63,9 @@
 
 	pagefault_enable();
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ:
-			ret = (oldval == cmparg);
-			break;
-		case FUTEX_OP_CMP_NE:
-			ret = (oldval != cmparg);
-			break;
-		case FUTEX_OP_CMP_LT:
-			ret = (oldval < cmparg);
-			break;
-		case FUTEX_OP_CMP_GE:
-			ret = (oldval >= cmparg);
-			break;
-		case FUTEX_OP_CMP_LE:
-			ret = (oldval <= cmparg);
-			break;
-		case FUTEX_OP_CMP_GT:
-			ret = (oldval > cmparg);
-			break;
-		default:
-			ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
+
 	return ret;
 }
 
diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h
index 76acbcd..6d67dc1 100644
--- a/arch/ia64/include/asm/futex.h
+++ b/arch/ia64/include/asm/futex.h
@@ -45,18 +45,9 @@
 } while (0)
 
 static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, ret;
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
-		return -EFAULT;
 
 	pagefault_disable();
 
@@ -84,17 +75,9 @@
 
 	pagefault_enable();
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
-		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
-		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
-		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
-		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
-		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
-		default: ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
+
 	return ret;
 }
 
diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
index 5ed0ea9..f851c9d 100644
--- a/arch/ia64/kernel/err_inject.c
+++ b/arch/ia64/kernel/err_inject.c
@@ -142,7 +142,7 @@
 	u64 virt_addr=simple_strtoull(buf, NULL, 16);
 	int ret;
 
-	ret = get_user_pages(virt_addr, 1, FOLL_WRITE, NULL, NULL);
+	ret = get_user_pages_fast(virt_addr, 1, FOLL_WRITE, NULL);
 	if (ret<=0) {
 #ifdef ERR_INJ_DEBUG
 		printk("Virtual address %lx is not existing.\n",virt_addr);
diff --git a/arch/m68k/coldfire/device.c b/arch/m68k/coldfire/device.c
index a0fc0c1..3e8be0f 100644
--- a/arch/m68k/coldfire/device.c
+++ b/arch/m68k/coldfire/device.c
@@ -135,7 +135,11 @@
 	.id			= 0,
 	.num_resources		= ARRAY_SIZE(mcf_fec0_resources),
 	.resource		= mcf_fec0_resources,
-	.dev.platform_data	= FEC_PDATA,
+	.dev = {
+		.dma_mask		= &mcf_fec0.dev.coherent_dma_mask,
+		.coherent_dma_mask	= DMA_BIT_MASK(32),
+		.platform_data		= FEC_PDATA,
+	}
 };
 
 #ifdef MCFFEC_BASE1
@@ -167,7 +171,11 @@
 	.id			= 1,
 	.num_resources		= ARRAY_SIZE(mcf_fec1_resources),
 	.resource		= mcf_fec1_resources,
-	.dev.platform_data	= FEC_PDATA,
+	.dev = {
+		.dma_mask		= &mcf_fec1.dev.coherent_dma_mask,
+		.coherent_dma_mask	= DMA_BIT_MASK(32),
+		.platform_data		= FEC_PDATA,
+	}
 };
 #endif /* MCFFEC_BASE1 */
 #endif /* CONFIG_FEC */
diff --git a/arch/microblaze/include/asm/futex.h b/arch/microblaze/include/asm/futex.h
index 01848f0..a9dad9e 100644
--- a/arch/microblaze/include/asm/futex.h
+++ b/arch/microblaze/include/asm/futex.h
@@ -29,18 +29,9 @@
 })
 
 static inline int
-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, ret;
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
-		return -EFAULT;
 
 	pagefault_disable();
 
@@ -66,30 +57,9 @@
 
 	pagefault_enable();
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ:
-			ret = (oldval == cmparg);
-			break;
-		case FUTEX_OP_CMP_NE:
-			ret = (oldval != cmparg);
-			break;
-		case FUTEX_OP_CMP_LT:
-			ret = (oldval < cmparg);
-			break;
-		case FUTEX_OP_CMP_GE:
-			ret = (oldval >= cmparg);
-			break;
-		case FUTEX_OP_CMP_LE:
-			ret = (oldval <= cmparg);
-			break;
-		case FUTEX_OP_CMP_GT:
-			ret = (oldval > cmparg);
-			break;
-		default:
-			ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
+
 	return ret;
 }
 
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 2d2fd79..34fbbf8 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -95,6 +95,7 @@
 	select PCI_DRIVERS_GENERIC
 	select PINCTRL
 	select SMP_UP if SMP
+	select SWAP_IO_SPACE
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_HAS_CPU_MIPS32_R2
 	select SYS_HAS_CPU_MIPS32_R6
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 6ed1ded..6420c83 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -2271,7 +2271,7 @@
 
 	parent_irq = irq_of_parse_and_map(ciu_node, 0);
 	if (!parent_irq) {
-		pr_err("ERROR: Couldn't acquire parent_irq for %s\n.",
+		pr_err("ERROR: Couldn't acquire parent_irq for %s\n",
 			ciu_node->name);
 		return -EINVAL;
 	}
@@ -2283,7 +2283,7 @@
 
 	addr = of_get_address(ciu_node, 0, NULL, NULL);
 	if (!addr) {
-		pr_err("ERROR: Couldn't acquire reg(0) %s\n.", ciu_node->name);
+		pr_err("ERROR: Couldn't acquire reg(0) %s\n", ciu_node->name);
 		return -EINVAL;
 	}
 	host_data->raw_reg = (u64)phys_to_virt(
@@ -2291,7 +2291,7 @@
 
 	addr = of_get_address(ciu_node, 1, NULL, NULL);
 	if (!addr) {
-		pr_err("ERROR: Couldn't acquire reg(1) %s\n.", ciu_node->name);
+		pr_err("ERROR: Couldn't acquire reg(1) %s\n", ciu_node->name);
 		return -EINVAL;
 	}
 	host_data->en_reg = (u64)phys_to_virt(
@@ -2299,7 +2299,7 @@
 
 	r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
 	if (r) {
-		pr_err("ERROR: Couldn't read cavium,max-bits from %s\n.",
+		pr_err("ERROR: Couldn't read cavium,max-bits from %s\n",
 			ciu_node->name);
 		return r;
 	}
@@ -2309,7 +2309,7 @@
 					   &octeon_irq_domain_cib_ops,
 					   host_data);
 	if (!cib_domain) {
-		pr_err("ERROR: Couldn't irq_domain_add_linear()\n.");
+		pr_err("ERROR: Couldn't irq_domain_add_linear()\n");
 		return -ENOMEM;
 	}
 
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
index 1de190b..a9e61ea 100644
--- a/arch/mips/include/asm/futex.h
+++ b/arch/mips/include/asm/futex.h
@@ -83,18 +83,9 @@
 }
 
 static inline int
-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, ret;
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
-		return -EFAULT;
 
 	pagefault_disable();
 
@@ -125,17 +116,9 @@
 
 	pagefault_enable();
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
-		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
-		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
-		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
-		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
-		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
-		default: ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
+
 	return ret;
 }
 
diff --git a/arch/mips/include/asm/kprobes.h b/arch/mips/include/asm/kprobes.h
index daba1f9..174aedc 100644
--- a/arch/mips/include/asm/kprobes.h
+++ b/arch/mips/include/asm/kprobes.h
@@ -40,7 +40,8 @@
 
 #define flush_insn_slot(p)						\
 do {									\
-	flush_icache_range((unsigned long)p->addr,			\
+	if (p->addr)							\
+		flush_icache_range((unsigned long)p->addr,		\
 			   (unsigned long)p->addr +			\
 			   (MAX_INSN_SIZE * sizeof(kprobe_opcode_t)));	\
 } while (0)
diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
index aa3800c..d99ca86 100644
--- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
+++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
@@ -167,7 +167,7 @@
 #define AR71XX_AHB_DIV_MASK		0x7
 
 #define AR724X_PLL_REG_CPU_CONFIG	0x00
-#define AR724X_PLL_REG_PCIE_CONFIG	0x18
+#define AR724X_PLL_REG_PCIE_CONFIG	0x10
 
 #define AR724X_PLL_FB_SHIFT		0
 #define AR724X_PLL_FB_MASK		0x3ff
diff --git a/arch/mips/include/asm/machine.h b/arch/mips/include/asm/machine.h
index 6b444cd..db930cd 100644
--- a/arch/mips/include/asm/machine.h
+++ b/arch/mips/include/asm/machine.h
@@ -52,7 +52,7 @@
 	if (!mach->matches)
 		return NULL;
 
-	for (match = mach->matches; match->compatible; match++) {
+	for (match = mach->matches; match->compatible[0]; match++) {
 		if (fdt_node_check_compatible(fdt, 0, match->compatible) == 0)
 			return match;
 	}
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index d21f3da..c0be540 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -18,6 +18,10 @@
 
 #include <asm-generic/pgtable-nopmd.h>
 
+#ifdef CONFIG_HIGHMEM
+#include <asm/highmem.h>
+#endif
+
 extern int temp_tlb_entry;
 
 /*
@@ -61,7 +65,8 @@
 
 #define VMALLOC_START	  MAP_BASE
 
-#define PKMAP_BASE		(0xfe000000UL)
+#define PKMAP_END	((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT)-1))
+#define PKMAP_BASE	(PKMAP_END - PAGE_SIZE * LAST_PKMAP)
 
 #ifdef CONFIG_HIGHMEM
 # define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
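Editor's note: PKMAP_BASE is no longer a hard-coded address; PKMAP_END rounds FIXADDR_START down to a LAST_PKMAP-page boundary and PKMAP_BASE sits LAST_PKMAP pages below it. A quick numeric check of that derivation, with made-up example values for FIXADDR_START, LAST_PKMAP and PAGE_SHIFT:

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1ul << PAGE_SHIFT)
	#define LAST_PKMAP	1024
	#define FIXADDR_START	0xfffe5000ul	/* illustrative value only */

	#define PKMAP_END	((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT) - 1))
	#define PKMAP_BASE	(PKMAP_END - PAGE_SIZE * LAST_PKMAP)

	int main(void)
	{
		printf("PKMAP_END  = %#lx\n", PKMAP_END);	/* 0xffc00000 */
		printf("PKMAP_BASE = %#lx\n", PKMAP_BASE);	/* 0xff800000 */
		return 0;
	}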
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 89fa5c0b..c92f4c2 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -1257,6 +1257,13 @@
 {
 	__kernel_size_t res;
 
+#ifdef CONFIG_CPU_MICROMIPS
+/* micromips memset / bzero also clobbers t7 & t8 */
+#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$15", "$24", "$31"
+#else
+#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"
+#endif /* CONFIG_CPU_MICROMIPS */
+
 	if (eva_kernel_access()) {
 		__asm__ __volatile__(
 			"move\t$4, %1\n\t"
@@ -1266,7 +1273,7 @@
 			"move\t%0, $6"
 			: "=r" (res)
 			: "r" (addr), "r" (size)
-			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
+			: bzero_clobbers);
 	} else {
 		might_fault();
 		__asm__ __volatile__(
@@ -1277,7 +1284,7 @@
 			"move\t%0, $6"
 			: "=r" (res)
 			: "r" (addr), "r" (size)
-			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
+			: bzero_clobbers);
 	}
 
 	return res;
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 0c8ae2c..8f7bf74 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -483,7 +483,7 @@
 /*
  * Copy the floating-point context to the supplied NT_PRFPREG buffer.
  * Choose the appropriate helper for general registers, and then copy
- * the FCSR register separately.
+ * the FCSR and FIR registers separately.
  */
 static int fpr_get(struct task_struct *target,
 		   const struct user_regset *regset,
@@ -491,6 +491,7 @@
 		   void *kbuf, void __user *ubuf)
 {
 	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
+	const int fir_pos = fcr31_pos + sizeof(u32);
 	int err;
 
 	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
@@ -503,6 +504,12 @@
 	err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 				  &target->thread.fpu.fcr31,
 				  fcr31_pos, fcr31_pos + sizeof(u32));
+	if (err)
+		return err;
+
+	err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+				  &boot_cpu_data.fpu_id,
+				  fir_pos, fir_pos + sizeof(u32));
 
 	return err;
 }
@@ -551,7 +558,8 @@
 /*
  * Copy the supplied NT_PRFPREG buffer to the floating-point context.
  * Choose the appropriate helper for general registers, and then copy
- * the FCSR register separately.
+ * the FCSR register separately.  Ignore the incoming FIR register
+ * contents though, as the register is read-only.
  *
  * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
  * which is supposed to have been guaranteed by the kernel before
@@ -565,6 +573,7 @@
 		   const void *kbuf, const void __user *ubuf)
 {
 	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
+	const int fir_pos = fcr31_pos + sizeof(u32);
 	u32 fcr31;
 	int err;
 
@@ -592,6 +601,11 @@
 		ptrace_setfcr31(target, fcr31);
 	}
 
+	if (count > 0)
+		err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+						fir_pos,
+						fir_pos + sizeof(u32));
+
 	return err;
 }
 
@@ -813,7 +827,7 @@
 			fregs = get_fpu_regs(child);
 
 #ifdef CONFIG_32BIT
-			if (test_thread_flag(TIF_32BIT_FPREGS)) {
+			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
 				/*
 				 * The odd registers are actually the high
 				 * order bits of the values stored in the even
@@ -902,7 +916,7 @@
 
 			init_fp_ctx(child);
 #ifdef CONFIG_32BIT
-			if (test_thread_flag(TIF_32BIT_FPREGS)) {
+			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
 				/*
 				 * The odd registers are actually the high
 				 * order bits of the values stored in the even
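Editor's note: fpr_get() now appends the read-only FIR register right after FCSR in the NT_PRFPREG payload, and fpr_set() skips over any incoming FIR bytes. The offsets follow directly from the layout: FCSR sits after NUM_FPU_REGS general FP registers, FIR after FCSR. A small offset check, using the usual MIPS register count and element size for illustration:

	#include <stdint.h>
	#include <stdio.h>

	#define NUM_FPU_REGS	32
	typedef uint64_t elf_fpreg_t;

	int main(void)
	{
		const int fcr31_pos = (int)(NUM_FPU_REGS * sizeof(elf_fpreg_t));
		const int fir_pos = fcr31_pos + (int)sizeof(uint32_t);

		printf("fcr31 offset: %d\n", fcr31_pos);	/* 256 */
		printf("fir offset:   %d\n", fir_pos);		/* 260 */
		return 0;
	}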
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 5fcbdcd..bc9afba 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -97,7 +97,7 @@
 				break;
 			}
 			fregs = get_fpu_regs(child);
-			if (test_thread_flag(TIF_32BIT_FPREGS)) {
+			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
 				/*
 				 * The odd registers are actually the high
 				 * order bits of the values stored in the even
@@ -204,7 +204,7 @@
 				       sizeof(child->thread.fpu));
 				child->thread.fpu.fcr31 = 0;
 			}
-			if (test_thread_flag(TIF_32BIT_FPREGS)) {
+			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
 				/*
 				 * The odd registers are actually the high
 				 * order bits of the values stored in the even
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 29ec9ab..a2c46f5 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -42,7 +42,7 @@
 	{ "cache",	  VCPU_STAT(cache_exits),	 KVM_STAT_VCPU },
 	{ "signal",	  VCPU_STAT(signal_exits),	 KVM_STAT_VCPU },
 	{ "interrupt",	  VCPU_STAT(int_exits),		 KVM_STAT_VCPU },
-	{ "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
+	{ "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
 	{ "tlbmod",	  VCPU_STAT(tlbmod_exits),	 KVM_STAT_VCPU },
 	{ "tlbmiss_ld",	  VCPU_STAT(tlbmiss_ld_exits),	 KVM_STAT_VCPU },
 	{ "tlbmiss_st",	  VCPU_STAT(tlbmiss_st_exits),	 KVM_STAT_VCPU },
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 18a1ccd..2b1bf93 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -218,7 +218,7 @@
 1:	PTR_ADDIU	a0, 1			/* fill bytewise */
 	R10KCBARRIER(0(ra))
 	bne		t1, a0, 1b
-	sb		a1, -1(a0)
+	 EX(sb, a1, -1(a0), .Lsmall_fixup\@)
 
 2:	jr		ra			/* done */
 	move		a2, zero
@@ -251,13 +251,18 @@
 	PTR_L		t0, TI_TASK($28)
 	andi		a2, STORMASK
 	LONG_L		t0, THREAD_BUADDR(t0)
-	LONG_ADDU	a2, t1
+	LONG_ADDU	a2, a0
 	jr		ra
 	LONG_SUBU	a2, t0
 
 .Llast_fixup\@:
 	jr		ra
-	andi		v1, a2, STORMASK
+	 nop
+
+.Lsmall_fixup\@:
+	PTR_SUBU	a2, t1, a0
+	jr		ra
+	 PTR_ADDIU	a2, 1
 
 	.endm
 
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 9d0107f..43fa682 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -851,9 +851,12 @@
 	/*
 	 * Either no secondary cache or the available caches don't have the
 	 * subset property so we have to flush the primary caches
-	 * explicitly
+	 * explicitly.
+	 * If we would need IPI to perform an INDEX-type operation, then
+	 * we have to use the HIT-type alternative as IPI cannot be used
+	 * here due to interrupts possibly being disabled.
 	 */
-	if (size >= dcache_size) {
+	if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
 		r4k_blast_dcache();
 	} else {
 		R4600_HIT_CACHEOP_WAR_IMPL;
@@ -890,7 +893,7 @@
 		return;
 	}
 
-	if (size >= dcache_size) {
+	if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
 		r4k_blast_dcache();
 	} else {
 		R4600_HIT_CACHEOP_WAR_IMPL;
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c
index adc6911..b19a3c5 100644
--- a/arch/mips/mm/pgtable-32.c
+++ b/arch/mips/mm/pgtable-32.c
@@ -51,15 +51,15 @@
 	/*
 	 * Fixed mappings:
 	 */
-	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
-	fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
+	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
+	fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base);
 
 #ifdef CONFIG_HIGHMEM
 	/*
 	 * Permanent kmaps:
 	 */
 	vaddr = PKMAP_BASE;
-	fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
+	fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
 
 	pgd = swapper_pg_dir + __pgd_offset(vaddr);
 	pud = pud_offset(pgd, vaddr);
diff --git a/arch/mips/txx9/rbtx4939/setup.c b/arch/mips/txx9/rbtx4939/setup.c
index 8b93730..fd26fad 100644
--- a/arch/mips/txx9/rbtx4939/setup.c
+++ b/arch/mips/txx9/rbtx4939/setup.c
@@ -186,7 +186,7 @@
 
 #define RBTX4939_MAX_7SEGLEDS	8
 
-#if IS_ENABLED(CONFIG_LEDS_CLASS)
+#if IS_BUILTIN(CONFIG_LEDS_CLASS)
 static u8 led_val[RBTX4939_MAX_7SEGLEDS];
 struct rbtx4939_led_data {
 	struct led_classdev cdev;
@@ -261,7 +261,7 @@
 
 static void __rbtx4939_7segled_putc(unsigned int pos, unsigned char val)
 {
-#if IS_ENABLED(CONFIG_LEDS_CLASS)
+#if IS_BUILTIN(CONFIG_LEDS_CLASS)
 	unsigned long flags;
 	local_irq_save(flags);
 	/* bit7: reserved for LED class */
diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h
index ac8bd58..06a1a88 100644
--- a/arch/parisc/include/asm/futex.h
+++ b/arch/parisc/include/asm/futex.h
@@ -32,22 +32,12 @@
 }
 
 static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
 {
 	unsigned long int flags;
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval, ret;
 	u32 tmp;
 
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr)))
-		return -EFAULT;
-
 	_futex_spin_lock_irqsave(uaddr, &flags);
 	pagefault_disable();
 
@@ -85,17 +75,9 @@
 	pagefault_enable();
 	_futex_spin_unlock_irqrestore(uaddr, &flags);
 
-	if (ret == 0) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
-		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
-		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
-		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
-		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
-		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
-		default: ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
+
 	return ret;
 }
 
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index 700e2d2..2e68ca1 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -648,6 +648,10 @@
 					(modpath->mod == PCI_FUNC(devfn)));
 	}
 
+	/* index might be out of bounds for bc[] */
+	if (index >= 6)
+		return 0;
+
 	id = PCI_SLOT(pdev->devfn) | (PCI_FUNC(pdev->devfn) << 5);
 	return (modpath->bc[index] == id);
 }
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 9d47f2e..bb69f39 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -92,7 +92,8 @@
 libfdt       := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c
 libfdtheader := fdt.h libfdt.h libfdt_internal.h
 
-$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o): \
+$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o \
+	treeboot-akebono.o treeboot-currituck.o treeboot-iss4xx.o): \
 	$(addprefix $(obj)/,$(libfdtheader))
 
 src-wlib-y := string.S crt0.S crtsavres.S stdio.c decompress.c main.c \
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index c0deafc..798ab37 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -34,7 +34,8 @@
 #define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
 #define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
 
-#ifdef __SUBARCH_HAS_LWSYNC
+/* The sub-arch has lwsync */
+#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
 #    define SMPWMB      LWSYNC
 #else
 #    define SMPWMB      eieio
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index 2a9cf84..f4c7467f 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -31,18 +31,10 @@
 	: "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
 	: "cr0", "memory")
 
-static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+		u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, ret;
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
-		return -EFAULT;
 
 	pagefault_disable();
 
@@ -68,17 +60,9 @@
 
 	pagefault_enable();
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
-		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
-		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
-		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
-		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
-		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
-		default: ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
+
 	return ret;
 }
 
diff --git a/arch/powerpc/include/asm/irq_work.h b/arch/powerpc/include/asm/irq_work.h
index 744fd54..1bcc849 100644
--- a/arch/powerpc/include/asm/irq_work.h
+++ b/arch/powerpc/include/asm/irq_work.h
@@ -5,5 +5,6 @@
 {
 	return true;
 }
+extern void arch_irq_work_raise(void);
 
 #endif /* _ASM_POWERPC_IRQ_WORK_H */
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index cd4ffd8..bb9073a 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -14,6 +14,10 @@
 #include <asm-generic/module.h>
 
 
+#ifdef CC_USING_MPROFILE_KERNEL
+#define MODULE_ARCH_VERMAGIC	"mprofile-kernel"
+#endif
+
 #ifndef __powerpc64__
 /*
  * Thanks to Paul M for explaining this.
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index e958b70..9e5e0d9 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -21,6 +21,9 @@
 /* We calculate number of sg entries based on PAGE_SIZE */
 #define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
 
+/* Default time to sleep or delay between OPAL_BUSY/OPAL_BUSY_EVENT loops */
+#define OPAL_BUSY_DELAY_MS	10
+
 /* /sys/firmware/opal */
 extern struct kobject *opal_kobj;
 
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 56398e7..71c6988 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -132,7 +132,19 @@
 #define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
 #define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
 #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
+
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * On hash the vmalloc and other regions alias to the kernel region when passed
+ * through __pa(), which virt_to_pfn() uses. That means virt_addr_valid() can
+ * return true for some vmalloc addresses, which is incorrect. So explicitly
+ * check that the address is in the kernel region.
+ */
+#define virt_addr_valid(kaddr) (REGION_ID(kaddr) == KERNEL_REGION_ID && \
+				pfn_valid(virt_to_pfn(kaddr)))
+#else
 #define virt_addr_valid(kaddr)	pfn_valid(virt_to_pfn(kaddr))
+#endif
 
 /*
  * On Book-E parts we need __va to parse the device tree and we can't
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index 78efe8d..30f2d6d 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -5,10 +5,6 @@
 #include <linux/stringify.h>
 #include <asm/feature-fixups.h>
 
-#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
-#define __SUBARCH_HAS_LWSYNC
-#endif
-
 #ifndef __ASSEMBLY__
 extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
 extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 6ef8f0b..27843665 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -207,18 +207,18 @@
 
 	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
 		return NULL;
+
+	device_lock(&dev->dev);
 	dev->error_state = pci_channel_io_frozen;
 
 	driver = eeh_pcid_get(dev);
-	if (!driver) return NULL;
+	if (!driver) goto out_no_dev;
 
 	eeh_disable_irq(dev);
 
 	if (!driver->err_handler ||
-	    !driver->err_handler->error_detected) {
-		eeh_pcid_put(dev);
-		return NULL;
-	}
+	    !driver->err_handler->error_detected)
+		goto out;
 
 	rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen);
 
@@ -227,7 +227,10 @@
 	if (*res == PCI_ERS_RESULT_NONE) *res = rc;
 
 	edev->in_error = true;
+out:
 	eeh_pcid_put(dev);
+out_no_dev:
+	device_unlock(&dev->dev);
 	return NULL;
 }
 
@@ -250,15 +253,14 @@
 	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
 		return NULL;
 
+	device_lock(&dev->dev);
 	driver = eeh_pcid_get(dev);
-	if (!driver) return NULL;
+	if (!driver) goto out_no_dev;
 
 	if (!driver->err_handler ||
 	    !driver->err_handler->mmio_enabled ||
-	    (edev->mode & EEH_DEV_NO_HANDLER)) {
-		eeh_pcid_put(dev);
-		return NULL;
-	}
+	    (edev->mode & EEH_DEV_NO_HANDLER))
+		goto out;
 
 	rc = driver->err_handler->mmio_enabled(dev);
 
@@ -266,7 +268,10 @@
 	if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
 	if (*res == PCI_ERS_RESULT_NONE) *res = rc;
 
+out:
 	eeh_pcid_put(dev);
+out_no_dev:
+	device_unlock(&dev->dev);
 	return NULL;
 }
 
@@ -289,20 +294,20 @@
 
 	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
 		return NULL;
+
+	device_lock(&dev->dev);
 	dev->error_state = pci_channel_io_normal;
 
 	driver = eeh_pcid_get(dev);
-	if (!driver) return NULL;
+	if (!driver) goto out_no_dev;
 
 	eeh_enable_irq(dev);
 
 	if (!driver->err_handler ||
 	    !driver->err_handler->slot_reset ||
 	    (edev->mode & EEH_DEV_NO_HANDLER) ||
-	    (!edev->in_error)) {
-		eeh_pcid_put(dev);
-		return NULL;
-	}
+	    (!edev->in_error))
+		goto out;
 
 	rc = driver->err_handler->slot_reset(dev);
 	if ((*res == PCI_ERS_RESULT_NONE) ||
@@ -310,7 +315,10 @@
 	if (*res == PCI_ERS_RESULT_DISCONNECT &&
 	     rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
 
+out:
 	eeh_pcid_put(dev);
+out_no_dev:
+	device_unlock(&dev->dev);
 	return NULL;
 }
 
@@ -361,10 +369,12 @@
 
 	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
 		return NULL;
+
+	device_lock(&dev->dev);
 	dev->error_state = pci_channel_io_normal;
 
 	driver = eeh_pcid_get(dev);
-	if (!driver) return NULL;
+	if (!driver) goto out_no_dev;
 
 	was_in_error = edev->in_error;
 	edev->in_error = false;
@@ -374,13 +384,15 @@
 	    !driver->err_handler->resume ||
 	    (edev->mode & EEH_DEV_NO_HANDLER) || !was_in_error) {
 		edev->mode &= ~EEH_DEV_NO_HANDLER;
-		eeh_pcid_put(dev);
-		return NULL;
+		goto out;
 	}
 
 	driver->err_handler->resume(dev);
 
+out:
 	eeh_pcid_put(dev);
+out_no_dev:
+	device_unlock(&dev->dev);
 	return NULL;
 }
 
@@ -400,22 +412,25 @@
 
 	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
 		return NULL;
+
+	device_lock(&dev->dev);
 	dev->error_state = pci_channel_io_perm_failure;
 
 	driver = eeh_pcid_get(dev);
-	if (!driver) return NULL;
+	if (!driver) goto out_no_dev;
 
 	eeh_disable_irq(dev);
 
 	if (!driver->err_handler ||
-	    !driver->err_handler->error_detected) {
-		eeh_pcid_put(dev);
-		return NULL;
-	}
+	    !driver->err_handler->error_detected)
+		goto out;
 
 	driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);
 
+out:
 	eeh_pcid_put(dev);
+out_no_dev:
+	device_unlock(&dev->dev);
 	return NULL;
 }
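
Each report helper in eeh_driver.c now follows the same shape: take the struct device lock before touching error_state or calling into the driver, then unwind through two labels so every exit path drops the driver reference and the lock exactly once. Condensed into a single illustrative function (the function name is made up; the calls are the ones used in the hunks above):

static void *eeh_report_example(struct pci_dev *dev)
{
	struct pci_driver *driver;

	device_lock(&dev->dev);
	dev->error_state = pci_channel_io_frozen;

	driver = eeh_pcid_get(dev);
	if (!driver)
		goto out_no_dev;

	if (!driver->err_handler || !driver->err_handler->error_detected)
		goto out;

	driver->err_handler->error_detected(dev, pci_channel_io_frozen);
out:
	eeh_pcid_put(dev);		/* balances eeh_pcid_get() */
out_no_dev:
	device_unlock(&dev->dev);	/* balances device_lock() */
	return NULL;
}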
 
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index de7d091..1abd8dd 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -795,7 +795,8 @@
 	eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);
 
 	/* PCI Command: 0x4 */
-	eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1]);
+	eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1] |
+			      PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
 
 	/* Check the PCIe link is ready */
 	eeh_bridge_check_link(edev);
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 7614d1d..94b5dfb 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -723,7 +723,7 @@
 	ld	r3, PACA_EXSLB+EX_DAR(r13)
 	std	r3, _DAR(r1)
 	beq	cr6, 2f
-	li	r10, 0x480		/* fix trap number for I-SLB miss */
+	li	r10, 0x481		/* fix trap number for I-SLB miss */
 	std	r10, _TRAP(r1)
 2:	bl	save_nvgprs
 	addi	r3, r1, STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 028a22b..ad713f7 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -372,6 +372,14 @@
 	 */
 	WARN_ON(!arch_irqs_disabled());
 
+	/*
+	 * Interrupts must always be hard disabled before irq_happened is
+	 * modified (to prevent lost update in case of interrupt between
+	 * load and store).
+	 */
+	__hard_irq_disable();
+	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
 	/* Indicate in the PACA that we have an interrupt to replay */
 	local_paca->irq_happened |= PACA_IRQ_EE;
 }
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index f516ac5..bf0f712 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -228,14 +228,6 @@
 	unsigned short maj;
 	unsigned short min;
 
-	/* We only show online cpus: disable preempt (overzealous, I
-	 * knew) to prevent cpu going down. */
-	preempt_disable();
-	if (!cpu_online(cpu_id)) {
-		preempt_enable();
-		return 0;
-	}
-
 #ifdef CONFIG_SMP
 	pvr = per_cpu(cpu_pvr, cpu_id);
 #else
@@ -340,9 +332,6 @@
 #ifdef CONFIG_SMP
 	seq_printf(m, "\n");
 #endif
-
-	preempt_enable();
-
 	/* If this is the last cpu, print the summary */
 	if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
 		show_cpuinfo_summary(m);
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index f1d7e99..ab7b661 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -719,12 +719,20 @@
 static void start_cpu_decrementer(void)
 {
 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+	unsigned int tcr;
+
 	/* Clear any pending timer interrupts */
 	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
 
-	/* Enable decrementer interrupt */
-	mtspr(SPRN_TCR, TCR_DIE);
-#endif /* defined(CONFIG_BOOKE) || defined(CONFIG_40x) */
+	tcr = mfspr(SPRN_TCR);
+	/*
+	 * The watchdog may have already been enabled by u-boot. So leave
+	 * TRC[WP] (Watchdog Period) alone.

+	 * TCR[WP] (Watchdog Period) alone.
+	tcr &= TCR_WP_MASK;	/* Clear all bits except for TCR[WP] */
+	tcr |= TCR_DIE;		/* Enable decrementer */
+	mtspr(SPRN_TCR, tcr);
+#endif
 }
 
 void __init generic_calibrate_decr(void)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 218cba2..0a2b247 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3107,15 +3107,17 @@
 		goto up_out;
 
 	psize = vma_kernel_pagesize(vma);
-	porder = __ilog2(psize);
 
 	up_read(&current->mm->mmap_sem);
 
 	/* We can handle 4k, 64k or 16M pages in the VRMA */
-	err = -EINVAL;
-	if (!(psize == 0x1000 || psize == 0x10000 ||
-	      psize == 0x1000000))
-		goto out_srcu;
+	if (psize >= 0x1000000)
+		psize = 0x1000000;
+	else if (psize >= 0x10000)
+		psize = 0x10000;
+	else
+		psize = 0x1000;
+	porder = __ilog2(psize);
 
 	/* Update VRMASD field in the LPCR */
 	senc = slb_pgsize_encoding(psize);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 55fbc0c..79a180c 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -299,7 +299,6 @@
 	stw	r12, STACK_SLOT_TRAP(r1)
 	bl	kvmhv_commence_exit
 	nop
-	lwz	r12, STACK_SLOT_TRAP(r1)
 	b	kvmhv_switch_to_host
 
 /*
@@ -1023,6 +1022,7 @@
 
 secondary_too_late:
 	li	r12, 0
+	stw	r12, STACK_SLOT_TRAP(r1)
 	cmpdi	r4, 0
 	beq	11f
 	stw	r12, VCPU_TRAP(r4)
@@ -1266,12 +1266,12 @@
 	bl	kvmhv_accumulate_time
 #endif
 
+	stw	r12, STACK_SLOT_TRAP(r1)
 	mr 	r3, r12
 	/* Increment exit count, poke other threads to exit */
 	bl	kvmhv_commence_exit
 	nop
 	ld	r9, HSTATE_KVM_VCPU(r13)
-	lwz	r12, VCPU_TRAP(r9)
 
 	/* Stop others sending VCPU interrupts to this physical CPU */
 	li	r0, -1
@@ -1549,6 +1549,7 @@
 	 * POWER7/POWER8 guest -> host partition switch code.
 	 * We don't have to lock against tlbies but we do
 	 * have to coordinate the hardware threads.
+	 * Here STACK_SLOT_TRAP(r1) contains the trap number.
 	 */
 kvmhv_switch_to_host:
 	/* Secondary threads wait for primary to do partition switch */
@@ -1599,11 +1600,11 @@
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
 	/* If HMI, call kvmppc_realmode_hmi_handler() */
+	lwz	r12, STACK_SLOT_TRAP(r1)
 	cmpwi	r12, BOOK3S_INTERRUPT_HMI
 	bne	27f
 	bl	kvmppc_realmode_hmi_handler
 	nop
-	li	r12, BOOK3S_INTERRUPT_HMI
 	/*
 	 * At this point kvmppc_realmode_hmi_handler would have resync-ed
 	 * the TB. Hence it is not required to subtract guest timebase
@@ -1678,6 +1679,7 @@
 	li	r0, KVM_GUEST_MODE_NONE
 	stb	r0, HSTATE_IN_GUEST(r13)
 
+	lwz	r12, STACK_SLOT_TRAP(r1)	/* return trap # in r12 */
 	ld	r0, SFS+PPC_LR_STKOFF(r1)
 	addi	r1, r1, SFS
 	mtlr	r0
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index 02176fd..286a3b0 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -50,7 +50,9 @@
 	pteg_addr = get_pteg_addr(vcpu, pte_index);
 
 	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
-	copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg));
+	ret = H_FUNCTION;
+	if (copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg)))
+		goto done;
 	hpte = pteg;
 
 	ret = H_PTEG_FULL;
@@ -71,7 +73,9 @@
 	hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6));
 	hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7));
 	pteg_addr += i * HPTE_SIZE;
-	copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE);
+	ret = H_FUNCTION;
+	if (copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE))
+		goto done;
 	kvmppc_set_gpr(vcpu, 4, pte_index | i);
 	ret = H_SUCCESS;
 
@@ -93,7 +97,9 @@
 
 	pteg = get_pteg_addr(vcpu, pte_index);
 	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
-	copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+	ret = H_FUNCTION;
+	if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
+		goto done;
 	pte[0] = be64_to_cpu((__force __be64)pte[0]);
 	pte[1] = be64_to_cpu((__force __be64)pte[1]);
 
@@ -103,7 +109,9 @@
 	    ((flags & H_ANDCOND) && (pte[0] & avpn) != 0))
 		goto done;
 
-	copy_to_user((void __user *)pteg, &v, sizeof(v));
+	ret = H_FUNCTION;
+	if (copy_to_user((void __user *)pteg, &v, sizeof(v)))
+		goto done;
 
 	rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
 	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
@@ -171,7 +179,10 @@
 		}
 
 		pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
-		copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+		if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) {
+			ret = H_FUNCTION;
+			break;
+		}
 		pte[0] = be64_to_cpu((__force __be64)pte[0]);
 		pte[1] = be64_to_cpu((__force __be64)pte[1]);
 
@@ -184,7 +195,10 @@
 			tsh |= H_BULK_REMOVE_NOT_FOUND;
 		} else {
 			/* Splat the pteg in (userland) hpt */
-			copy_to_user((void __user *)pteg, &v, sizeof(v));
+			if (copy_to_user((void __user *)pteg, &v, sizeof(v))) {
+				ret = H_FUNCTION;
+				break;
+			}
 
 			rb = compute_tlbie_rb(pte[0], pte[1],
 					      tsh & H_BULK_REMOVE_PTEX);
@@ -211,7 +225,9 @@
 
 	pteg = get_pteg_addr(vcpu, pte_index);
 	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
-	copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+	ret = H_FUNCTION;
+	if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
+		goto done;
 	pte[0] = be64_to_cpu((__force __be64)pte[0]);
 	pte[1] = be64_to_cpu((__force __be64)pte[1]);
 
@@ -234,7 +250,9 @@
 	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
 	pte[0] = (__force u64)cpu_to_be64(pte[0]);
 	pte[1] = (__force u64)cpu_to_be64(pte[1]);
-	copy_to_user((void __user *)pteg, pte, sizeof(pte));
+	ret = H_FUNCTION;
+	if (copy_to_user((void __user *)pteg, pte, sizeof(pte)))
+		goto done;
 	ret = H_SUCCESS;
 
  done:
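
Every hcall above relies on the same contract: copy_from_user() and copy_to_user() return the number of bytes left uncopied, so a non-zero return means the access to the userspace HPT faulted and the hcall must fail with H_FUNCTION instead of operating on uninitialized stack data. A minimal sketch of the idiom (the helper name is illustrative):

static long example_read_pteg(void __user *pteg_addr, u64 *pteg, size_t len)
{
	/* Returns bytes NOT copied; non-zero means the user access faulted. */
	if (copy_from_user(pteg, pteg_addr, len))
		return H_FUNCTION;

	return H_SUCCESS;
}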
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index e86bfa1..46c8338 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -55,7 +55,7 @@
 		unsigned int *target = (unsigned int *)branch_target(src);
 
 		/* Branch within the section doesn't need translating */
-		if (target < alt_start || target >= alt_end) {
+		if (target < alt_start || target > alt_end) {
 			instr = translate_branch(dest, src);
 			if (!instr)
 				return 1;
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index a51c188..6cff96e 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -551,7 +551,7 @@
 	nid = of_node_to_nid_single(cpu);
 
 out_present:
-	if (nid < 0 || !node_online(nid))
+	if (nid < 0 || !node_possible(nid))
 		nid = first_online_node;
 
 	map_cpu_to_node(lcpu, nid);
@@ -904,6 +904,32 @@
 	NODE_DATA(nid)->node_spanned_pages = spanned_pages;
 }
 
+static void __init find_possible_nodes(void)
+{
+	struct device_node *rtas;
+	u32 numnodes, i;
+
+	if (min_common_depth <= 0)
+		return;
+
+	rtas = of_find_node_by_path("/rtas");
+	if (!rtas)
+		return;
+
+	if (of_property_read_u32_index(rtas,
+				"ibm,max-associativity-domains",
+				min_common_depth, &numnodes))
+		goto out;
+
+	for (i = 0; i < numnodes; i++) {
+		if (!node_possible(i))
+			node_set(i, node_possible_map);
+	}
+
+out:
+	of_node_put(rtas);
+}
+
 void __init initmem_init(void)
 {
 	int nid, cpu;
@@ -917,12 +943,15 @@
 	memblock_dump_all();
 
 	/*
-	 * Reduce the possible NUMA nodes to the online NUMA nodes,
-	 * since we do not support node hotplug. This ensures that  we
-	 * lower the maximum NUMA node ID to what is actually present.
+	 * Modify the set of possible NUMA nodes to reflect information
+	 * available about the set of online nodes, and the set of nodes
+	 * that we expect to make use of for this platform's affinity
+	 * calculations.
 	 */
 	nodes_and(node_possible_map, node_possible_map, node_online_map);
 
+	find_possible_nodes();
+
 	for_each_online_node(nid) {
 		unsigned long start_pfn, end_pfn;
 
@@ -1274,6 +1303,40 @@
 	return rc;
 }
 
+static inline int find_and_online_cpu_nid(int cpu)
+{
+	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
+	int new_nid;
+
+	/* Use associativity from first thread for all siblings */
+	vphn_get_associativity(cpu, associativity);
+	new_nid = associativity_to_nid(associativity);
+	if (new_nid < 0 || !node_possible(new_nid))
+		new_nid = first_online_node;
+
+	if (NODE_DATA(new_nid) == NULL) {
+#ifdef CONFIG_MEMORY_HOTPLUG
+		/*
+		 * Need to ensure that NODE_DATA is initialized for a node from
+		 * available memory (see memblock_alloc_try_nid). If unable to
+		 * init the node, then default to nearest node that has memory
+		 * installed.
+		 */
+		if (try_online_node(new_nid))
+			new_nid = first_online_node;
+#else
+		/*
+		 * Default to using the nearest node that has memory installed.
+		 * Otherwise, it would be necessary to patch the kernel MM code
+		 * to deal with more memoryless-node error conditions.
+		 */
+		new_nid = first_online_node;
+#endif
+	}
+
+	return new_nid;
+}
+
 /*
  * Update the CPU maps and sysfs entries for a single CPU when its NUMA
  * characteristics change. This function doesn't perform any locking and is
@@ -1339,7 +1402,6 @@
 {
 	unsigned int cpu, sibling, changed = 0;
 	struct topology_update_data *updates, *ud;
-	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
 	cpumask_t updated_cpus;
 	struct device *dev;
 	int weight, new_nid, i = 0;
@@ -1374,11 +1436,7 @@
 			continue;
 		}
 
-		/* Use associativity from first thread for all siblings */
-		vphn_get_associativity(cpu, associativity);
-		new_nid = associativity_to_nid(associativity);
-		if (new_nid < 0 || !node_online(new_nid))
-			new_nid = first_online_node;
+		new_nid = find_and_online_cpu_nid(cpu);
 
 		if (new_nid == numa_cpu_lookup_table[cpu]) {
 			cpumask_andnot(&cpu_associativity_changes_mask,
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 7e706f3..9c58194 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -329,6 +329,9 @@
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
 			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
 			break;
+		case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */
+			PPC_LWZ_OFFS(r_A, r_skb, K);
+			break;
 		case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
 			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
 			break;
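
Filter authors do not emit BPF_LDX | BPF_W | BPF_ABS themselves; seccomp's classic-BPF checker rewrites BPF_LD | BPF_W | BPF_ABS loads into this opcode so that, by the time the program reaches the JIT, reads come from struct seccomp_data rather than an skb. A purely illustrative userspace filter fragment of the kind that ends up exercising the new case (standard UAPI macros; the chosen syscall is arbitrary):

#include <stddef.h>
#include <sys/syscall.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

static struct sock_filter filter[] = {
	/* A = seccomp_data->nr (rewritten to BPF_LDX|BPF_W|BPF_ABS in-kernel) */
	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, nr)),
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_exit_group, 0, 1),
	BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL),
};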
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index bf94962..771edff 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -448,6 +448,16 @@
 				/* invalid entry */
 				continue;
 
+			/*
+			 * The BHRB rolling buffer may well contain kernel
+			 * addresses at this point. Check the privileges before
+			 * exporting it to userspace (avoid exposing regions
+			 * where we could have speculative execution).
+			 */
+			if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN) &&
+				is_kernel_addr(addr))
+				continue;
+
 			/* Branches are read most recent first (ie. mfbhrb 0 is
 			 * the most recent branch).
 			 * There are two types of valid entries:
@@ -1188,6 +1198,7 @@
 		 */
 		write_mmcr0(cpuhw, val);
 		mb();
+		isync();
 
 		/*
 		 * Disable instruction sampling if it was enabled
@@ -1196,12 +1207,26 @@
 			mtspr(SPRN_MMCRA,
 			      cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
 			mb();
+			isync();
 		}
 
 		cpuhw->disabled = 1;
 		cpuhw->n_added = 0;
 
 		ebb_switch_out(mmcr0);
+
+#ifdef CONFIG_PPC64
+		/*
+		 * These are readable by userspace, may contain kernel
+		 * addresses and are not switched by context switch, so clear
+		 * them now to avoid leaking anything to userspace in general
+		 * including to another process.
+		 */
+		if (ppmu->flags & PPMU_ARCH_207S) {
+			mtspr(SPRN_SDAR, 0);
+			mtspr(SPRN_SIAR, 0);
+		}
+#endif
 	}
 
 	local_irq_restore(flags);
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index 85c85eb..b4abf9d 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -175,6 +175,8 @@
 	skip = roundup(cprm->pos - total + sz, 4) - cprm->pos;
 	if (!dump_skip(cprm, skip))
 		goto Eio;
+
+	rc = 0;
 out:
 	free_page((unsigned long)buf);
 	return rc;
diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c
index 9db4398..5584247 100644
--- a/arch/powerpc/platforms/powernv/opal-nvram.c
+++ b/arch/powerpc/platforms/powernv/opal-nvram.c
@@ -11,6 +11,7 @@
 
 #define DEBUG
 
+#include <linux/delay.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/of.h>
@@ -43,6 +44,10 @@
 	return count;
 }
 
+/*
+ * This can be called in the panic path with interrupts off, so use
+ * mdelay in that case.
+ */
 static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
 {
 	s64 rc = OPAL_BUSY;
@@ -56,9 +61,23 @@
 
 	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
 		rc = opal_write_nvram(__pa(buf), count, off);
-		if (rc == OPAL_BUSY_EVENT)
+		if (rc == OPAL_BUSY_EVENT) {
+			if (in_interrupt() || irqs_disabled())
+				mdelay(OPAL_BUSY_DELAY_MS);
+			else
+				msleep(OPAL_BUSY_DELAY_MS);
 			opal_poll_events(NULL);
+		} else if (rc == OPAL_BUSY) {
+			if (in_interrupt() || irqs_disabled())
+				mdelay(OPAL_BUSY_DELAY_MS);
+			else
+				msleep(OPAL_BUSY_DELAY_MS);
+		}
 	}
+
+	if (rc)
+		return -EIO;
+
 	*index += count;
 	return count;
 }
diff --git a/arch/powerpc/platforms/powernv/opal-rtc.c b/arch/powerpc/platforms/powernv/opal-rtc.c
index f886886..aa2a513 100644
--- a/arch/powerpc/platforms/powernv/opal-rtc.c
+++ b/arch/powerpc/platforms/powernv/opal-rtc.c
@@ -48,10 +48,12 @@
 
 	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
 		rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
-		if (rc == OPAL_BUSY_EVENT)
+		if (rc == OPAL_BUSY_EVENT) {
+			mdelay(OPAL_BUSY_DELAY_MS);
 			opal_poll_events(NULL);
-		else if (rc == OPAL_BUSY)
-			mdelay(10);
+		} else if (rc == OPAL_BUSY) {
+			mdelay(OPAL_BUSY_DELAY_MS);
+		}
 	}
 	if (rc != OPAL_SUCCESS)
 		return 0;
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c
index 3e828b2..2842f9d 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.c
+++ b/arch/powerpc/sysdev/mpc8xx_pic.c
@@ -79,7 +79,7 @@
 	irq = in_be32(&siu_reg->sc_sivec) >> 26;
 
 	if (irq == PIC_VEC_SPURRIOUS)
-		irq = 0;
+		return 0;
 
         return irq_linear_revmap(mpc8xx_pic_host, irq);
 
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index b9aac95..f37567e 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -626,7 +626,7 @@
 	int i;
 	u32 mask = 0;
 
-	for (i = 0; i < min(32, NR_CPUS); ++i, cpumask >>= 1)
+	for (i = 0; i < min(32, NR_CPUS) && cpu_possible(i); ++i, cpumask >>= 1)
 		mask |= (cpumask & 1) << get_hard_smp_processor_id(i);
 	return mask;
 }
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 9aa0d04..1c4a595 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -118,6 +118,7 @@
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_CPU_AUTOPROBE
 	select GENERIC_CPU_DEVICES if !SMP
+	select GENERIC_CPU_VULNERABILITIES
 	select GENERIC_FIND_FIRST_BIT
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_TIME_VSYSCALL
@@ -704,6 +705,51 @@
 
 	  If unsure, say Y.
 
+config KERNEL_NOBP
+	def_bool n
+	prompt "Enable modified branch prediction for the kernel by default"
+	help
+	  If this option is selected the kernel will switch to a modified
+	  branch prediction mode if the firmware interface is available.
+	  The modified branch prediction mode improves the behaviour in
+	  regard to speculative execution.
+
+	  With the option enabled the kernel parameter "nobp=0" or "nospec"
+	  can be used to run the kernel in the normal branch prediction mode.
+
+	  With the option disabled the modified branch prediction mode is
+	  enabled with the "nobp=1" kernel parameter.
+
+	  If unsure, say N.
+
+config EXPOLINE
+	def_bool n
+	prompt "Avoid speculative indirect branches in the kernel"
+	help
+	  Compile the kernel with the expoline compiler options to guard
+	  against kernel-to-user data leaks by avoiding speculative indirect
+	  branches.
+	  Requires a compiler with -mindirect-branch=thunk support for full
+	  protection. The kernel may run slower.
+
+	  If unsure, say N.
+
+choice
+	prompt "Expoline default"
+	depends on EXPOLINE
+	default EXPOLINE_FULL
+
+config EXPOLINE_OFF
+	bool "spectre_v2=off"
+
+config EXPOLINE_AUTO
+	bool "spectre_v2=auto"
+
+config EXPOLINE_FULL
+	bool "spectre_v2=on"
+
+endchoice
+
 endmenu
 
 menu "Power Management"
@@ -753,6 +799,7 @@
 config SHARED_KERNEL
 	bool "VM shared kernel support"
 	depends on !JUMP_LABEL
+	depends on !ALTERNATIVES
 	help
 	  Select this option, if you want to share the text segment of the
 	  Linux kernel between different VM guests. This reduces memory
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 54e0052..bef67c0 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -79,6 +79,16 @@
 cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack
 endif
 
+ifdef CONFIG_EXPOLINE
+  ifeq ($(call cc-option-yn,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),y)
+    CC_FLAGS_EXPOLINE := -mindirect-branch=thunk
+    CC_FLAGS_EXPOLINE += -mfunction-return=thunk
+    CC_FLAGS_EXPOLINE += -mindirect-branch-table
+    export CC_FLAGS_EXPOLINE
+    cflags-y += $(CC_FLAGS_EXPOLINE) -DCC_USING_EXPOLINE
+  endif
+endif
+
 ifdef CONFIG_FUNCTION_TRACER
 # make use of hotpatch feature if the compiler supports it
 cc_hotpatch	:= -mhotpatch=0,3
diff --git a/arch/s390/crypto/crc32be-vx.S b/arch/s390/crypto/crc32be-vx.S
index 8013989..096affb 100644
--- a/arch/s390/crypto/crc32be-vx.S
+++ b/arch/s390/crypto/crc32be-vx.S
@@ -12,6 +12,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/nospec-insn.h>
 #include <asm/vx-insn.h>
 
 /* Vector register range containing CRC-32 constants */
@@ -66,6 +67,8 @@
 
 .previous
 
+	GEN_BR_THUNK %r14
+
 .text
 /*
  * The CRC-32 function(s) use these calling conventions:
@@ -202,6 +205,6 @@
 
 .Ldone:
 	VLGVF	%r2,%v2,3
-	br	%r14
+	BR_EX	%r14
 
 .previous
diff --git a/arch/s390/crypto/crc32le-vx.S b/arch/s390/crypto/crc32le-vx.S
index 17f2504..8dc98c1 100644
--- a/arch/s390/crypto/crc32le-vx.S
+++ b/arch/s390/crypto/crc32le-vx.S
@@ -13,6 +13,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/nospec-insn.h>
 #include <asm/vx-insn.h>
 
 /* Vector register range containing CRC-32 constants */
@@ -75,6 +76,7 @@
 
 .previous
 
+	GEN_BR_THUNK %r14
 
 .text
 
@@ -263,6 +265,6 @@
 
 .Ldone:
 	VLGVF	%r2,%v2,2
-	br	%r14
+	BR_EX	%r14
 
 .previous
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 09bccb2..2a17123 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -318,7 +318,7 @@
 
 	if (sb->s_root)
 		hypfs_delete_tree(sb->s_root);
-	if (sb_info->update_file)
+	if (sb_info && sb_info->update_file)
 		hypfs_remove(sb_info->update_file);
 	kfree(sb->s_fs_info);
 	sb->s_fs_info = NULL;
diff --git a/arch/s390/include/asm/alternative-asm.h b/arch/s390/include/asm/alternative-asm.h
new file mode 100644
index 0000000..955d620
--- /dev/null
+++ b/arch/s390/include/asm/alternative-asm.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_ALTERNATIVE_ASM_H
+#define _ASM_S390_ALTERNATIVE_ASM_H
+
+#ifdef __ASSEMBLY__
+
+/*
+ * Check the length of an instruction sequence. The length may not be larger
+ * than 254 bytes and it has to be divisible by 2.
+ */
+.macro alt_len_check start,end
+	.if ( \end - \start ) > 254
+	.error "cpu alternatives does not support instructions blocks > 254 bytes\n"
+	.endif
+	.if ( \end - \start ) % 2
+	.error "cpu alternatives instructions length is odd\n"
+	.endif
+.endm
+
+/*
+ * Issue one struct alt_instr descriptor entry (need to put it into
+ * the section .altinstructions, see below). This entry contains
+ * enough information for the alternatives patching code to patch an
+ * instruction. See apply_alternatives().
+ */
+.macro alt_entry orig_start, orig_end, alt_start, alt_end, feature
+	.long	\orig_start - .
+	.long	\alt_start - .
+	.word	\feature
+	.byte	\orig_end - \orig_start
+	.byte	\alt_end - \alt_start
+.endm
+
+/*
+ * Fill up @bytes with nops. The macro emits 6-byte nop instructions
+ * for the bulk of the area, possibly followed by a 4-byte and/or
+ * a 2-byte nop if the size of the area is not divisible by 6.
+ */
+.macro alt_pad_fill bytes
+	.fill	( \bytes ) / 6, 6, 0xc0040000
+	.fill	( \bytes ) % 6 / 4, 4, 0x47000000
+	.fill	( \bytes ) % 6 % 4 / 2, 2, 0x0700
+.endm
+
+/*
+ * Fill up @bytes with nops. If the number of bytes is larger
+ * than 6, emit a jg instruction to branch over all nops, then
+ * fill an area of size (@bytes - 6) with nop instructions.
+ */
+.macro alt_pad bytes
+	.if ( \bytes > 0 )
+	.if ( \bytes > 6 )
+	jg	. + \bytes
+	alt_pad_fill \bytes - 6
+	.else
+	alt_pad_fill \bytes
+	.endif
+	.endif
+.endm
+
+/*
+ * Define an alternative between two instructions. If @feature is
+ * present, early code in apply_alternatives() replaces @oldinstr with
+ * @newinstr. The alt_pad macro takes care of proper instruction padding
+ * in case @newinstr is longer than @oldinstr.
+ */
+.macro ALTERNATIVE oldinstr, newinstr, feature
+	.pushsection .altinstr_replacement,"ax"
+770:	\newinstr
+771:	.popsection
+772:	\oldinstr
+773:	alt_len_check 770b, 771b
+	alt_len_check 772b, 773b
+	alt_pad ( ( 771b - 770b ) - ( 773b - 772b ) )
+774:	.pushsection .altinstructions,"a"
+	alt_entry 772b, 774b, 770b, 771b, \feature
+	.popsection
+.endm
+
+/*
+ * Like ALTERNATIVE, but with two replacement candidates. If @feature1 or
+ * @feature2 is present, apply_alternatives() replaces @oldinstr with
+ * @newinstr1 or @newinstr2 respectively. The alt_pad macro takes care of
+ * padding in case the chosen replacement is longer than @oldinstr.
+ */
+.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
+	.pushsection .altinstr_replacement,"ax"
+770:	\newinstr1
+771:	\newinstr2
+772:	.popsection
+773:	\oldinstr
+774:	alt_len_check 770b, 771b
+	alt_len_check 771b, 772b
+	alt_len_check 773b, 774b
+	.if ( 771b - 770b > 772b - 771b )
+	alt_pad ( ( 771b - 770b ) - ( 774b - 773b ) )
+	.else
+	alt_pad ( ( 772b - 771b ) - ( 774b - 773b ) )
+	.endif
+775:	.pushsection .altinstructions,"a"
+	alt_entry 773b, 775b, 770b, 771b,\feature1
+	alt_entry 773b, 775b, 771b, 772b,\feature2
+	.popsection
+.endm
+
+#endif	/*  __ASSEMBLY__  */
+
+#endif /* _ASM_S390_ALTERNATIVE_ASM_H */
diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h
new file mode 100644
index 0000000..a720020
--- /dev/null
+++ b/arch/s390/include/asm/alternative.h
@@ -0,0 +1,149 @@
+#ifndef _ASM_S390_ALTERNATIVE_H
+#define _ASM_S390_ALTERNATIVE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/stringify.h>
+
+struct alt_instr {
+	s32 instr_offset;	/* original instruction */
+	s32 repl_offset;	/* offset to replacement instruction */
+	u16 facility;		/* facility bit set for replacement */
+	u8  instrlen;		/* length of original instruction */
+	u8  replacementlen;	/* length of new instruction */
+} __packed;
+
+void apply_alternative_instructions(void);
+void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
+
+/*
+ * |661:       |662:	  |6620      |663:
+ * +-----------+---------------------+
+ * | oldinstr  | oldinstr_padding    |
+ * |	       +----------+----------+
+ * |	       |	  |	     |
+ * |	       | >6 bytes |6/4/2 nops|
+ * |	       |6 bytes jg----------->
+ * +-----------+---------------------+
+ *		 ^^ static padding ^^
+ *
+ * .altinstr_replacement section
+ * +---------------------+-----------+
+ * |6641:			     |6651:
+ * | alternative instr 1	     |
+ * +-----------+---------+- - - - - -+
+ * |6642:		 |6652:      |
+ * | alternative instr 2 | padding
+ * +---------------------+- - - - - -+
+ *			  ^ runtime ^
+ *
+ * .altinstructions section
+ * +---------------------------------+
+ * | alt_instr entries for each      |
+ * | alternative instr		     |
+ * +---------------------------------+
+ */
+
+#define b_altinstr(num)	"664"#num
+#define e_altinstr(num)	"665"#num
+
+#define e_oldinstr_pad_end	"663"
+#define oldinstr_len		"662b-661b"
+#define oldinstr_total_len	e_oldinstr_pad_end"b-661b"
+#define altinstr_len(num)	e_altinstr(num)"b-"b_altinstr(num)"b"
+#define oldinstr_pad_len(num) \
+	"-(((" altinstr_len(num) ")-(" oldinstr_len ")) > 0) * " \
+	"((" altinstr_len(num) ")-(" oldinstr_len "))"
+
+#define INSTR_LEN_SANITY_CHECK(len)					\
+	".if " len " > 254\n"						\
+	"\t.error \"cpu alternatives does not support instructions "	\
+		"blocks > 254 bytes\"\n"				\
+	".endif\n"							\
+	".if (" len ") %% 2\n"						\
+	"\t.error \"cpu alternatives instructions length is odd\"\n"	\
+	".endif\n"
+
+#define OLDINSTR_PADDING(oldinstr, num)					\
+	".if " oldinstr_pad_len(num) " > 6\n"				\
+	"\tjg " e_oldinstr_pad_end "f\n"				\
+	"6620:\n"							\
+	"\t.fill (" oldinstr_pad_len(num) " - (6620b-662b)) / 2, 2, 0x0700\n" \
+	".else\n"							\
+	"\t.fill " oldinstr_pad_len(num) " / 6, 6, 0xc0040000\n"	\
+	"\t.fill " oldinstr_pad_len(num) " %% 6 / 4, 4, 0x47000000\n"	\
+	"\t.fill " oldinstr_pad_len(num) " %% 6 %% 4 / 2, 2, 0x0700\n"	\
+	".endif\n"
+
+#define OLDINSTR(oldinstr, num)						\
+	"661:\n\t" oldinstr "\n662:\n"					\
+	OLDINSTR_PADDING(oldinstr, num)					\
+	e_oldinstr_pad_end ":\n"					\
+	INSTR_LEN_SANITY_CHECK(oldinstr_len)
+
+#define OLDINSTR_2(oldinstr, num1, num2)				\
+	"661:\n\t" oldinstr "\n662:\n"					\
+	".if " altinstr_len(num1) " < " altinstr_len(num2) "\n"		\
+	OLDINSTR_PADDING(oldinstr, num2)				\
+	".else\n"							\
+	OLDINSTR_PADDING(oldinstr, num1)				\
+	".endif\n"							\
+	e_oldinstr_pad_end ":\n"					\
+	INSTR_LEN_SANITY_CHECK(oldinstr_len)
+
+#define ALTINSTR_ENTRY(facility, num)					\
+	"\t.long 661b - .\n"			/* old instruction */	\
+	"\t.long " b_altinstr(num)"b - .\n"	/* alt instruction */	\
+	"\t.word " __stringify(facility) "\n"	/* facility bit    */	\
+	"\t.byte " oldinstr_total_len "\n"	/* source len	   */	\
+	"\t.byte " altinstr_len(num) "\n"	/* alt instruction len */
+
+#define ALTINSTR_REPLACEMENT(altinstr, num)	/* replacement */	\
+	b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n"	\
+	INSTR_LEN_SANITY_CHECK(altinstr_len(num))
+
+/* alternative assembly primitive: */
+#define ALTERNATIVE(oldinstr, altinstr, facility) \
+	".pushsection .altinstr_replacement, \"ax\"\n"			\
+	ALTINSTR_REPLACEMENT(altinstr, 1)				\
+	".popsection\n"							\
+	OLDINSTR(oldinstr, 1)						\
+	".pushsection .altinstructions,\"a\"\n"				\
+	ALTINSTR_ENTRY(facility, 1)					\
+	".popsection\n"
+
+#define ALTERNATIVE_2(oldinstr, altinstr1, facility1, altinstr2, facility2)\
+	".pushsection .altinstr_replacement, \"ax\"\n"			\
+	ALTINSTR_REPLACEMENT(altinstr1, 1)				\
+	ALTINSTR_REPLACEMENT(altinstr2, 2)				\
+	".popsection\n"							\
+	OLDINSTR_2(oldinstr, 1, 2)					\
+	".pushsection .altinstructions,\"a\"\n"				\
+	ALTINSTR_ENTRY(facility1, 1)					\
+	ALTINSTR_ENTRY(facility2, 2)					\
+	".popsection\n"
+
+/*
+ * Alternative instructions for different CPU types or capabilities.
+ *
+ * This makes it possible to use optimized instructions even on generic
+ * binary kernels.
+ *
+ * oldinstr is padded with jump and nops at compile time if altinstr is
+ * longer. altinstr is padded with jump and nops at run-time during patching.
+ *
+ * For non-barrier-like inlines, please define new variants
+ * without volatile and memory clobber.
+ */
+#define alternative(oldinstr, altinstr, facility)			\
+	asm volatile(ALTERNATIVE(oldinstr, altinstr, facility) : : : "memory")
+
+#define alternative_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \
+	asm volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1,	    \
+				   altinstr2, facility2) ::: "memory")
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_S390_ALTERNATIVE_H */
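
For comparison with the hand-rolled .altinstructions blocks that the entry.S hunks later in this series build in assembly, the C-level macro could express the same facility-82 patch roughly as in the sketch below. bp_off() is not part of this patch; the two encodings are the ones the BPOFF macro in entry.S uses (0x47000000 is a 4-byte nop, 0xb2e8c000 is the replacement emitted when facility 82 is installed):

static inline void bp_off(void)
{
	/* keep the 4-byte nop unless facility bit 82 is set at patch time */
	alternative(".long 0x47000000", ".long 0xb2e8c000", 82);
}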
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 5c8db3c..03b2e5b 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -48,6 +48,30 @@
 #define __smp_mb__before_atomic()	barrier()
 #define __smp_mb__after_atomic()	barrier()
 
+/**
+ * array_index_mask_nospec - generate a mask for array_index_nospec() that is
+ * ~0UL when the bounds check succeeds and 0 otherwise
+ * @index: array element index
+ * @size: number of elements in array
+ */
+#define array_index_mask_nospec array_index_mask_nospec
+static inline unsigned long array_index_mask_nospec(unsigned long index,
+						    unsigned long size)
+{
+	unsigned long mask;
+
+	if (__builtin_constant_p(size) && size > 0) {
+		asm("	clgr	%2,%1\n"
+		    "	slbgr	%0,%0\n"
+		    :"=d" (mask) : "d" (size-1), "d" (index) :"cc");
+		return mask;
+	}
+	asm("	clgr	%1,%2\n"
+	    "	slbgr	%0,%0\n"
+	    :"=d" (mask) : "d" (size), "d" (index) :"cc");
+	return ~mask;
+}
+
 #include <asm-generic/barrier.h>
 
 #endif /* __ASM_BARRIER_H */
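
The mask is meant to be applied to the index itself, as the generic array_index_nospec() helper does: ~0UL leaves an in-bounds index untouched, 0 forces an out-of-bounds index to element zero even under speculative execution. A minimal sketch of that use (the wrapper name is illustrative):

static inline unsigned long clamp_index_nospec(unsigned long index,
					       unsigned long size)
{
	/* mask is ~0UL when index < size, 0 otherwise */
	unsigned long mask = array_index_mask_nospec(index, size);

	return index & mask;
}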
diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
index 09b406d..5811e78 100644
--- a/arch/s390/include/asm/facility.h
+++ b/arch/s390/include/asm/facility.h
@@ -15,7 +15,25 @@
 #include <linux/preempt.h>
 #include <asm/lowcore.h>
 
-#define MAX_FACILITY_BIT (256*8)	/* stfle_fac_list has 256 bytes */
+#define MAX_FACILITY_BIT (sizeof(((struct lowcore *)0)->stfle_fac_list) * 8)
+
+static inline void __set_facility(unsigned long nr, void *facilities)
+{
+	unsigned char *ptr = (unsigned char *) facilities;
+
+	if (nr >= MAX_FACILITY_BIT)
+		return;
+	ptr[nr >> 3] |= 0x80 >> (nr & 7);
+}
+
+static inline void __clear_facility(unsigned long nr, void *facilities)
+{
+	unsigned char *ptr = (unsigned char *) facilities;
+
+	if (nr >= MAX_FACILITY_BIT)
+		return;
+	ptr[nr >> 3] &= ~(0x80 >> (nr & 7));
+}
 
 static inline int __test_facility(unsigned long nr, void *facilities)
 {
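
The helpers share the MSB-first bit numbering of the existing __test_facility(): facility n lives in byte n >> 3 under mask 0x80 >> (n & 7). For facility bit 82, which later hunks in this series key their alternatives on, the arithmetic works out as in this illustrative sketch:

static inline int example_facility_82_installed(void)
{
	unsigned char *fac = (unsigned char *) S390_lowcore.alt_stfle_fac_list;

	/* 82 >> 3 == 10 and 0x80 >> (82 & 7) == 0x80 >> 2 == 0x20 */
	return (fac[10] & 0x20) != 0;
}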
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index a4811aa..8f8eec9e 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -21,17 +21,12 @@
 		: "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
 		  "m" (*uaddr) : "cc");
 
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+		u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, newval, ret;
 
 	load_kernel_asce();
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
 
 	pagefault_disable();
 	switch (op) {
@@ -60,17 +55,9 @@
 	}
 	pagefault_enable();
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
-		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
-		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
-		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
-		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
-		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
-		default: ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
+
 	return ret;
 }
 
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index a41faf3..5792590 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -181,7 +181,8 @@
 	__u16	ipa;			/* 0x0056 */
 	__u32	ipb;			/* 0x0058 */
 	__u32	scaoh;			/* 0x005c */
-	__u8	reserved60;		/* 0x0060 */
+#define FPF_BPBC 	0x20
+	__u8	fpf;			/* 0x0060 */
 	__u8	ecb;			/* 0x0061 */
 	__u8    ecb2;                   /* 0x0062 */
 #define ECB3_AES 0x04
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 7b93b78..ad4e0ce 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -135,7 +135,9 @@
 	/* Per cpu primary space access list */
 	__u32	paste[16];			/* 0x0400 */
 
-	__u8	pad_0x04c0[0x0e00-0x0440];	/* 0x0440 */
+	/* br %r1 trampoline */
+	__u16	br_r1_trampoline;		/* 0x0440 */
+	__u8	pad_0x0442[0x0e00-0x0442];	/* 0x0442 */
 
 	/*
 	 * 0xe00 contains the address of the IPL Parameter Information
@@ -150,7 +152,8 @@
 	__u8	pad_0x0e20[0x0f00-0x0e20];	/* 0x0e20 */
 
 	/* Extended facility list */
-	__u64	stfle_fac_list[32];		/* 0x0f00 */
+	__u64	stfle_fac_list[16];		/* 0x0f00 */
+	__u64	alt_stfle_fac_list[16];		/* 0x0f80 */
 	__u8	pad_0x1000[0x11b0-0x1000];	/* 0x1000 */
 
 	/* Pointer to vector register save area */
diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h
new file mode 100644
index 0000000..b4bd8c4
--- /dev/null
+++ b/arch/s390/include/asm/nospec-branch.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_EXPOLINE_H
+#define _ASM_S390_EXPOLINE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+extern int nospec_disable;
+
+void nospec_init_branches(void);
+void nospec_auto_detect(void);
+void nospec_revert(s32 *start, s32 *end);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_S390_EXPOLINE_H */
diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h
new file mode 100644
index 0000000..9a56e73
--- /dev/null
+++ b/arch/s390/include/asm/nospec-insn.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_NOSPEC_ASM_H
+#define _ASM_S390_NOSPEC_ASM_H
+
+#include <asm/alternative-asm.h>
+#include <asm/asm-offsets.h>
+
+#ifdef __ASSEMBLY__
+
+#ifdef CONFIG_EXPOLINE
+
+_LC_BR_R1 = __LC_BR_R1
+
+/*
+ * The expoline macros are used to create thunks in the same format
+ * as gcc generates them. The 'comdat' section flag makes sure that
+ * the various thunks are merged into a single copy.
+ */
+	.macro __THUNK_PROLOG_NAME name
+	.pushsection .text.\name,"axG",@progbits,\name,comdat
+	.globl \name
+	.hidden \name
+	.type \name,@function
+\name:
+	.cfi_startproc
+	.endm
+
+	.macro __THUNK_EPILOG
+	.cfi_endproc
+	.popsection
+	.endm
+
+	.macro __THUNK_PROLOG_BR r1,r2
+	__THUNK_PROLOG_NAME __s390x_indirect_jump_r\r2\()use_r\r1
+	.endm
+
+	.macro __THUNK_PROLOG_BC d0,r1,r2
+	__THUNK_PROLOG_NAME __s390x_indirect_branch_\d0\()_\r2\()use_\r1
+	.endm
+
+	.macro __THUNK_BR r1,r2
+	jg	__s390x_indirect_jump_r\r2\()use_r\r1
+	.endm
+
+	.macro __THUNK_BC d0,r1,r2
+	jg	__s390x_indirect_branch_\d0\()_\r2\()use_\r1
+	.endm
+
+	.macro __THUNK_BRASL r1,r2,r3
+	brasl	\r1,__s390x_indirect_jump_r\r3\()use_r\r2
+	.endm
+
+	.macro	__DECODE_RR expand,reg,ruse
+	.set __decode_fail,1
+	.irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+	.ifc \reg,%r\r1
+	.irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+	.ifc \ruse,%r\r2
+	\expand \r1,\r2
+	.set __decode_fail,0
+	.endif
+	.endr
+	.endif
+	.endr
+	.if __decode_fail == 1
+	.error "__DECODE_RR failed"
+	.endif
+	.endm
+
+	.macro	__DECODE_RRR expand,rsave,rtarget,ruse
+	.set __decode_fail,1
+	.irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+	.ifc \rsave,%r\r1
+	.irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+	.ifc \rtarget,%r\r2
+	.irp r3,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+	.ifc \ruse,%r\r3
+	\expand \r1,\r2,\r3
+	.set __decode_fail,0
+	.endif
+	.endr
+	.endif
+	.endr
+	.endif
+	.endr
+	.if __decode_fail == 1
+	.error "__DECODE_RRR failed"
+	.endif
+	.endm
+
+	.macro	__DECODE_DRR expand,disp,reg,ruse
+	.set __decode_fail,1
+	.irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+	.ifc \reg,%r\r1
+	.irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+	.ifc \ruse,%r\r2
+	\expand \disp,\r1,\r2
+	.set __decode_fail,0
+	.endif
+	.endr
+	.endif
+	.endr
+	.if __decode_fail == 1
+	.error "__DECODE_DRR failed"
+	.endif
+	.endm
+
+	.macro __THUNK_EX_BR reg,ruse
+	# Be very careful when adding instructions to this macro!
+	# The ALTERNATIVE replacement code has a .+10 which targets
+	# the "br \reg" after the code has been patched.
+#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+	exrl	0,555f
+	j	.
+#else
+	.ifc \reg,%r1
+	ALTERNATIVE "ex %r0,_LC_BR_R1", ".insn ril,0xc60000000000,0,.+10", 35
+	j	.
+	.else
+	larl	\ruse,555f
+	ex	0,0(\ruse)
+	j	.
+	.endif
+#endif
+555:	br	\reg
+	.endm
+
+	.macro __THUNK_EX_BC disp,reg,ruse
+#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+	exrl	0,556f
+	j	.
+#else
+	larl	\ruse,556f
+	ex	0,0(\ruse)
+	j	.
+#endif
+556:	b	\disp(\reg)
+	.endm
+
+	.macro GEN_BR_THUNK reg,ruse=%r1
+	__DECODE_RR __THUNK_PROLOG_BR,\reg,\ruse
+	__THUNK_EX_BR \reg,\ruse
+	__THUNK_EPILOG
+	.endm
+
+	.macro GEN_B_THUNK disp,reg,ruse=%r1
+	__DECODE_DRR __THUNK_PROLOG_BC,\disp,\reg,\ruse
+	__THUNK_EX_BC \disp,\reg,\ruse
+	__THUNK_EPILOG
+	.endm
+
+	.macro BR_EX reg,ruse=%r1
+557:	__DECODE_RR __THUNK_BR,\reg,\ruse
+	.pushsection .s390_indirect_branches,"a",@progbits
+	.long	557b-.
+	.popsection
+	.endm
+
+	 .macro B_EX disp,reg,ruse=%r1
+558:	__DECODE_DRR __THUNK_BC,\disp,\reg,\ruse
+	.pushsection .s390_indirect_branches,"a",@progbits
+	.long	558b-.
+	.popsection
+	.endm
+
+	.macro BASR_EX rsave,rtarget,ruse=%r1
+559:	__DECODE_RRR __THUNK_BRASL,\rsave,\rtarget,\ruse
+	.pushsection .s390_indirect_branches,"a",@progbits
+	.long	559b-.
+	.popsection
+	.endm
+
+#else
+	.macro GEN_BR_THUNK reg,ruse=%r1
+	.endm
+
+	.macro GEN_B_THUNK disp,reg,ruse=%r1
+	.endm
+
+	 .macro BR_EX reg,ruse=%r1
+	br	\reg
+	.endm
+
+	 .macro B_EX disp,reg,ruse=%r1
+	b	\disp(\reg)
+	.endm
+
+	.macro BASR_EX rsave,rtarget,ruse=%r1
+	basr	\rsave,\rtarget
+	.endm
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_S390_NOSPEC_ASM_H */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 6bcbbec..d584212 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -84,6 +84,7 @@
 extern const struct seq_operations cpuinfo_op;
 extern int sysctl_ieee_emulation_warnings;
 extern void execve_tail(void);
+extern void __bpon(void);
 
 /*
  * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
@@ -359,6 +360,9 @@
 	memcpy_absolute(&(dest), &__tmp, sizeof(__tmp));	\
 }
 
+extern int s390_isolate_bp(void);
+extern int s390_isolate_bp_guest(void);
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __ASM_S390_PROCESSOR_H */
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index f15c039..84f2ae4 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -79,6 +79,8 @@
 #define TIF_SECCOMP		5	/* secure computing */
 #define TIF_SYSCALL_TRACEPOINT	6	/* syscall tracepoint instrumentation */
 #define TIF_UPROBE		7	/* breakpointed or single-stepping */
+#define TIF_ISOLATE_BP		8	/* Run process with isolated BP */
+#define TIF_ISOLATE_BP_GUEST	9	/* Run KVM guests with isolated BP */
 #define TIF_31BIT		16	/* 32bit process */
 #define TIF_MEMDIE		17	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	18	/* restore signal mask in do_signal() */
@@ -94,6 +96,8 @@
 #define _TIF_SECCOMP		_BITUL(TIF_SECCOMP)
 #define _TIF_SYSCALL_TRACEPOINT	_BITUL(TIF_SYSCALL_TRACEPOINT)
 #define _TIF_UPROBE		_BITUL(TIF_UPROBE)
+#define _TIF_ISOLATE_BP		_BITUL(TIF_ISOLATE_BP)
+#define _TIF_ISOLATE_BP_GUEST	_BITUL(TIF_ISOLATE_BP_GUEST)
 #define _TIF_31BIT		_BITUL(TIF_31BIT)
 #define _TIF_SINGLE_STEP	_BITUL(TIF_SINGLE_STEP)
 
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index a2ffec4..81c02e1 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -197,6 +197,7 @@
 #define KVM_SYNC_VRS    (1UL << 6)
 #define KVM_SYNC_RICCB  (1UL << 7)
 #define KVM_SYNC_FPRS   (1UL << 8)
+#define KVM_SYNC_BPBC	(1UL << 10)
 /* definition of registers in kvm_run */
 struct kvm_sync_regs {
 	__u64 prefix;	/* prefix register */
@@ -217,7 +218,9 @@
 	};
 	__u8  reserved[512];	/* for future vector expansion */
 	__u32 fpc;		/* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
-	__u8 padding[52];	/* riccb needs to be 64byte aligned */
+	__u8 bpbc : 1;		/* bp mode */
+	__u8 reserved2 : 7;
+	__u8 padding1[51];	/* riccb needs to be 64byte aligned */
 	__u8 riccb[64];		/* runtime instrumentation controls block */
 };
 
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 1f0fe98..5b13997 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -42,6 +42,7 @@
 CFLAGS_REMOVE_sclp.o	+= $(CC_FLAGS_MARCH)
 CFLAGS_sclp.o		+= -march=z900
 CFLAGS_REMOVE_als.o	+= $(CC_FLAGS_MARCH)
+CFLAGS_REMOVE_als.o	+= $(CC_FLAGS_EXPOLINE)
 CFLAGS_als.o		+= -march=z900
 AFLAGS_REMOVE_head.o	+= $(CC_FLAGS_MARCH)
 AFLAGS_head.o		+= -march=z900
@@ -57,10 +58,14 @@
 obj-y	+= debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o als.o
 obj-y	+= sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
 obj-y	+= runtime_instr.o cache.o fpu.o dumpstack.o
-obj-y	+= entry.o reipl.o relocate_kernel.o
+obj-y	+= entry.o reipl.o relocate_kernel.o alternative.o
+obj-y	+= nospec-branch.o
 
 extra-y				+= head.o head64.o vmlinux.lds
 
+obj-$(CONFIG_SYSFS)		+= nospec-sysfs.o
+CFLAGS_REMOVE_nospec-branch.o	+= $(CC_FLAGS_EXPOLINE)
+
 obj-$(CONFIG_MODULES)		+= module.o
 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_SCHED_TOPOLOGY)	+= topology.o
diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
new file mode 100644
index 0000000..b57b293
--- /dev/null
+++ b/arch/s390/kernel/alternative.c
@@ -0,0 +1,112 @@
+#include <linux/module.h>
+#include <asm/alternative.h>
+#include <asm/facility.h>
+#include <asm/nospec-branch.h>
+
+#define MAX_PATCH_LEN (255 - 1)
+
+static int __initdata_or_module alt_instr_disabled;
+
+static int __init disable_alternative_instructions(char *str)
+{
+	alt_instr_disabled = 1;
+	return 0;
+}
+
+early_param("noaltinstr", disable_alternative_instructions);
+
+struct brcl_insn {
+	u16 opc;
+	s32 disp;
+} __packed;
+
+static u16 __initdata_or_module nop16 = 0x0700;
+static u32 __initdata_or_module nop32 = 0x47000000;
+static struct brcl_insn __initdata_or_module nop48 = {
+	0xc004, 0
+};
+
+static const void *nops[] __initdata_or_module = {
+	&nop16,
+	&nop32,
+	&nop48
+};
+
+static void __init_or_module add_jump_padding(void *insns, unsigned int len)
+{
+	struct brcl_insn brcl = {
+		0xc0f4,
+		len / 2
+	};
+
+	memcpy(insns, &brcl, sizeof(brcl));
+	insns += sizeof(brcl);
+	len -= sizeof(brcl);
+
+	while (len > 0) {
+		memcpy(insns, &nop16, 2);
+		insns += 2;
+		len -= 2;
+	}
+}
+
+static void __init_or_module add_padding(void *insns, unsigned int len)
+{
+	if (len > 6)
+		add_jump_padding(insns, len);
+	else if (len >= 2)
+		memcpy(insns, nops[len / 2 - 1], len);
+}
+
+static void __init_or_module __apply_alternatives(struct alt_instr *start,
+						  struct alt_instr *end)
+{
+	struct alt_instr *a;
+	u8 *instr, *replacement;
+	u8 insnbuf[MAX_PATCH_LEN];
+
+	/*
+	 * The scan order should be from start to end. Alternatives scanned
+	 * later can overwrite alternatives scanned earlier.
+	 */
+	for (a = start; a < end; a++) {
+		int insnbuf_sz = 0;
+
+		instr = (u8 *)&a->instr_offset + a->instr_offset;
+		replacement = (u8 *)&a->repl_offset + a->repl_offset;
+
+		if (!__test_facility(a->facility,
+				     S390_lowcore.alt_stfle_fac_list))
+			continue;
+
+		if (unlikely(a->instrlen % 2 || a->replacementlen % 2)) {
+			WARN_ONCE(1, "cpu alternatives instructions length is "
+				     "odd, skipping patching\n");
+			continue;
+		}
+
+		memcpy(insnbuf, replacement, a->replacementlen);
+		insnbuf_sz = a->replacementlen;
+
+		if (a->instrlen > a->replacementlen) {
+			add_padding(insnbuf + a->replacementlen,
+				    a->instrlen - a->replacementlen);
+			insnbuf_sz += a->instrlen - a->replacementlen;
+		}
+
+		s390_kernel_write(instr, insnbuf, insnbuf_sz);
+	}
+}
+
+void __init_or_module apply_alternatives(struct alt_instr *start,
+					 struct alt_instr *end)
+{
+	if (!alt_instr_disabled)
+		__apply_alternatives(start, end);
+}
+
+extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
+void __init apply_alternative_instructions(void)
+{
+	apply_alternatives(__alt_instructions, __alt_instructions_end);
+}
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index f3df9e0..85c8ead 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -175,6 +175,7 @@
 	OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags);
 	OFFSET(__LC_GMAP, lowcore, gmap);
 	OFFSET(__LC_PASTE, lowcore, paste);
+	OFFSET(__LC_BR_R1, lowcore, br_r1_trampoline);
 	/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
 	OFFSET(__LC_DUMP_REIPL, lowcore, ipib);
 	/* hardware defined lowcore locations 0x1000 - 0x18ff */
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index 326f717..61fca54 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -8,18 +8,22 @@
 
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
+#include <asm/nospec-insn.h>
 #include <asm/ptrace.h>
 #include <asm/sigp.h>
 
+	GEN_BR_THUNK %r9
+	GEN_BR_THUNK %r14
+
 ENTRY(s390_base_mcck_handler)
 	basr	%r13,0
 0:	lg	%r15,__LC_PANIC_STACK	# load panic stack
 	aghi	%r15,-STACK_FRAME_OVERHEAD
 	larl	%r1,s390_base_mcck_handler_fn
-	lg	%r1,0(%r1)
-	ltgr	%r1,%r1
+	lg	%r9,0(%r1)
+	ltgr	%r9,%r9
 	jz	1f
-	basr	%r14,%r1
+	BASR_EX	%r14,%r9
 1:	la	%r1,4095
 	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
 	lpswe	__LC_MCK_OLD_PSW
@@ -36,10 +40,10 @@
 	basr	%r13,0
 0:	aghi	%r15,-STACK_FRAME_OVERHEAD
 	larl	%r1,s390_base_ext_handler_fn
-	lg	%r1,0(%r1)
-	ltgr	%r1,%r1
+	lg	%r9,0(%r1)
+	ltgr	%r9,%r9
 	jz	1f
-	basr	%r14,%r1
+	BASR_EX	%r14,%r9
 1:	lmg	%r0,%r15,__LC_SAVE_AREA_ASYNC
 	ni	__LC_EXT_OLD_PSW+1,0xfd	# clear wait state bit
 	lpswe	__LC_EXT_OLD_PSW
@@ -56,10 +60,10 @@
 	basr	%r13,0
 0:	aghi	%r15,-STACK_FRAME_OVERHEAD
 	larl	%r1,s390_base_pgm_handler_fn
-	lg	%r1,0(%r1)
-	ltgr	%r1,%r1
+	lg	%r9,0(%r1)
+	ltgr	%r9,%r9
 	jz	1f
-	basr	%r14,%r1
+	BASR_EX	%r14,%r9
 	lmg	%r0,%r15,__LC_SAVE_AREA_SYNC
 	lpswe	__LC_PGM_OLD_PSW
 1:	lpswe	disabled_wait_psw-0b(%r13)
@@ -116,7 +120,7 @@
 	larl	%r4,.Lcontinue_psw	# Restore PSW flags
 	lpswe	0(%r4)
 .Lcontinue:
-	br	%r14
+	BR_EX	%r14
 .align 16
 .Lrestart_psw:
 	.long	0x00080000,0x80000000 + .Lrestart_part2
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 6257898..0c7a7d5 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -299,6 +299,11 @@
 {
 	stfle(S390_lowcore.stfle_fac_list,
 	      ARRAY_SIZE(S390_lowcore.stfle_fac_list));
+	memcpy(S390_lowcore.alt_stfle_fac_list,
+	       S390_lowcore.stfle_fac_list,
+	       sizeof(S390_lowcore.alt_stfle_fac_list));
+	if (!IS_ENABLED(CONFIG_KERNEL_NOBP))
+		__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
 }
 
 static __init void detect_diag9c(void)
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 3bc2825..a4fd000 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -24,6 +24,7 @@
 #include <asm/setup.h>
 #include <asm/nmi.h>
 #include <asm/export.h>
+#include <asm/nospec-insn.h>
 
 __PT_R0      =	__PT_GPRS
 __PT_R1      =	__PT_GPRS + 8
@@ -105,6 +106,7 @@
 	j	3f
 1:	LAST_BREAK %r14
 	UPDATE_VTIME %r14,%r15,\timer
+	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
 2:	lg	%r15,__LC_ASYNC_STACK	# load async stack
 3:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 	.endm
@@ -163,6 +165,72 @@
 		tm	off+\addr, \mask
 	.endm
 
+	.macro BPOFF
+	.pushsection .altinstr_replacement, "ax"
+660:	.long	0xb2e8c000
+	.popsection
+661:	.long	0x47000000
+	.pushsection .altinstructions, "a"
+	.long 661b - .
+	.long 660b - .
+	.word 82
+	.byte 4
+	.byte 4
+	.popsection
+	.endm
+
+	.macro BPON
+	.pushsection .altinstr_replacement, "ax"
+662:	.long	0xb2e8d000
+	.popsection
+663:	.long	0x47000000
+	.pushsection .altinstructions, "a"
+	.long 663b - .
+	.long 662b - .
+	.word 82
+	.byte 4
+	.byte 4
+	.popsection
+	.endm
+
+	.macro BPENTER tif_ptr,tif_mask
+	.pushsection .altinstr_replacement, "ax"
+662:	.word	0xc004, 0x0000, 0x0000	# 6 byte nop
+	.word	0xc004, 0x0000, 0x0000	# 6 byte nop
+	.popsection
+664:	TSTMSK	\tif_ptr,\tif_mask
+	jz	. + 8
+	.long	0xb2e8d000
+	.pushsection .altinstructions, "a"
+	.long 664b - .
+	.long 662b - .
+	.word 82
+	.byte 12
+	.byte 12
+	.popsection
+	.endm
+
+	.macro BPEXIT tif_ptr,tif_mask
+	TSTMSK	\tif_ptr,\tif_mask
+	.pushsection .altinstr_replacement, "ax"
+662:	jnz	. + 8
+	.long	0xb2e8d000
+	.popsection
+664:	jz	. + 8
+	.long	0xb2e8c000
+	.pushsection .altinstructions, "a"
+	.long 664b - .
+	.long 662b - .
+	.word 82
+	.byte 8
+	.byte 8
+	.popsection
+	.endm
+
+	GEN_BR_THUNK %r9
+	GEN_BR_THUNK %r14
+	GEN_BR_THUNK %r14,%r11
+
 	.section .kprobes.text, "ax"
 .Ldummy:
 	/*
@@ -175,6 +243,11 @@
 	 */
 	nop	0
 
+ENTRY(__bpon)
+	.globl __bpon
+	BPON
+	BR_EX	%r14
+
 /*
  * Scheduler resume function, called by switch_to
  *  gpr2 = (task_struct *) prev
@@ -201,9 +274,9 @@
 	mvc	__LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
 	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
 	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
-	bzr	%r14
+	jz	0f
 	.insn	s,0xb2800000,__LC_LPP		# set program parameter
-	br	%r14
+0:	BR_EX	%r14
 
 .L__critical_start:
 
@@ -215,9 +288,11 @@
  */
 ENTRY(sie64a)
 	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
+	lg	%r12,__LC_CURRENT
 	stg	%r2,__SF_EMPTY(%r15)		# save control block pointer
 	stg	%r3,__SF_EMPTY+8(%r15)		# save guest register save area
 	xc	__SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0
+	mvc	__SF_EMPTY+24(8,%r15),__TI_flags(%r12) # copy thread flags
 	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU		# load guest fp/vx registers ?
 	jno	.Lsie_load_guest_gprs
 	brasl	%r14,load_fpu_regs		# load guest fp/vx regs
@@ -234,7 +309,11 @@
 	jnz	.Lsie_skip
 	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
 	jo	.Lsie_skip			# exit if fp/vx regs changed
+	BPEXIT	__SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
 	sie	0(%r14)
+.Lsie_exit:
+	BPOFF
+	BPENTER	__SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
 .Lsie_skip:
 	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
 	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
@@ -255,9 +334,15 @@
 sie_exit:
 	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
 	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
+	xgr	%r0,%r0				# clear guest registers to
+	xgr	%r1,%r1				# prevent speculative use
+	xgr	%r2,%r2
+	xgr	%r3,%r3
+	xgr	%r4,%r4
+	xgr	%r5,%r5
 	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
 	lg	%r2,__SF_EMPTY+16(%r15)		# return exit reason code
-	br	%r14
+	BR_EX	%r14
 .Lsie_fault:
 	lghi	%r14,-EFAULT
 	stg	%r14,__SF_EMPTY+16(%r15)	# set exit reason code
@@ -280,6 +365,7 @@
 	stpt	__LC_SYNC_ENTER_TIMER
 .Lsysc_stmg:
 	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
+	BPOFF
 	lg	%r10,__LC_LAST_BREAK
 	lg	%r12,__LC_THREAD_INFO
 	lghi	%r14,_PIF_SYSCALL
@@ -289,12 +375,15 @@
 	LAST_BREAK %r13
 .Lsysc_vtime:
 	UPDATE_VTIME %r10,%r13,__LC_SYNC_ENTER_TIMER
+	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
 	stmg	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
 	mvc	__PT_PSW(16,%r11),__LC_SVC_OLD_PSW
 	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
 	stg	%r14,__PT_FLAGS(%r11)
 .Lsysc_do_svc:
+	# clear user controlled register to prevent speculative use
+	xgr	%r0,%r0
 	lg	%r10,__TI_sysc_table(%r12)	# address of system call table
 	llgh	%r8,__PT_INT_CODE+2(%r11)
 	slag	%r8,%r8,2			# shift and test for svc 0
@@ -312,7 +401,7 @@
 	lgf	%r9,0(%r8,%r10)			# get system call add.
 	TSTMSK	__TI_flags(%r12),_TIF_TRACE
 	jnz	.Lsysc_tracesys
-	basr	%r14,%r9			# call sys_xxxx
+	BASR_EX	%r14,%r9			# call sys_xxxx
 	stg	%r2,__PT_R2(%r11)		# store return value
 
 .Lsysc_return:
@@ -324,6 +413,7 @@
 	jnz	.Lsysc_work			# check for work
 	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
 	jnz	.Lsysc_work
+	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
 .Lsysc_restore:
 	lg	%r14,__LC_VDSO_PER_CPU
 	lmg	%r0,%r10,__PT_R0(%r11)
@@ -451,7 +541,7 @@
 	lmg	%r3,%r7,__PT_R3(%r11)
 	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
 	lg	%r2,__PT_ORIG_GPR2(%r11)
-	basr	%r14,%r9		# call sys_xxx
+	BASR_EX	%r14,%r9		# call sys_xxx
 	stg	%r2,__PT_R2(%r11)	# store return value
 .Lsysc_tracenogo:
 	TSTMSK	__TI_flags(%r12),_TIF_TRACE
@@ -475,7 +565,7 @@
 	lmg	%r9,%r10,__PT_R9(%r11)	# load gprs
 ENTRY(kernel_thread_starter)
 	la	%r2,0(%r10)
-	basr	%r14,%r9
+	BASR_EX	%r14,%r9
 	j	.Lsysc_tracenogo
 
 /*
@@ -484,6 +574,7 @@
 
 ENTRY(pgm_check_handler)
 	stpt	__LC_SYNC_ENTER_TIMER
+	BPOFF
 	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
 	lg	%r10,__LC_LAST_BREAK
 	lg	%r12,__LC_THREAD_INFO
@@ -508,6 +599,7 @@
 	j	3f
 2:	LAST_BREAK %r14
 	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
+	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
 	lg	%r15,__LC_KERNEL_STACK
 	lg	%r14,__TI_task(%r12)
 	aghi	%r14,__TASK_thread	# pointer to thread_struct
@@ -517,6 +609,15 @@
 	mvc	__THREAD_trap_tdb(256,%r14),0(%r13)
 3:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 	stmg	%r0,%r7,__PT_R0(%r11)
+	# clear user controlled registers to prevent speculative use
+	xgr	%r0,%r0
+	xgr	%r1,%r1
+	xgr	%r2,%r2
+	xgr	%r3,%r3
+	xgr	%r4,%r4
+	xgr	%r5,%r5
+	xgr	%r6,%r6
+	xgr	%r7,%r7
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
 	stmg	%r8,%r9,__PT_PSW(%r11)
 	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
@@ -538,9 +639,9 @@
 	nill	%r10,0x007f
 	sll	%r10,2
 	je	.Lpgm_return
-	lgf	%r1,0(%r10,%r1)		# load address of handler routine
+	lgf	%r9,0(%r10,%r1)		# load address of handler routine
 	lgr	%r2,%r11		# pass pointer to pt_regs
-	basr	%r14,%r1		# branch to interrupt-handler
+	BASR_EX	%r14,%r9		# branch to interrupt-handler
 .Lpgm_return:
 	LOCKDEP_SYS_EXIT
 	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
@@ -573,6 +674,7 @@
 ENTRY(io_int_handler)
 	STCK	__LC_INT_CLOCK
 	stpt	__LC_ASYNC_ENTER_TIMER
+	BPOFF
 	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
 	lg	%r10,__LC_LAST_BREAK
 	lg	%r12,__LC_THREAD_INFO
@@ -580,6 +682,16 @@
 	lmg	%r8,%r9,__LC_IO_OLD_PSW
 	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
 	stmg	%r0,%r7,__PT_R0(%r11)
+	# clear user controlled registers to prevent speculative use
+	xgr	%r0,%r0
+	xgr	%r1,%r1
+	xgr	%r2,%r2
+	xgr	%r3,%r3
+	xgr	%r4,%r4
+	xgr	%r5,%r5
+	xgr	%r6,%r6
+	xgr	%r7,%r7
+	xgr	%r10,%r10
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
 	stmg	%r8,%r9,__PT_PSW(%r11)
 	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
@@ -614,9 +726,13 @@
 	lg	%r14,__LC_VDSO_PER_CPU
 	lmg	%r0,%r10,__PT_R0(%r11)
 	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
+	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
+	jno	.Lio_exit_kernel
+	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
 .Lio_exit_timer:
 	stpt	__LC_EXIT_TIMER
 	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+.Lio_exit_kernel:
 	lmg	%r11,%r15,__PT_R11(%r11)
 	lpswe	__LC_RETURN_PSW
 .Lio_done:
@@ -748,6 +864,7 @@
 ENTRY(ext_int_handler)
 	STCK	__LC_INT_CLOCK
 	stpt	__LC_ASYNC_ENTER_TIMER
+	BPOFF
 	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
 	lg	%r10,__LC_LAST_BREAK
 	lg	%r12,__LC_THREAD_INFO
@@ -755,6 +872,16 @@
 	lmg	%r8,%r9,__LC_EXT_OLD_PSW
 	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
 	stmg	%r0,%r7,__PT_R0(%r11)
+	# clear user controlled registers to prevent speculative use
+	xgr	%r0,%r0
+	xgr	%r1,%r1
+	xgr	%r2,%r2
+	xgr	%r3,%r3
+	xgr	%r4,%r4
+	xgr	%r5,%r5
+	xgr	%r6,%r6
+	xgr	%r7,%r7
+	xgr	%r10,%r10
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
 	stmg	%r8,%r9,__PT_PSW(%r11)
 	lghi	%r1,__LC_EXT_PARAMS2
@@ -787,11 +914,12 @@
 .Lpsw_idle_stcctm:
 #endif
 	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
+	BPON
 	STCK	__CLOCK_IDLE_ENTER(%r2)
 	stpt	__TIMER_IDLE_ENTER(%r2)
 .Lpsw_idle_lpsw:
 	lpswe	__SF_EMPTY(%r15)
-	br	%r14
+	BR_EX	%r14
 .Lpsw_idle_end:
 
 /*
@@ -805,7 +933,7 @@
 	lg	%r2,__LC_CURRENT
 	aghi	%r2,__TASK_thread
 	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
-	bor	%r14
+	jo	.Lsave_fpu_regs_exit
 	stfpc	__THREAD_FPU_fpc(%r2)
 .Lsave_fpu_regs_fpc_end:
 	lg	%r3,__THREAD_FPU_regs(%r2)
@@ -835,7 +963,8 @@
 	std	15,120(%r3)
 .Lsave_fpu_regs_done:
 	oi	__LC_CPU_FLAGS+7,_CIF_FPU
-	br	%r14
+.Lsave_fpu_regs_exit:
+	BR_EX	%r14
 .Lsave_fpu_regs_end:
 #if IS_ENABLED(CONFIG_KVM)
 EXPORT_SYMBOL(save_fpu_regs)
@@ -855,7 +984,7 @@
 	lg	%r4,__LC_CURRENT
 	aghi	%r4,__TASK_thread
 	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
-	bnor	%r14
+	jno	.Lload_fpu_regs_exit
 	lfpc	__THREAD_FPU_fpc(%r4)
 	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
 	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
@@ -884,7 +1013,8 @@
 	ld	15,120(%r4)
 .Lload_fpu_regs_done:
 	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
-	br	%r14
+.Lload_fpu_regs_exit:
+	BR_EX	%r14
 .Lload_fpu_regs_end:
 
 .L__critical_end:
@@ -894,6 +1024,7 @@
  */
 ENTRY(mcck_int_handler)
 	STCK	__LC_MCCK_CLOCK
+	BPOFF
 	la	%r1,4095		# revalidate r1
 	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# revalidate cpu timer
 	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
@@ -925,6 +1056,16 @@
 .Lmcck_skip:
 	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
 	stmg	%r0,%r7,__PT_R0(%r11)
+	# clear user controlled registers to prevent speculative use
+	xgr	%r0,%r0
+	xgr	%r1,%r1
+	xgr	%r2,%r2
+	xgr	%r3,%r3
+	xgr	%r4,%r4
+	xgr	%r5,%r5
+	xgr	%r6,%r6
+	xgr	%r7,%r7
+	xgr	%r10,%r10
 	mvc	__PT_R8(64,%r11),0(%r14)
 	stmg	%r8,%r9,__PT_PSW(%r11)
 	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
@@ -950,6 +1091,7 @@
 	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
 	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
 	jno	0f
+	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
 	stpt	__LC_EXIT_TIMER
 	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
 0:	lmg	%r11,%r15,__PT_R11(%r11)
@@ -1045,7 +1187,7 @@
 	jl	0f
 	clg	%r9,BASED(.Lcleanup_table+104)	# .Lload_fpu_regs_end
 	jl	.Lcleanup_load_fpu_regs
-0:	br	%r14
+0:	BR_EX	%r14
 
 	.align	8
 .Lcleanup_table:
@@ -1070,11 +1212,12 @@
 	.quad	.Lsie_done
 
 .Lcleanup_sie:
+	BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
 	lg	%r9,__SF_EMPTY(%r15)		# get control block pointer
 	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
 	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
 	larl	%r9,sie_exit			# skip forward to sie_exit
-	br	%r14
+	BR_EX	%r14
 #endif
 
 .Lcleanup_system_call:
@@ -1116,7 +1259,8 @@
 	srag	%r9,%r9,23
 	jz	0f
 	mvc	__TI_last_break(8,%r12),16(%r11)
-0:	# set up saved register r11
+0:	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
+	# set up saved register r11
 	lg	%r15,__LC_KERNEL_STACK
 	la	%r9,STACK_FRAME_OVERHEAD(%r15)
 	stg	%r9,24(%r11)		# r11 pt_regs pointer
@@ -1131,7 +1275,7 @@
 	stg	%r15,56(%r11)		# r15 stack pointer
 	# set new psw address and exit
 	larl	%r9,.Lsysc_do_svc
-	br	%r14
+	BR_EX	%r14,%r11
 .Lcleanup_system_call_insn:
 	.quad	system_call
 	.quad	.Lsysc_stmg
@@ -1141,7 +1285,7 @@
 
 .Lcleanup_sysc_tif:
 	larl	%r9,.Lsysc_tif
-	br	%r14
+	BR_EX	%r14,%r11
 
 .Lcleanup_sysc_restore:
 	# check if stpt has been executed
@@ -1158,14 +1302,14 @@
 	mvc	0(64,%r11),__PT_R8(%r9)
 	lmg	%r0,%r7,__PT_R0(%r9)
 1:	lmg	%r8,%r9,__LC_RETURN_PSW
-	br	%r14
+	BR_EX	%r14,%r11
 .Lcleanup_sysc_restore_insn:
 	.quad	.Lsysc_exit_timer
 	.quad	.Lsysc_done - 4
 
 .Lcleanup_io_tif:
 	larl	%r9,.Lio_tif
-	br	%r14
+	BR_EX	%r14,%r11
 
 .Lcleanup_io_restore:
 	# check if stpt has been executed
@@ -1179,7 +1323,7 @@
 	mvc	0(64,%r11),__PT_R8(%r9)
 	lmg	%r0,%r7,__PT_R0(%r9)
 1:	lmg	%r8,%r9,__LC_RETURN_PSW
-	br	%r14
+	BR_EX	%r14,%r11
 .Lcleanup_io_restore_insn:
 	.quad	.Lio_exit_timer
 	.quad	.Lio_done - 4
@@ -1232,17 +1376,17 @@
 	# prepare return psw
 	nihh	%r8,0xfcfd		# clear irq & wait state bits
 	lg	%r9,48(%r11)		# return from psw_idle
-	br	%r14
+	BR_EX	%r14,%r11
 .Lcleanup_idle_insn:
 	.quad	.Lpsw_idle_lpsw
 
 .Lcleanup_save_fpu_regs:
 	larl	%r9,save_fpu_regs
-	br	%r14
+	BR_EX	%r14,%r11
 
 .Lcleanup_load_fpu_regs:
 	larl	%r9,load_fpu_regs
-	br	%r14
+	BR_EX	%r14,%r11
 
 /*
  * Integer constants
@@ -1258,7 +1402,6 @@
 .Lsie_critical_length:
 	.quad	.Lsie_done - .Lsie_gmap
 #endif
-
 	.section .rodata, "a"
 #define SYSCALL(esame,emu)	.long esame
 	.globl	sys_call_table
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 295bfb7..df49f2a1 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -563,6 +563,7 @@
 
 static void __ipl_run(void *unused)
 {
+	__bpon();
 	diag308(DIAG308_LOAD_CLEAR, NULL);
 	if (MACHINE_IS_VM)
 		__cpcmd("IPL", NULL, 0, NULL);
@@ -798,6 +799,7 @@
 	/* copy and convert to ebcdic */
 	memcpy(ipb->hdr.loadparm, buf, lp_len);
 	ASCEBC(ipb->hdr.loadparm, LOADPARM_LEN);
+	ipb->hdr.flags |= DIAG308_FLAGS_LP_VALID;
 	return len;
 }
 
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 285d656..7ff9767 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -173,10 +173,9 @@
 		new -= STACK_FRAME_OVERHEAD;
 		((struct stack_frame *) new)->back_chain = old;
 		asm volatile("   la    15,0(%0)\n"
-			     "   basr  14,%2\n"
+			     "   brasl 14,__do_softirq\n"
 			     "   la    15,0(%1)\n"
-			     : : "a" (new), "a" (old),
-			         "a" (__do_softirq)
+			     : : "a" (new), "a" (old)
 			     : "0", "1", "2", "3", "4", "5", "14",
 			       "cc", "memory" );
 	} else {
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 9a17e44..be75e8e 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -8,13 +8,17 @@
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/ftrace.h>
+#include <asm/nospec-insn.h>
 #include <asm/ptrace.h>
 #include <asm/export.h>
 
+	GEN_BR_THUNK %r1
+	GEN_BR_THUNK %r14
+
 	.section .kprobes.text, "ax"
 
 ENTRY(ftrace_stub)
-	br	%r14
+	BR_EX	%r14
 
 #define STACK_FRAME_SIZE  (STACK_FRAME_OVERHEAD + __PT_SIZE)
 #define STACK_PTREGS	  (STACK_FRAME_OVERHEAD)
@@ -22,7 +26,7 @@
 #define STACK_PTREGS_PSW  (STACK_PTREGS + __PT_PSW)
 
 ENTRY(_mcount)
-	br	%r14
+	BR_EX	%r14
 
 EXPORT_SYMBOL(_mcount)
 
@@ -52,7 +56,7 @@
 #endif
 	lgr	%r3,%r14
 	la	%r5,STACK_PTREGS(%r15)
-	basr	%r14,%r1
+	BASR_EX	%r14,%r1
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 # The j instruction gets runtime patched to a nop instruction.
 # See ftrace_enable_ftrace_graph_caller.
@@ -67,7 +71,7 @@
 #endif
 	lg	%r1,(STACK_PTREGS_PSW+8)(%r15)
 	lmg	%r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15)
-	br	%r1
+	BR_EX	%r1
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
@@ -80,6 +84,6 @@
 	aghi	%r15,STACK_FRAME_OVERHEAD
 	lgr	%r14,%r2
 	lmg	%r2,%r5,32(%r15)
-	br	%r14
+	BR_EX	%r14
 
 #endif
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index fbc0789..64ccfdf 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -31,6 +31,9 @@
 #include <linux/kernel.h>
 #include <linux/moduleloader.h>
 #include <linux/bug.h>
+#include <asm/alternative.h>
+#include <asm/nospec-branch.h>
+#include <asm/facility.h>
 
 #if 0
 #define DEBUGP printk
@@ -167,7 +170,11 @@
 	me->arch.got_offset = me->core_layout.size;
 	me->core_layout.size += me->arch.got_size;
 	me->arch.plt_offset = me->core_layout.size;
-	me->core_layout.size += me->arch.plt_size;
+	if (me->arch.plt_size) {
+		if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable)
+			me->arch.plt_size += PLT_ENTRY_SIZE;
+		me->core_layout.size += me->arch.plt_size;
+	}
 	return 0;
 }
 
@@ -321,9 +328,20 @@
 			unsigned int *ip;
 			ip = me->core_layout.base + me->arch.plt_offset +
 				info->plt_offset;
-			ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
-			ip[1] = 0x100a0004;
-			ip[2] = 0x07f10000;
+			ip[0] = 0x0d10e310;	/* basr 1,0  */
+			ip[1] = 0x100a0004;	/* lg	1,10(1) */
+			if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) {
+				unsigned int *ij;
+				ij = me->core_layout.base +
+					me->arch.plt_offset +
+					me->arch.plt_size - PLT_ENTRY_SIZE;
+				ip[2] = 0xa7f40000 +	/* j __jump_r1 */
+					(unsigned int)(u16)
+					(((unsigned long) ij - 8 -
+					  (unsigned long) ip) / 2);
+			} else {
+				ip[2] = 0x07f10000;	/* br %r1 */
+			}
 			ip[3] = (unsigned int) (val >> 32);
 			ip[4] = (unsigned int) val;
 			info->plt_initialized = 1;
@@ -428,6 +446,45 @@
 		    const Elf_Shdr *sechdrs,
 		    struct module *me)
 {
+	const Elf_Shdr *s;
+	char *secstrings, *secname;
+	void *aseg;
+
+	if (IS_ENABLED(CONFIG_EXPOLINE) &&
+	    !nospec_disable && me->arch.plt_size) {
+		unsigned int *ij;
+
+		ij = me->core_layout.base + me->arch.plt_offset +
+			me->arch.plt_size - PLT_ENTRY_SIZE;
+		if (test_facility(35)) {
+			ij[0] = 0xc6000000;	/* exrl	%r0,.+10	*/
+			ij[1] = 0x0005a7f4;	/* j	.		*/
+			ij[2] = 0x000007f1;	/* br	%r1		*/
+		} else {
+			ij[0] = 0x44000000 | (unsigned int)
+				offsetof(struct lowcore, br_r1_trampoline);
+			ij[1] = 0xa7f40000;	/* j	.		*/
+		}
+	}
+
+	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
+		aseg = (void *) s->sh_addr;
+		secname = secstrings + s->sh_name;
+
+		if (!strcmp(".altinstructions", secname))
+			/* patch .altinstructions */
+			apply_alternatives(aseg, aseg + s->sh_size);
+
+		if (IS_ENABLED(CONFIG_EXPOLINE) &&
+		    (!strncmp(".s390_indirect", secname, 14)))
+			nospec_revert(aseg, aseg + s->sh_size);
+
+		if (IS_ENABLED(CONFIG_EXPOLINE) &&
+		    (!strncmp(".s390_return", secname, 12)))
+			nospec_revert(aseg, aseg + s->sh_size);
+	}
+
 	jump_label_apply_nops(me);
 	return 0;
 }
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
new file mode 100644
index 0000000..d5eed65
--- /dev/null
+++ b/arch/s390/kernel/nospec-branch.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/module.h>
+#include <linux/device.h>
+#include <asm/facility.h>
+#include <asm/nospec-branch.h>
+
+static int __init nobp_setup_early(char *str)
+{
+	bool enabled;
+	int rc;
+
+	rc = kstrtobool(str, &enabled);
+	if (rc)
+		return rc;
+	if (enabled && test_facility(82)) {
+		/*
+		 * The user explicitely requested nobp=1, enable it and
+		 * disable the expoline support.
+		 */
+		__set_facility(82, S390_lowcore.alt_stfle_fac_list);
+		if (IS_ENABLED(CONFIG_EXPOLINE))
+			nospec_disable = 1;
+	} else {
+		__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+	}
+	return 0;
+}
+early_param("nobp", nobp_setup_early);
+
+static int __init nospec_setup_early(char *str)
+{
+	__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+	return 0;
+}
+early_param("nospec", nospec_setup_early);
+
+static int __init nospec_report(void)
+{
+	if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
+		pr_info("Spectre V2 mitigation: execute trampolines.\n");
+	if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
+		pr_info("Spectre V2 mitigation: limited branch prediction.\n");
+	return 0;
+}
+arch_initcall(nospec_report);
+
+#ifdef CONFIG_EXPOLINE
+
+int nospec_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
+
+static int __init nospectre_v2_setup_early(char *str)
+{
+	nospec_disable = 1;
+	return 0;
+}
+early_param("nospectre_v2", nospectre_v2_setup_early);
+
+void __init nospec_auto_detect(void)
+{
+	if (IS_ENABLED(CC_USING_EXPOLINE)) {
+		/*
+		 * The kernel has been compiled with expolines.
+		 * Keep expolines enabled and disable nobp.
+		 */
+		nospec_disable = 0;
+		__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+	}
+	/*
+	 * If the kernel has not been compiled with expolines the
+	 * nobp setting decides what is done, this depends on the
+	 * CONFIG_KERNEL_NOBP option and the nobp/nospec parameters.
+	 */
+}
+
+static int __init spectre_v2_setup_early(char *str)
+{
+	if (str && !strncmp(str, "on", 2)) {
+		nospec_disable = 0;
+		__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+	}
+	if (str && !strncmp(str, "off", 3))
+		nospec_disable = 1;
+	if (str && !strncmp(str, "auto", 4))
+		nospec_auto_detect();
+	return 0;
+}
+early_param("spectre_v2", spectre_v2_setup_early);
+
+static void __init_or_module __nospec_revert(s32 *start, s32 *end)
+{
+	enum { BRCL_EXPOLINE, BRASL_EXPOLINE } type;
+	u8 *instr, *thunk, *br;
+	u8 insnbuf[6];
+	s32 *epo;
+
+	/* Second part of the instruction replace is always a nop */
+	for (epo = start; epo < end; epo++) {
+		instr = (u8 *) epo + *epo;
+		if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04)
+			type = BRCL_EXPOLINE;	/* brcl instruction */
+		else if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x05)
+			type = BRASL_EXPOLINE;	/* brasl instruction */
+		else
+			continue;
+		thunk = instr + (*(int *)(instr + 2)) * 2;
+		if (thunk[0] == 0xc6 && thunk[1] == 0x00)
+			/* exrl %r0,<target-br> */
+			br = thunk + (*(int *)(thunk + 2)) * 2;
+		else if (thunk[0] == 0xc0 && (thunk[1] & 0x0f) == 0x00 &&
+			 thunk[6] == 0x44 && thunk[7] == 0x00 &&
+			 (thunk[8] & 0x0f) == 0x00 && thunk[9] == 0x00 &&
+			 (thunk[1] & 0xf0) == (thunk[8] & 0xf0))
+			/* larl %rx,<target br> + ex %r0,0(%rx) */
+			br = thunk + (*(int *)(thunk + 2)) * 2;
+		else
+			continue;
+		/* Check for unconditional branch 0x07f? or 0x47f???? */
+		if ((br[0] & 0xbf) != 0x07 || (br[1] & 0xf0) != 0xf0)
+			continue;
+
+		memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x07, 0x00 }, 4);
+		switch (type) {
+		case BRCL_EXPOLINE:
+			insnbuf[0] = br[0];
+			insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
+			if (br[0] == 0x47) {
+				/* brcl to b, replace with bc + nopr */
+				insnbuf[2] = br[2];
+				insnbuf[3] = br[3];
+			} else {
+				/* brcl to br, replace with bcr + nop */
+			}
+			break;
+		case BRASL_EXPOLINE:
+			insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
+			if (br[0] == 0x47) {
+				/* brasl to b, replace with bas + nopr */
+				insnbuf[0] = 0x4d;
+				insnbuf[2] = br[2];
+				insnbuf[3] = br[3];
+			} else {
+				/* brasl to br, replace with basr + nop */
+				insnbuf[0] = 0x0d;
+			}
+			break;
+		}
+
+		s390_kernel_write(instr, insnbuf, 6);
+	}
+}
+
+void __init_or_module nospec_revert(s32 *start, s32 *end)
+{
+	if (nospec_disable)
+		__nospec_revert(start, end);
+}
+
+extern s32 __nospec_call_start[], __nospec_call_end[];
+extern s32 __nospec_return_start[], __nospec_return_end[];
+void __init nospec_init_branches(void)
+{
+	nospec_revert(__nospec_call_start, __nospec_call_end);
+	nospec_revert(__nospec_return_start, __nospec_return_end);
+}
+
+#endif /* CONFIG_EXPOLINE */
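
The revert logic in __nospec_revert() works purely on instruction bytes: the 32-bit field after a brcl/brasl opcode is a signed halfword count relative to the instruction, and following it twice (call site to thunk, thunk to final branch) is enough to decide whether the expoline can be collapsed back into a plain bc/bcr or bas/basr. A minimal sketch of that offset arithmetic, assuming big-endian byte order as on s390 (illustrative only, not part of the patch):

	#include <stdint.h>

	/*
	 * Decode the RI2 field of a 6-byte RIL-format instruction such as
	 * brcl or brasl: bytes 2..5 hold a signed 32-bit number of halfwords
	 * relative to the instruction address, i.e. the same step as
	 * "instr + (*(int *)(instr + 2)) * 2" in __nospec_revert().
	 */
	static uint8_t *ril_target(uint8_t *instr)
	{
		int32_t ri2 = (int32_t)(((uint32_t)instr[2] << 24) |
					((uint32_t)instr[3] << 16) |
					((uint32_t)instr[4] << 8)  |
					 (uint32_t)instr[5]);

		return instr + (int64_t)ri2 * 2;
	}

If the branch found at the end of that chain is an unconditional br/b, the original brcl/brasl is rewritten in place; otherwise the call site is left untouched.
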
diff --git a/arch/s390/kernel/nospec-sysfs.c b/arch/s390/kernel/nospec-sysfs.c
new file mode 100644
index 0000000..8affad5
--- /dev/null
+++ b/arch/s390/kernel/nospec-sysfs.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/device.h>
+#include <linux/cpu.h>
+#include <asm/facility.h>
+#include <asm/nospec-branch.h>
+
+ssize_t cpu_show_spectre_v1(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
+		return sprintf(buf, "Mitigation: execute trampolines\n");
+	if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
+		return sprintf(buf, "Mitigation: limited branch prediction\n");
+	return sprintf(buf, "Vulnerable\n");
+}
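
These two hooks back the generic vulnerability files under /sys/devices/system/cpu/vulnerabilities/. A small userspace sketch that reads the spectre_v2 state they report (illustrative only; the file path and plumbing come from the common CPU sysfs code, not from this patch):

	#include <stdio.h>

	int main(void)
	{
		char buf[128];
		FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");

		if (!f)
			return 1;
		/* Prints e.g. "Mitigation: execute trampolines" on an expoline kernel */
		if (fgets(buf, sizeof(buf), f))
			fputs(buf, stdout);
		fclose(f);
		return 0;
	}
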
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index fcc634c..96e4fca 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -739,6 +739,10 @@
 	 */
 	rate = 0;
 	if (attr->freq) {
+		if (!attr->sample_freq) {
+			err = -EINVAL;
+			goto out;
+		}
 		rate = freq_to_sample_rate(&si, attr->sample_freq);
 		rate = hw_limit_rate(&si, rate);
 		attr->freq = 0;
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 81d0808..d856263 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -179,3 +179,21 @@
 	.stop	= c_stop,
 	.show	= show_cpuinfo,
 };
+
+int s390_isolate_bp(void)
+{
+	if (!test_facility(82))
+		return -EOPNOTSUPP;
+	set_thread_flag(TIF_ISOLATE_BP);
+	return 0;
+}
+EXPORT_SYMBOL(s390_isolate_bp);
+
+int s390_isolate_bp_guest(void)
+{
+	if (!test_facility(82))
+		return -EOPNOTSUPP;
+	set_thread_flag(TIF_ISOLATE_BP_GUEST);
+	return 0;
+}
+EXPORT_SYMBOL(s390_isolate_bp_guest);
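
s390_isolate_bp() and s390_isolate_bp_guest() only set the corresponding TIF flag on the calling task and fail with -EOPNOTSUPP when facility 82 is absent. A hedged sketch of an in-kernel caller follows; the header carrying the prototype is an assumption, the functions themselves are the ones added above:

	#include <linux/printk.h>
	#include <asm/processor.h>	/* assumed home of the s390_isolate_bp() prototype */

	/*
	 * Hypothetical caller: request branch prediction isolation for the
	 * current task. Callers must cope with -EOPNOTSUPP on machines
	 * without facility 82.
	 */
	static int example_enable_bp_isolation(void)
	{
		int rc = s390_isolate_bp();

		if (rc)
			pr_warn("branch prediction isolation unavailable: %d\n", rc);
		return rc;
	}
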
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index 89ea8c2..70d635d 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -6,8 +6,11 @@
 
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
+#include <asm/nospec-insn.h>
 #include <asm/sigp.h>
 
+	GEN_BR_THUNK %r9
+
 #
 # Issue "store status" for the current CPU to its prefix page
 # and call passed function afterwards
@@ -66,9 +69,9 @@
 	st	%r4,0(%r1)
 	st	%r5,4(%r1)
 	stg	%r2,8(%r1)
-	lgr	%r1,%r2
+	lgr	%r9,%r2
 	lgr	%r2,%r3
-	br	%r1
+	BR_EX	%r9
 
 	.section .bss
 	.align	8
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index e974e53..feb9d97 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -63,6 +63,8 @@
 #include <asm/sclp.h>
 #include <asm/sysinfo.h>
 #include <asm/numa.h>
+#include <asm/alternative.h>
+#include <asm/nospec-branch.h>
 #include "entry.h"
 
 /*
@@ -335,7 +337,9 @@
 	lc->machine_flags = S390_lowcore.machine_flags;
 	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
 	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
-	       MAX_FACILITY_BIT/8);
+	       sizeof(lc->stfle_fac_list));
+	memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
+	       sizeof(lc->alt_stfle_fac_list));
 	if (MACHINE_HAS_VX)
 		lc->vector_save_area_addr =
 			(unsigned long) &lc->vector_save_area;
@@ -372,6 +376,7 @@
 #ifdef CONFIG_SMP
 	lc->spinlock_lockval = arch_spin_lockval(0);
 #endif
+	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
 
 	set_prefix((u32)(unsigned long) lc);
 	lowcore_ptr[0] = lc;
@@ -871,6 +876,9 @@
 	init_mm.end_data = (unsigned long) &_edata;
 	init_mm.brk = (unsigned long) &_end;
 
+	if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
+		nospec_auto_detect();
+
 	parse_early_param();
 #ifdef CONFIG_CRASH_DUMP
 	/* Deactivate elfcorehdr= kernel parameter */
@@ -931,6 +939,10 @@
 	conmode_default();
 	set_preferred_console();
 
+	apply_alternative_instructions();
+	if (IS_ENABLED(CONFIG_EXPOLINE))
+		nospec_init_branches();
+
 	/* Setup zfcpdump support */
 	setup_zfcpdump();
 
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 35531fe..0a31110 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -205,6 +205,7 @@
 	lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
 	lc->cpu_nr = cpu;
 	lc->spinlock_lockval = arch_spin_lockval(cpu);
+	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
 	if (MACHINE_HAS_VX)
 		lc->vector_save_area_addr =
 			(unsigned long) &lc->vector_save_area;
@@ -253,7 +254,9 @@
 	__ctl_store(lc->cregs_save_area, 0, 15);
 	save_access_regs((unsigned int *) lc->access_regs_save_area);
 	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
-	       MAX_FACILITY_BIT/8);
+	       sizeof(lc->stfle_fac_list));
+	memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
+	       sizeof(lc->alt_stfle_fac_list));
 }
 
 static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
@@ -302,6 +305,7 @@
 	mem_assign_absolute(lc->restart_fn, (unsigned long) func);
 	mem_assign_absolute(lc->restart_data, (unsigned long) data);
 	mem_assign_absolute(lc->restart_source, source_cpu);
+	__bpon();
 	asm volatile(
 		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
 		"	brc	2,0b	# busy, try again\n"
@@ -875,6 +879,7 @@
 void __noreturn cpu_die(void)
 {
 	idle_task_exit();
+	__bpon();
 	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
 	for (;;) ;
 }
diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S
index 2d6b6e8..4e76aaf 100644
--- a/arch/s390/kernel/swsusp.S
+++ b/arch/s390/kernel/swsusp.S
@@ -12,6 +12,7 @@
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
 #include <asm/asm-offsets.h>
+#include <asm/nospec-insn.h>
 #include <asm/sigp.h>
 
 /*
@@ -23,6 +24,8 @@
  * (see below) in the resume process.
  * This function runs with disabled interrupts.
  */
+	GEN_BR_THUNK %r14
+
 	.section .text
 ENTRY(swsusp_arch_suspend)
 	stmg	%r6,%r15,__SF_GPRS(%r15)
@@ -102,7 +105,7 @@
 	spx	0x318(%r1)
 	lmg	%r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
 	lghi	%r2,0
-	br	%r14
+	BR_EX	%r14
 
 /*
  * Restore saved memory image to correct place and restore register context.
@@ -200,7 +203,7 @@
 	lghi	%r1,0
 	sam31
 	sigp	%r1,%r0,SIGP_SET_ARCHITECTURE
-	basr	%r14,%r3
+	brasl	%r14,_sclp_print_early
 	larl	%r3,.Ldisabled_wait_31
 	lpsw	0(%r3)
 4:
@@ -266,7 +269,7 @@
 	/* Return 0 */
 	lmg	%r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
 	lghi	%r2,0
-	br	%r14
+	BR_EX	%r14
 
 	.section .data..nosave,"aw",@progbits
 	.align	8
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
index 66956c0..3d04dfd 100644
--- a/arch/s390/kernel/uprobes.c
+++ b/arch/s390/kernel/uprobes.c
@@ -147,6 +147,15 @@
 	return orig;
 }
 
+bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
+			     struct pt_regs *regs)
+{
+	if (ctx == RP_CHECK_CHAIN_CALL)
+		return user_stack_pointer(regs) <= ret->stack;
+	else
+		return user_stack_pointer(regs) < ret->stack;
+}
+
 /* Instruction Emulation */
 
 static void adjust_psw_addr(psw_t *psw, unsigned long len)
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 3667d20..dd96b46 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -31,8 +31,14 @@
 {
 	. = 0x00000000;
 	.text : {
-	_text = .;		/* Text and read-only data */
+		/* Text and read-only data */
 		HEAD_TEXT
+		/*
+		 * E.g. perf doesn't like symbols starting at address zero,
+		 * therefore skip the initial PSW and channel program located
+		 * at address zero and let _text start at 0x200.
+		 */
+	_text = 0x200;
 		TEXT_TEXT
 		SCHED_TEXT
 		CPUIDLE_TEXT
@@ -93,6 +99,43 @@
 		EXIT_DATA
 	}
 
+	/*
+	 * struct alt_inst entries. From the header (alternative.h):
+	 * "Alternative instructions for different CPU types or capabilities"
+	 * Think locking instructions on spinlocks.
+	 * Note, that it is a part of __init region.
+	 */
+	. = ALIGN(8);
+	.altinstructions : {
+		__alt_instructions = .;
+		*(.altinstructions)
+		__alt_instructions_end = .;
+	}
+
+	/*
+	 * And here are the replacement instructions. The linker sticks
+	 * them as binary blobs. The .altinstructions has enough data to
+	 * get the address and the length of them to patch the kernel safely.
+	 * Note, that it is a part of __init region.
+	 */
+	.altinstr_replacement : {
+		*(.altinstr_replacement)
+	}
+
+	/*
+	 * Table with the patch locations to undo expolines
+	 */
+	.nospec_call_table : {
+		__nospec_call_start = . ;
+		*(.s390_indirect*)
+		__nospec_call_end = . ;
+	}
+	.nospec_return_table : {
+		__nospec_return_start = . ;
+		*(.s390_return*)
+		__nospec_return_end = . ;
+	}
+
 	/* early.c uses stsi, which requires page aligned data. */
 	. = ALIGN(PAGE_SIZE);
 	INIT_DATA_SECTION(0x100)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index a70ff09..2032ab8 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -401,6 +401,9 @@
 	case KVM_CAP_S390_RI:
 		r = test_facility(64);
 		break;
+	case KVM_CAP_S390_BPB:
+		r = test_facility(82);
+		break;
 	default:
 		r = 0;
 	}
@@ -1713,6 +1716,8 @@
 	kvm_s390_set_prefix(vcpu, 0);
 	if (test_kvm_facility(vcpu->kvm, 64))
 		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
+	if (test_kvm_facility(vcpu->kvm, 82))
+		vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
 	/* fprs can be synchronized via vrs, even if the guest has no vx. With
 	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
 	 */
@@ -1829,7 +1834,6 @@
 	if (test_fp_ctl(current->thread.fpu.fpc))
 		/* User space provided an invalid FPC, let's clear it */
 		current->thread.fpu.fpc = 0;
-
 	save_access_regs(vcpu->arch.host_acrs);
 	restore_access_regs(vcpu->run->s.regs.acrs);
 	gmap_enable(vcpu->arch.enabled_gmap);
@@ -1877,6 +1881,7 @@
 	current->thread.fpu.fpc = 0;
 	vcpu->arch.sie_block->gbea = 1;
 	vcpu->arch.sie_block->pp = 0;
+	vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
 	kvm_clear_async_pf_completion_queue(vcpu);
 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
@@ -2744,6 +2749,11 @@
 		if (riccb->valid)
 			vcpu->arch.sie_block->ecb3 |= 0x01;
 	}
+	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
+	    test_kvm_facility(vcpu->kvm, 82)) {
+		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
+		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
+	}
 
 	kvm_run->kvm_dirty_regs = 0;
 }
@@ -2762,6 +2772,7 @@
 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
+	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
 }
 
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
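
With KVM_SYNC_BPBC in kvm_valid_regs, userspace can read and set the guest's BPBC bit through the sync-regs area of struct kvm_run on every exit. A rough sketch of how a VMM might flip it before re-entering the vcpu (illustrative only; vcpu_fd and run are the usual vcpu file descriptor and its mmap'ed kvm_run, and error handling is omitted):

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/*
	 * Mark the BPBC value dirty so the next KVM_RUN copies it into the
	 * SIE control block, mirroring sync_regs() in the hunk above.
	 */
	static int set_guest_bpbc(int vcpu_fd, struct kvm_run *run, int on)
	{
		run->s.regs.bpbc = on ? 1 : 0;
		run->kvm_dirty_regs |= KVM_SYNC_BPBC;
		return ioctl(vcpu_fd, KVM_RUN, 0);
	}
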
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index d8673e2..51f842c 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -217,6 +217,12 @@
 	memcpy(scb_o->gcr, scb_s->gcr, 128);
 	scb_o->pp = scb_s->pp;
 
+	/* branch prediction */
+	if (test_kvm_facility(vcpu->kvm, 82)) {
+		scb_o->fpf &= ~FPF_BPBC;
+		scb_o->fpf |= scb_s->fpf & FPF_BPBC;
+	}
+
 	/* interrupt intercept */
 	switch (scb_s->icptcode) {
 	case ICPT_PROGI:
@@ -259,6 +265,7 @@
 	scb_s->ecb3 = 0;
 	scb_s->ecd = 0;
 	scb_s->fac = 0;
+	scb_s->fpf = 0;
 
 	rc = prepare_cpuflags(vcpu, vsie_page);
 	if (rc)
@@ -316,6 +323,9 @@
 			prefix_unmapped(vsie_page);
 		scb_s->ecb |= scb_o->ecb & 0x10U;
 	}
+	/* branch prediction */
+	if (test_kvm_facility(vcpu->kvm, 82))
+		scb_s->fpf |= scb_o->fpf & FPF_BPBC;
 	/* SIMD */
 	if (test_kvm_facility(vcpu->kvm, 129)) {
 		scb_s->eca |= scb_o->eca & 0x00020000U;
@@ -539,7 +549,7 @@
 
 	gpa = scb_o->itdba & ~0xffUL;
 	if (gpa && (scb_s->ecb & 0x10U)) {
-		if (!(gpa & ~0x1fffU)) {
+		if (!(gpa & ~0x1fffUL)) {
 			rc = set_validity_icpt(scb_s, 0x0080U);
 			goto unpin;
 		}
@@ -754,6 +764,7 @@
 {
 	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
+	int guest_bp_isolation;
 	int rc;
 
 	handle_last_fault(vcpu, vsie_page);
@@ -764,6 +775,20 @@
 		s390_handle_mcck();
 
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+
+	/* save current guest state of bp isolation override */
+	guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);
+
+	/*
+	 * The guest is running with BPBC, so we have to force it on for our
+	 * nested guest. This is done by enabling BPBC globally, so the BPBC
+	 * control in the SCB (which the nested guest can modify) is simply
+	 * ignored.
+	 */
+	if (test_kvm_facility(vcpu->kvm, 82) &&
+	    vcpu->arch.sie_block->fpf & FPF_BPBC)
+		set_thread_flag(TIF_ISOLATE_BP_GUEST);
+
 	local_irq_disable();
 	guest_enter_irqoff();
 	local_irq_enable();
@@ -773,6 +798,11 @@
 	local_irq_disable();
 	guest_exit_irqoff();
 	local_irq_enable();
+
+	/* restore guest state for bp isolation override */
+	if (!guest_bp_isolation)
+		clear_thread_flag(TIF_ISOLATE_BP_GUEST);
+
 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 
 	if (rc > 0)
diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
index be9fa65..e7672ed 100644
--- a/arch/s390/lib/mem.S
+++ b/arch/s390/lib/mem.S
@@ -6,6 +6,9 @@
 
 #include <linux/linkage.h>
 #include <asm/export.h>
+#include <asm/nospec-insn.h>
+
+	GEN_BR_THUNK %r14
 
 /*
  * memset implementation
@@ -39,7 +42,7 @@
 .Lmemset_clear_rest:
 	larl	%r3,.Lmemset_xc
 	ex	%r4,0(%r3)
-	br	%r14
+	BR_EX	%r14
 .Lmemset_fill:
 	stc	%r3,0(%r2)
 	cghi	%r4,1
@@ -56,7 +59,7 @@
 .Lmemset_fill_rest:
 	larl	%r3,.Lmemset_mvc
 	ex	%r4,0(%r3)
-	br	%r14
+	BR_EX	%r14
 .Lmemset_xc:
 	xc	0(1,%r1),0(%r1)
 .Lmemset_mvc:
@@ -79,7 +82,7 @@
 .Lmemcpy_rest:
 	larl	%r5,.Lmemcpy_mvc
 	ex	%r4,0(%r5)
-	br	%r14
+	BR_EX	%r14
 .Lmemcpy_loop:
 	mvc	0(256,%r1),0(%r3)
 	la	%r1,256(%r1)
diff --git a/arch/s390/net/bpf_jit.S b/arch/s390/net/bpf_jit.S
index a1c917d..fa716f2 100644
--- a/arch/s390/net/bpf_jit.S
+++ b/arch/s390/net/bpf_jit.S
@@ -8,6 +8,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/nospec-insn.h>
 #include "bpf_jit.h"
 
 /*
@@ -53,7 +54,7 @@
 	clg	%r3,STK_OFF_HLEN(%r15);	/* Offset + SIZE > hlen? */	\
 	jh	sk_load_##NAME##_slow;					\
 	LOAD	%r14,-SIZE(%r3,%r12);	/* Get data from skb */		\
-	b	OFF_OK(%r6);		/* Return */			\
+	B_EX	OFF_OK,%r6;		/* Return */			\
 									\
 sk_load_##NAME##_slow:;							\
 	lgr	%r2,%r7;		/* Arg1 = skb pointer */	\
@@ -63,11 +64,14 @@
 	brasl	%r14,skb_copy_bits;	/* Get data from skb */		\
 	LOAD	%r14,STK_OFF_TMP(%r15);	/* Load from temp bufffer */	\
 	ltgr	%r2,%r2;		/* Set cc to (%r2 != 0) */	\
-	br	%r6;			/* Return */
+	BR_EX	%r6;			/* Return */
 
 sk_load_common(word, 4, llgf)	/* r14 = *(u32 *) (skb->data+offset) */
 sk_load_common(half, 2, llgh)	/* r14 = *(u16 *) (skb->data+offset) */
 
+	GEN_BR_THUNK %r6
+	GEN_B_THUNK OFF_OK,%r6
+
 /*
  * Load 1 byte from SKB (optimized version)
  */
@@ -79,7 +83,7 @@
 	clg	%r3,STK_OFF_HLEN(%r15)	# Offset >= hlen?
 	jnl	sk_load_byte_slow
 	llgc	%r14,0(%r3,%r12)	# Get byte from skb
-	b	OFF_OK(%r6)		# Return OK
+	B_EX	OFF_OK,%r6		# Return OK
 
 sk_load_byte_slow:
 	lgr	%r2,%r7			# Arg1 = skb pointer
@@ -89,7 +93,7 @@
 	brasl	%r14,skb_copy_bits	# Get data from skb
 	llgc	%r14,STK_OFF_TMP(%r15)	# Load result from temp buffer
 	ltgr	%r2,%r2			# Set cc to (%r2 != 0)
-	br	%r6			# Return cc
+	BR_EX	%r6			# Return cc
 
 #define sk_negative_common(NAME, SIZE, LOAD)				\
 sk_load_##NAME##_slow_neg:;						\
@@ -103,7 +107,7 @@
 	jz	bpf_error;						\
 	LOAD	%r14,0(%r2);		/* Get data from pointer */	\
 	xr	%r3,%r3;		/* Set cc to zero */		\
-	br	%r6;			/* Return cc */
+	BR_EX	%r6;			/* Return cc */
 
 sk_negative_common(word, 4, llgf)
 sk_negative_common(half, 2, llgh)
@@ -112,4 +116,4 @@
 bpf_error:
 # force a return 0 from jit handler
 	ltgr	%r15,%r15	# Set condition code
-	br	%r6
+	BR_EX	%r6
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index e8dee62..e7ce257 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -24,6 +24,8 @@
 #include <linux/bpf.h>
 #include <asm/cacheflush.h>
 #include <asm/dis.h>
+#include <asm/facility.h>
+#include <asm/nospec-branch.h>
 #include "bpf_jit.h"
 
 int bpf_jit_enable __read_mostly;
@@ -41,6 +43,8 @@
 	int base_ip;		/* Base address for literal pool */
 	int ret0_ip;		/* Address of return 0 */
 	int exit_ip;		/* Address of exit */
+	int r1_thunk_ip;	/* Address of expoline thunk for 'br %r1' */
+	int r14_thunk_ip;	/* Address of expoline thunk for 'br %r14' */
 	int tail_call_start;	/* Tail call start offset */
 	int labels[1];		/* Labels for local jumps */
 };
@@ -251,6 +255,19 @@
 	REG_SET_SEEN(b2);					\
 })
 
+#define EMIT6_PCREL_RILB(op, b, target)				\
+({								\
+	int rel = (target - jit->prg) / 2;			\
+	_EMIT6(op | reg_high(b) << 16 | rel >> 16, rel & 0xffff);	\
+	REG_SET_SEEN(b);					\
+})
+
+#define EMIT6_PCREL_RIL(op, target)				\
+({								\
+	int rel = (target - jit->prg) / 2;			\
+	_EMIT6(op | rel >> 16, rel & 0xffff);			\
+})
+
 #define _EMIT6_IMM(op, imm)					\
 ({								\
 	unsigned int __imm = (imm);				\
@@ -470,8 +487,45 @@
 	EMIT4(0xb9040000, REG_2, BPF_REG_0);
 	/* Restore registers */
 	save_restore_regs(jit, REGS_RESTORE);
+	if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
+		jit->r14_thunk_ip = jit->prg;
+		/* Generate __s390_indirect_jump_r14 thunk */
+		if (test_facility(35)) {
+			/* exrl %r0,.+10 */
+			EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
+		} else {
+			/* larl %r1,.+14 */
+			EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
+			/* ex 0,0(%r1) */
+			EMIT4_DISP(0x44000000, REG_0, REG_1, 0);
+		}
+		/* j . */
+		EMIT4_PCREL(0xa7f40000, 0);
+	}
 	/* br %r14 */
 	_EMIT2(0x07fe);
+
+	if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable &&
+	    (jit->seen & SEEN_FUNC)) {
+		jit->r1_thunk_ip = jit->prg;
+		/* Generate __s390_indirect_jump_r1 thunk */
+		if (test_facility(35)) {
+			/* exrl %r0,.+10 */
+			EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
+			/* j . */
+			EMIT4_PCREL(0xa7f40000, 0);
+			/* br %r1 */
+			_EMIT2(0x07f1);
+		} else {
+			/* larl %r1,.+14 */
+			EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
+			/* ex 0,S390_lowcore.br_r1_trampoline */
+			EMIT4_DISP(0x44000000, REG_0, REG_0,
+				   offsetof(struct lowcore, br_r1_trampoline));
+			/* j . */
+			EMIT4_PCREL(0xa7f40000, 0);
+		}
+	}
 }
 
 /*
@@ -977,8 +1031,13 @@
 		/* lg %w1,<d(imm)>(%l) */
 		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
 			      EMIT_CONST_U64(func));
-		/* basr %r14,%w1 */
-		EMIT2(0x0d00, REG_14, REG_W1);
+		if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
+			/* brasl %r14,__s390_indirect_jump_r1 */
+			EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
+		} else {
+			/* basr %r14,%w1 */
+			EMIT2(0x0d00, REG_14, REG_W1);
+		}
 		/* lgr %b0,%r2: load return value into %b0 */
 		EMIT4(0xb9040000, BPF_REG_0, REG_2);
 		if (bpf_helper_changes_skb_data((void *)func)) {
diff --git a/arch/sh/include/asm/futex.h b/arch/sh/include/asm/futex.h
index d007874..8f8cf94 100644
--- a/arch/sh/include/asm/futex.h
+++ b/arch/sh/include/asm/futex.h
@@ -27,21 +27,12 @@
 	return atomic_futex_op_cmpxchg_inatomic(uval, uaddr, oldval, newval);
 }
 
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval,
+		u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	u32 oparg = (encoded_op << 8) >> 20;
-	u32 cmparg = (encoded_op << 20) >> 20;
 	u32 oldval, newval, prev;
 	int ret;
 
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
-		return -EFAULT;
-
 	pagefault_disable();
 
 	do {
@@ -80,17 +71,8 @@
 
 	pagefault_enable();
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
-		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
-		case FUTEX_OP_CMP_LT: ret = ((int)oldval < (int)cmparg); break;
-		case FUTEX_OP_CMP_GE: ret = ((int)oldval >= (int)cmparg); break;
-		case FUTEX_OP_CMP_LE: ret = ((int)oldval <= (int)cmparg); break;
-		case FUTEX_OP_CMP_GT: ret = ((int)oldval > (int)cmparg); break;
-		default: ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
 
 	return ret;
 }
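
Here, and in the sparc and tile conversions below, the per-arch helper now only performs the atomic operation and reports the old value; decoding encoded_op and the cmparg comparison move into common code. Roughly, the shared wrapper is expected to look like the sketch below (an assumed shape of the generic futex path after this refactor, not code taken from this patch):

	static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
	{
		unsigned int op  = (encoded_op & 0x70000000) >> 28;
		unsigned int cmp = (encoded_op & 0x0f000000) >> 24;
		int oparg  = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
		int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
		int oldval, ret;

		if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
			oparg = 1 << oparg;

		/* The arch helper does the atomic op and hands back the old value */
		ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
		if (ret)
			return ret;

		/* The comparison step removed from the arch code lives here now */
		switch (cmp) {
		case FUTEX_OP_CMP_EQ: return oldval == cmparg;
		case FUTEX_OP_CMP_NE: return oldval != cmparg;
		case FUTEX_OP_CMP_LT: return oldval <  cmparg;
		case FUTEX_OP_CMP_GE: return oldval >= cmparg;
		case FUTEX_OP_CMP_LE: return oldval <= cmparg;
		case FUTEX_OP_CMP_GT: return oldval >  cmparg;
		default:	      return -ENOSYS;
		}
	}
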
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index c001f78..28cc612 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -255,7 +255,7 @@
 	mov.l	@r8, r8
 	jsr	@r8
 	 nop
-	bra	__restore_all
+	bra	ret_from_exception
 	 nop
 	CFI_ENDPROC
 
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index 24827a3..89d299c 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -82,7 +82,11 @@
 #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
 
 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+static inline int atomic_xchg(atomic_t *v, int new)
+{
+	return xchg(&v->counter, new);
+}
 
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
diff --git a/arch/sparc/include/asm/futex_64.h b/arch/sparc/include/asm/futex_64.h
index 4e899b0..1cfd89d 100644
--- a/arch/sparc/include/asm/futex_64.h
+++ b/arch/sparc/include/asm/futex_64.h
@@ -29,22 +29,14 @@
 	: "r" (uaddr), "r" (oparg), "i" (-EFAULT)	\
 	: "memory")
 
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+		u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, ret, tem;
 
-	if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
-		return -EFAULT;
 	if (unlikely((((unsigned long) uaddr) & 0x3UL)))
 		return -EINVAL;
 
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
 	pagefault_disable();
 
 	switch (op) {
@@ -69,17 +61,9 @@
 
 	pagefault_enable();
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
-		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
-		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
-		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
-		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
-		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
-		default: ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
+
 	return ret;
 }
 
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index b6802b9..81ad06a 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -952,7 +952,7 @@
 			  pmd_t *pmd);
 
 #define __HAVE_ARCH_PMDP_INVALIDATE
-extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 			    pmd_t *pmdp);
 
 #define __HAVE_ARCH_PGTABLE_DEPOSIT
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index 59d5038..9cc600b 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -1733,9 +1733,14 @@
 
 		lp->rcv_nxt = p->seqid;
 
+		/*
+		 * If this is a control-only packet, there is nothing
+		 * else to do but advance the rx queue since the packet
+		 * was already processed above.
+		 */
 		if (!(p->type & LDC_DATA)) {
 			new = rx_advance(lp, new);
-			goto no_data;
+			break;
 		}
 		if (p->stype & (LDC_ACK | LDC_NACK)) {
 			err = data_ack_nack(lp, p);
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index c56a195..b2722ed 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -219,17 +219,28 @@
 	}
 }
 
+static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
+		unsigned long address, pmd_t *pmdp, pmd_t pmd)
+{
+	pmd_t old;
+
+	do {
+		old = *pmdp;
+	} while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
+
+	return old;
+}
+
 /*
  * This routine is only called when splitting a THP
  */
-void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 		     pmd_t *pmdp)
 {
-	pmd_t entry = *pmdp;
+	pmd_t old, entry;
 
-	pmd_val(entry) &= ~_PAGE_VALID;
-
-	set_pmd_at(vma->vm_mm, address, pmdp, entry);
+	entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);
+	old = pmdp_establish(vma, address, pmdp, entry);
 	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 
 	/*
@@ -240,6 +251,8 @@
 	if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
 	    !is_huge_zero_page(pmd_page(entry)))
 		(vma->vm_mm)->context.thp_pte_count--;
+
+	return old;
 }
 
 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h
index e64a1b7..83c1e63 100644
--- a/arch/tile/include/asm/futex.h
+++ b/arch/tile/include/asm/futex.h
@@ -106,12 +106,9 @@
 	lock = __atomic_hashed_lock((int __force *)uaddr)
 #endif
 
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval,
+		u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int uninitialized_var(val), ret;
 
 	__futex_prolog();
@@ -119,12 +116,6 @@
 	/* The 32-bit futex code makes this assumption, so validate it here. */
 	BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
 
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
-		return -EFAULT;
-
 	pagefault_disable();
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -148,30 +139,9 @@
 	}
 	pagefault_enable();
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ:
-			ret = (val == cmparg);
-			break;
-		case FUTEX_OP_CMP_NE:
-			ret = (val != cmparg);
-			break;
-		case FUTEX_OP_CMP_LT:
-			ret = (val < cmparg);
-			break;
-		case FUTEX_OP_CMP_GE:
-			ret = (val >= cmparg);
-			break;
-		case FUTEX_OP_CMP_LE:
-			ret = (val <= cmparg);
-			break;
-		case FUTEX_OP_CMP_GT:
-			ret = (val > cmparg);
-			break;
-		default:
-			ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = val;
+
 	return ret;
 }
 
diff --git a/arch/um/os-Linux/file.c b/arch/um/os-Linux/file.c
index 2db18cb..c019709 100644
--- a/arch/um/os-Linux/file.c
+++ b/arch/um/os-Linux/file.c
@@ -12,6 +12,7 @@
 #include <sys/mount.h>
 #include <sys/socket.h>
 #include <sys/stat.h>
+#include <sys/sysmacros.h>
 #include <sys/un.h>
 #include <sys/types.h>
 #include <os.h>
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index a86d7cc..bf0acb8 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -16,6 +16,7 @@
 #include <os.h>
 #include <sysdep/mcontext.h>
 #include <um_malloc.h>
+#include <sys/ucontext.h>
 
 void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
 	[SIGTRAP]	= relay_signal,
@@ -159,7 +160,7 @@
 
 static void hard_handler(int sig, siginfo_t *si, void *p)
 {
-	struct ucontext *uc = p;
+	ucontext_t *uc = p;
 	mcontext_t *mc = &uc->uc_mcontext;
 	unsigned long pending = 1UL << sig;
 
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index cc69e37..c0ad1bb 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -330,7 +330,8 @@
 	if (status != EFI_SUCCESS)
 		goto free_struct;
 
-	memcpy(rom->romdata, pci->romimage, pci->romsize);
+	memcpy(rom->romdata, (void *)(unsigned long)pci->romimage,
+	       pci->romsize);
 	return status;
 
 free_struct:
@@ -436,7 +437,8 @@
 	if (status != EFI_SUCCESS)
 		goto free_struct;
 
-	memcpy(rom->romdata, pci->romimage, pci->romsize);
+	memcpy(rom->romdata, (void *)(unsigned long)pci->romimage,
+	       pci->romsize);
 	return status;
 
 free_struct:
diff --git a/arch/x86/boot/compressed/error.h b/arch/x86/boot/compressed/error.h
index 2e59dac..d732e60 100644
--- a/arch/x86/boot/compressed/error.h
+++ b/arch/x86/boot/compressed/error.h
@@ -1,7 +1,9 @@
 #ifndef BOOT_COMPRESSED_ERROR_H
 #define BOOT_COMPRESSED_ERROR_H
 
+#include <linux/compiler.h>
+
 void warn(char *m);
-void error(char *m);
+void error(char *m) __noreturn;
 
 #endif /* BOOT_COMPRESSED_ERROR_H */
diff --git a/arch/x86/configs/x86_64_cuttlefish_defconfig b/arch/x86/configs/x86_64_cuttlefish_defconfig
new file mode 100644
index 0000000..308aac3
--- /dev/null
+++ b/arch/x86/configs/x86_64_cuttlefish_defconfig
@@ -0,0 +1,464 @@
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_FHANDLE is not set
+# CONFIG_USELIB is not set
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_BPF=y
+CONFIG_NAMESPACES=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_LZ4 is not set
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_PCSPKR_PLATFORM is not set
+CONFIG_BPF_SYSCALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_SMP=y
+CONFIG_HYPERVISOR_GUEST=y
+CONFIG_PARAVIRT=y
+CONFIG_PARAVIRT_SPINLOCKS=y
+CONFIG_MCORE2=y
+CONFIG_PROCESSOR_SELECT=y
+# CONFIG_CPU_SUP_CENTAUR is not set
+CONFIG_NR_CPUS=8
+CONFIG_PREEMPT=y
+# CONFIG_MICROCODE is not set
+CONFIG_X86_MSR=y
+CONFIG_X86_CPUID=y
+CONFIG_KSM=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
+CONFIG_TRANSPARENT_HUGEPAGE=y
+# CONFIG_MTRR is not set
+CONFIG_HZ_100=y
+CONFIG_KEXEC=y
+CONFIG_CRASH_DUMP=y
+CONFIG_PHYSICAL_START=0x200000
+CONFIG_RANDOMIZE_BASE=y
+CONFIG_PHYSICAL_ALIGN=0x1000000
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=ttyS0 reboot=p nopti"
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_DEBUG=y
+CONFIG_ACPI_PROCFS_POWER=y
+# CONFIG_ACPI_FAN is not set
+# CONFIG_ACPI_THERMAL is not set
+# CONFIG_X86_PM_TIMER is not set
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_X86_ACPI_CPUFREQ=y
+# CONFIG_X86_ACPI_CPUFREQ_CPB is not set
+CONFIG_PCI_MMCONFIG=y
+CONFIG_PCI_MSI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_IA32_EMULATION=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_XFRM_MODE_BEET is not set
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_TCP_CONG_ADVANCED=y
+# CONFIG_TCP_CONG_BIC is not set
+# CONFIG_TCP_CONG_WESTWOOD is not set
+# CONFIG_TCP_CONG_HTCP is not set
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETLABEL=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_IPV6HEADER=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_CFG80211=y
+CONFIG_MAC80211=y
+CONFIG_RFKILL=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEBUG_DEVRES=y
+CONFIG_OF=y
+CONFIG_OF_UNITTEST=y
+# CONFIG_PNP_DEBUG_MESSAGES is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_VIRTIO_BLK=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_MEMORY_STATE_TIME=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_SPI_ATTRS=y
+CONFIG_SCSI_VIRTIO=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_MIRROR=y
+CONFIG_DM_ZERO=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_NETDEVICES=y
+CONFIG_NETCONSOLE=y
+CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_TUN=y
+CONFIG_VIRTIO_NET=y
+# CONFIG_ETHERNET is not set
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_USB_USBNET=y
+# CONFIG_USB_NET_AX8817X is not set
+# CONFIG_USB_NET_AX88179_178A is not set
+# CONFIG_USB_NET_CDCETHER is not set
+# CONFIG_USB_NET_CDC_NCM is not set
+# CONFIG_USB_NET_NET1080 is not set
+# CONFIG_USB_NET_CDC_SUBSET is not set
+# CONFIG_USB_NET_ZAURUS is not set
+# CONFIG_WLAN_VENDOR_ADMTEK is not set
+# CONFIG_WLAN_VENDOR_ATH is not set
+# CONFIG_WLAN_VENDOR_ATMEL is not set
+# CONFIG_WLAN_VENDOR_BROADCOM is not set
+# CONFIG_WLAN_VENDOR_CISCO is not set
+# CONFIG_WLAN_VENDOR_INTEL is not set
+# CONFIG_WLAN_VENDOR_INTERSIL is not set
+# CONFIG_WLAN_VENDOR_MARVELL is not set
+# CONFIG_WLAN_VENDOR_MEDIATEK is not set
+# CONFIG_WLAN_VENDOR_RALINK is not set
+# CONFIG_WLAN_VENDOR_REALTEK is not set
+# CONFIG_WLAN_VENDOR_RSI is not set
+# CONFIG_WLAN_VENDOR_ST is not set
+# CONFIG_WLAN_VENDOR_TI is not set
+# CONFIG_WLAN_VENDOR_ZYDAS is not set
+CONFIG_MAC80211_HWSIM=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=y
+CONFIG_JOYSTICK_XPAD_FF=y
+CONFIG_JOYSTICK_XPAD_LEDS=y
+CONFIG_INPUT_TABLET=y
+CONFIG_TABLET_USB_ACECAD=y
+CONFIG_TABLET_USB_AIPTEK=y
+CONFIG_TABLET_USB_GTCO=y
+CONFIG_TABLET_USB_HANWANG=y
+CONFIG_TABLET_USB_KBTAB=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_KEYCHORD=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+# CONFIG_SERIO_I8042 is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=48
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_INTEL is not set
+# CONFIG_HW_RANDOM_AMD is not set
+# CONFIG_HW_RANDOM_VIA is not set
+CONFIG_HW_RANDOM_VIRTIO=y
+CONFIG_HPET=y
+# CONFIG_HPET_MMAP_DEFAULT is not set
+# CONFIG_DEVPORT is not set
+# CONFIG_ACPI_I2C_OPREGION is not set
+# CONFIG_I2C_COMPAT is not set
+# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_PTP_1588_CLOCK=y
+# CONFIG_HWMON is not set
+# CONFIG_X86_PKG_TEMP_THERMAL is not set
+CONFIG_WATCHDOG=y
+CONFIG_SOFT_WATCHDOG=y
+CONFIG_MEDIA_SUPPORT=y
+# CONFIG_DVB_TUNER_DIB0070 is not set
+# CONFIG_DVB_TUNER_DIB0090 is not set
+# CONFIG_VGA_ARB is not set
+CONFIG_DRM=y
+# CONFIG_DRM_FBDEV_EMULATION is not set
+CONFIG_DRM_VIRTIO_GPU=y
+CONFIG_FB=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_ACRUX=y
+CONFIG_HID_ACRUX_FF=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_PRODIKEYS=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_DRAGONRISE_FF=y
+CONFIG_HID_EMS_FF=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_HOLTEK=y
+CONFIG_HID_KEYTOUCH=y
+CONFIG_HID_KYE=y
+CONFIG_HID_UCLOGIC=y
+CONFIG_HID_WALTOP=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LCPOWER=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=y
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_LOGIG940_FF=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_ORTEK=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_PANTHERLORD_FF=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_PICOLCD=y
+CONFIG_HID_PRIMAX=y
+CONFIG_HID_ROCCAT=y
+CONFIG_HID_SAITEK=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SPEEDLINK=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
+CONFIG_GREENASIA_FF=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_SMARTJOYPLUS_FF=y
+CONFIG_HID_TIVO=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_THRUSTMASTER=y
+CONFIG_HID_WACOM=y
+CONFIG_HID_WIIMOTE=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_HID_ZYDACRON=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_DUMMY_HCD=y
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_HCTOSYS is not set
+CONFIG_SW_SYNC=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_VSOC=y
+CONFIG_ION=y
+# CONFIG_X86_PLATFORM_DEVICES is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+# CONFIG_FIRMWARE_MEMMAP is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_FS_ENCRYPTION=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
+CONFIG_QFMT_V2=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_SDCARD_FS=y
+CONFIG_PSTORE=y
+CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_RAM=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_ENABLE_DEFAULT_TRACERS=y
+CONFIG_UPROBE_EVENT=y
+CONFIG_IO_DELAY_NONE=y
+CONFIG_DEBUG_BOOT_PARAMS=y
+CONFIG_OPTIMIZE_INLINING=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_PATH=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_SHA512=y
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 34b3fa2..9e32d40 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -2,6 +2,8 @@
 # Arch-specific CryptoAPI modules.
 #
 
+OBJECT_FILES_NON_STANDARD := y
+
 avx_supported := $(call as-instr,vpxor %xmm0$(comma)%xmm0$(comma)%xmm0,yes,no)
 avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
 				$(comma)4)$(comma)%ymm2,yes,no)
diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
index 8648158..f8fe11d 100644
--- a/arch/x86/crypto/cast5_avx_glue.c
+++ b/arch/x86/crypto/cast5_avx_glue.c
@@ -66,8 +66,6 @@
 	void (*fn)(struct cast5_ctx *ctx, u8 *dst, const u8 *src);
 	int err;
 
-	fn = (enc) ? cast5_ecb_enc_16way : cast5_ecb_dec_16way;
-
 	err = blkcipher_walk_virt(desc, walk);
 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
@@ -79,6 +77,7 @@
 
 		/* Process multi-block batch */
 		if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
+			fn = (enc) ? cast5_ecb_enc_16way : cast5_ecb_dec_16way;
 			do {
 				fn(ctx, wdst, wsrc);
 
diff --git a/arch/x86/crypto/sha1-mb/Makefile b/arch/x86/crypto/sha1-mb/Makefile
index 2f87563..2e14acc 100644
--- a/arch/x86/crypto/sha1-mb/Makefile
+++ b/arch/x86/crypto/sha1-mb/Makefile
@@ -2,6 +2,8 @@
 # Arch-specific CryptoAPI modules.
 #
 
+OBJECT_FILES_NON_STANDARD := y
+
 avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
                                 $(comma)4)$(comma)%ymm2,yes,no)
 ifeq ($(avx2_supported),yes)
diff --git a/arch/x86/crypto/sha256-mb/Makefile b/arch/x86/crypto/sha256-mb/Makefile
index 41089e7..45b4fca 100644
--- a/arch/x86/crypto/sha256-mb/Makefile
+++ b/arch/x86/crypto/sha256-mb/Makefile
@@ -2,6 +2,8 @@
 # Arch-specific CryptoAPI modules.
 #
 
+OBJECT_FILES_NON_STANDARD := y
+
 avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
                                 $(comma)4)$(comma)%ymm2,yes,no)
 ifeq ($(avx2_supported),yes)
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index d540966..51a858e 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -171,7 +171,8 @@
 		 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
 
 VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=both) \
-	$(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
+	$(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS) \
+	$(filter --target=% --gcc-toolchain=%,$(KBUILD_CFLAGS))
 GCOV_PROFILE := n
 
 #
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index f73796d..655a65e 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -26,6 +26,7 @@
 #include <linux/cpu.h>
 #include <linux/bitops.h>
 #include <linux/device.h>
+#include <linux/nospec.h>
 
 #include <asm/apic.h>
 #include <asm/stacktrace.h>
@@ -303,17 +304,20 @@
 
 	config = attr->config;
 
-	cache_type = (config >>  0) & 0xff;
+	cache_type = (config >> 0) & 0xff;
 	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
 		return -EINVAL;
+	cache_type = array_index_nospec(cache_type, PERF_COUNT_HW_CACHE_MAX);
 
 	cache_op = (config >>  8) & 0xff;
 	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
 		return -EINVAL;
+	cache_op = array_index_nospec(cache_op, PERF_COUNT_HW_CACHE_OP_MAX);
 
 	cache_result = (config >> 16) & 0xff;
 	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
 		return -EINVAL;
+	cache_result = array_index_nospec(cache_result, PERF_COUNT_HW_CACHE_RESULT_MAX);
 
 	val = hw_cache_event_ids[cache_type][cache_op][cache_result];
 
@@ -420,6 +424,8 @@
 	if (attr->config >= x86_pmu.max_events)
 		return -EINVAL;
 
+	attr->config = array_index_nospec((unsigned long)attr->config, x86_pmu.max_events);
+
 	/*
 	 * The generic map:
 	 */
@@ -1149,16 +1155,13 @@
 
 	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
 
-	if (!(hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) ||
-	    local64_read(&hwc->prev_count) != (u64)-left) {
-		/*
-		 * The hw event starts counting from this event offset,
-		 * mark it to be able to extra future deltas:
-		 */
-		local64_set(&hwc->prev_count, (u64)-left);
+	/*
+	 * The hw event starts counting from this event offset,
+	 * mark it to be able to extract future deltas:
+	 */
+	local64_set(&hwc->prev_count, (u64)-left);
 
-		wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
-	}
+	wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
 
 	/*
 	 * Due to erratum on certan cpu we need
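Note on the hunks above: attr->config and the cache_type/cache_op/cache_result fields are first range-checked and then clamped with array_index_nospec(), so a mispredicted bounds check cannot be used to speculatively index past the end of the event tables. As a rough sketch of the idea behind that clamp (not the kernel's exact implementation, which on x86 is a CMP/SBB asm sequence):

    /*
     * Build an all-ones mask when index < size and an all-zeroes mask
     * otherwise, without a conditional branch the CPU could mispredict,
     * then AND the index with the mask.  Relies on arithmetic right
     * shift of a negative value, as the kernel's generic helper does.
     */
    static inline unsigned long index_nospec_sketch(unsigned long index,
                                                    unsigned long size)
    {
        unsigned long mask =
            ~(long)(index | (size - 1UL - index)) >> (sizeof(long) * 8 - 1);

        return index & mask;
    }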
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 6f353a8..8150393 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2066,16 +2066,23 @@
 	int bit, loops;
 	u64 status;
 	int handled;
+	int pmu_enabled;
 
 	cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	/*
+	 * Save the PMU state.
+	 * It needs to be restored when leaving the handler.
+	 */
+	pmu_enabled = cpuc->enabled;
+	/*
 	 * No known reason to not always do late ACK,
 	 * but just in case do it opt-in.
 	 */
 	if (!x86_pmu.late_ack)
 		apic_write(APIC_LVTPC, APIC_DM_NMI);
 	intel_bts_disable_local();
+	cpuc->enabled = 0;
 	__intel_pmu_disable_all();
 	handled = intel_pmu_drain_bts_buffer();
 	handled += intel_bts_interrupt();
@@ -2173,7 +2180,8 @@
 
 done:
 	/* Only restore PMU state when it's active. See x86_pmu_disable(). */
-	if (cpuc->enabled)
+	cpuc->enabled = pmu_enabled;
+	if (pmu_enabled)
 		__intel_pmu_enable_all(0, true);
 	intel_bts_enable_local();
 
@@ -3019,7 +3027,7 @@
  * Therefore the effective (average) period matches the requested period,
  * despite coarser hardware granularity.
  */
-static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
+static u64 bdw_limit_period(struct perf_event *event, u64 left)
 {
 	if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
 			X86_CONFIG(.event=0xc0, .umask=0x01)) {
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 1076c9a..47d526c 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -90,6 +90,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/perf_event.h>
+#include <linux/nospec.h>
 #include <asm/cpu_device_id.h>
 #include <asm/intel-family.h>
 #include "../perf_event.h"
@@ -300,6 +301,7 @@
 	} else if (event->pmu == &cstate_pkg_pmu) {
 		if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
 			return -EINVAL;
+		cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
 		if (!pkg_msr[cfg].attr)
 			return -EINVAL;
 		event->hw.event_base = pkg_msr[cfg].msr;
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 8e7a3f1..f26e26e 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1110,6 +1110,7 @@
 	if (pebs == NULL)
 		return;
 
+	regs->flags &= ~PERF_EFLAGS_EXACT;
 	sample_type = event->attr.sample_type;
 	dsrc = sample_type & PERF_SAMPLE_DATA_SRC;
 
@@ -1154,7 +1155,6 @@
 	 */
 	*regs = *iregs;
 	regs->flags = pebs->flags;
-	set_linear_ip(regs, pebs->ip);
 
 	if (sample_type & PERF_SAMPLE_REGS_INTR) {
 		regs->ax = pebs->ax;
@@ -1190,13 +1190,22 @@
 #endif
 	}
 
-	if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format >= 2) {
-		regs->ip = pebs->real_ip;
-		regs->flags |= PERF_EFLAGS_EXACT;
-	} else if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(regs))
-		regs->flags |= PERF_EFLAGS_EXACT;
-	else
-		regs->flags &= ~PERF_EFLAGS_EXACT;
+	if (event->attr.precise_ip > 1) {
+		/* Haswell and later have the eventing IP, so use it: */
+		if (x86_pmu.intel_cap.pebs_format >= 2) {
+			set_linear_ip(regs, pebs->real_ip);
+			regs->flags |= PERF_EFLAGS_EXACT;
+		} else {
+			/* Otherwise use PEBS off-by-1 IP: */
+			set_linear_ip(regs, pebs->ip);
+
+			/* ... and try to fix it up using the LBR entries: */
+			if (intel_pmu_pebs_fixup_ip(regs))
+				regs->flags |= PERF_EFLAGS_EXACT;
+		}
+	} else
+		set_linear_ip(regs, pebs->ip);
+
 
 	if ((sample_type & PERF_SAMPLE_ADDR) &&
 	    x86_pmu.intel_cap.pebs_format >= 1)
@@ -1263,17 +1272,84 @@
 	return NULL;
 }
 
+/*
+ * Special variant of intel_pmu_save_and_restart() for auto-reload.
+ */
+static int
+intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	int shift = 64 - x86_pmu.cntval_bits;
+	u64 period = hwc->sample_period;
+	u64 prev_raw_count, new_raw_count;
+	s64 new, old;
+
+	WARN_ON(!period);
+
+	/*
+	 * drain_pebs() only happens when the PMU is disabled.
+	 */
+	WARN_ON(this_cpu_read(cpu_hw_events.enabled));
+
+	prev_raw_count = local64_read(&hwc->prev_count);
+	rdpmcl(hwc->event_base_rdpmc, new_raw_count);
+	local64_set(&hwc->prev_count, new_raw_count);
+
+	/*
+	 * Since the counter increments a negative counter value and
+	 * overflows on the sign switch, giving the interval:
+	 *
+	 *   [-period, 0]
+	 *
+	 * the difference between two consecutive reads is:
+	 *
+	 *   A) value2 - value1;
+	 *      when no overflows have happened in between,
+	 *
+	 *   B) (0 - value1) + (value2 - (-period));
+	 *      when one overflow happened in between,
+	 *
+	 *   C) (0 - value1) + (n - 1) * (period) + (value2 - (-period));
+	 *      when @n overflows happened in between.
+	 *
+	 * Here A) is the obvious difference, B) is the extension to the
+	 * discrete interval, where the first term is to the top of the
+	 * interval and the second term is from the bottom of the next
+	 * interval and C) the extension to multiple intervals, where the
+	 * middle term is the whole intervals covered.
+	 *
+	 * An equivalent of C, by reduction, is:
+	 *
+	 *   value2 - value1 + n * period
+	 */
+	new = ((s64)(new_raw_count << shift) >> shift);
+	old = ((s64)(prev_raw_count << shift) >> shift);
+	local64_add(new - old + count * period, &event->count);
+
+	perf_event_update_userpage(event);
+
+	return 0;
+}
+
 static void __intel_pmu_pebs_event(struct perf_event *event,
 				   struct pt_regs *iregs,
 				   void *base, void *top,
 				   int bit, int count)
 {
+	struct hw_perf_event *hwc = &event->hw;
 	struct perf_sample_data data;
 	struct pt_regs regs;
 	void *at = get_next_pebs_record_by_bit(base, top, bit);
 
-	if (!intel_pmu_save_and_restart(event) &&
-	    !(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD))
+	if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
+		/*
+		 * Now, auto-reload is only enabled in fixed period mode.
+		 * The reload value is always hwc->sample_period.
+		 * This may need to change if auto-reload is enabled in
+		 * freq mode later.
+		 */
+		intel_pmu_save_and_restart_reload(event, count);
+	} else if (!intel_pmu_save_and_restart(event))
 		return;
 
 	while (count > 1) {
@@ -1325,8 +1401,11 @@
 		return;
 
 	n = top - at;
-	if (n <= 0)
+	if (n <= 0) {
+		if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
+			intel_pmu_save_and_restart_reload(event, 0);
 		return;
+	}
 
 	__intel_pmu_pebs_event(event, iregs, at, top, 0, n);
 }
@@ -1349,8 +1428,22 @@
 
 	ds->pebs_index = ds->pebs_buffer_base;
 
-	if (unlikely(base >= top))
+	if (unlikely(base >= top)) {
+		/*
+		 * The drain_pebs() could be called twice in a short period
+		 * for an auto-reload event in pmu::read(), with no
+		 * overflows having happened in between.
+		 * It needs to call intel_pmu_save_and_restart_reload() to
+		 * update the event->count for this case.
+		 */
+		for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled,
+				 x86_pmu.max_pebs_events) {
+			event = cpuc->events[bit];
+			if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
+				intel_pmu_save_and_restart_reload(event, 0);
+		}
 		return;
+	}
 
 	for (at = base; at < top; at += x86_pmu.pebs_record_size) {
 		struct pebs_record_nhm *p = at;
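The long comment in intel_pmu_save_and_restart_reload() above reduces the per-drain delta to value2 - value1 + n * period. A small worked example of that arithmetic with hypothetical numbers, assuming a 48-bit counter width for illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const int shift = 64 - 48;                /* 48-bit counter width assumed */
        const uint64_t cnt_mask = (1ULL << 48) - 1;
        uint64_t period   = 1000000;              /* hwc->sample_period */
        uint64_t prev_raw = (uint64_t)-200000 & cnt_mask;  /* counter at last read */
        uint64_t new_raw  = (uint64_t)-700000 & cnt_mask;  /* counter now */
        int count = 3;                            /* PEBS records drained */

        /* Sign-extend the raw 48-bit values, as the kernel code does. */
        int64_t new_val = (int64_t)(new_raw << shift) >> shift;   /* -700000 */
        int64_t old_val = (int64_t)(prev_raw << shift) >> shift;  /* -200000 */

        /* value2 - value1 + n * period = -500000 + 3000000 = 2500000 */
        printf("delta = %lld\n",
               (long long)(new_val - old_val + (int64_t)count * period));
        return 0;
    }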
diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
index 4bb3ec6..be0b196 100644
--- a/arch/x86/events/msr.c
+++ b/arch/x86/events/msr.c
@@ -1,4 +1,5 @@
 #include <linux/perf_event.h>
+#include <linux/nospec.h>
 #include <asm/intel-family.h>
 
 enum perf_msr_id {
@@ -136,9 +137,6 @@
 	if (event->attr.type != event->pmu->type)
 		return -ENOENT;
 
-	if (cfg >= PERF_MSR_EVENT_MAX)
-		return -EINVAL;
-
 	/* unsupported modes and filters */
 	if (event->attr.exclude_user   ||
 	    event->attr.exclude_kernel ||
@@ -149,6 +147,11 @@
 	    event->attr.sample_period) /* no sampling */
 		return -EINVAL;
 
+	if (cfg >= PERF_MSR_EVENT_MAX)
+		return -EINVAL;
+
+	cfg = array_index_nospec((unsigned long)cfg, PERF_MSR_EVENT_MAX);
+
 	if (!msr[cfg].attr)
 		return -EINVAL;
 
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index bcbb1d2..f356317 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -548,7 +548,7 @@
 	struct x86_pmu_quirk *quirks;
 	int		perfctr_second_write;
 	bool		late_ack;
-	unsigned	(*limit_period)(struct perf_event *event, unsigned l);
+	u64		(*limit_period)(struct perf_event *event, u64 l);
 
 	/*
 	 * sysfs attrs
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index a248531..c278f27 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -197,6 +197,9 @@
 #define X86_FEATURE_RETPOLINE	( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
 #define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
 
+#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
+#define X86_FEATURE_SSBD	( 7*32+17) /* Speculative Store Bypass Disable */
+
 #define X86_FEATURE_RSB_CTXSW	( 7*32+19) /* "" Fill RSB on context switches */
 
 /* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
@@ -204,6 +207,13 @@
 
 #define X86_FEATURE_USE_IBPB	( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
 #define X86_FEATURE_USE_IBRS_FW	( 7*32+22) /* "" Use IBRS during runtime firmware calls */
+#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
+#define X86_FEATURE_LS_CFG_SSBD	( 7*32+24) /* "" AMD SSBD implementation */
+#define X86_FEATURE_IBRS	( 7*32+25) /* Indirect Branch Restricted Speculation */
+#define X86_FEATURE_IBPB	( 7*32+26) /* Indirect Branch Prediction Barrier */
+#define X86_FEATURE_STIBP	( 7*32+27) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_ZEN		( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
+
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW  ( 8*32+ 0) /* Intel TPR Shadow */
@@ -261,9 +271,10 @@
 /* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
 #define X86_FEATURE_CLZERO	(13*32+0) /* CLZERO instruction */
 #define X86_FEATURE_IRPERF	(13*32+1) /* Instructions Retired Count */
-#define X86_FEATURE_IBPB	(13*32+12) /* Indirect Branch Prediction Barrier */
-#define X86_FEATURE_IBRS	(13*32+14) /* Indirect Branch Restricted Speculation */
-#define X86_FEATURE_STIBP	(13*32+15) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_AMD_IBPB	(13*32+12) /* Indirect Branch Prediction Barrier */
+#define X86_FEATURE_AMD_IBRS	(13*32+14) /* Indirect Branch Restricted Speculation */
+#define X86_FEATURE_AMD_STIBP	(13*32+15) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_VIRT_SSBD	(13*32+25) /* Virtualized Speculative Store Bypass Disable */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
 #define X86_FEATURE_DTHERM	(14*32+ 0) /* Digital Thermal Sensor */
@@ -299,6 +310,7 @@
 #define X86_FEATURE_SUCCOR	(17*32+1) /* Uncorrectable error containment and recovery */
 #define X86_FEATURE_SMCA	(17*32+3) /* Scalable MCA */
 
+
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
 #define X86_FEATURE_AVX512_4VNNIW	(18*32+ 2) /* AVX-512 Neural Network Instructions */
 #define X86_FEATURE_AVX512_4FMAPS	(18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
@@ -306,6 +318,7 @@
 #define X86_FEATURE_SPEC_CTRL		(18*32+26) /* "" Speculation Control (IBRS + IBPB) */
 #define X86_FEATURE_INTEL_STIBP		(18*32+27) /* "" Single Thread Indirect Branch Predictors */
 #define X86_FEATURE_ARCH_CAPABILITIES	(18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+#define X86_FEATURE_SPEC_CTRL_SSBD	(18*32+31) /* "" Speculative Store Bypass Disable */
 
 /*
  * BUG word(s)
@@ -335,5 +348,6 @@
 #define X86_BUG_CPU_MELTDOWN	X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
 #define X86_BUG_SPECTRE_V1	X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
 #define X86_BUG_SPECTRE_V2	X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
+#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
index b4c1f54..f4dc9b6 100644
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -41,20 +41,11 @@
 		       "+m" (*uaddr), "=&r" (tem)		\
 		     : "r" (oparg), "i" (-EFAULT), "1" (0))
 
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+		u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, ret, tem;
 
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
-		return -EFAULT;
-
 	pagefault_disable();
 
 	switch (op) {
@@ -80,30 +71,9 @@
 
 	pagefault_enable();
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ:
-			ret = (oldval == cmparg);
-			break;
-		case FUTEX_OP_CMP_NE:
-			ret = (oldval != cmparg);
-			break;
-		case FUTEX_OP_CMP_LT:
-			ret = (oldval < cmparg);
-			break;
-		case FUTEX_OP_CMP_GE:
-			ret = (oldval >= cmparg);
-			break;
-		case FUTEX_OP_CMP_LE:
-			ret = (oldval <= cmparg);
-			break;
-		case FUTEX_OP_CMP_GT:
-			ret = (oldval > cmparg);
-			break;
-		default:
-			ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
+
 	return ret;
 }
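With the change above, the x86 helper only performs the atomic operation and hands the old value back through *oval; decoding encoded_op and applying the FUTEX_OP_CMP_* comparison are expected to happen once in common code rather than in every architecture. A rough sketch of that caller-side step, reusing the decoding the deleted lines performed (illustrative only, not the exact generic implementation):

    static int futex_op_inuser_sketch(int encoded_op, u32 __user *uaddr)
    {
        int op     = (encoded_op >> 28) & 7;
        int cmp    = (encoded_op >> 24) & 15;
        int oparg  = (encoded_op << 8) >> 20;   /* sign-extended 12-bit field */
        int cmparg = (encoded_op << 20) >> 20;  /* sign-extended 12-bit field */
        int oldval, ret;

        if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
            oparg = 1 << oparg;

        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
            return -EFAULT;

        ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
        if (ret)
            return ret;

        switch (cmp) {
        case FUTEX_OP_CMP_EQ: return oldval == cmparg;
        case FUTEX_OP_CMP_NE: return oldval != cmparg;
        case FUTEX_OP_CMP_LT: return oldval <  cmparg;
        case FUTEX_OP_CMP_GE: return oldval >= cmparg;
        case FUTEX_OP_CMP_LE: return oldval <= cmparg;
        case FUTEX_OP_CMP_GT: return oldval >  cmparg;
        default:              return -ENOSYS;
        }
    }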
 
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
index 39bcefc..bb07878 100644
--- a/arch/x86/include/asm/i8259.h
+++ b/arch/x86/include/asm/i8259.h
@@ -68,6 +68,11 @@
 extern struct legacy_pic *legacy_pic;
 extern struct legacy_pic null_legacy_pic;
 
+static inline bool has_legacy_pic(void)
+{
+	return legacy_pic != &null_legacy_pic;
+}
+
 static inline int nr_legacy_irqs(void)
 {
 	return legacy_pic->nr_legacy_irqs;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 20cfeeb..7598a6c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -864,7 +864,7 @@
 	int (*hardware_setup)(void);               /* __init */
 	void (*hardware_unsetup)(void);            /* __exit */
 	bool (*cpu_has_accelerated_tpr)(void);
-	bool (*cpu_has_high_real_mode_segbase)(void);
+	bool (*has_emulated_msr)(int index);
 	void (*cpuid_update)(struct kvm_vcpu *vcpu);
 
 	int (*vm_init)(struct kvm *kvm);
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 5a295bb..7336508 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -113,7 +113,7 @@
 
 	#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
 	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
-		/* pkey 0 is the default and always allocated */
+		/* pkey 0 is the default and allocated implicitly */
 		mm->context.pkey_allocation_map = 0x1;
 		/* -1 means unallocated or invalid */
 		mm->context.execute_only_pkey = -1;
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index c768bc1..1ec13e2 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -40,6 +40,8 @@
 #define MSR_IA32_SPEC_CTRL		0x00000048 /* Speculation Control */
 #define SPEC_CTRL_IBRS			(1 << 0)   /* Indirect Branch Restricted Speculation */
 #define SPEC_CTRL_STIBP			(1 << 1)   /* Single Thread Indirect Branch Predictors */
+#define SPEC_CTRL_SSBD_SHIFT		2	   /* Speculative Store Bypass Disable bit */
+#define SPEC_CTRL_SSBD			(1 << SPEC_CTRL_SSBD_SHIFT)   /* Speculative Store Bypass Disable */
 
 #define MSR_IA32_PRED_CMD		0x00000049 /* Prediction Command */
 #define PRED_CMD_IBPB			(1 << 0)   /* Indirect Branch Prediction Barrier */
@@ -61,6 +63,11 @@
 #define MSR_IA32_ARCH_CAPABILITIES	0x0000010a
 #define ARCH_CAP_RDCL_NO		(1 << 0)   /* Not susceptible to Meltdown */
 #define ARCH_CAP_IBRS_ALL		(1 << 1)   /* Enhanced IBRS support */
+#define ARCH_CAP_SSB_NO			(1 << 4)   /*
+						    * Not susceptible to Speculative Store Bypass
+						    * attack, so no Speculative Store Bypass
+						    * control required.
+						    */
 
 #define MSR_IA32_BBL_CR_CTL		0x00000119
 #define MSR_IA32_BBL_CR_CTL3		0x0000011e
@@ -135,6 +142,7 @@
 
 /* DEBUGCTLMSR bits (others vary by model): */
 #define DEBUGCTLMSR_LBR			(1UL <<  0) /* last branch recording */
+#define DEBUGCTLMSR_BTF_SHIFT		1
 #define DEBUGCTLMSR_BTF			(1UL <<  1) /* single-step on branches */
 #define DEBUGCTLMSR_TR			(1UL <<  6)
 #define DEBUGCTLMSR_BTS			(1UL <<  7)
@@ -315,6 +323,8 @@
 #define MSR_AMD64_IBSOPDATA4		0xc001103d
 #define MSR_AMD64_IBS_REG_COUNT_MAX	8 /* includes MSR_AMD64_IBSBRTARGET */
 
+#define MSR_AMD64_VIRT_SPEC_CTRL	0xc001011f
+
 /* Fam 17h MSRs */
 #define MSR_F17H_IRPERF			0xc00000e9
 
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index f928ad9..8b38df9 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -217,6 +217,14 @@
 	SPECTRE_V2_IBRS,
 };
 
+/* The Speculative Store Bypass disable variants */
+enum ssb_mitigation {
+	SPEC_STORE_BYPASS_NONE,
+	SPEC_STORE_BYPASS_DISABLE,
+	SPEC_STORE_BYPASS_PRCTL,
+	SPEC_STORE_BYPASS_SECCOMP,
+};
+
 extern char __indirect_thunk_start[];
 extern char __indirect_thunk_end[];
 
@@ -241,22 +249,27 @@
 #endif
 }
 
-#define alternative_msr_write(_msr, _val, _feature)		\
-	asm volatile(ALTERNATIVE("",				\
-				 "movl %[msr], %%ecx\n\t"	\
-				 "movl %[val], %%eax\n\t"	\
-				 "movl $0, %%edx\n\t"		\
-				 "wrmsr",			\
-				 _feature)			\
-		     : : [msr] "i" (_msr), [val] "i" (_val)	\
-		     : "eax", "ecx", "edx", "memory")
+static __always_inline
+void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
+{
+	asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
+		: : "c" (msr),
+		    "a" ((u32)val),
+		    "d" ((u32)(val >> 32)),
+		    [feature] "i" (feature)
+		: "memory");
+}
 
 static inline void indirect_branch_prediction_barrier(void)
 {
-	alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,
-			      X86_FEATURE_USE_IBPB);
+	u64 val = PRED_CMD_IBPB;
+
+	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
 }
 
+/* The Intel SPEC CTRL MSR base value cache */
+extern u64 x86_spec_ctrl_base;
+
 /*
  * With retpoline, we must use IBRS to restrict branch prediction
  * before calling into firmware.
@@ -265,14 +278,18 @@
  */
 #define firmware_restrict_branch_speculation_start()			\
 do {									\
+	u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS;			\
+									\
 	preempt_disable();						\
-	alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS,	\
+	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
 			      X86_FEATURE_USE_IBRS_FW);			\
 } while (0)
 
 #define firmware_restrict_branch_speculation_end()			\
 do {									\
-	alternative_msr_write(MSR_IA32_SPEC_CTRL, 0,			\
+	u64 val = x86_spec_ctrl_base;					\
+									\
+	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
 			      X86_FEATURE_USE_IBRS_FW);			\
 	preempt_enable();						\
 } while (0)
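The rewrite above turns alternative_msr_write() from a macro limited to immediate 32-bit constants into an inline function taking a full 64-bit value, which is what lets the firmware paths OR SPEC_CTRL_IBRS into the runtime x86_spec_ctrl_base. The register constraints encode the usual WRMSR split; a minimal sketch of that split for reference:

    /*
     * WRMSR consumes ECX = MSR index and EDX:EAX = 64-bit value; the
     * "c", "a" and "d" constraints in alternative_msr_write() express
     * exactly this.
     */
    static inline void wrmsr_split_sketch(unsigned int msr, u64 val)
    {
        u32 lo = (u32)val;          /* goes into EAX */
        u32 hi = (u32)(val >> 32);  /* goes into EDX */

        asm volatile("wrmsr" : : "c" (msr), "a" (lo), "d" (hi) : "memory");
    }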
diff --git a/arch/x86/include/asm/orc_types.h b/arch/x86/include/asm/orc_types.h
new file mode 100644
index 0000000..7dc777a
--- /dev/null
+++ b/arch/x86/include/asm/orc_types.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ORC_TYPES_H
+#define _ORC_TYPES_H
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+/*
+ * The ORC_REG_* registers are base registers which are used to find other
+ * registers on the stack.
+ *
+ * ORC_REG_PREV_SP, also known as DWARF Call Frame Address (CFA), is the
+ * address of the previous frame: the caller's SP before it called the current
+ * function.
+ *
+ * ORC_REG_UNDEFINED means the corresponding register's value didn't change in
+ * the current frame.
+ *
+ * The most commonly used base registers are SP and BP -- which the previous SP
+ * is usually based on -- and PREV_SP and UNDEFINED -- which the previous BP is
+ * usually based on.
+ *
+ * The rest of the base registers are needed for special cases like entry code
+ * and GCC realigned stacks.
+ */
+#define ORC_REG_UNDEFINED		0
+#define ORC_REG_PREV_SP			1
+#define ORC_REG_DX			2
+#define ORC_REG_DI			3
+#define ORC_REG_BP			4
+#define ORC_REG_SP			5
+#define ORC_REG_R10			6
+#define ORC_REG_R13			7
+#define ORC_REG_BP_INDIRECT		8
+#define ORC_REG_SP_INDIRECT		9
+#define ORC_REG_MAX			15
+
+/*
+ * ORC_TYPE_CALL: Indicates that sp_reg+sp_offset resolves to PREV_SP (the
+ * caller's SP right before it made the call).  Used for all callable
+ * functions, i.e. all C code and all callable asm functions.
+ *
+ * ORC_TYPE_REGS: Used in entry code to indicate that sp_reg+sp_offset points
+ * to a fully populated pt_regs from a syscall, interrupt, or exception.
+ *
+ * ORC_TYPE_REGS_IRET: Used in entry code to indicate that sp_reg+sp_offset
+ * points to the iret return frame.
+ *
+ * The UNWIND_HINT macros are used only for the unwind_hint struct.  They
+ * aren't used in struct orc_entry due to size and complexity constraints.
+ * Objtool converts them to real types when it converts the hints to orc
+ * entries.
+ */
+#define ORC_TYPE_CALL			0
+#define ORC_TYPE_REGS			1
+#define ORC_TYPE_REGS_IRET		2
+#define UNWIND_HINT_TYPE_SAVE		3
+#define UNWIND_HINT_TYPE_RESTORE	4
+
+#ifndef __ASSEMBLY__
+/*
+ * This struct is more or less a vastly simplified version of the DWARF Call
+ * Frame Information standard.  It contains only the necessary parts of DWARF
+ * CFI, simplified for ease of access by the in-kernel unwinder.  It tells the
+ * unwinder how to find the previous SP and BP (and sometimes entry regs) on
+ * the stack for a given code address.  Each instance of the struct corresponds
+ * to one or more code locations.
+ */
+struct orc_entry {
+	s16		sp_offset;
+	s16		bp_offset;
+	unsigned	sp_reg:4;
+	unsigned	bp_reg:4;
+	unsigned	type:2;
+};
+
+/*
+ * This struct is used by asm and inline asm code to manually annotate the
+ * location of registers on the stack for the ORC unwinder.
+ *
+ * Type can be either ORC_TYPE_* or UNWIND_HINT_TYPE_*.
+ */
+struct unwind_hint {
+	u32		ip;
+	s16		sp_offset;
+	u8		sp_reg;
+	u8		type;
+};
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ORC_TYPES_H */
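To make the sp_reg/bp_reg encoding concrete: for code sitting after a conventional frame-pointer prologue (push %rbp; mov %rsp,%rbp), the previous SP is 16 bytes above the current BP (saved RBP plus return address) and the saved BP lives at prev-SP - 16. A purely hypothetical entry for illustration; the real values are whatever objtool derives from the code it analyzed:

    static const struct orc_entry example_entry = {
        .sp_reg    = ORC_REG_BP,       /* prev SP found relative to current BP */
        .sp_offset = 16,               /* CFA = BP + 16 */
        .bp_reg    = ORC_REG_PREV_SP,  /* saved BP found relative to prev SP */
        .bp_offset = -16,              /* i.e. where RBP currently points */
        .type      = ORC_TYPE_CALL,
    };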
diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h
index b3b09b9..c50d6dc 100644
--- a/arch/x86/include/asm/pkeys.h
+++ b/arch/x86/include/asm/pkeys.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_PKEYS_H
 #define _ASM_X86_PKEYS_H
 
+#define ARCH_DEFAULT_PKEY	0
+
 #define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 16 : 1)
 
 extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
@@ -14,7 +16,7 @@
 static inline int execute_only_pkey(struct mm_struct *mm)
 {
 	if (!boot_cpu_has(X86_FEATURE_OSPKE))
-		return 0;
+		return ARCH_DEFAULT_PKEY;
 
 	return __execute_only_pkey(mm);
 }
@@ -48,13 +50,21 @@
 {
 	/*
 	 * "Allocated" pkeys are those that have been returned
-	 * from pkey_alloc().  pkey 0 is special, and never
-	 * returned from pkey_alloc().
+	 * from pkey_alloc(), or pkey 0, which is allocated
+	 * implicitly when the mm is created.
 	 */
-	if (pkey <= 0)
+	if (pkey < 0)
 		return false;
 	if (pkey >= arch_max_pkey())
 		return false;
+	/*
+	 * The exec-only pkey is set in the allocation map, but
+	 * is not available to any of the user interfaces like
+	 * mprotect_pkey().
+	 */
+	if (pkey == mm->context.execute_only_pkey)
+		return false;
+
 	return mm_pkey_allocation_map(mm) & (1U << pkey);
 }
 
diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
new file mode 100644
index 0000000..ae7c2c5
--- /dev/null
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_SPECCTRL_H_
+#define _ASM_X86_SPECCTRL_H_
+
+#include <linux/thread_info.h>
+#include <asm/nospec-branch.h>
+
+/*
+ * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
+ * the guest has, while on VMEXIT we restore the host view. This
+ * would be easier if SPEC_CTRL were architecturally maskable or
+ * shadowable for guests but this is not (currently) the case.
+ * Takes the guest view of SPEC_CTRL MSR as a parameter and also
+ * the guest's version of VIRT_SPEC_CTRL, if emulated.
+ */
+extern void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest);
+
+/**
+ * x86_spec_ctrl_set_guest - Set speculation control registers for the guest
+ * @guest_spec_ctrl:		The guest content of MSR_SPEC_CTRL
+ * @guest_virt_spec_ctrl:	The guest controlled bits of MSR_VIRT_SPEC_CTRL
+ *				(may get translated to MSR_AMD64_LS_CFG bits)
+ *
+ * Avoids writing to the MSR if the content/bits are the same
+ */
+static inline
+void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
+{
+	x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true);
+}
+
+/**
+ * x86_spec_ctrl_restore_host - Restore host speculation control registers
+ * @guest_spec_ctrl:		The guest content of MSR_SPEC_CTRL
+ * @guest_virt_spec_ctrl:	The guest controlled bits of MSR_VIRT_SPEC_CTRL
+ *				(may get translated to MSR_AMD64_LS_CFG bits)
+ *
+ * Avoids writing to the MSR if the content/bits are the same
+ */
+static inline
+void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
+{
+	x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false);
+}
+
+/* AMD specific Speculative Store Bypass MSR data */
+extern u64 x86_amd_ls_cfg_base;
+extern u64 x86_amd_ls_cfg_ssbd_mask;
+
+static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
+{
+	BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
+	return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
+}
+
+static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl)
+{
+	BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
+	return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
+}
+
+static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
+{
+	return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
+}
+
+#ifdef CONFIG_SMP
+extern void speculative_store_bypass_ht_init(void);
+#else
+static inline void speculative_store_bypass_ht_init(void) { }
+#endif
+
+extern void speculative_store_bypass_update(unsigned long tif);
+
+static inline void speculative_store_bypass_update_current(void)
+{
+	speculative_store_bypass_update(current_thread_info()->flags);
+}
+
+#endif
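The two conversion helpers above are pure bit arithmetic: with TIF_SSBD defined as bit 5 (see the thread_info.h hunk further down) and SPEC_CTRL_SSBD_SHIFT as bit 2, shifting by TIF_SSBD - SPEC_CTRL_SSBD_SHIFT = 3 moves the thread flag onto the MSR bit and back. A quick check of that arithmetic using the values from this patch:

    /*
     * _TIF_SSBD = 1 << 5 = 0x20, SPEC_CTRL_SSBD = 1 << 2 = 0x04:
     *   ssbd_tif_to_spec_ctrl(0x20) == 0x20 >> (5 - 2) == 0x04
     *   ssbd_spec_ctrl_to_tif(0x04) == 0x04 << (5 - 2) == 0x20
     * so the helpers are exact inverses for the SSBD bit.
     */
    _Static_assert((0x20u >> (5 - 2)) == 0x04u, "TIF_SSBD maps onto SPEC_CTRL_SSBD");
    _Static_assert((0x04u << (5 - 2)) == 0x20u, "SPEC_CTRL_SSBD maps back onto TIF_SSBD");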
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 89978b9..2d8788a 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -83,6 +83,7 @@
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
 #define TIF_SINGLESTEP		4	/* reenable singlestep on user return*/
+#define TIF_SSBD		5	/* Reduced data speculation */
 #define TIF_SYSCALL_EMU		6	/* syscall emulation active */
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
 #define TIF_SECCOMP		8	/* secure computing */
@@ -104,8 +105,9 @@
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
-#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
+#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
+#define _TIF_SSBD		(1 << TIF_SSBD)
 #define _TIF_SYSCALL_EMU	(1 << TIF_SYSCALL_EMU)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
@@ -139,7 +141,7 @@
 
 /* flags to check in __switch_to() */
 #define _TIF_WORK_CTXSW							\
-	(_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP)
+	(_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)
 
 #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 99185a0..686a58d 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -111,6 +111,16 @@
 	}
 }
 
+static inline void cr4_toggle_bits(unsigned long mask)
+{
+	unsigned long cr4;
+
+	cr4 = this_cpu_read(cpu_tlbstate.cr4);
+	cr4 ^= mask;
+	this_cpu_write(cpu_tlbstate.cr4, cr4);
+	__write_cr4(cr4);
+}
+
 /* Read the CR4 shadow. */
 static inline unsigned long cr4_read_shadow(void)
 {
diff --git a/arch/x86/include/asm/unwind_hints.h b/arch/x86/include/asm/unwind_hints.h
new file mode 100644
index 0000000..5e02b11
--- /dev/null
+++ b/arch/x86/include/asm/unwind_hints.h
@@ -0,0 +1,103 @@
+#ifndef _ASM_X86_UNWIND_HINTS_H
+#define _ASM_X86_UNWIND_HINTS_H
+
+#include "orc_types.h"
+
+#ifdef __ASSEMBLY__
+
+/*
+ * In asm, there are two kinds of code: normal C-type callable functions and
+ * the rest.  The normal callable functions can be called by other code, and
+ * don't do anything unusual with the stack.  Such normal callable functions
+ * are annotated with the ENTRY/ENDPROC macros.  Most asm code falls in this
+ * category.  In this case, no special debugging annotations are needed because
+ * objtool can automatically generate the ORC data for the ORC unwinder to read
+ * at runtime.
+ *
+ * Anything which doesn't fall into the above category, such as syscall and
+ * interrupt handlers, tends to not be called directly by other functions, and
+ * often does unusual non-C-function-type things with the stack pointer.  Such
+ * code needs to be annotated such that objtool can understand it.  The
+ * following CFI hint macros are for this type of code.
+ *
+ * These macros provide hints to objtool about the state of the stack at each
+ * instruction.  Objtool starts from the hints and follows the code flow,
+ * making automatic CFI adjustments when it sees pushes and pops, filling out
+ * the debuginfo as necessary.  It will also warn if it sees any
+ * inconsistencies.
+ */
+.macro UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=0 type=ORC_TYPE_CALL
+#ifdef CONFIG_STACK_VALIDATION
+.Lunwind_hint_ip_\@:
+	.pushsection .discard.unwind_hints
+		/* struct unwind_hint */
+		.long .Lunwind_hint_ip_\@ - .
+		.short \sp_offset
+		.byte \sp_reg
+		.byte \type
+	.popsection
+#endif
+.endm
+
+.macro UNWIND_HINT_EMPTY
+	UNWIND_HINT sp_reg=ORC_REG_UNDEFINED
+.endm
+
+.macro UNWIND_HINT_REGS base=%rsp offset=0 indirect=0 extra=1 iret=0
+	.if \base == %rsp && \indirect
+		.set sp_reg, ORC_REG_SP_INDIRECT
+	.elseif \base == %rsp
+		.set sp_reg, ORC_REG_SP
+	.elseif \base == %rbp
+		.set sp_reg, ORC_REG_BP
+	.elseif \base == %rdi
+		.set sp_reg, ORC_REG_DI
+	.elseif \base == %rdx
+		.set sp_reg, ORC_REG_DX
+	.elseif \base == %r10
+		.set sp_reg, ORC_REG_R10
+	.else
+		.error "UNWIND_HINT_REGS: bad base register"
+	.endif
+
+	.set sp_offset, \offset
+
+	.if \iret
+		.set type, ORC_TYPE_REGS_IRET
+	.elseif \extra == 0
+		.set type, ORC_TYPE_REGS_IRET
+		.set sp_offset, \offset + (16*8)
+	.else
+		.set type, ORC_TYPE_REGS
+	.endif
+
+	UNWIND_HINT sp_reg=sp_reg sp_offset=sp_offset type=type
+.endm
+
+.macro UNWIND_HINT_IRET_REGS base=%rsp offset=0
+	UNWIND_HINT_REGS base=\base offset=\offset iret=1
+.endm
+
+.macro UNWIND_HINT_FUNC sp_offset=8
+	UNWIND_HINT sp_offset=\sp_offset
+.endm
+
+#else /* !__ASSEMBLY__ */
+
+#define UNWIND_HINT(sp_reg, sp_offset, type)			\
+	"987: \n\t"						\
+	".pushsection .discard.unwind_hints\n\t"		\
+	/* struct unwind_hint */				\
+	".long 987b - .\n\t"					\
+	".short " __stringify(sp_offset) "\n\t"		\
+	".byte " __stringify(sp_reg) "\n\t"			\
+	".byte " __stringify(type) "\n\t"			\
+	".popsection\n\t"
+
+#define UNWIND_HINT_SAVE UNWIND_HINT(0, 0, UNWIND_HINT_TYPE_SAVE)
+
+#define UNWIND_HINT_RESTORE UNWIND_HINT(0, 0, UNWIND_HINT_TYPE_RESTORE)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_X86_UNWIND_HINTS_H */
diff --git a/arch/x86/include/uapi/asm/msgbuf.h b/arch/x86/include/uapi/asm/msgbuf.h
index 809134c..90ab9a7 100644
--- a/arch/x86/include/uapi/asm/msgbuf.h
+++ b/arch/x86/include/uapi/asm/msgbuf.h
@@ -1 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_X64_MSGBUF_H
+#define __ASM_X64_MSGBUF_H
+
+#if !defined(__x86_64__) || !defined(__ILP32__)
 #include <asm-generic/msgbuf.h>
+#else
+/*
+ * The msqid64_ds structure for x86 architecture with x32 ABI.
+ *
+ * On x86-32 and x86-64 we can just use the generic definition, but
+ * x32 uses the same binary layout as x86_64, which is different
+ * from other 32-bit architectures.
+ */
+
+struct msqid64_ds {
+	struct ipc64_perm msg_perm;
+	__kernel_time_t msg_stime;	/* last msgsnd time */
+	__kernel_time_t msg_rtime;	/* last msgrcv time */
+	__kernel_time_t msg_ctime;	/* last change time */
+	__kernel_ulong_t msg_cbytes;	/* current number of bytes on queue */
+	__kernel_ulong_t msg_qnum;	/* number of messages in queue */
+	__kernel_ulong_t msg_qbytes;	/* max number of bytes on queue */
+	__kernel_pid_t msg_lspid;	/* pid of last msgsnd */
+	__kernel_pid_t msg_lrpid;	/* last receive pid */
+	__kernel_ulong_t __unused4;
+	__kernel_ulong_t __unused5;
+};
+
+#endif
+
+#endif /* __ASM_X64_MSGBUF_H */
diff --git a/arch/x86/include/uapi/asm/shmbuf.h b/arch/x86/include/uapi/asm/shmbuf.h
index 83c05fc..644421f 100644
--- a/arch/x86/include/uapi/asm/shmbuf.h
+++ b/arch/x86/include/uapi/asm/shmbuf.h
@@ -1 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_X86_SHMBUF_H
+#define __ASM_X86_SHMBUF_H
+
+#if !defined(__x86_64__) || !defined(__ILP32__)
 #include <asm-generic/shmbuf.h>
+#else
+/*
+ * The shmid64_ds structure for x86 architecture with x32 ABI.
+ *
+ * On x86-32 and x86-64 we can just use the generic definition, but
+ * x32 uses the same binary layout as x86_64, which is different
+ * from other 32-bit architectures.
+ */
+
+struct shmid64_ds {
+	struct ipc64_perm	shm_perm;	/* operation perms */
+	size_t			shm_segsz;	/* size of segment (bytes) */
+	__kernel_time_t		shm_atime;	/* last attach time */
+	__kernel_time_t		shm_dtime;	/* last detach time */
+	__kernel_time_t		shm_ctime;	/* last change time */
+	__kernel_pid_t		shm_cpid;	/* pid of creator */
+	__kernel_pid_t		shm_lpid;	/* pid of last operator */
+	__kernel_ulong_t	shm_nattch;	/* no. of current attaches */
+	__kernel_ulong_t	__unused4;
+	__kernel_ulong_t	__unused5;
+};
+
+struct shminfo64 {
+	__kernel_ulong_t	shmmax;
+	__kernel_ulong_t	shmmin;
+	__kernel_ulong_t	shmmni;
+	__kernel_ulong_t	shmseg;
+	__kernel_ulong_t	shmall;
+	__kernel_ulong_t	__unused1;
+	__kernel_ulong_t	__unused2;
+	__kernel_ulong_t	__unused3;
+	__kernel_ulong_t	__unused4;
+};
+
+#endif
+
+#endif /* __ASM_X86_SHMBUF_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 79076d7..4c9c615 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -29,6 +29,7 @@
 OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o	:= y
 OBJECT_FILES_NON_STANDARD_mcount_$(BITS).o		:= y
 OBJECT_FILES_NON_STANDARD_test_nx.o			:= y
+OBJECT_FILES_NON_STANDARD_paravirt_patch_$(BITS).o	:= y
 
 # If instrumentation of this dir is enabled, boot hangs during first second.
 # Probably could be more selective here, but note that files related to irqs,
diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile
index 26b78d8..85a9e17 100644
--- a/arch/x86/kernel/acpi/Makefile
+++ b/arch/x86/kernel/acpi/Makefile
@@ -1,3 +1,5 @@
+OBJECT_FILES_NON_STANDARD_wakeup_$(BITS).o := y
+
 obj-$(CONFIG_ACPI)		+= boot.o
 obj-$(CONFIG_ACPI_SLEEP)	+= sleep.o wakeup_$(BITS).o
 obj-$(CONFIG_ACPI_APEI)		+= apei.o
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index c6583ef..76cf21f 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1403,7 +1403,7 @@
 	 * TODO: set up through-local-APIC from through-I/O-APIC? --macro
 	 */
 	value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
-	if (!cpu && (pic_mode || !value)) {
+	if (!cpu && (pic_mode || !value || skip_ioapic_setup)) {
 		value = APIC_DM_EXTINT;
 		apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", cpu);
 	} else {
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index c375bc6..4c2be99 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -9,6 +9,7 @@
 #include <asm/processor.h>
 #include <asm/apic.h>
 #include <asm/cpu.h>
+#include <asm/spec-ctrl.h>
 #include <asm/smp.h>
 #include <asm/pci-direct.h>
 #include <asm/delay.h>
@@ -542,6 +543,26 @@
 		rdmsrl(MSR_FAM10H_NODE_ID, value);
 		nodes_per_socket = ((value >> 3) & 7) + 1;
 	}
+
+	if (c->x86 >= 0x15 && c->x86 <= 0x17) {
+		unsigned int bit;
+
+		switch (c->x86) {
+		case 0x15: bit = 54; break;
+		case 0x16: bit = 33; break;
+		case 0x17: bit = 10; break;
+		default: return;
+		}
+		/*
+		 * Try to cache the base value so further operations can
+		 * avoid RMW. If that faults, do not enable SSBD.
+		 */
+		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
+			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
+			setup_force_cpu_cap(X86_FEATURE_SSBD);
+			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
+		}
+	}
 }
 
 static void early_init_amd(struct cpuinfo_x86 *c)
@@ -728,6 +749,17 @@
 	}
 }
 
+static void init_amd_zn(struct cpuinfo_x86 *c)
+{
+	set_cpu_cap(c, X86_FEATURE_ZEN);
+	/*
+	 * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
+	 * all parts up to and including revision B1.
+	 */
+	if (c->x86_model <= 1 && c->x86_stepping <= 1)
+		set_cpu_cap(c, X86_FEATURE_CPB);
+}
+
 static void init_amd(struct cpuinfo_x86 *c)
 {
 	u32 dummy;
@@ -758,6 +790,7 @@
 	case 0x10: init_amd_gh(c); break;
 	case 0x12: init_amd_ln(c); break;
 	case 0x15: init_amd_bd(c); break;
+	case 0x17: init_amd_zn(c); break;
 	}
 
 	/* Enable workaround for FXSAVE leak */
@@ -824,8 +857,9 @@
 		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
 			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);
 
-	/* AMD CPUs don't reset SS attributes on SYSRET */
-	set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
+	if (!cpu_has(c, X86_FEATURE_XENPV))
+		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
 }
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index b8b0b6e..86af9b1 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -11,8 +11,10 @@
 #include <linux/utsname.h>
 #include <linux/cpu.h>
 #include <linux/module.h>
+#include <linux/nospec.h>
+#include <linux/prctl.h>
 
-#include <asm/nospec-branch.h>
+#include <asm/spec-ctrl.h>
 #include <asm/cmdline.h>
 #include <asm/bugs.h>
 #include <asm/processor.h>
@@ -26,6 +28,27 @@
 #include <asm/intel-family.h>
 
 static void __init spectre_v2_select_mitigation(void);
+static void __init ssb_select_mitigation(void);
+
+/*
+ * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
+ * writes to SPEC_CTRL contain whatever reserved bits have been set.
+ */
+u64 __ro_after_init x86_spec_ctrl_base;
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
+
+/*
+ * The vendor and possibly platform specific bits which can be modified in
+ * x86_spec_ctrl_base.
+ */
+static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
+
+/*
+ * AMD specific MSR info for Speculative Store Bypass control.
+ * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
+ */
+u64 __ro_after_init x86_amd_ls_cfg_base;
+u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
 
 void __init check_bugs(void)
 {
@@ -36,9 +59,27 @@
 		print_cpu_info(&boot_cpu_data);
 	}
 
+	/*
+	 * Read the SPEC_CTRL MSR to account for reserved bits which may
+	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
+	 * init code as it is not enumerated and depends on the family.
+	 */
+	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+
+	/* Allow STIBP in MSR_SPEC_CTRL if supported */
+	if (boot_cpu_has(X86_FEATURE_STIBP))
+		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
+
 	/* Select the proper spectre mitigation before patching alternatives */
 	spectre_v2_select_mitigation();
 
+	/*
+	 * Select proper mitigation for any exposure to the Speculative Store
+	 * Bypass vulnerability.
+	 */
+	ssb_select_mitigation();
+
 #ifdef CONFIG_X86_32
 	/*
 	 * Check whether we are able to run this kernel safely on SMP.
@@ -92,7 +133,76 @@
 #undef pr_fmt
 #define pr_fmt(fmt)     "Spectre V2 : " fmt
 
-static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
+static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
+	SPECTRE_V2_NONE;
+
+void
+x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
+{
+	u64 msrval, guestval, hostval = x86_spec_ctrl_base;
+	struct thread_info *ti = current_thread_info();
+
+	/* Is MSR_SPEC_CTRL implemented ? */
+	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
+		/*
+		 * Restrict guest_spec_ctrl to supported values. Clear the
+		 * modifiable bits in the host base value and or the
+		 * modifiable bits from the guest value.
+		 */
+		guestval = hostval & ~x86_spec_ctrl_mask;
+		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
+
+		/* SSBD controlled in MSR_SPEC_CTRL */
+		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
+
+		if (hostval != guestval) {
+			msrval = setguest ? guestval : hostval;
+			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
+		}
+	}
+
+	/*
+	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
+	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
+	 */
+	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
+	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
+		return;
+
+	/*
+	 * If the host has SSBD mitigation enabled, force it in the host's
+	 * virtual MSR value. If it's not permanently enabled, evaluate
+	 * current's TIF_SSBD thread flag.
+	 */
+	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
+		hostval = SPEC_CTRL_SSBD;
+	else
+		hostval = ssbd_tif_to_spec_ctrl(ti->flags);
+
+	/* Sanitize the guest value */
+	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
+
+	if (hostval != guestval) {
+		unsigned long tif;
+
+		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
+				 ssbd_spec_ctrl_to_tif(hostval);
+
+		speculative_store_bypass_update(tif);
+	}
+}
+EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
+
+static void x86_amd_ssb_disable(void)
+{
+	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
+
+	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
+		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
+	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+		wrmsrl(MSR_AMD64_LS_CFG, msrval);
+}
 
 #ifdef RETPOLINE
 static bool spectre_v2_bad_module;
@@ -311,32 +421,289 @@
 }
 
 #undef pr_fmt
+#define pr_fmt(fmt)	"Speculative Store Bypass: " fmt
+
+static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
+
+/* The kernel command line selection */
+enum ssb_mitigation_cmd {
+	SPEC_STORE_BYPASS_CMD_NONE,
+	SPEC_STORE_BYPASS_CMD_AUTO,
+	SPEC_STORE_BYPASS_CMD_ON,
+	SPEC_STORE_BYPASS_CMD_PRCTL,
+	SPEC_STORE_BYPASS_CMD_SECCOMP,
+};
+
+static const char *ssb_strings[] = {
+	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
+	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
+	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
+	[SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
+};
+
+static const struct {
+	const char *option;
+	enum ssb_mitigation_cmd cmd;
+} ssb_mitigation_options[] = {
+	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
+	{ "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
+	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
+	{ "prctl",	SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
+	{ "seccomp",	SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
+};
+
+static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
+{
+	enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
+	char arg[20];
+	int ret, i;
+
+	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
+		return SPEC_STORE_BYPASS_CMD_NONE;
+	} else {
+		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
+					  arg, sizeof(arg));
+		if (ret < 0)
+			return SPEC_STORE_BYPASS_CMD_AUTO;
+
+		for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
+			if (!match_option(arg, ret, ssb_mitigation_options[i].option))
+				continue;
+
+			cmd = ssb_mitigation_options[i].cmd;
+			break;
+		}
+
+		if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
+			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
+			return SPEC_STORE_BYPASS_CMD_AUTO;
+		}
+	}
+
+	return cmd;
+}
+
+static enum ssb_mitigation __init __ssb_select_mitigation(void)
+{
+	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
+	enum ssb_mitigation_cmd cmd;
+
+	if (!boot_cpu_has(X86_FEATURE_SSBD))
+		return mode;
+
+	cmd = ssb_parse_cmdline();
+	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
+	    (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
+	     cmd == SPEC_STORE_BYPASS_CMD_AUTO))
+		return mode;
+
+	switch (cmd) {
+	case SPEC_STORE_BYPASS_CMD_AUTO:
+	case SPEC_STORE_BYPASS_CMD_SECCOMP:
+		/*
+		 * Choose prctl+seccomp as the default mode if seccomp is
+		 * enabled.
+		 */
+		if (IS_ENABLED(CONFIG_SECCOMP))
+			mode = SPEC_STORE_BYPASS_SECCOMP;
+		else
+			mode = SPEC_STORE_BYPASS_PRCTL;
+		break;
+	case SPEC_STORE_BYPASS_CMD_ON:
+		mode = SPEC_STORE_BYPASS_DISABLE;
+		break;
+	case SPEC_STORE_BYPASS_CMD_PRCTL:
+		mode = SPEC_STORE_BYPASS_PRCTL;
+		break;
+	case SPEC_STORE_BYPASS_CMD_NONE:
+		break;
+	}
+
+	/*
+	 * We have three CPU feature flags that are in play here:
+	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
+	 *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
+	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
+	 */
+	if (mode == SPEC_STORE_BYPASS_DISABLE) {
+		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
+		/*
+		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
+		 * a completely different MSR and bit dependent on family.
+		 */
+		switch (boot_cpu_data.x86_vendor) {
+		case X86_VENDOR_INTEL:
+			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
+			x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
+			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+			break;
+		case X86_VENDOR_AMD:
+			x86_amd_ssb_disable();
+			break;
+		}
+	}
+
+	return mode;
+}
+
+static void ssb_select_mitigation(void)
+{
+	ssb_mode = __ssb_select_mitigation();
+
+	if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
+		pr_info("%s\n", ssb_strings[ssb_mode]);
+}
+
+#undef pr_fmt
+#define pr_fmt(fmt)     "Speculation prctl: " fmt
+
+static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
+{
+	bool update;
+
+	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
+	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
+		return -ENXIO;
+
+	switch (ctrl) {
+	case PR_SPEC_ENABLE:
+		/* If speculation is force disabled, enable is not allowed */
+		if (task_spec_ssb_force_disable(task))
+			return -EPERM;
+		task_clear_spec_ssb_disable(task);
+		update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
+		break;
+	case PR_SPEC_DISABLE:
+		task_set_spec_ssb_disable(task);
+		update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+		break;
+	case PR_SPEC_FORCE_DISABLE:
+		task_set_spec_ssb_disable(task);
+		task_set_spec_ssb_force_disable(task);
+		update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	/*
+	 * If being set on non-current task, delay setting the CPU
+	 * mitigation until it is next scheduled.
+	 */
+	if (task == current && update)
+		speculative_store_bypass_update_current();
+
+	return 0;
+}
+
+int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+			     unsigned long ctrl)
+{
+	switch (which) {
+	case PR_SPEC_STORE_BYPASS:
+		return ssb_prctl_set(task, ctrl);
+	default:
+		return -ENODEV;
+	}
+}
+
+#ifdef CONFIG_SECCOMP
+void arch_seccomp_spec_mitigate(struct task_struct *task)
+{
+	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
+		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
+}
+#endif
+
+static int ssb_prctl_get(struct task_struct *task)
+{
+	switch (ssb_mode) {
+	case SPEC_STORE_BYPASS_DISABLE:
+		return PR_SPEC_DISABLE;
+	case SPEC_STORE_BYPASS_SECCOMP:
+	case SPEC_STORE_BYPASS_PRCTL:
+		if (task_spec_ssb_force_disable(task))
+			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+		if (task_spec_ssb_disable(task))
+			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+	default:
+		if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
+			return PR_SPEC_ENABLE;
+		return PR_SPEC_NOT_AFFECTED;
+	}
+}
+
+int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
+{
+	switch (which) {
+	case PR_SPEC_STORE_BYPASS:
+		return ssb_prctl_get(task);
+	default:
+		return -ENODEV;
+	}
+}
+
+void x86_spec_ctrl_setup_ap(void)
+{
+	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+
+	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
+		x86_amd_ssb_disable();
+}
 
 #ifdef CONFIG_SYSFS
+
+static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+			       char *buf, unsigned int bug)
+{
+	if (!boot_cpu_has_bug(bug))
+		return sprintf(buf, "Not affected\n");
+
+	switch (bug) {
+	case X86_BUG_CPU_MELTDOWN:
+		if (boot_cpu_has(X86_FEATURE_KAISER))
+			return sprintf(buf, "Mitigation: PTI\n");
+
+		break;
+
+	case X86_BUG_SPECTRE_V1:
+		return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+
+	case X86_BUG_SPECTRE_V2:
+		return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+			       boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
+			       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+			       spectre_v2_module_string());
+
+	case X86_BUG_SPEC_STORE_BYPASS:
+		return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
+
+	default:
+		break;
+	}
+
+	return sprintf(buf, "Vulnerable\n");
+}
+
 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
-		return sprintf(buf, "Not affected\n");
-	if (boot_cpu_has(X86_FEATURE_KAISER))
-		return sprintf(buf, "Mitigation: PTI\n");
-	return sprintf(buf, "Vulnerable\n");
+	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
 }
 
 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
-		return sprintf(buf, "Not affected\n");
-	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
 }
 
 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
-		return sprintf(buf, "Not affected\n");
+	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
+}
 
-	return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
-		       boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
-		       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
-		       spectre_v2_module_string());
+ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
 }
 #endif
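The bugs.c changes above wire per-task control into the generic speculation prctls: arch_prctl_spec_ctrl_set()/arch_prctl_spec_ctrl_get() back PR_SET_SPECULATION_CTRL and PR_GET_SPECULATION_CTRL with which = PR_SPEC_STORE_BYPASS, and the sysfs show routine reports the system-wide state. A minimal userspace sketch of driving the interface follows; it assumes a kernel carrying this series and running in the prctl or seccomp SSB mode (otherwise ssb_prctl_set() returns -ENXIO), and it defines the PR_* constants locally only in case older installed headers lack them.

/* ssb_prctl.c - minimal sketch of the per-task SSB prctl interface.
 * The PR_* values mirror linux/prctl.h; they are defined conditionally
 * here purely as a fallback for older userspace headers.
 */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_SPECULATION_CTRL
#define PR_GET_SPECULATION_CTRL	52
#define PR_SET_SPECULATION_CTRL	53
#endif
#ifndef PR_SPEC_STORE_BYPASS
#define PR_SPEC_STORE_BYPASS	0
#define PR_SPEC_PRCTL		(1UL << 0)
#define PR_SPEC_ENABLE		(1UL << 1)
#define PR_SPEC_DISABLE		(1UL << 2)
#define PR_SPEC_FORCE_DISABLE	(1UL << 3)
#endif

int main(void)
{
	/* Query current state; a set PR_SPEC_PRCTL bit means per-task control. */
	long state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
	printf("initial state: %#lx\n", state);

	/* Engage the mitigation for this task (sets TIF_SSBD in the kernel). */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE, 0, 0))
		perror("PR_SPEC_DISABLE");

	state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
	printf("after disable: %#lx\n", state);
	return 0;
}

As ssb_prctl_set() shows, PR_SPEC_FORCE_DISABLE behaves like PR_SPEC_DISABLE but additionally refuses any later PR_SPEC_ENABLE with -EPERM, which is what the seccomp hook relies on.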
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 301bbd1..b0fd028 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -725,17 +725,32 @@
 	 * and they also have a different bit for STIBP support. Also,
 	 * a hypervisor might have set the individual AMD bits even on
 	 * Intel CPUs, for finer-grained selection of what's available.
-	 *
-	 * We use the AMD bits in 0x8000_0008 EBX as the generic hardware
-	 * features, which are visible in /proc/cpuinfo and used by the
-	 * kernel. So set those accordingly from the Intel bits.
 	 */
 	if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
 		set_cpu_cap(c, X86_FEATURE_IBRS);
 		set_cpu_cap(c, X86_FEATURE_IBPB);
+		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
 	}
+
 	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
 		set_cpu_cap(c, X86_FEATURE_STIBP);
+
+	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
+	    cpu_has(c, X86_FEATURE_VIRT_SSBD))
+		set_cpu_cap(c, X86_FEATURE_SSBD);
+
+	if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
+		set_cpu_cap(c, X86_FEATURE_IBRS);
+		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+	}
+
+	if (cpu_has(c, X86_FEATURE_AMD_IBPB))
+		set_cpu_cap(c, X86_FEATURE_IBPB);
+
+	if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
+		set_cpu_cap(c, X86_FEATURE_STIBP);
+		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+	}
 }
 
 void get_cpu_cap(struct cpuinfo_x86 *c)
@@ -879,21 +894,55 @@
 	{}
 };
 
-static bool __init cpu_vulnerable_to_meltdown(struct cpuinfo_x86 *c)
+static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_PINEVIEW	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_LINCROFT	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_PENWELL		},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_CLOVERVIEW	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_CEDARVIEW	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT1	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_AIRMONT		},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT2	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_MERRIFIELD	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_CORE_YONAH		},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNL		},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNM		},
+	{ X86_VENDOR_CENTAUR,	5,					},
+	{ X86_VENDOR_INTEL,	5,					},
+	{ X86_VENDOR_NSC,	5,					},
+	{ X86_VENDOR_AMD,	0x12,					},
+	{ X86_VENDOR_AMD,	0x11,					},
+	{ X86_VENDOR_AMD,	0x10,					},
+	{ X86_VENDOR_AMD,	0xf,					},
+	{ X86_VENDOR_ANY,	4,					},
+	{}
+};
+
+static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 {
 	u64 ia32_cap = 0;
 
-	if (x86_match_cpu(cpu_no_meltdown))
-		return false;
-
 	if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
 		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
 
+	if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
+	   !(ia32_cap & ARCH_CAP_SSB_NO))
+		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+
+	if (x86_match_cpu(cpu_no_speculation))
+		return;
+
+	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+	setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+
+	if (x86_match_cpu(cpu_no_meltdown))
+		return;
+
 	/* Rogue Data Cache Load? No! */
 	if (ia32_cap & ARCH_CAP_RDCL_NO)
-		return false;
+		return;
 
-	return true;
+	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
 }
 
 /*
@@ -942,12 +991,7 @@
 
 	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
 
-	if (!x86_match_cpu(cpu_no_speculation)) {
-		if (cpu_vulnerable_to_meltdown(c))
-			setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
-		setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
-		setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
-	}
+	cpu_set_bug_bits(c);
 
 	fpu__init_system(c);
 
@@ -1315,6 +1359,7 @@
 #endif
 	mtrr_ap_init();
 	validate_apic_and_package_id(c);
+	x86_spec_ctrl_setup_ap();
 }
 
 struct msr_range {
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 2584265..3b19d82 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -46,4 +46,7 @@
 
 extern void get_cpu_cap(struct cpuinfo_x86 *c);
 extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
+
+extern void x86_spec_ctrl_setup_ap(void);
+
 #endif /* ARCH_X86_CPU_H */
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 8fb1d65..93781e3 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -153,7 +153,10 @@
 		setup_clear_cpu_cap(X86_FEATURE_IBPB);
 		setup_clear_cpu_cap(X86_FEATURE_STIBP);
 		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
+		setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
 		setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
+		setup_clear_cpu_cap(X86_FEATURE_SSBD);
+		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
 	}
 
 	/*
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 4bcd30c..79291d6 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -474,7 +474,6 @@
  */
 static void save_mc_for_early(u8 *mc)
 {
-#ifdef CONFIG_HOTPLUG_CPU
 	/* Synchronization during CPU hotplug. */
 	static DEFINE_MUTEX(x86_cpu_microcode_mutex);
 
@@ -521,7 +520,6 @@
 
 out:
 	mutex_unlock(&x86_cpu_microcode_mutex);
-#endif
 }
 
 static bool __init load_builtin_intel_microcode(struct cpio_data *cp)
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 3fe45f8..7a07b15 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -11,6 +11,7 @@
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/of_irq.h>
+#include <linux/libfdt.h>
 #include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/of_pci.h>
@@ -199,19 +200,22 @@
 static int dt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
 			      unsigned int nr_irqs, void *arg)
 {
-	struct of_phandle_args *irq_data = (void *)arg;
+	struct irq_fwspec *fwspec = (struct irq_fwspec *)arg;
 	struct of_ioapic_type *it;
 	struct irq_alloc_info tmp;
+	int type_index;
 
-	if (WARN_ON(irq_data->args_count < 2))
-		return -EINVAL;
-	if (irq_data->args[1] >= ARRAY_SIZE(of_ioapic_type))
+	if (WARN_ON(fwspec->param_count < 2))
 		return -EINVAL;
 
-	it = &of_ioapic_type[irq_data->args[1]];
+	type_index = fwspec->param[1];
+	if (type_index >= ARRAY_SIZE(of_ioapic_type))
+		return -EINVAL;
+
+	it = &of_ioapic_type[type_index];
 	ioapic_set_alloc_attr(&tmp, NUMA_NO_NODE, it->trigger, it->polarity);
 	tmp.ioapic_id = mpc_ioapic_id(mp_irqdomain_ioapic_idx(domain));
-	tmp.ioapic_pin = irq_data->args[0];
+	tmp.ioapic_pin = fwspec->param[0];
 
 	return mp_irqdomain_alloc(domain, virq, nr_irqs, &tmp);
 }
@@ -276,14 +280,15 @@
 
 	map_len = max(PAGE_SIZE - (initial_dtb & ~PAGE_MASK), (u64)128);
 
-	initial_boot_params = dt = early_memremap(initial_dtb, map_len);
-	size = of_get_flat_dt_size();
+	dt = early_memremap(initial_dtb, map_len);
+	size = fdt_totalsize(dt);
 	if (map_len < size) {
 		early_memunmap(dt, map_len);
-		initial_boot_params = dt = early_memremap(initial_dtb, size);
+		dt = early_memremap(initial_dtb, size);
 		map_len = size;
 	}
 
+	early_init_dt_verify(dt);
 	unflatten_and_copy_device_tree();
 	early_memunmap(dt, map_len);
 }
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index b8d3f1b..91c48cd 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -51,6 +51,7 @@
 #include <linux/ftrace.h>
 #include <linux/frame.h>
 #include <linux/kasan.h>
+#include <linux/moduleloader.h>
 
 #include <asm/text-patching.h>
 #include <asm/cacheflush.h>
@@ -405,6 +406,14 @@
 	return length;
 }
 
+/* Recover page to RW mode before releasing it */
+void free_insn_page(void *page)
+{
+	set_memory_nx((unsigned long)page & PAGE_MASK, 1);
+	set_memory_rw((unsigned long)page & PAGE_MASK, 1);
+	module_memfree(page);
+}
+
 static int arch_copy_kprobe(struct kprobe *p)
 {
 	int ret;
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index fa671b9..1808a9c 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -28,6 +28,7 @@
 #include <linux/kdebug.h>
 #include <linux/kallsyms.h>
 #include <linux/ftrace.h>
+#include <linux/frame.h>
 
 #include <asm/text-patching.h>
 #include <asm/cacheflush.h>
@@ -91,6 +92,7 @@
 }
 
 asm (
+			"optprobe_template_func:\n"
 			".global optprobe_template_entry\n"
 			"optprobe_template_entry:\n"
 #ifdef CONFIG_X86_64
@@ -128,7 +130,12 @@
 			"	popf\n"
 #endif
 			".global optprobe_template_end\n"
-			"optprobe_template_end:\n");
+			"optprobe_template_end:\n"
+			".type optprobe_template_func, @function\n"
+			".size optprobe_template_func, .-optprobe_template_func\n");
+
+void optprobe_template_func(void);
+STACK_FRAME_NON_STANDARD(optprobe_template_func);
 
 #define TMPL_MOVE_IDX \
 	((long)&optprobe_template_val - (long)&optprobe_template_entry)
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 469b23d..fd7e993 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -71,12 +71,17 @@
 static void machine_kexec_free_page_tables(struct kimage *image)
 {
 	free_page((unsigned long)image->arch.pgd);
+	image->arch.pgd = NULL;
 #ifdef CONFIG_X86_PAE
 	free_page((unsigned long)image->arch.pmd0);
+	image->arch.pmd0 = NULL;
 	free_page((unsigned long)image->arch.pmd1);
+	image->arch.pmd1 = NULL;
 #endif
 	free_page((unsigned long)image->arch.pte0);
+	image->arch.pte0 = NULL;
 	free_page((unsigned long)image->arch.pte1);
+	image->arch.pte1 = NULL;
 }
 
 static int machine_kexec_alloc_page_tables(struct kimage *image)
@@ -93,7 +98,6 @@
 	    !image->arch.pmd0 || !image->arch.pmd1 ||
 #endif
 	    !image->arch.pte0 || !image->arch.pte1) {
-		machine_kexec_free_page_tables(image);
 		return -ENOMEM;
 	}
 	return 0;
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index a5784a1..eae59ca 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -37,8 +37,11 @@
 static void free_transition_pgtable(struct kimage *image)
 {
 	free_page((unsigned long)image->arch.pud);
+	image->arch.pud = NULL;
 	free_page((unsigned long)image->arch.pmd);
+	image->arch.pmd = NULL;
 	free_page((unsigned long)image->arch.pte);
+	image->arch.pte = NULL;
 }
 
 static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
@@ -79,7 +82,6 @@
 	set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
 	return 0;
 err:
-	free_transition_pgtable(image);
 	return result;
 }
 
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 54b2711..e9195a1 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -33,6 +33,7 @@
 #include <asm/mce.h>
 #include <asm/vm86.h>
 #include <asm/switch_to.h>
+#include <asm/spec-ctrl.h>
 
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@ -121,11 +122,6 @@
 	fpu__clear(&tsk->thread.fpu);
 }
 
-static void hard_disable_TSC(void)
-{
-	cr4_set_bits(X86_CR4_TSD);
-}
-
 void disable_TSC(void)
 {
 	preempt_disable();
@@ -134,15 +130,10 @@
 		 * Must flip the CPU state synchronously with
 		 * TIF_NOTSC in the current running context.
 		 */
-		hard_disable_TSC();
+		cr4_set_bits(X86_CR4_TSD);
 	preempt_enable();
 }
 
-static void hard_enable_TSC(void)
-{
-	cr4_clear_bits(X86_CR4_TSD);
-}
-
 static void enable_TSC(void)
 {
 	preempt_disable();
@@ -151,7 +142,7 @@
 		 * Must flip the CPU state synchronously with
 		 * TIF_NOTSC in the current running context.
 		 */
-		hard_enable_TSC();
+		cr4_clear_bits(X86_CR4_TSD);
 	preempt_enable();
 }
 
@@ -179,48 +170,199 @@
 	return 0;
 }
 
-void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
-		      struct tss_struct *tss)
+static inline void switch_to_bitmap(struct tss_struct *tss,
+				    struct thread_struct *prev,
+				    struct thread_struct *next,
+				    unsigned long tifp, unsigned long tifn)
 {
-	struct thread_struct *prev, *next;
-
-	prev = &prev_p->thread;
-	next = &next_p->thread;
-
-	if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
-	    test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
-		unsigned long debugctl = get_debugctlmsr();
-
-		debugctl &= ~DEBUGCTLMSR_BTF;
-		if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
-			debugctl |= DEBUGCTLMSR_BTF;
-
-		update_debugctlmsr(debugctl);
-	}
-
-	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
-	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
-		/* prev and next are different */
-		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
-			hard_disable_TSC();
-		else
-			hard_enable_TSC();
-	}
-
-	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
+	if (tifn & _TIF_IO_BITMAP) {
 		/*
 		 * Copy the relevant range of the IO bitmap.
 		 * Normally this is 128 bytes or less:
 		 */
 		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
 		       max(prev->io_bitmap_max, next->io_bitmap_max));
-	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
+	} else if (tifp & _TIF_IO_BITMAP) {
 		/*
 		 * Clear any possible leftover bits:
 		 */
 		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
 	}
+}
+
+#ifdef CONFIG_SMP
+
+struct ssb_state {
+	struct ssb_state	*shared_state;
+	raw_spinlock_t		lock;
+	unsigned int		disable_state;
+	unsigned long		local_state;
+};
+
+#define LSTATE_SSB	0
+
+static DEFINE_PER_CPU(struct ssb_state, ssb_state);
+
+void speculative_store_bypass_ht_init(void)
+{
+	struct ssb_state *st = this_cpu_ptr(&ssb_state);
+	unsigned int this_cpu = smp_processor_id();
+	unsigned int cpu;
+
+	st->local_state = 0;
+
+	/*
+	 * Shared state setup happens once on the first bringup
+	 * of the CPU. It's not destroyed on CPU hotunplug.
+	 */
+	if (st->shared_state)
+		return;
+
+	raw_spin_lock_init(&st->lock);
+
+	/*
+	 * Go over HT siblings and check whether one of them has set up the
+	 * shared state pointer already.
+	 */
+	for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
+		if (cpu == this_cpu)
+			continue;
+
+		if (!per_cpu(ssb_state, cpu).shared_state)
+			continue;
+
+		/* Link it to the state of the sibling: */
+		st->shared_state = per_cpu(ssb_state, cpu).shared_state;
+		return;
+	}
+
+	/*
+	 * First HT sibling to come up on the core.  Link shared state of
+	 * the first HT sibling to itself. The siblings on the same core
+	 * which come up later will see the shared state pointer and link
+	 * themselves to the state of this CPU.
+	 */
+	st->shared_state = st;
+}
+
+/*
+ * Logic is: the first HT sibling to enable SSBD enables it for both siblings
+ * in the core, and the last sibling to disable it disables it for the whole
+ * core. This is how MSR_SPEC_CTRL works in "hardware":
+ *
+ *  CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
+ */
+static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
+{
+	struct ssb_state *st = this_cpu_ptr(&ssb_state);
+	u64 msr = x86_amd_ls_cfg_base;
+
+	if (!static_cpu_has(X86_FEATURE_ZEN)) {
+		msr |= ssbd_tif_to_amd_ls_cfg(tifn);
+		wrmsrl(MSR_AMD64_LS_CFG, msr);
+		return;
+	}
+
+	if (tifn & _TIF_SSBD) {
+		/*
+		 * Since this can race with prctl(), block reentry on the
+		 * same CPU.
+		 */
+		if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
+			return;
+
+		msr |= x86_amd_ls_cfg_ssbd_mask;
+
+		raw_spin_lock(&st->shared_state->lock);
+		/* First sibling enables SSBD: */
+		if (!st->shared_state->disable_state)
+			wrmsrl(MSR_AMD64_LS_CFG, msr);
+		st->shared_state->disable_state++;
+		raw_spin_unlock(&st->shared_state->lock);
+	} else {
+		if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
+			return;
+
+		raw_spin_lock(&st->shared_state->lock);
+		st->shared_state->disable_state--;
+		if (!st->shared_state->disable_state)
+			wrmsrl(MSR_AMD64_LS_CFG, msr);
+		raw_spin_unlock(&st->shared_state->lock);
+	}
+}
+#else
+static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
+{
+	u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
+
+	wrmsrl(MSR_AMD64_LS_CFG, msr);
+}
+#endif
+
+static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
+{
+	/*
+	 * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
+	 * so ssbd_tif_to_spec_ctrl() just works.
+	 */
+	wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
+}
+
+static __always_inline void intel_set_ssb_state(unsigned long tifn)
+{
+	u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
+
+	wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+}
+
+static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
+{
+	if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
+		amd_set_ssb_virt_state(tifn);
+	else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+		amd_set_core_ssb_state(tifn);
+	else
+		intel_set_ssb_state(tifn);
+}
+
+void speculative_store_bypass_update(unsigned long tif)
+{
+	preempt_disable();
+	__speculative_store_bypass_update(tif);
+	preempt_enable();
+}
+
+void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+		      struct tss_struct *tss)
+{
+	struct thread_struct *prev, *next;
+	unsigned long tifp, tifn;
+
+	prev = &prev_p->thread;
+	next = &next_p->thread;
+
+	tifn = READ_ONCE(task_thread_info(next_p)->flags);
+	tifp = READ_ONCE(task_thread_info(prev_p)->flags);
+	switch_to_bitmap(tss, prev, next, tifp, tifn);
+
 	propagate_user_return_notify(prev_p, next_p);
+
+	if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) &&
+	    arch_has_block_step()) {
+		unsigned long debugctl, msk;
+
+		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+		debugctl &= ~DEBUGCTLMSR_BTF;
+		msk = tifn & _TIF_BLOCKSTEP;
+		debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
+		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+	}
+
+	if ((tifp ^ tifn) & _TIF_NOTSC)
+		cr4_toggle_bits(X86_CR4_TSD);
+
+	if ((tifp ^ tifn) & _TIF_SSBD)
+		__speculative_store_bypass_update(tifn);
 }
 
 /*
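The rewritten __switch_to_xtra() above reads both tasks' thread flags once and derives MSR bits arithmetically instead of branching on each flag: the DEBUGCTLMSR_BTF line shifts the TIF bit straight into its destination position, and ssbd_tif_to_spec_ctrl()/ssbd_tif_to_amd_ls_cfg() rely on the same idea. Below is a standalone sketch of that trick; the bit positions are made up purely for illustration and are not the real TIF_BLOCKSTEP or DEBUGCTLMSR values.

/* Branchless "relocate a flag bit" sketch. FAKE_TIF_BLOCKSTEP and
 * FAKE_BTF_SHIFT are hypothetical stand-ins; only the shifting pattern
 * mirrors the kernel code above.
 */
#include <stdio.h>

#define FAKE_TIF_BLOCKSTEP	25	/* source bit in the thread flags */
#define FAKE_BTF_SHIFT		1	/* destination bit in the MSR image */
#define _FAKE_TIF_BLOCKSTEP	(1UL << FAKE_TIF_BLOCKSTEP)

static unsigned long flag_to_msr_bit(unsigned long tifn)
{
	unsigned long msk = tifn & _FAKE_TIF_BLOCKSTEP;

	/* Bit 25, if set, becomes bit 1; no conditional branch needed. */
	return (msk >> FAKE_TIF_BLOCKSTEP) << FAKE_BTF_SHIFT;
}

int main(void)
{
	printf("flag clear -> %#lx\n", flag_to_msr_bit(0));
	printf("flag set   -> %#lx\n", flag_to_msr_bit(_FAKE_TIF_BLOCKSTEP));
	return 0;
}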
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 03f21db..4a12362 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -9,6 +9,7 @@
 #include <linux/sched.h>
 #include <linux/tboot.h>
 #include <linux/delay.h>
+#include <linux/frame.h>
 #include <acpi/reboot.h>
 #include <asm/io.h>
 #include <asm/apic.h>
@@ -127,6 +128,7 @@
 #ifdef CONFIG_APM_MODULE
 EXPORT_SYMBOL(machine_real_restart);
 #endif
+STACK_FRAME_NON_STANDARD(machine_real_restart);
 
 /*
  * Some Apple MacBook and MacBookPro's needs reboot=p to be able to reboot
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index e803d72..10b22fc 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -75,6 +75,7 @@
 #include <asm/i8259.h>
 #include <asm/realmode.h>
 #include <asm/misc.h>
+#include <asm/spec-ctrl.h>
 
 /* Number of siblings per CPU package */
 int smp_num_siblings = 1;
@@ -229,6 +230,8 @@
 	 */
 	check_tsc_sync_target();
 
+	speculative_store_bypass_ht_init();
+
 	/*
 	 * Lock vector_lock and initialize the vectors on this cpu
 	 * before setting the cpu online. We must set it online with
@@ -1325,6 +1328,8 @@
 	set_mtrr_aps_delayed_init();
 
 	smp_quirk_init_udelay();
+
+	speculative_store_bypass_ht_init();
 }
 
 void arch_enable_nonboot_cpus_begin(void)
@@ -1492,6 +1497,7 @@
 	cpumask_clear(topology_core_cpumask(cpu));
 	c->phys_proc_id = 0;
 	c->cpu_core_id = 0;
+	c->booted_cores = 0;
 	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
 	recompute_smt_state();
 }
@@ -1591,6 +1597,8 @@
 	void *mwait_ptr;
 	int i;
 
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		return;
 	if (!this_cpu_has(X86_FEATURE_MWAIT))
 		return;
 	if (!this_cpu_has(X86_FEATURE_CLFLUSH))
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index d07a9390..769c370 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -24,6 +24,7 @@
 #include <asm/geode.h>
 #include <asm/apic.h>
 #include <asm/intel-family.h>
+#include <asm/i8259.h>
 
 unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);
@@ -366,6 +367,8 @@
 		tsc_clocksource_reliable = 1;
 	if (!strncmp(str, "noirqtime", 9))
 		no_sched_irq_time = 1;
+	if (!strcmp(str, "unstable"))
+		mark_tsc_unstable("boot parameter");
 	return 1;
 }
 
@@ -407,7 +410,7 @@
 	hpet2 -= hpet1;
 	tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
 	do_div(tmp, 1000000);
-	do_div(deltatsc, tmp);
+	deltatsc = div64_u64(deltatsc, tmp);
 
 	return (unsigned long) deltatsc;
 }
@@ -454,6 +457,20 @@
 	unsigned long tscmin, tscmax;
 	int pitcnt;
 
+	if (!has_legacy_pic()) {
+		/*
+		 * Relies on tsc_early_delay_calibrate() to have given us a
+		 * semi-usable udelay(); wait the same 50ms we would have
+		 * waited with the PIT loop below.
+		 */
+		udelay(10 * USEC_PER_MSEC);
+		udelay(10 * USEC_PER_MSEC);
+		udelay(10 * USEC_PER_MSEC);
+		udelay(10 * USEC_PER_MSEC);
+		udelay(10 * USEC_PER_MSEC);
+		return ULONG_MAX;
+	}
+
 	/* Set the Gate high, disable speaker */
 	outb((inb(0x61) & ~0x02) | 0x01, 0x61);
 
@@ -578,6 +595,9 @@
 	u64 tsc, delta;
 	unsigned long d1, d2;
 
+	if (!has_legacy_pic())
+		return 0;
+
 	/* Set the Gate high, disable speaker */
 	outb((inb(0x61) & ~0x02) | 0x01, 0x61);
 
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index c7194e9..4ef267f 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -353,6 +353,7 @@
 	/DISCARD/ : {
 		*(.eh_frame)
 		*(__func_stack_frame_non_standard)
+		*(__unreachable)
 	}
 }
 
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 93f924d..7e5119c 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -357,7 +357,7 @@
 
 	/* cpuid 0x80000008.ebx */
 	const u32 kvm_cpuid_8000_0008_ebx_x86_features =
-		F(IBPB) | F(IBRS);
+		F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD);
 
 	/* cpuid 0xC0000001.edx */
 	const u32 kvm_cpuid_C000_0001_edx_x86_features =
@@ -382,7 +382,7 @@
 
 	/* cpuid 7.0.edx*/
 	const u32 kvm_cpuid_7_0_edx_x86_features =
-		F(SPEC_CTRL) | F(ARCH_CAPABILITIES);
+		F(SPEC_CTRL) | F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES);
 
 	/* all calls to cpuid_count() should be made on the same cpu */
 	get_cpu();
@@ -468,6 +468,11 @@
 				entry->ecx &= ~F(PKU);
 			entry->edx &= kvm_cpuid_7_0_edx_x86_features;
 			cpuid_mask(&entry->edx, CPUID_7_EDX);
+			/*
+			 * We emulate ARCH_CAPABILITIES in software even
+			 * if the host doesn't support it.
+			 */
+			entry->edx |= F(ARCH_CAPABILITIES);
 		} else {
 			entry->ebx = 0;
 			entry->ecx = 0;
@@ -618,13 +623,20 @@
 			g_phys_as = phys_as;
 		entry->eax = g_phys_as | (virt_as << 8);
 		entry->edx = 0;
-		/* IBRS and IBPB aren't necessarily present in hardware cpuid */
-		if (boot_cpu_has(X86_FEATURE_IBPB))
-			entry->ebx |= F(IBPB);
-		if (boot_cpu_has(X86_FEATURE_IBRS))
-			entry->ebx |= F(IBRS);
+		/*
+		 * IBRS, IBPB and VIRT_SSBD aren't necessarily present in
+		 * hardware cpuid
+		 */
+		if (boot_cpu_has(X86_FEATURE_AMD_IBPB))
+			entry->ebx |= F(AMD_IBPB);
+		if (boot_cpu_has(X86_FEATURE_AMD_IBRS))
+			entry->ebx |= F(AMD_IBRS);
+		if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
+			entry->ebx |= F(VIRT_SSBD);
 		entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
 		cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
+		if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+			entry->ebx |= F(VIRT_SSBD);
 		break;
 	}
 	case 0x80000019:
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index d1beb71..c383697 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -165,21 +165,21 @@
 	struct kvm_cpuid_entry2 *best;
 
 	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
-	if (best && (best->ebx & bit(X86_FEATURE_IBPB)))
+	if (best && (best->ebx & bit(X86_FEATURE_AMD_IBPB)))
 		return true;
 	best = kvm_find_cpuid_entry(vcpu, 7, 0);
 	return best && (best->edx & bit(X86_FEATURE_SPEC_CTRL));
 }
 
-static inline bool guest_cpuid_has_ibrs(struct kvm_vcpu *vcpu)
+static inline bool guest_cpuid_has_spec_ctrl(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
 
 	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
-	if (best && (best->ebx & bit(X86_FEATURE_IBRS)))
+	if (best && (best->ebx & bit(X86_FEATURE_AMD_IBRS)))
 		return true;
 	best = kvm_find_cpuid_entry(vcpu, 7, 0);
-	return best && (best->edx & bit(X86_FEATURE_SPEC_CTRL));
+	return best && (best->edx & (bit(X86_FEATURE_SPEC_CTRL) | bit(X86_FEATURE_SSBD)));
 }
 
 static inline bool guest_cpuid_has_arch_capabilities(struct kvm_vcpu *vcpu)
@@ -190,6 +190,15 @@
 	return best && (best->edx & bit(X86_FEATURE_ARCH_CAPABILITIES));
 }
 
+static inline bool guest_cpuid_has_virt_ssbd(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
+	return best && (best->ebx & bit(X86_FEATURE_VIRT_SSBD));
+}
+
+
 
 /*
  * NRIPS is provided through cpuidfn 0x8000000a.edx bit 3
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index b24b3c6..a8a86be 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -299,8 +299,16 @@
 	if (!lapic_in_kernel(vcpu))
 		return;
 
+	/*
+	 * KVM emulates the 82093AA datasheet (with the in-kernel IOAPIC
+	 * implementation), which doesn't have an EOI register; some buggy
+	 * OSes (e.g. Windows with the Hyper-V role) disable EOI broadcast
+	 * in the LAPIC without checking the IOAPIC version first, so
+	 * level-triggered interrupts never get EOIed in the IOAPIC.
+	 */
 	feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
-	if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))))
+	if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))) &&
+	    !ioapic_in_kernel(vcpu->kvm))
 		v |= APIC_LVR_DIRECTED_EOI;
 	kvm_lapic_set_reg(apic, APIC_LVR, v);
 }
@@ -1363,8 +1371,10 @@
 
 static void cancel_hv_tscdeadline(struct kvm_lapic *apic)
 {
+	preempt_disable();
 	kvm_x86_ops->cancel_hv_timer(apic->vcpu);
 	apic->lapic_timer.hv_timer_in_use = false;
+	preempt_enable();
 }
 
 void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 8c99f2f..c4cd128 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -36,6 +36,7 @@
 #include <linux/slab.h>
 #include <linux/amd-iommu.h>
 #include <linux/hashtable.h>
+#include <linux/frame.h>
 
 #include <asm/apic.h>
 #include <asm/perf_event.h>
@@ -45,7 +46,7 @@
 #include <asm/kvm_para.h>
 #include <asm/irq_remapping.h>
 #include <asm/microcode.h>
-#include <asm/nospec-branch.h>
+#include <asm/spec-ctrl.h>
 
 #include <asm/virtext.h>
 #include "trace.h"
@@ -185,6 +186,12 @@
 	} host;
 
 	u64 spec_ctrl;
+	/*
+	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
+	 * translated into the appropriate LS_CFG bits on the host to
+	 * perform speculative control.
+	 */
+	u64 virt_spec_ctrl;
 
 	u32 *msrpm;
 
@@ -1561,6 +1568,7 @@
 	u32 eax = 1;
 
 	svm->spec_ctrl = 0;
+	svm->virt_spec_ctrl = 0;
 
 	if (!init_event) {
 		svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
@@ -1879,6 +1887,7 @@
 		 */
 		if (var->unusable)
 			var->db = 0;
+		/* This is symmetric with svm_set_segment() */
 		var->dpl = to_svm(vcpu)->vmcb->save.cpl;
 		break;
 	}
@@ -2024,18 +2033,14 @@
 	s->base = var->base;
 	s->limit = var->limit;
 	s->selector = var->selector;
-	if (var->unusable)
-		s->attrib = 0;
-	else {
-		s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
-		s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
-		s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
-		s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
-		s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
-		s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
-		s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
-		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
-	}
+	s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
+	s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
+	s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
+	s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
+	s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
+	s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
+	s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
+	s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
 
 	/*
 	 * This is always accurate, except if SYSRET returned to a segment
@@ -2044,7 +2049,8 @@
 	 * would entail passing the CPL to userspace and back.
 	 */
 	if (seg == VCPU_SREG_SS)
-		svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
+		/* This is symmetric with svm_get_segment() */
+		svm->vmcb->save.cpl = (var->dpl & 3);
 
 	mark_dirty(svm->vmcb, VMCB_SEG);
 }
@@ -3547,11 +3553,18 @@
 		break;
 	case MSR_IA32_SPEC_CTRL:
 		if (!msr_info->host_initiated &&
-		    !guest_cpuid_has_ibrs(vcpu))
+		    !guest_cpuid_has_spec_ctrl(vcpu))
 			return 1;
 
 		msr_info->data = svm->spec_ctrl;
 		break;
+	case MSR_AMD64_VIRT_SPEC_CTRL:
+		if (!msr_info->host_initiated &&
+		    !guest_cpuid_has_virt_ssbd(vcpu))
+			return 1;
+
+		msr_info->data = svm->virt_spec_ctrl;
+		break;
 	case MSR_IA32_UCODE_REV:
 		msr_info->data = 0x01000065;
 		break;
@@ -3645,7 +3658,7 @@
 		break;
 	case MSR_IA32_SPEC_CTRL:
 		if (!msr->host_initiated &&
-		    !guest_cpuid_has_ibrs(vcpu))
+		    !guest_cpuid_has_spec_ctrl(vcpu))
 			return 1;
 
 		/* The STIBP bit doesn't fault even if it's not advertised */
@@ -3686,6 +3699,16 @@
 			break;
 		set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
 		break;
+	case MSR_AMD64_VIRT_SPEC_CTRL:
+		if (!msr->host_initiated &&
+		    !guest_cpuid_has_virt_ssbd(vcpu))
+			return 1;
+
+		if (data & ~SPEC_CTRL_SSBD)
+			return 1;
+
+		svm->virt_spec_ctrl = data;
+		break;
 	case MSR_STAR:
 		svm->vmcb->save.star = data;
 		break;
@@ -4919,8 +4942,7 @@
 	 * is no need to worry about the conditional branch over the wrmsr
 	 * being speculatively taken.
 	 */
-	if (svm->spec_ctrl)
-		native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+	x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
 
 	asm volatile (
 		"push %%" _ASM_BP "; \n\t"
@@ -5014,6 +5036,18 @@
 #endif
 		);
 
+	/* Eliminate branch target predictions from guest mode */
+	vmexit_fill_RSB();
+
+#ifdef CONFIG_X86_64
+	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
+#else
+	loadsegment(fs, svm->host.fs);
+#ifndef CONFIG_X86_32_LAZY_GS
+	loadsegment(gs, svm->host.gs);
+#endif
+#endif
+
 	/*
 	 * We do not use IBRS in the kernel. If this vCPU has used the
 	 * SPEC_CTRL MSR it may have left it on; save the value and
@@ -5032,20 +5066,7 @@
 	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
 		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
-	if (svm->spec_ctrl)
-		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
-
-	/* Eliminate branch target predictions from guest mode */
-	vmexit_fill_RSB();
-
-#ifdef CONFIG_X86_64
-	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
-#else
-	loadsegment(fs, svm->host.fs);
-#ifndef CONFIG_X86_32_LAZY_GS
-	loadsegment(gs, svm->host.gs);
-#endif
-#endif
+	x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
 
 	reload_tss(vcpu);
 
@@ -5091,6 +5112,7 @@
 
 	mark_all_clean(svm->vmcb);
 }
+STACK_FRAME_NON_STANDARD(svm_vcpu_run);
 
 static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
 {
@@ -5147,7 +5169,7 @@
 	return false;
 }
 
-static bool svm_has_high_real_mode_segbase(void)
+static bool svm_has_emulated_msr(int index)
 {
 	return true;
 }
@@ -5464,7 +5486,7 @@
 	.hardware_enable = svm_hardware_enable,
 	.hardware_disable = svm_hardware_disable,
 	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
-	.cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
+	.has_emulated_msr = svm_has_emulated_msr,
 
 	.vcpu_create = svm_create_vcpu,
 	.vcpu_free = svm_free_vcpu,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 9c51710..ebceda2 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <linux/tboot.h>
 #include <linux/hrtimer.h>
+#include <linux/frame.h>
 #include <linux/nospec.h>
 #include "kvm_cache_regs.h"
 #include "x86.h"
@@ -50,7 +51,7 @@
 #include <asm/apic.h>
 #include <asm/irq_remapping.h>
 #include <asm/microcode.h>
-#include <asm/nospec-branch.h>
+#include <asm/spec-ctrl.h>
 
 #include "trace.h"
 #include "pmu.h"
@@ -2558,6 +2559,8 @@
 		return;
 	}
 
+	WARN_ON_ONCE(vmx->emulation_required);
+
 	if (kvm_exception_is_soft(nr)) {
 		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
 			     vmx->vcpu.arch.event_exit_inst_len);
@@ -3020,7 +3023,7 @@
 		break;
 	case MSR_IA32_SPEC_CTRL:
 		if (!msr_info->host_initiated &&
-		    !guest_cpuid_has_ibrs(vcpu))
+		    !guest_cpuid_has_spec_ctrl(vcpu))
 			return 1;
 
 		msr_info->data = to_vmx(vcpu)->spec_ctrl;
@@ -3137,11 +3140,11 @@
 		break;
 	case MSR_IA32_SPEC_CTRL:
 		if (!msr_info->host_initiated &&
-		    !guest_cpuid_has_ibrs(vcpu))
+		    !guest_cpuid_has_spec_ctrl(vcpu))
 			return 1;
 
 		/* The STIBP bit doesn't fault even if it's not advertised */
-		if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
+		if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
 			return 1;
 
 		vmx->spec_ctrl = data;
@@ -6430,12 +6433,12 @@
 			goto out;
 		}
 
-		if (err != EMULATE_DONE) {
-			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-			vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
-			vcpu->run->internal.ndata = 0;
-			return 0;
-		}
+		if (err != EMULATE_DONE)
+			goto emulation_error;
+
+		if (vmx->emulation_required && !vmx->rmode.vm86_active &&
+		    vcpu->arch.exception.pending)
+			goto emulation_error;
 
 		if (vcpu->arch.halt_request) {
 			vcpu->arch.halt_request = 0;
@@ -6451,6 +6454,12 @@
 
 out:
 	return ret;
+
+emulation_error:
+	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+	vcpu->run->internal.ndata = 0;
+	return 0;
 }
 
 static int __grow_ple_window(int val)
@@ -7924,11 +7933,13 @@
 {
 	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 	int cr = exit_qualification & 15;
-	int reg = (exit_qualification >> 8) & 15;
-	unsigned long val = kvm_register_readl(vcpu, reg);
+	int reg;
+	unsigned long val;
 
 	switch ((exit_qualification >> 4) & 3) {
 	case 0: /* mov to cr */
+		reg = (exit_qualification >> 8) & 15;
+		val = kvm_register_readl(vcpu, reg);
 		switch (cr) {
 		case 0:
 			if (vmcs12->cr0_guest_host_mask &
@@ -7983,6 +7994,7 @@
 		 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
 		 * cr0. Other attempted changes are ignored, with no exit.
 		 */
+		val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
 		if (vmcs12->cr0_guest_host_mask & 0xe &
 		    (val ^ vmcs12->cr0_read_shadow))
 			return true;
@@ -8686,10 +8698,23 @@
 			);
 	}
 }
+STACK_FRAME_NON_STANDARD(vmx_handle_external_intr);
 
-static bool vmx_has_high_real_mode_segbase(void)
+static bool vmx_has_emulated_msr(int index)
 {
-	return enable_unrestricted_guest || emulate_invalid_guest_state;
+	switch (index) {
+	case MSR_IA32_SMBASE:
+		/*
+		 * We cannot do SMM unless we can run the guest in big
+		 * real mode.
+		 */
+		return enable_unrestricted_guest || emulate_invalid_guest_state;
+	case MSR_AMD64_VIRT_SPEC_CTRL:
+		/* This is AMD only.  */
+		return false;
+	default:
+		return true;
+	}
 }
 
 static bool vmx_mpx_supported(void)
@@ -8912,10 +8937,10 @@
 	 * is no need to worry about the conditional branch over the wrmsr
 	 * being speculatively taken.
 	 */
-	if (vmx->spec_ctrl)
-		native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+	x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
 
 	vmx->__launched = vmx->loaded_vmcs->launched;
+
 	asm(
 		/* Store host registers */
 		"push %%" _ASM_DX "; push %%" _ASM_BP ";"
@@ -9051,8 +9076,7 @@
 	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
 		vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
-	if (vmx->spec_ctrl)
-		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+	x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
 
 	/* Eliminate branch target predictions from guest mode */
 	vmexit_fill_RSB();
@@ -9115,6 +9139,7 @@
 	vmx_recover_nmi_blocking(vmx);
 	vmx_complete_interrupts(vmx);
 }
+STACK_FRAME_NON_STANDARD(vmx_vcpu_run);
 
 static void vmx_load_vmcs01(struct kvm_vcpu *vcpu)
 {
@@ -10660,8 +10685,7 @@
 		vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
 	}
 
-	if (nested_cpu_has_ept(vmcs12))
-		vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
+	vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
 
 	if (nested_cpu_has_vid(vmcs12))
 		vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
@@ -11344,7 +11368,7 @@
 	.hardware_enable = hardware_enable,
 	.hardware_disable = hardware_disable,
 	.cpu_has_accelerated_tpr = report_flexpriority,
-	.cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase,
+	.has_emulated_msr = vmx_has_emulated_msr,
 
 	.vcpu_create = vmx_create_vcpu,
 	.vcpu_free = vmx_free_vcpu,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3aaaf30..4aa265a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1002,6 +1002,7 @@
 	MSR_IA32_MCG_CTL,
 	MSR_IA32_MCG_EXT_CTL,
 	MSR_IA32_SMBASE,
+	MSR_AMD64_VIRT_SPEC_CTRL,
 };
 
 static unsigned num_emulated_msrs;
@@ -2664,7 +2665,7 @@
 		 * fringe case that is not enabled except via specific settings
 		 * of the module parameters.
 		 */
-		r = kvm_x86_ops->cpu_has_high_real_mode_segbase();
+		r = kvm_x86_ops->has_emulated_msr(MSR_IA32_SMBASE);
 		break;
 	case KVM_CAP_COALESCED_MMIO:
 		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
@@ -4130,13 +4131,14 @@
 		mutex_unlock(&kvm->lock);
 		break;
 	case KVM_XEN_HVM_CONFIG: {
+		struct kvm_xen_hvm_config xhc;
 		r = -EFAULT;
-		if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
-				   sizeof(struct kvm_xen_hvm_config)))
+		if (copy_from_user(&xhc, argp, sizeof(xhc)))
 			goto out;
 		r = -EINVAL;
-		if (kvm->arch.xen_hvm_config.flags)
+		if (xhc.flags)
 			goto out;
+		memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc));
 		r = 0;
 		break;
 	}
@@ -4226,14 +4228,8 @@
 	num_msrs_to_save = j;
 
 	for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
-		switch (emulated_msrs[i]) {
-		case MSR_IA32_SMBASE:
-			if (!kvm_x86_ops->cpu_has_high_real_mode_segbase())
-				continue;
-			break;
-		default:
-			break;
-		}
+		if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i]))
+			continue;
 
 		if (j < i)
 			emulated_msrs[j] = emulated_msrs[i];
@@ -7263,6 +7259,7 @@
 {
 	struct msr_data apic_base_msr;
 	int mmu_reset_needed = 0;
+	int cpuid_update_needed = 0;
 	int pending_vec, max_bits, idx;
 	struct desc_ptr dt;
 
@@ -7294,8 +7291,10 @@
 	vcpu->arch.cr0 = sregs->cr0;
 
 	mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
+	cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) &
+				(X86_CR4_OSXSAVE | X86_CR4_PKE));
 	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
-	if (sregs->cr4 & (X86_CR4_OSXSAVE | X86_CR4_PKE))
+	if (cpuid_update_needed)
 		kvm_update_cpuid(vcpu);
 
 	idx = srcu_read_lock(&vcpu->kvm->srcu);
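The KVM_XEN_HVM_CONFIG hunk above switches from copying user data directly into the live kvm->arch.xen_hvm_config to copying into a local structure, validating it, and only then committing it, so a rejected ioctl no longer leaves partially written state behind. A generic sketch of that copy-validate-commit pattern follows; the structure and helper names are invented for illustration and are not part of this tree.

/* Copy-validate-commit sketch; struct and function names are made up. */
#include <string.h>
#include <errno.h>

struct cfg { unsigned int flags; unsigned long blob_addr; };

static struct cfg live_cfg;		/* state the rest of the code relies on */

/* stand-in for copy_from_user(): returns 0 on success */
static int copy_in(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);
	return 0;
}

int set_cfg(const void *argp)
{
	struct cfg tmp;

	if (copy_in(&tmp, argp, sizeof(tmp)))
		return -EFAULT;
	if (tmp.flags)			/* validate before anything is published */
		return -EINVAL;

	live_cfg = tmp;			/* commit only after validation succeeds */
	return 0;
}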
diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
index 7e48807..45a53df 100644
--- a/arch/x86/lib/csum-copy_64.S
+++ b/arch/x86/lib/csum-copy_64.S
@@ -55,7 +55,7 @@
 	movq  %r12, 3*8(%rsp)
 	movq  %r14, 4*8(%rsp)
 	movq  %r13, 5*8(%rsp)
-	movq  %rbp, 6*8(%rsp)
+	movq  %r15, 6*8(%rsp)
 
 	movq  %r8, (%rsp)
 	movq  %r9, 1*8(%rsp)
@@ -74,7 +74,7 @@
 	/* main loop. clear in 64 byte blocks */
 	/* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
 	/* r11:	temp3, rdx: temp4, r12 loopcnt */
-	/* r10:	temp5, rbp: temp6, r14 temp7, r13 temp8 */
+	/* r10:	temp5, r15: temp6, r14 temp7, r13 temp8 */
 	.p2align 4
 .Lloop:
 	source
@@ -89,7 +89,7 @@
 	source
 	movq  32(%rdi), %r10
 	source
-	movq  40(%rdi), %rbp
+	movq  40(%rdi), %r15
 	source
 	movq  48(%rdi), %r14
 	source
@@ -103,7 +103,7 @@
 	adcq  %r11, %rax
 	adcq  %rdx, %rax
 	adcq  %r10, %rax
-	adcq  %rbp, %rax
+	adcq  %r15, %rax
 	adcq  %r14, %rax
 	adcq  %r13, %rax
 
@@ -121,7 +121,7 @@
 	dest
 	movq %r10, 32(%rsi)
 	dest
-	movq %rbp, 40(%rsi)
+	movq %r15, 40(%rsi)
 	dest
 	movq %r14, 48(%rsi)
 	dest
@@ -203,7 +203,7 @@
 	movq 3*8(%rsp), %r12
 	movq 4*8(%rsp), %r14
 	movq 5*8(%rsp), %r13
-	movq 6*8(%rsp), %rbp
+	movq 6*8(%rsp), %r15
 	addq $7*8, %rsp
 	ret
 
diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
index c815564..10ffa7e 100644
--- a/arch/x86/lib/msr-reg.S
+++ b/arch/x86/lib/msr-reg.S
@@ -13,14 +13,14 @@
 .macro op_safe_regs op
 ENTRY(\op\()_safe_regs)
 	pushq %rbx
-	pushq %rbp
+	pushq %r12
 	movq	%rdi, %r10	/* Save pointer */
 	xorl	%r11d, %r11d	/* Return value */
 	movl    (%rdi), %eax
 	movl    4(%rdi), %ecx
 	movl    8(%rdi), %edx
 	movl    12(%rdi), %ebx
-	movl    20(%rdi), %ebp
+	movl    20(%rdi), %r12d
 	movl    24(%rdi), %esi
 	movl    28(%rdi), %edi
 1:	\op
@@ -29,10 +29,10 @@
 	movl    %ecx, 4(%r10)
 	movl    %edx, 8(%r10)
 	movl    %ebx, 12(%r10)
-	movl    %ebp, 20(%r10)
+	movl    %r12d, 20(%r10)
 	movl    %esi, 24(%r10)
 	movl    %edi, 28(%r10)
-	popq %rbp
+	popq %r12
 	popq %rbx
 	ret
 3:
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 7df8e3a..d35d0e4 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1014,8 +1014,7 @@
 	after_bootmem = 1;
 
 	/* Register memory areas for /proc/kcore */
-	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR,
-			 PAGE_SIZE, KCORE_OTHER);
+	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);
 
 	mem_init_print_info(NULL);
 }
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 73dcb0e1..dcd6714 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -279,9 +279,11 @@
 
 	/*
 	 * The .rodata section needs to be read-only. Using the pfn
-	 * catches all aliases.
+	 * catches all aliases.  This also includes __ro_after_init,
+	 * so do not enforce until kernel_set_to_readonly is true.
 	 */
-	if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
+	if (kernel_set_to_readonly &&
+	    within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
 		   __pa_symbol(__end_rodata) >> PAGE_SHIFT))
 		pgprot_val(forbidden) |= _PAGE_RW;
 
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index b97ef29..a3b63e5 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -1,5 +1,6 @@
 #include <linux/mm.h>
 #include <linux/gfp.h>
+#include <linux/hugetlb.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/tlb.h>
@@ -577,6 +578,10 @@
 	    (mtrr != MTRR_TYPE_WRBACK))
 		return 0;
 
+	/* Bail out if we are on a populated non-leaf entry: */
+	if (pud_present(*pud) && !pud_huge(*pud))
+		return 0;
+
 	prot = pgprot_4k_2_large(prot);
 
 	set_pte((pte_t *)pud, pfn_pte(
@@ -605,6 +610,10 @@
 		return 0;
 	}
 
+	/* Bail out if we are on a populated non-leaf entry: */
+	if (pmd_present(*pmd) && !pmd_huge(*pmd))
+		return 0;
+
 	prot = pgprot_4k_2_large(prot);
 
 	set_pte((pte_t *)pmd, pfn_pte(
diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c
index f88ce0e..0bbec04 100644
--- a/arch/x86/mm/pkeys.c
+++ b/arch/x86/mm/pkeys.c
@@ -95,26 +95,27 @@
 	 */
 	if (pkey != -1)
 		return pkey;
-	/*
-	 * Look for a protection-key-drive execute-only mapping
-	 * which is now being given permissions that are not
-	 * execute-only.  Move it back to the default pkey.
-	 */
-	if (vma_is_pkey_exec_only(vma) &&
-	    (prot & (PROT_READ|PROT_WRITE))) {
-		return 0;
-	}
+
 	/*
 	 * The mapping is execute-only.  Go try to get the
 	 * execute-only protection key.  If we fail to do that,
 	 * fall through as if we do not have execute-only
-	 * support.
+	 * support in this mm.
 	 */
 	if (prot == PROT_EXEC) {
 		pkey = execute_only_pkey(vma->vm_mm);
 		if (pkey > 0)
 			return pkey;
+	} else if (vma_is_pkey_exec_only(vma)) {
+		/*
+		 * Protections are *not* PROT_EXEC, but the mapping
+		 * is using the exec-only pkey.  This mapping was
+		 * PROT_EXEC and will no longer be.  Move back to
+		 * the default pkey.
+		 */
+		return ARCH_DEFAULT_PKEY;
 	}
+
 	/*
 	 * This is a vanilla, non-pkey mprotect (or we failed to
 	 * setup execute-only), inherit the pkey from the VMA we
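With the pkeys change above, only a mapping whose requested protection is exactly PROT_EXEC is moved to the execute-only protection key, and a mapping that regains read or write permission is moved back to the default key. From userspace this is driven by ordinary mprotect() calls; the sketch below shows the expected sequence on protection-key-capable hardware and is only illustrative (whether reads on the exec-only region actually fault depends on the CPU and kernel configuration).

/* Rough sketch: request an execute-only mapping via plain mprotect().
 * On pkey-capable hardware with this support, the PROT_EXEC-only region
 * is switched to the execute-only pkey; re-adding PROT_READ moves it
 * back to the default pkey, as in the hunk above.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	unsigned char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	p[0] = 0xc3;				/* x86 "ret", just so the page has content */

	if (mprotect(p, pagesz, PROT_EXEC))	/* exec-only: may use the exec-only pkey */
		perror("mprotect(PROT_EXEC)");

	if (mprotect(p, pagesz, PROT_READ | PROT_EXEC))	/* readable again: default pkey */
		perror("mprotect(PROT_READ|PROT_EXEC)");

	printf("first byte after re-adding PROT_READ: %#x\n", p[0]);
	return munmap(p, pagesz);
}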
diff --git a/arch/x86/net/Makefile b/arch/x86/net/Makefile
index 90568c3..fefb4b6 100644
--- a/arch/x86/net/Makefile
+++ b/arch/x86/net/Makefile
@@ -1,4 +1,6 @@
 #
 # Arch-specific network modules
 #
+OBJECT_FILES_NON_STANDARD_bpf_jit.o += y
+
 obj-$(CONFIG_BPF_JIT) += bpf_jit.o bpf_jit_comp.o
diff --git a/arch/x86/platform/efi/Makefile b/arch/x86/platform/efi/Makefile
index 066619b..7a25502 100644
--- a/arch/x86/platform/efi/Makefile
+++ b/arch/x86/platform/efi/Makefile
@@ -1,4 +1,5 @@
 OBJECT_FILES_NON_STANDARD_efi_thunk_$(BITS).o := y
+OBJECT_FILES_NON_STANDARD_efi_stub_$(BITS).o := y
 
 obj-$(CONFIG_EFI) 		+= quirks.o efi.o efi_$(BITS).o efi_stub_$(BITS).o
 obj-$(CONFIG_ACPI_BGRT) += efi-bgrt.o
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 274dfc4..a0e85f2 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -832,9 +832,11 @@
 
 	/*
 	 * We don't do virtual mode, since we don't do runtime services, on
-	 * non-native EFI
+	 * non-native EFI. With efi=old_map, we don't do runtime services in
+	 * kexec kernel because in the initial boot something else might
+	 * have been mapped at these virtual addresses.
 	 */
-	if (!efi_is_native()) {
+	if (!efi_is_native() || efi_enabled(EFI_OLD_MEMMAP)) {
 		efi_memmap_unmap();
 		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
 		return;
diff --git a/arch/x86/power/Makefile b/arch/x86/power/Makefile
index a6a198c..0504187 100644
--- a/arch/x86/power/Makefile
+++ b/arch/x86/power/Makefile
@@ -1,3 +1,5 @@
+OBJECT_FILES_NON_STANDARD_hibernate_asm_$(BITS).o := y
+
 # __restore_processor_state() restores %gs after S3 resume and so should not
 # itself be stack-protected
 nostackp := $(call cc-option, -fno-stack-protector)
diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
index 9f14bd3..74b516c 100644
--- a/arch/x86/power/hibernate_32.c
+++ b/arch/x86/power/hibernate_32.c
@@ -142,7 +142,7 @@
 #endif
 }
 
-int swsusp_arch_resume(void)
+asmlinkage int swsusp_arch_resume(void)
 {
 	int error;
 
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index 9634557..0cb1dd4 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -149,7 +149,7 @@
 	return 0;
 }
 
-int swsusp_arch_resume(void)
+asmlinkage int swsusp_arch_resume(void)
 {
 	int error;
 
diff --git a/arch/x86/um/stub_segv.c b/arch/x86/um/stub_segv.c
index 1518d28..27361cb 100644
--- a/arch/x86/um/stub_segv.c
+++ b/arch/x86/um/stub_segv.c
@@ -6,11 +6,12 @@
 #include <sysdep/stub.h>
 #include <sysdep/faultinfo.h>
 #include <sysdep/mcontext.h>
+#include <sys/ucontext.h>
 
 void __attribute__ ((__section__ (".__syscall_stub")))
 stub_segv_handler(int sig, siginfo_t *info, void *p)
 {
-	struct ucontext *uc = p;
+	ucontext_t *uc = p;
 
 	GET_FAULTINFO_FROM_MC(*((struct faultinfo *) STUB_DATA),
 			      &uc->uc_mcontext);
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index e47e527..4a54059 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -1,3 +1,6 @@
+OBJECT_FILES_NON_STANDARD_xen-asm_$(BITS).o := y
+OBJECT_FILES_NON_STANDARD_xen-pvh.o := y
+
 ifdef CONFIG_FUNCTION_TRACER
 # Do not profile debug and lowlevel utilities
 CFLAGS_REMOVE_spinlock.o = -pg
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 2bea87c..e3a3f5a 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -75,6 +75,7 @@
 #include <asm/mwait.h>
 #include <asm/pci_x86.h>
 #include <asm/cpu.h>
+#include <asm/unwind_hints.h>
 
 #ifdef CONFIG_ACPI
 #include <linux/acpi.h>
@@ -1452,10 +1453,12 @@
 		 * GDT. The new GDT has  __KERNEL_CS with CS.L = 1
 		 * and we are jumping to reload it.
 		 */
-		asm volatile ("pushq %0\n"
+		asm volatile (UNWIND_HINT_SAVE
+			      "pushq %0\n"
 			      "leaq 1f(%%rip),%0\n"
 			      "pushq %0\n"
 			      "lretq\n"
+			      UNWIND_HINT_RESTORE
 			      "1:\n"
 			      : "=&r" (dummy) : "0" (__KERNEL_CS));
 
@@ -1977,10 +1980,8 @@
 
 static void xen_set_cpu_features(struct cpuinfo_x86 *c)
 {
-	if (xen_pv_domain()) {
-		clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+	if (xen_pv_domain())
 		set_cpu_cap(c, X86_FEATURE_XENPV);
-	}
 }
 
 static void xen_pin_vcpu(int cpu)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 418f1b8..c92f75f 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1317,8 +1317,6 @@
 	struct mmuext_op *op;
 	struct multicall_space mcs;
 
-	trace_xen_mmu_flush_tlb_all(0);
-
 	preempt_disable();
 
 	mcs = xen_mc_entry(sizeof(*op));
@@ -1336,8 +1334,6 @@
 	struct mmuext_op *op;
 	struct multicall_space mcs;
 
-	trace_xen_mmu_flush_tlb(0);
-
 	preempt_disable();
 
 	mcs = xen_mc_entry(sizeof(*op));
diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h
index 72bfc1c..5bfbc1c 100644
--- a/arch/xtensa/include/asm/futex.h
+++ b/arch/xtensa/include/asm/futex.h
@@ -44,18 +44,10 @@
 	: "r" (uaddr), "I" (-EFAULT), "r" (oparg)	\
 	: "memory")
 
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+		u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, ret;
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
-		return -EFAULT;
 
 #if !XCHAL_HAVE_S32C1I
 	return -ENOSYS;
@@ -89,19 +81,10 @@
 
 	pagefault_enable();
 
-	if (ret)
-		return ret;
+	if (!ret)
+		*oval = oldval;
 
-	switch (cmp) {
-	case FUTEX_OP_CMP_EQ: return (oldval == cmparg);
-	case FUTEX_OP_CMP_NE: return (oldval != cmparg);
-	case FUTEX_OP_CMP_LT: return (oldval < cmparg);
-	case FUTEX_OP_CMP_GE: return (oldval >= cmparg);
-	case FUTEX_OP_CMP_LE: return (oldval <= cmparg);
-	case FUTEX_OP_CMP_GT: return (oldval > cmparg);
-	}
-
-	return -ENOSYS;
+	return ret;
 }
 
 static inline int
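The xtensa hunk above follows the tree-wide futex rework: decoding encoded_op and evaluating the final comparison now happen in the generic futex code, while the arch hook only performs the atomic operation and reports the old value through *oval. A rough kernel-side sketch of the generic caller, reconstructed from the fields the removed arch code used to decode (not copied from this tree), looks as follows.

/* Sketch of the generic side of the split; reconstructed, not verbatim. */
static int generic_futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
{
	int op     = (encoded_op >> 28) & 7;
	int cmp    = (encoded_op >> 24) & 15;
	int oparg  = (int)(encoded_op << 8) >> 20;	/* sign-extended 12-bit field */
	int cmparg = (int)(encoded_op << 20) >> 20;
	int oldval, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

	/* Arch does only the atomic read-modify-write and reports oldval. */
	ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
	if (ret)
		return ret;

	switch (cmp) {
	case FUTEX_OP_CMP_EQ: return oldval == cmparg;
	case FUTEX_OP_CMP_NE: return oldval != cmparg;
	case FUTEX_OP_CMP_LT: return oldval <  cmparg;
	case FUTEX_OP_CMP_GE: return oldval >= cmparg;
	case FUTEX_OP_CMP_LE: return oldval <= cmparg;
	case FUTEX_OP_CMP_GT: return oldval >  cmparg;
	}
	return -ENOSYS;
}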
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 63f72f0..80dedde 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -175,6 +175,9 @@
 	if (!bio_is_rw(bio))
 		return false;
 
+	if (!bio_sectors(bio))
+		return false;
+
 	/* Already protected? */
 	if (bio_integrity(bio))
 		return false;
diff --git a/block/bio.c b/block/bio.c
index 07f287b..91b6462 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -42,9 +42,9 @@
  * break badly! cannot be bigger than what you can fit into an
  * unsigned short
  */
-#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
+#define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n }
 static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
-	BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
+	BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64), BV(128, 128), BV(BIO_MAX_PAGES, max),
 };
 #undef BV
 
@@ -565,6 +565,15 @@
 }
 EXPORT_SYMBOL(bio_phys_segments);
 
+static inline void bio_clone_crypt_key(struct bio *dst, const struct bio *src)
+{
+#ifdef CONFIG_PFK
+	dst->bi_crypt_key = src->bi_crypt_key;
+	dst->bi_iter.bi_dun = src->bi_iter.bi_dun;
+#endif
+	dst->bi_dio_inode = src->bi_dio_inode;
+}
+
 /**
  * 	__bio_clone_fast - clone a bio that shares the original bio's biovec
  * 	@bio: destination bio
@@ -589,7 +598,8 @@
 	bio->bi_opf = bio_src->bi_opf;
 	bio->bi_iter = bio_src->bi_iter;
 	bio->bi_io_vec = bio_src->bi_io_vec;
-
+	bio->bi_dio_inode = bio_src->bi_dio_inode;
+	bio_clone_crypt_key(bio, bio_src);
 	bio_clone_blkcg_association(bio, bio_src);
 }
 EXPORT_SYMBOL(__bio_clone_fast);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index abde370..f44daa1 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -6,9 +6,9 @@
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/scatterlist.h>
-
+#include <linux/pfk.h>
 #include <trace/events/block.h>
-
+#include <linux/pfk.h>
 #include "blk.h"
 
 static struct bio *blk_bio_discard_split(struct request_queue *q,
@@ -725,6 +725,11 @@
 	}
 }
 
+static bool crypto_not_mergeable(const struct bio *bio, const struct bio *nxt)
+{
+	return (!pfk_allow_merge_bio(bio, nxt));
+}
+
 /*
  * Has to be called with the request spinlock acquired
  */
@@ -752,6 +757,8 @@
 	    !blk_write_same_mergeable(req->bio, next->bio))
 		return 0;
 
+	if (crypto_not_mergeable(req->bio, next->bio))
+		return 0;
 	/*
 	 * If we are allowed to merge, then append bio list
 	 * from next to rq and release next. merge_requests_fn
@@ -862,6 +869,8 @@
 	    !blk_write_same_mergeable(rq->bio, bio))
 		return false;
 
+	if (crypto_not_mergeable(rq->bio, bio))
+		return false;
 	return true;
 }
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 8c74712..0668b66 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1265,13 +1265,13 @@
 
 	blk_queue_bounce(q, &bio);
 
+	blk_queue_split(q, &bio, q->bio_split);
+
 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
 		bio_io_error(bio);
 		return BLK_QC_T_NONE;
 	}
 
-	blk_queue_split(q, &bio, q->bio_split);
-
 	if (!is_flush_fua && !blk_queue_nomerges(q) &&
 	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
 		return BLK_QC_T_NONE;
@@ -1592,7 +1592,8 @@
 {
 	unsigned flush_start_tag = set->queue_depth;
 
-	blk_mq_tag_idle(hctx);
+	if (blk_mq_hw_queue_mapped(hctx))
+		blk_mq_tag_idle(hctx);
 
 	if (set->ops->exit_request)
 		set->ops->exit_request(set->driver_data,
@@ -1907,6 +1908,9 @@
 	struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
 
 	blk_mq_sysfs_unregister(q);
+
+	/* protect against switching io scheduler  */
+	mutex_lock(&q->sysfs_lock);
 	for (i = 0; i < set->nr_hw_queues; i++) {
 		int node;
 
@@ -1956,6 +1960,7 @@
 		}
 	}
 	q->nr_hw_queues = i;
+	mutex_unlock(&q->sysfs_lock);
 	blk_mq_sysfs_register(q);
 }
 
@@ -2014,15 +2019,15 @@
 
 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 
-	mutex_lock(&all_q_mutex);
 	get_online_cpus();
+	mutex_lock(&all_q_mutex);
 
 	list_add_tail(&q->all_q_node, &all_q_list);
 	blk_mq_add_queue_tag_set(set, q);
 	blk_mq_map_swqueue(q, cpu_online_mask);
 
-	put_online_cpus();
 	mutex_unlock(&all_q_mutex);
+	put_online_cpus();
 
 	return q;
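The last blk-mq hunk above reorders get_online_cpus()/put_online_cpus() against all_q_mutex so the CPU hotplug lock is taken outside the mutex on both the lock and unlock side, presumably to keep a single lock order against other paths that nest the two the same way. A tiny userspace illustration of that rule, with placeholder locks (consistent ordering is what prevents ABBA deadlocks):

/* ABBA-avoidance sketch with placeholder locks; the fix mirrored above is
 * to take "outer" before "inner" on every path and release in reverse.
 */
#include <pthread.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;	/* cf. get_online_cpus() */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;	/* cf. all_q_mutex */

static void *path_a(void *arg)
{
	pthread_mutex_lock(&outer);
	pthread_mutex_lock(&inner);	/* consistent order: outer, then inner */
	/* ... register the queue ... */
	pthread_mutex_unlock(&inner);
	pthread_mutex_unlock(&outer);
	return arg;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, path_a, NULL);
	path_a(NULL);			/* both callers use the same order: no deadlock */
	pthread_join(t, NULL);
	return 0;
}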
 
diff --git a/block/elevator.c b/block/elevator.c
index f7d973a..6dd2ca4 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -425,7 +425,10 @@
 	/*
 	 * First try one-hit cache.
 	 */
-	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
+	if (q->last_merge) {
+		if (!elv_bio_merge_ok(q->last_merge, bio))
+			return ELEVATOR_NO_MERGE;
+
 		ret = blk_try_merge(q->last_merge, bio);
 		if (ret != ELEVATOR_NO_MERGE) {
 			*req = q->last_merge;
diff --git a/block/ioctl.c b/block/ioctl.c
index d4a78d0..c4555b1 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -564,8 +564,6 @@
 		if ((size >> 9) > ~0UL)
 			return -EFBIG;
 		return put_ulong(arg, size >> 9);
-	case BLKGETSTPART:
-		return put_ulong(arg, bdev->bd_part->start_sect);
 	case BLKGETSIZE64:
 		return put_u64(arg, i_size_read(bdev->bd_inode));
 	case BLKTRACESTART:
diff --git a/block/partition-generic.c b/block/partition-generic.c
index a2437c0..298c05f 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -321,8 +321,10 @@
 
 	if (info) {
 		struct partition_meta_info *pinfo = alloc_part_info(disk);
-		if (!pinfo)
+		if (!pinfo) {
+			err = -ENOMEM;
 			goto out_free_stats;
+		}
 		memcpy(pinfo, info, sizeof(*info));
 		p->info = pinfo;
 	}
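
The partition-generic.c hunk is a standard goto-unwind fix: the allocation-failure branch jumped to out_free_stats without setting err, so the caller could see success with no partition info. A generic userspace sketch of the pattern, with made-up helpers:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct part { void *stats; void *info; };

/* Every failure branch assigns err before jumping to the unwind label,
 * so the function never returns 0 with a half-initialised object. */
static int add_partition(struct part *p, int want_info)
{
	int err;

	p->stats = malloc(64);
	if (!p->stats)
		return -ENOMEM;

	if (want_info) {
		p->info = malloc(64);
		if (!p->info) {
			err = -ENOMEM;	/* the assignment the fix adds */
			goto out_free_stats;
		}
	}
	return 0;

out_free_stats:
	free(p->stats);
	p->stats = NULL;
	return err;
}

int main(void)
{
	struct part p = { NULL, NULL };

	printf("add_partition: %d\n", add_partition(&p, 1));
	free(p.info);
	free(p.stats);
	return 0;
}
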
diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c
index 5610cd5..7d8d50c 100644
--- a/block/partitions/msdos.c
+++ b/block/partitions/msdos.c
@@ -300,7 +300,9 @@
 			continue;
 		bsd_start = le32_to_cpu(p->p_offset);
 		bsd_size = le32_to_cpu(p->p_size);
-		if (memcmp(flavour, "bsd\0", 4) == 0)
+		/* FreeBSD has relative offset if C partition offset is zero */
+		if (memcmp(flavour, "bsd\0", 4) == 0 &&
+		    le32_to_cpu(l->d_partitions[2].p_offset) == 0)
 			bsd_start += offset;
 		if (offset == bsd_start && size == bsd_size)
 			/* full parent partition, we have it already */
diff --git a/build.config.cuttlefish.x86_64 b/build.config.cuttlefish.x86_64
new file mode 100644
index 0000000..8d56143
--- /dev/null
+++ b/build.config.cuttlefish.x86_64
@@ -0,0 +1,16 @@
+ARCH=x86_64
+BRANCH=android-4.9
+CLANG_TRIPLE=x86_64-linux-gnu-
+CROSS_COMPILE=x86_64-linux-androidkernel-
+DEFCONFIG=x86_64_cuttlefish_defconfig
+EXTRA_CMDS=''
+KERNEL_DIR=common
+POST_DEFCONFIG_CMDS="check_defconfig"
+CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r328903/bin
+LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9/bin
+FILES="
+arch/x86/boot/bzImage
+vmlinux
+System.map
+"
+STOP_SHIP_TRACEPRINTK=1
diff --git a/build.config.goldfish.arm b/build.config.goldfish.arm
index 866da93..ff5646a 100644
--- a/build.config.goldfish.arm
+++ b/build.config.goldfish.arm
@@ -10,3 +10,4 @@
 vmlinux
 System.map
 "
+STOP_SHIP_TRACEPRINTK=1
diff --git a/build.config.goldfish.arm64 b/build.config.goldfish.arm64
index 9c963cf..4c896a6 100644
--- a/build.config.goldfish.arm64
+++ b/build.config.goldfish.arm64
@@ -10,3 +10,4 @@
 vmlinux
 System.map
 "
+STOP_SHIP_TRACEPRINTK=1
diff --git a/build.config.goldfish.mips b/build.config.goldfish.mips
index 8af53d2..9a14a44 100644
--- a/build.config.goldfish.mips
+++ b/build.config.goldfish.mips
@@ -9,3 +9,4 @@
 vmlinux
 System.map
 "
+STOP_SHIP_TRACEPRINTK=1
diff --git a/build.config.goldfish.mips64 b/build.config.goldfish.mips64
index 2a33d36..6ad9759 100644
--- a/build.config.goldfish.mips64
+++ b/build.config.goldfish.mips64
@@ -9,3 +9,4 @@
 vmlinux
 System.map
 "
+STOP_SHIP_TRACEPRINTK=1
diff --git a/build.config.goldfish.x86 b/build.config.goldfish.x86
index f86253f..2266c62 100644
--- a/build.config.goldfish.x86
+++ b/build.config.goldfish.x86
@@ -10,3 +10,4 @@
 vmlinux
 System.map
 "
+STOP_SHIP_TRACEPRINTK=1
diff --git a/build.config.goldfish.x86_64 b/build.config.goldfish.x86_64
index e173886..08c42c2 100644
--- a/build.config.goldfish.x86_64
+++ b/build.config.goldfish.x86_64
@@ -10,3 +10,4 @@
 vmlinux
 System.map
 "
+STOP_SHIP_TRACEPRINTK=1
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index ca50eeb..b5953f1 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -157,16 +157,16 @@
 	void *private;
 	int err;
 
-	/* If caller uses non-allowed flag, return error. */
-	if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
-		return -EINVAL;
-
 	if (sock->state == SS_CONNECTED)
 		return -EINVAL;
 
 	if (addr_len != sizeof(*sa))
 		return -EINVAL;
 
+	/* If caller uses non-allowed flag, return error. */
+	if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
+		return -EINVAL;
+
 	sa->salg_type[sizeof(sa->salg_type) - 1] = 0;
 	sa->salg_name[sizeof(sa->salg_name) - 1] = 0;
 
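
The af_alg.c reorder makes alg_bind() validate the socket state and addr_len before it reads salg_feat and salg_mask out of the sockaddr, so the buffer is only interpreted once its size is known to be correct. A hedged userspace sketch of that ordering (struct alg_addr and bind_check() are illustrative, not the kernel structures):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct alg_addr {
	uint32_t feat;
	uint32_t mask;
	char name[56];
};

/* Structural checks (length) come first; only then are the fields read
 * and compared against the allowed flag set. */
static int bind_check(const void *buf, size_t len, uint32_t allowed)
{
	struct alg_addr sa;

	if (len != sizeof(sa))
		return -EINVAL;		/* wrong size: never look at the fields */

	memcpy(&sa, buf, sizeof(sa));
	if ((sa.feat & ~allowed) || (sa.mask & ~allowed))
		return -EINVAL;		/* caller used a flag we do not support */
	return 0;
}

int main(void)
{
	struct alg_addr sa = { 0, 0, "sha256" };

	printf("%d\n", bind_check(&sa, sizeof(sa), 0));
	printf("%d\n", bind_check(&sa, 4, 0));	/* short buffer rejected up front */
	return 0;
}
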
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 14402ef..90d73a2 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -91,13 +91,14 @@
 
 	if (nbytes && walk->offset & alignmask && !err) {
 		walk->offset = ALIGN(walk->offset, alignmask + 1);
-		walk->data += walk->offset;
-
 		nbytes = min(nbytes,
 			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
 		walk->entrylen -= nbytes;
 
-		return nbytes;
+		if (nbytes) {
+			walk->data += walk->offset;
+			return nbytes;
+		}
 	}
 
 	if (walk->flags & CRYPTO_ALG_ASYNC)
diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c
index f6a009d..52e5ea3 100644
--- a/crypto/asymmetric_keys/pkcs7_trust.c
+++ b/crypto/asymmetric_keys/pkcs7_trust.c
@@ -106,6 +106,7 @@
 		pr_devel("sinfo %u: Direct signer is key %x\n",
 			 sinfo->index, key_serial(key));
 		x509 = NULL;
+		sig = sinfo->sig;
 		goto matched;
 	}
 	if (PTR_ERR(key) != -ENOKEY)
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index 029f705..ce2df8c 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -102,6 +102,7 @@
 		}
 	}
 
+	ret = -ENOMEM;
 	cert->pub->key = kmemdup(ctx->key, ctx->key_size, GFP_KERNEL);
 	if (!cert->pub->key)
 		goto error_decode;
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index f83de99..56bd612 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -62,9 +62,6 @@
 	dma_addr_t dma_dest[2];
 	int src_off = 0;
 
-	if (submit->flags & ASYNC_TX_FENCE)
-		dma_flags |= DMA_PREP_FENCE;
-
 	while (src_cnt > 0) {
 		submit->flags = flags_orig;
 		pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
@@ -83,6 +80,8 @@
 			if (cb_fn_orig)
 				dma_flags |= DMA_PREP_INTERRUPT;
 		}
+		if (submit->flags & ASYNC_TX_FENCE)
+			dma_flags |= DMA_PREP_FENCE;
 
 		/* Drivers force forward progress in case they can not provide
 		 * a descriptor
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 942ddff..4bb5f93 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1134,8 +1134,10 @@
 	if (!drbg)
 		return;
 	kzfree(drbg->Vbuf);
+	drbg->Vbuf = NULL;
 	drbg->V = NULL;
 	kzfree(drbg->Cbuf);
+	drbg->Cbuf = NULL;
 	drbg->C = NULL;
 	kzfree(drbg->scratchpadbuf);
 	drbg->scratchpadbuf = NULL;
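
The drbg.c hunk NULLs Vbuf and Cbuf right after kzfree() so that a repeated teardown, for instance from an error path, sees NULL and does nothing instead of freeing the same buffer twice. A small userspace helper expressing the same idiom; zfree() is an illustrative name:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Scrub the buffer, free it, and NULL the caller's pointer so a second
 * teardown is a harmless no-op rather than a double free. */
static void zfree(void **pp, size_t len)
{
	if (*pp) {
		memset(*pp, 0, len);	/* wipe key material, like kzfree() */
		free(*pp);
		*pp = NULL;
	}
}

int main(void)
{
	void *v = malloc(32);

	zfree(&v, 32);
	zfree(&v, 32);	/* second call is safe: pointer is already NULL */
	printf("v=%p\n", v);
	return 0;
}
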
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index eb76a4c..8ce203f 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -109,6 +109,7 @@
 		cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
 	if (cpumask_empty(tmp)) {
 		mutex_unlock(&round_robin_lock);
+		free_cpumask_var(tmp);
 		return;
 	}
 	for_each_cpu(cpu, tmp) {
@@ -126,6 +127,8 @@
 	mutex_unlock(&round_robin_lock);
 
 	set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
+
+	free_cpumask_var(tmp);
 }
 
 static void exit_round_robin(unsigned int tsk_index)
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index c5557d0..667dc5c 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -87,8 +87,8 @@
 static bool device_id_scheme = false;
 module_param(device_id_scheme, bool, 0444);
 
-static bool only_lcd = false;
-module_param(only_lcd, bool, 0444);
+static int only_lcd = -1;
+module_param(only_lcd, int, 0444);
 
 static int register_count;
 static DEFINE_MUTEX(register_count_mutex);
@@ -2069,6 +2069,25 @@
 	return opregion;
 }
 
+static bool dmi_is_desktop(void)
+{
+	const char *chassis_type;
+
+	chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
+	if (!chassis_type)
+		return false;
+
+	if (!strcmp(chassis_type, "3") || /*  3: Desktop */
+	    !strcmp(chassis_type, "4") || /*  4: Low Profile Desktop */
+	    !strcmp(chassis_type, "5") || /*  5: Pizza Box */
+	    !strcmp(chassis_type, "6") || /*  6: Mini Tower */
+	    !strcmp(chassis_type, "7") || /*  7: Tower */
+	    !strcmp(chassis_type, "11"))  /* 11: Main Server Chassis */
+		return true;
+
+	return false;
+}
+
 int acpi_video_register(void)
 {
 	int ret = 0;
@@ -2082,6 +2101,20 @@
 		goto leave;
 	}
 
+	/*
+	 * We're seeing a lot of bogus backlight interfaces on newer machines
+	 * without an LCD such as desktops, servers and HDMI sticks. Checking
+	 * the lcd flag fixes this, so enable this on any machines which are
+	 * win8 ready (where we also prefer the native backlight driver, so
+	 * normally the acpi_video code should not register there anyways).
+	 */
+	if (only_lcd == -1) {
+		if (dmi_is_desktop() && acpi_osi_is_win8())
+			only_lcd = true;
+		else
+			only_lcd = false;
+	}
+
 	dmi_check_system(video_dmi_table);
 
 	ret = acpi_bus_register_driver(&acpi_video_bus);
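
only_lcd above becomes a tri-state: -1 means "decide automatically at registration time", while 0 and 1 are explicit overrides from the module parameter. The sketch below shows the same auto/override idiom in plain C; looks_like_desktop() merely stands in for the DMI chassis-type probe.

#include <stdbool.h>
#include <stdio.h>

static int only_lcd = -1;	/* -1 = auto, 0 = force off, 1 = force on */

static bool looks_like_desktop(void)
{
	return true;		/* placeholder for the real DMI check */
}

static void resolve_only_lcd(void)
{
	if (only_lcd == -1)		/* no user override given */
		only_lcd = looks_like_desktop();	/* choose a default now */
}

int main(void)
{
	resolve_only_lcd();
	printf("only_lcd=%d\n", only_lcd);
	return 0;
}
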
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index 80fc0b9..f362841 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -204,6 +204,7 @@
 	u32 fixed_status;
 	u32 fixed_enable;
 	u32 i;
+	acpi_status status;
 
 	ACPI_FUNCTION_NAME(ev_fixed_event_detect);
 
@@ -211,8 +212,12 @@
 	 * Read the fixed feature status and enable registers, as all the cases
 	 * depend on their values. Ignore errors here.
 	 */
-	(void)acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status);
-	(void)acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable);
+	status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status);
+	status |=
+	    acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable);
+	if (ACPI_FAILURE(status)) {
+		return (int_status);
+	}
 
 	ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
 			  "Fixed Event Block: Enable %08X Status %08X\n",
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 9179e9a..35a2d9e 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -180,6 +180,12 @@
 
 	ACPI_FUNCTION_TRACE(acpi_enable_event);
 
+	/* If Hardware Reduced flag is set, there are no fixed events */
+
+	if (acpi_gbl_reduced_hardware) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
 	/* Decode the Fixed Event */
 
 	if (event > ACPI_EVENT_MAX) {
@@ -237,6 +243,12 @@
 
 	ACPI_FUNCTION_TRACE(acpi_disable_event);
 
+	/* If Hardware Reduced flag is set, there are no fixed events */
+
+	if (acpi_gbl_reduced_hardware) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
 	/* Decode the Fixed Event */
 
 	if (event > ACPI_EVENT_MAX) {
@@ -290,6 +302,12 @@
 
 	ACPI_FUNCTION_TRACE(acpi_clear_event);
 
+	/* If Hardware Reduced flag is set, there are no fixed events */
+
+	if (acpi_gbl_reduced_hardware) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
 	/* Decode the Fixed Event */
 
 	if (event > ACPI_EVENT_MAX) {
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index 5d59cfc..c5d6701 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -308,6 +308,14 @@
 		/* Map AE_CTRL_RETURN_VALUE to AE_OK, we are done with it */
 
 		status = AE_OK;
+	} else if (ACPI_FAILURE(status)) {
+
+		/* If return_object exists, delete it */
+
+		if (info->return_object) {
+			acpi_ut_remove_reference(info->return_object);
+			info->return_object = NULL;
+		}
 	}
 
 	ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index db0e903..ac2e8df 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -121,6 +121,9 @@
 			     (u32)(aml_offset +
 				   sizeof(struct acpi_table_header)));
 
+			ACPI_ERROR((AE_INFO,
+				    "Aborting disassembly, AML byte code is corrupt"));
+
 			/* Dump the context surrounding the invalid opcode */
 
 			acpi_ut_dump_buffer(((u8 *)walk_state->parser_state.
@@ -129,6 +132,14 @@
 					     sizeof(struct acpi_table_header) -
 					     16));
 			acpi_os_printf(" */\n");
+
+			/*
+			 * Just abort the disassembly, cannot continue because the
+			 * parser is essentially lost. The disassembler can then
+			 * randomly fail because an ill-constructed parse tree
+			 * can result.
+			 */
+			return_ACPI_STATUS(AE_AML_BAD_OPCODE);
 #endif
 		}
 
@@ -293,6 +304,9 @@
 	if (status == AE_CTRL_PARSE_CONTINUE) {
 		return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
 	}
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
 
 	/* Create Op structure and append to parent's argument list */
 
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index b4a2c31..7b665aa 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1518,7 +1518,7 @@
 	}
 
 	acpi_handle_info(ec->handle,
-			 "GPE=0x%lx, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
+			 "GPE=0x%x, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
 			 ec->gpe, ec->command_addr, ec->data_addr);
 	return ret;
 }
diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
index 6c7dd7a..dd70d6c 100644
--- a/drivers/acpi/ec_sys.c
+++ b/drivers/acpi/ec_sys.c
@@ -128,7 +128,7 @@
 		return -ENOMEM;
 	}
 
-	if (!debugfs_create_x32("gpe", 0444, dev_dir, (u32 *)&first_ec->gpe))
+	if (!debugfs_create_x32("gpe", 0444, dev_dir, &first_ec->gpe))
 		goto error;
 	if (!debugfs_create_bool("use_global_lock", 0444, dev_dir,
 				 &first_ec->global_lock))
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 08b3ca0..b012e94 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -158,7 +158,7 @@
    -------------------------------------------------------------------------- */
 struct acpi_ec {
 	acpi_handle handle;
-	unsigned long gpe;
+	u32 gpe;
 	unsigned long command_addr;
 	unsigned long data_addr;
 	bool global_lock;
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index b1815b2..3874eec 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -967,8 +967,11 @@
 	if (nd_desc) {
 		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
 
+		mutex_lock(&acpi_desc->init_mutex);
 		rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
-				(work_busy(&acpi_desc->work)) ? "+\n" : "\n");
+				work_busy(&acpi_desc->work)
+				&& !acpi_desc->cancel ? "+\n" : "\n");
+		mutex_unlock(&acpi_desc->init_mutex);
 	}
 	device_unlock(dev);
 	return rc;
@@ -2547,15 +2550,21 @@
 static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
 {
 	struct nfit_spa *nfit_spa;
-	int rc;
 
-	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
-		if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) {
-			/* BLK regions don't need to wait for ars results */
-			rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
-			if (rc)
-				return rc;
-		}
+	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
+		int rc, type = nfit_spa_type(nfit_spa->spa);
+
+		/* PMEM and VMEM will be registered by the ARS workqueue */
+		if (type == NFIT_SPA_PM || type == NFIT_SPA_VOLATILE)
+			continue;
+		/* BLK apertures belong to BLK region registration below */
+		if (type == NFIT_SPA_BDW)
+			continue;
+		/* BLK regions don't need to wait for ARS results */
+		rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
+		if (rc)
+			return rc;
+	}
 
 	queue_work(nfit_wq, &acpi_desc->work);
 	return 0;
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index bb01dea..9825780 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -161,7 +161,7 @@
 {
 	int ret;
 
-	if (ignore_ppc) {
+	if (ignore_ppc || !pr->performance) {
 		/*
 		 * Only when it is notification event, the _OST object
 		 * will be evaluated. Otherwise it is skipped.
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index cf725d5..145dcf2 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1422,6 +1422,8 @@
 	device_initialize(&device->dev);
 	dev_set_uevent_suppress(&device->dev, true);
 	acpi_init_coherency(device);
+	/* Assume there are unmet deps until acpi_device_dep_initialize() runs */
+	device->dep_unmet = 1;
 }
 
 void acpi_device_add_finalize(struct acpi_device *device)
@@ -1445,6 +1447,14 @@
 	}
 
 	acpi_init_device_object(device, handle, type, sta);
+	/*
+	 * For ACPI_BUS_TYPE_DEVICE getting the status is delayed till here so
+	 * that we can call acpi_bus_get_status() and use its quirk handling.
+	 * Note this must be done before the get power-/wakeup_dev-flags calls.
+	 */
+	if (type == ACPI_BUS_TYPE_DEVICE)
+		acpi_bus_get_status(device);
+
 	acpi_bus_get_power_flags(device);
 	acpi_bus_get_wakeup_device_flags(device);
 
@@ -1517,9 +1527,11 @@
 			return -ENODEV;
 
 		*type = ACPI_BUS_TYPE_DEVICE;
-		status = acpi_bus_get_status_handle(handle, sta);
-		if (ACPI_FAILURE(status))
-			*sta = 0;
+		/*
+		 * acpi_add_single_object() updates this once we have an acpi_device
+		 * so that acpi_bus_get_status' quirk handling can be used.
+		 */
+		*sta = 0;
 		break;
 	case ACPI_TYPE_PROCESSOR:
 		*type = ACPI_BUS_TYPE_PROCESSOR;
@@ -1621,6 +1633,8 @@
 	acpi_status status;
 	int i;
 
+	adev->dep_unmet = 0;
+
 	if (!acpi_has_method(adev->handle, "_DEP"))
 		return;
 
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 02ded25..cdc4737 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -214,6 +214,15 @@
 		},
 	},
 	{
+	 /* https://bugzilla.redhat.com/show_bug.cgi?id=1557060 */
+	 .callback = video_detect_force_video,
+	 .ident = "SAMSUNG 670Z5E",
+	 .matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+		DMI_MATCH(DMI_PRODUCT_NAME, "670Z5E"),
+		},
+	},
+	{
 	 /* https://bugzilla.redhat.com/show_bug.cgi?id=1094948 */
 	 .callback = video_detect_force_video,
 	 .ident = "SAMSUNG 730U3E/740U3E",
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 7394aac..93888cc 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -69,11 +69,12 @@
 				    struct device_attribute *attr, char *buf)
 {
 	struct amba_device *dev = to_amba_device(_dev);
+	ssize_t len;
 
-	if (!dev->driver_override)
-		return 0;
-
-	return sprintf(buf, "%s\n", dev->driver_override);
+	device_lock(_dev);
+	len = sprintf(buf, "%s\n", dev->driver_override);
+	device_unlock(_dev);
+	return len;
 }
 
 static ssize_t driver_override_store(struct device *_dev,
@@ -81,7 +82,7 @@
 				     const char *buf, size_t count)
 {
 	struct amba_device *dev = to_amba_device(_dev);
-	char *driver_override, *old = dev->driver_override, *cp;
+	char *driver_override, *old, *cp;
 
 	/* We need to keep extra room for a newline */
 	if (count >= (PAGE_SIZE - 1))
@@ -95,12 +96,15 @@
 	if (cp)
 		*cp = '\0';
 
+	device_lock(_dev);
+	old = dev->driver_override;
 	if (strlen(driver_override)) {
 		dev->driver_override = driver_override;
 	} else {
 	       kfree(driver_override);
 	       dev->driver_override = NULL;
 	}
+	device_unlock(_dev);
 
 	kfree(old);
 
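
The amba/bus.c change takes the device lock around both reading and replacing driver_override, so a concurrent reader can never see a string a writer is about to free, and two writers cannot free the same buffer. A toy pthread analog of that store/show pairing; struct toy_dev and the function names are illustrative.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_dev {
	pthread_mutex_t lock;
	char *driver_override;
};

/* Swap the pointer under the lock; free the old string only after it is
 * unreachable from the device. */
static void override_store(struct toy_dev *dev, const char *buf)
{
	char *new = *buf ? strdup(buf) : NULL;
	char *old;

	pthread_mutex_lock(&dev->lock);
	old = dev->driver_override;
	dev->driver_override = new;
	pthread_mutex_unlock(&dev->lock);

	free(old);
}

static void override_show(struct toy_dev *dev, char *buf, size_t len)
{
	pthread_mutex_lock(&dev->lock);
	snprintf(buf, len, "%s\n",
		 dev->driver_override ? dev->driver_override : "");
	pthread_mutex_unlock(&dev->lock);
}

int main(void)
{
	struct toy_dev dev = { PTHREAD_MUTEX_INITIALIZER, NULL };
	char buf[64];

	override_store(&dev, "some-driver");
	override_show(&dev, buf, sizeof(buf));
	printf("%s", buf);
	override_store(&dev, "");	/* empty string clears the override */
	return 0;
}
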
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index e7e4560..957eb3c 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -3001,6 +3001,14 @@
 			else
 				return_error = BR_DEAD_REPLY;
 			mutex_unlock(&context->context_mgr_node_lock);
+			if (target_node && target_proc == proc) {
+				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
+						  proc->pid, thread->pid);
+				return_error = BR_FAILED_REPLY;
+				return_error_param = -EINVAL;
+				return_error_line = __LINE__;
+				goto err_invalid_target_handle;
+			}
 		}
 		if (!target_node) {
 			/*
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index aaa761b..cd2eab6 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -514,8 +514,9 @@
 
 	irq = platform_get_irq(pdev, 0);
 	if (irq <= 0) {
-		dev_err(dev, "no irq\n");
-		return -EINVAL;
+		if (irq != -EPROBE_DEFER)
+			dev_err(dev, "no irq\n");
+		return irq;
 	}
 
 	hpriv->irq = irq;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index e08c09f..0e2c0ac 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4366,6 +4366,10 @@
 	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
 	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ, },
 
+	/* Some Sandisk SSDs lock up hard with NCQ enabled.  Reported on
+	   SD7SN6S256G and SD8SN8U256G */
+	{ "SanDisk SD[78]SN*G",	NULL,		ATA_HORKAGE_NONCQ, },
+
 	/* devices which puke on READ_NATIVE_MAX */
 	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
 	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
@@ -4422,7 +4426,12 @@
 						ATA_HORKAGE_ZERO_AFTER_TRIM |
 						ATA_HORKAGE_NOLPM, },
 
+	/* Sandisk devices which are known to not handle LPM well */
+	{ "SanDisk SD7UB3Q*G1001",	NULL,	ATA_HORKAGE_NOLPM, },
+
 	/* devices that don't properly handle queued TRIM commands */
+	{ "Micron_M500IT_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM, },
 	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
 	{ "Crucial_CT*M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 9babbc8..fb2c00f 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -4156,7 +4156,7 @@
 #ifdef ATA_DEBUG
 	struct scsi_device *scsidev = cmd->device;
 
-	DPRINTK("CDB (%u:%d,%d,%d) %9ph\n",
+	DPRINTK("CDB (%u:%d,%d,%lld) %9ph\n",
 		ap->print_id,
 		scsidev->channel, scsidev->id, scsidev->lun,
 		cmd->cmnd);
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index d3dc954..81bfeec 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -23,6 +23,7 @@
 #include <linux/bitops.h>
 #include <linux/wait.h>
 #include <linux/slab.h>
+#include <linux/nospec.h>
 #include <asm/byteorder.h>
 #include <asm/string.h>
 #include <asm/io.h>
@@ -1458,6 +1459,8 @@
 					return -EFAULT;
 				if (pool < 0 || pool > ZATM_LAST_POOL)
 					return -EINVAL;
+				pool = array_index_nospec(pool,
+							  ZATM_LAST_POOL + 1);
 				spin_lock_irqsave(&zatm_dev->lock, flags);
 				info = zatm_dev->pool_info[pool];
 				if (cmd == ZATM_GETPOOLZ) {
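
The zatm.c hunk is the usual Spectre-v1 hardening: after the architectural bounds check, the pool index is clamped with array_index_nospec() so a mispredicted check cannot steer a speculative out-of-bounds load. The userspace sketch below loosely mirrors the generic fallback in include/linux/nospec.h and assumes two's-complement arithmetic right shift, as the kernel does.

#include <stddef.h>
#include <stdio.h>

#define NPOOLS 8

/* Branchless clamp: returns idx unchanged when idx < size and 0
 * otherwise, without a conditional branch the CPU could mispredict. */
static size_t index_nospec(size_t idx, size_t size)
{
	size_t mask = (size_t)(~(long)(idx | (size - 1 - idx)) >>
			       (sizeof(long) * 8 - 1));
	return idx & mask;
}

int main(void)
{
	int pool_info[NPOOLS] = { 0 };
	size_t pool = 5;

	if (pool >= NPOOLS)		/* the existing range check in the ioctl */
		return 1;
	pool = index_nospec(pool, NPOOLS);
	printf("%d\n", pool_info[pool]);
	return 0;
}
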
diff --git a/drivers/base/core.c b/drivers/base/core.c
index ac43d6f..4272868 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -876,8 +876,8 @@
 	struct device_node *of_node = dev_of_node(dev);
 	int error;
 
-	if (of_node) {
-		error = sysfs_create_link(&dev->kobj, &of_node->kobj,"of_node");
+	if (of_node && of_node_kobj(of_node)) {
+		error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node");
 		if (error)
 			dev_warn(dev, "Error %d creating of_node link\n",error);
 		/* An error here doesn't warrant bringing down the device */
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 4fe86f7..f9e1010 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -610,14 +610,22 @@
 	return sprintf(buf, "Not affected\n");
 }
 
+ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
+					  struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "Not affected\n");
+}
+
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
 static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
+static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
 
 static struct attribute *cpu_root_vulnerabilities_attrs[] = {
 	&dev_attr_meltdown.attr,
 	&dev_attr_spectre_v1.attr,
 	&dev_attr_spectre_v2.attr,
+	&dev_attr_spec_store_bypass.attr,
 	NULL
 };
 
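
cpu_show_spec_store_bypass() above follows the existing pattern for the vulnerabilities directory: a __weak default that reports "Not affected", which an architecture overrides by supplying a strong definition. The same weak-symbol idiom in userspace C (GCC/Clang on ELF targets):

#include <stdio.h>

/* Weak default; any strong definition of the same symbol elsewhere in
 * the program replaces it at link time. */
__attribute__((weak)) const char *spec_store_bypass_status(void)
{
	return "Not affected";
}

int main(void)
{
	printf("spec_store_bypass: %s\n", spec_store_bypass_status());
	return 0;
}
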
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 3331ed8..18228ba 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -98,7 +98,7 @@
 	int ret;
 	unsigned int val;
 
-	if (map->cache == REGCACHE_NONE)
+	if (map->cache_type == REGCACHE_NONE)
 		return false;
 
 	if (!map->cache_ops)
@@ -1736,7 +1736,7 @@
 		return -EINVAL;
 	if (val_len % map->format.val_bytes)
 		return -EINVAL;
-	if (map->max_raw_write && map->max_raw_write > val_len)
+	if (map->max_raw_write && map->max_raw_write < val_len)
 		return -E2BIG;
 
 	map->lock(map->lock_arg);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 68bfcef..ff1c4d7 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -612,6 +612,9 @@
  */
 static int loop_flush(struct loop_device *lo)
 {
+	/* loop not yet configured, no running thread, nothing to flush */
+	if (lo->lo_state != Lo_bound)
+		return 0;
 	return loop_switch(lo, NULL);
 }
 
@@ -1107,11 +1110,15 @@
 	if (info->lo_encrypt_type) {
 		unsigned int type = info->lo_encrypt_type;
 
-		if (type >= MAX_LO_CRYPT)
-			return -EINVAL;
+		if (type >= MAX_LO_CRYPT) {
+			err = -EINVAL;
+			goto exit;
+		}
 		xfer = xfer_funcs[type];
-		if (xfer == NULL)
-			return -EINVAL;
+		if (xfer == NULL) {
+			err = -EINVAL;
+			goto exit;
+		}
 	} else
 		xfer = NULL;
 
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index b86273f..3cfd879 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -169,25 +169,6 @@
 	return false; /* device present */
 }
 
-/* we have to use runtime tag to setup command header */
-static void mtip_init_cmd_header(struct request *rq)
-{
-	struct driver_data *dd = rq->q->queuedata;
-	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
-	u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64;
-
-	/* Point the command headers at the command tables. */
-	cmd->command_header = dd->port->command_list +
-				(sizeof(struct mtip_cmd_hdr) * rq->tag);
-	cmd->command_header_dma = dd->port->command_list_dma +
-				(sizeof(struct mtip_cmd_hdr) * rq->tag);
-
-	if (host_cap_64)
-		cmd->command_header->ctbau = __force_bit2int cpu_to_le32((cmd->command_dma >> 16) >> 16);
-
-	cmd->command_header->ctba = __force_bit2int cpu_to_le32(cmd->command_dma & 0xFFFFFFFF);
-}
-
 static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
 {
 	struct request *rq;
@@ -199,9 +180,6 @@
 	if (IS_ERR(rq))
 		return NULL;
 
-	/* Internal cmd isn't submitted via .queue_rq */
-	mtip_init_cmd_header(rq);
-
 	return blk_mq_rq_to_pdu(rq);
 }
 
@@ -3833,8 +3811,6 @@
 	struct request *rq = bd->rq;
 	int ret;
 
-	mtip_init_cmd_header(rq);
-
 	if (unlikely(mtip_check_unal_depth(hctx, rq)))
 		return BLK_MQ_RQ_QUEUE_BUSY;
 
@@ -3866,6 +3842,7 @@
 {
 	struct driver_data *dd = data;
 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
+	u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64;
 
 	/*
 	 * For flush requests, request_idx starts at the end of the
@@ -3882,6 +3859,17 @@
 
 	memset(cmd->command, 0, CMD_DMA_ALLOC_SZ);
 
+	/* Point the command headers at the command tables. */
+	cmd->command_header = dd->port->command_list +
+				(sizeof(struct mtip_cmd_hdr) * request_idx);
+	cmd->command_header_dma = dd->port->command_list_dma +
+				(sizeof(struct mtip_cmd_hdr) * request_idx);
+
+	if (host_cap_64)
+		cmd->command_header->ctbau = __force_bit2int cpu_to_le32((cmd->command_dma >> 16) >> 16);
+
+	cmd->command_header->ctba = __force_bit2int cpu_to_le32(cmd->command_dma & 0xFFFFFFFF);
+
 	sg_init_table(cmd->sg, MTIP_MAX_SG);
 	return 0;
 }
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 9336236..8474a1b 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -230,6 +230,8 @@
 	struct pcd_unit *cd = bdev->bd_disk->private_data;
 	int ret;
 
+	check_disk_change(bdev);
+
 	mutex_lock(&pcd_mutex);
 	ret = cdrom_open(&cd->info, bdev, mode);
 	mutex_unlock(&pcd_mutex);
diff --git a/drivers/bluetooth/bluetooth-power.c b/drivers/bluetooth/bluetooth-power.c
index f927756..7f7c942 100644
--- a/drivers/bluetooth/bluetooth-power.c
+++ b/drivers/bluetooth/bluetooth-power.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009-2010, 2013-2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2009-2010, 2013-2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -43,6 +43,7 @@
 static const struct of_device_id bt_power_match_table[] = {
 	{	.compatible = "qca,ar3002" },
 	{	.compatible = "qca,qca6174" },
+	{	.compatible = "qca,qca9379" },
 	{	.compatible = "qca,wcn3990" },
 	{}
 };
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index f8ba5c7..bff67c5 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -217,6 +217,7 @@
 	{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
@@ -249,7 +250,6 @@
 	{ USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
 
 	/* QCA ROME chipset */
-	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_QCA_ROME },
 	{ USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
 	{ USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME },
 	{ USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME },
@@ -345,6 +345,9 @@
 	{ USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK },
 	{ USB_DEVICE(0x13d3, 0x3494), .driver_info = BTUSB_REALTEK },
 
+	/* Additional Realtek 8723BU Bluetooth devices */
+	{ USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK },
+
 	/* Additional Realtek 8821AE Bluetooth devices */
 	{ USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
 	{ USB_DEVICE(0x13d3, 0x3414), .driver_info = BTUSB_REALTEK },
@@ -352,6 +355,9 @@
 	{ USB_DEVICE(0x13d3, 0x3461), .driver_info = BTUSB_REALTEK },
 	{ USB_DEVICE(0x13d3, 0x3462), .driver_info = BTUSB_REALTEK },
 
+	/* Additional Realtek 8822BE Bluetooth devices */
+	{ USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK },
+
 	/* Silicon Wave based devices */
 	{ USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
 
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index 72fe0a5..017c37b 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014 Broadcom Corporation
+ * Copyright (C) 2014-2017 Broadcom
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -37,8 +37,6 @@
 #define  ARB_ERR_CAP_CLEAR		(1 << 0)
 #define  ARB_ERR_CAP_STATUS_TIMEOUT	(1 << 12)
 #define  ARB_ERR_CAP_STATUS_TEA		(1 << 11)
-#define  ARB_ERR_CAP_STATUS_BS_SHIFT	(1 << 2)
-#define  ARB_ERR_CAP_STATUS_BS_MASK	0x3c
 #define  ARB_ERR_CAP_STATUS_WRITE	(1 << 1)
 #define  ARB_ERR_CAP_STATUS_VALID	(1 << 0)
 
@@ -47,7 +45,6 @@
 	ARB_ERR_CAP_CLR,
 	ARB_ERR_CAP_HI_ADDR,
 	ARB_ERR_CAP_ADDR,
-	ARB_ERR_CAP_DATA,
 	ARB_ERR_CAP_STATUS,
 	ARB_ERR_CAP_MASTER,
 };
@@ -57,7 +54,6 @@
 	[ARB_ERR_CAP_CLR]	= 0x0c4,
 	[ARB_ERR_CAP_HI_ADDR]	= -1,
 	[ARB_ERR_CAP_ADDR]	= 0x0c8,
-	[ARB_ERR_CAP_DATA]	= 0x0cc,
 	[ARB_ERR_CAP_STATUS]	= 0x0d0,
 	[ARB_ERR_CAP_MASTER]	= -1,
 };
@@ -67,7 +63,6 @@
 	[ARB_ERR_CAP_CLR]	= 0x0c8,
 	[ARB_ERR_CAP_HI_ADDR]	= -1,
 	[ARB_ERR_CAP_ADDR]	= 0x0cc,
-	[ARB_ERR_CAP_DATA]	= 0x0d0,
 	[ARB_ERR_CAP_STATUS]	= 0x0d4,
 	[ARB_ERR_CAP_MASTER]	= 0x0d8,
 };
@@ -77,7 +72,6 @@
 	[ARB_ERR_CAP_CLR]	= 0x168,
 	[ARB_ERR_CAP_HI_ADDR]	= -1,
 	[ARB_ERR_CAP_ADDR]	= 0x16c,
-	[ARB_ERR_CAP_DATA]	= 0x170,
 	[ARB_ERR_CAP_STATUS]	= 0x174,
 	[ARB_ERR_CAP_MASTER]	= 0x178,
 };
@@ -87,7 +81,6 @@
 	[ARB_ERR_CAP_CLR]	= 0x7e4,
 	[ARB_ERR_CAP_HI_ADDR]	= 0x7e8,
 	[ARB_ERR_CAP_ADDR]	= 0x7ec,
-	[ARB_ERR_CAP_DATA]	= 0x7f0,
 	[ARB_ERR_CAP_STATUS]	= 0x7f4,
 	[ARB_ERR_CAP_MASTER]	= 0x7f8,
 };
@@ -109,9 +102,13 @@
 {
 	int offset = gdev->gisb_offsets[reg];
 
-	/* return 1 if the hardware doesn't have ARB_ERR_CAP_MASTER */
-	if (offset == -1)
-		return 1;
+	if (offset < 0) {
+		/* return 1 if the hardware doesn't have ARB_ERR_CAP_MASTER */
+		if (reg == ARB_ERR_CAP_MASTER)
+			return 1;
+		else
+			return 0;
+	}
 
 	if (gdev->big_endian)
 		return ioread32be(gdev->base + offset);
@@ -119,6 +116,16 @@
 		return ioread32(gdev->base + offset);
 }
 
+static u64 gisb_read_address(struct brcmstb_gisb_arb_device *gdev)
+{
+	u64 value;
+
+	value = gisb_read(gdev, ARB_ERR_CAP_ADDR);
+	value |= (u64)gisb_read(gdev, ARB_ERR_CAP_HI_ADDR) << 32;
+
+	return value;
+}
+
 static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg)
 {
 	int offset = gdev->gisb_offsets[reg];
@@ -127,9 +134,9 @@
 		return;
 
 	if (gdev->big_endian)
-		iowrite32be(val, gdev->base + reg);
+		iowrite32be(val, gdev->base + offset);
 	else
-		iowrite32(val, gdev->base + reg);
+		iowrite32(val, gdev->base + offset);
 }
 
 static ssize_t gisb_arb_get_timeout(struct device *dev,
@@ -185,7 +192,7 @@
 					const char *reason)
 {
 	u32 cap_status;
-	unsigned long arb_addr;
+	u64 arb_addr;
 	u32 master;
 	const char *m_name;
 	char m_fmt[11];
@@ -197,10 +204,7 @@
 		return 1;
 
 	/* Read the address and master */
-	arb_addr = gisb_read(gdev, ARB_ERR_CAP_ADDR) & 0xffffffff;
-#if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
-	arb_addr |= (u64)gisb_read(gdev, ARB_ERR_CAP_HI_ADDR) << 32;
-#endif
+	arb_addr = gisb_read_address(gdev);
 	master = gisb_read(gdev, ARB_ERR_CAP_MASTER);
 
 	m_name = brcmstb_gisb_master_to_str(gdev, master);
@@ -209,7 +213,7 @@
 		m_name = m_fmt;
 	}
 
-	pr_crit("%s: %s at 0x%lx [%c %s], core: %s\n",
+	pr_crit("%s: %s at 0x%llx [%c %s], core: %s\n",
 		__func__, reason, arb_addr,
 		cap_status & ARB_ERR_CAP_STATUS_WRITE ? 'W' : 'R',
 		cap_status & ARB_ERR_CAP_STATUS_TIMEOUT ? "timeout" : "",
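
The brcmstb_gisb changes add gisb_read_address(), which always reassembles the 64-bit capture address from the LO and HI 32-bit registers instead of hiding the high half behind CONFIG_PHYS_ADDR_T_64BIT, and print it with %llx. A one-function sketch of that reassembly:

#include <stdint.h>
#include <stdio.h>

/* Combine two 32-bit register reads into the full 64-bit address. */
static uint64_t read_capture_address(uint32_t lo, uint32_t hi)
{
	return (uint64_t)lo | ((uint64_t)hi << 32);
}

int main(void)
{
	printf("0x%llx\n",
	       (unsigned long long)read_capture_address(0xdeadbeefu, 0x1u));
	return 0;
}
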
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 5d475b3..07b77fb 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1154,9 +1154,6 @@
 
 	cd_dbg(CD_OPEN, "entering cdrom_open\n");
 
-	/* open is event synchronization point, check events first */
-	check_disk_change(bdev);
-
 	/* if this was a O_NONBLOCK open and we should honor the flags,
 	 * do a quick open without drive/disc integrity checks. */
 	cdi->use_count++;
@@ -2368,7 +2365,7 @@
 	if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
 		return media_changed(cdi, 1);
 
-	if ((unsigned int)arg >= cdi->capacity)
+	if (arg >= cdi->capacity)
 		return -EINVAL;
 
 	info = kmalloc(sizeof(*info), GFP_KERNEL);
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 584bc31..e2808fe 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -497,6 +497,9 @@
 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
 {
 	int ret;
+
+	check_disk_change(bdev);
+
 	mutex_lock(&gdrom_mutex);
 	ret = cdrom_open(gd.cd_info, bdev, mode);
 	mutex_unlock(&gdrom_mutex);
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 4d734bf..d84dea6 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -1179,8 +1179,6 @@
 	hlist_del_init(&ctx->hn);
 	hlist_add_head(&ctx->hn, &clst->interrupted);
 	spin_unlock(&ctx->fl->hlock);
-	/* free the cache on power collapse */
-	fastrpc_buf_list_free(ctx->fl);
 }
 
 static void context_free(struct smq_invoke_ctx *ctx)
@@ -1567,9 +1565,18 @@
 		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
 			continue;
 
-		if (rpra && rpra[i].buf.len && ctx->overps[oix]->mstart)
-			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
-			uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
+		if (rpra && rpra[i].buf.len && ctx->overps[oix]->mstart) {
+			if (map && map->handle)
+				msm_ion_do_cache_op(ctx->fl->apps->client,
+					map->handle,
+					uint64_to_ptr(rpra[i].buf.pv),
+					rpra[i].buf.len,
+					ION_IOC_CLEAN_INV_CACHES);
+			else
+				dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
+					uint64_to_ptr(rpra[i].buf.pv
+						+ rpra[i].buf.len));
+		}
 	}
 	PERF_END);
 	for (i = bufs; rpra && i < bufs + handles; i++) {
@@ -1578,11 +1585,6 @@
 		rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
 	}
 
-	if (!ctx->fl->sctx->smmu.coherent) {
-		PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_FLUSH),
-		dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
-		PERF_END);
-	}
  bail:
 	return err;
 }
@@ -1671,14 +1673,33 @@
 		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
 				buf_page_start(rpra[i].buf.pv))
 			continue;
-		if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv)))
-			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
-				(char *)(uint64_to_ptr(rpra[i].buf.pv + 1)));
+		if (!IS_CACHE_ALIGNED((uintptr_t)
+				uint64_to_ptr(rpra[i].buf.pv))) {
+			if (map && map->handle)
+				msm_ion_do_cache_op(ctx->fl->apps->client,
+					map->handle,
+					uint64_to_ptr(rpra[i].buf.pv),
+					sizeof(uintptr_t),
+					ION_IOC_CLEAN_INV_CACHES);
+			else
+				dmac_flush_range(
+					uint64_to_ptr(rpra[i].buf.pv), (char *)
+					uint64_to_ptr(rpra[i].buf.pv + 1));
+		}
+
 		end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
 							rpra[i].buf.len);
-		if (!IS_CACHE_ALIGNED(end))
-			dmac_flush_range((char *)end,
-				(char *)end + 1);
+		if (!IS_CACHE_ALIGNED(end)) {
+			if (map && map->handle)
+				msm_ion_do_cache_op(ctx->fl->apps->client,
+						map->handle,
+						uint64_to_ptr(end),
+						sizeof(uintptr_t),
+						ION_IOC_CLEAN_INV_CACHES);
+			else
+				dmac_flush_range((char *)end,
+					(char *)end + 1);
+		}
 	}
 }
 
@@ -1687,7 +1708,6 @@
 	int i, inbufs, outbufs;
 	uint32_t sc = ctx->sc;
 	remote_arg64_t *rpra = ctx->rpra;
-	int used = ctx->used;
 
 	inbufs = REMOTE_SCALARS_INBUFS(sc);
 	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
@@ -1718,8 +1738,6 @@
 						 + rpra[i].buf.len));
 	}
 
-	if (rpra)
-		dmac_inv_range(rpra, (char *)rpra + used);
 }
 
 static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
@@ -2764,6 +2782,7 @@
 	mutex_unlock(&fl->perf_mutex);
 	mutex_destroy(&fl->perf_mutex);
 	mutex_destroy(&fl->fl_map_mutex);
+	mutex_destroy(&fl->map_mutex);
 	kfree(fl);
 	return 0;
 }
@@ -2777,7 +2796,6 @@
 			pm_qos_remove_request(&fl->pm_qos_req);
 		if (fl->debugfs_file != NULL)
 			debugfs_remove(fl->debugfs_file);
-		mutex_destroy(&fl->map_mutex);
 		fastrpc_file_free(fl);
 		file->private_data = NULL;
 	}
@@ -3259,6 +3277,28 @@
 		if (err)
 			goto bail;
 		break;
+	case FASTRPC_IOCTL_MMAP_64:
+		K_COPY_FROM_USER(err, 0, &p.mmap, param,
+						sizeof(p.mmap));
+		if (err)
+			goto bail;
+		VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
+		if (err)
+			goto bail;
+		K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap));
+		if (err)
+			goto bail;
+		break;
+	case FASTRPC_IOCTL_MUNMAP_64:
+		K_COPY_FROM_USER(err, 0, &p.munmap, param,
+						sizeof(p.munmap));
+		if (err)
+			goto bail;
+		VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
+							&p.munmap)));
+		if (err)
+			goto bail;
+		break;
 	case FASTRPC_IOCTL_MUNMAP_FD:
 		K_COPY_FROM_USER(err, 0, &p.munmap_fd, param,
 			sizeof(p.munmap_fd));
diff --git a/drivers/char/adsprpc_compat.c b/drivers/char/adsprpc_compat.c
index 0f07483..804ceda 100644
--- a/drivers/char/adsprpc_compat.c
+++ b/drivers/char/adsprpc_compat.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -39,6 +39,10 @@
 		_IOWR('R', 11, struct compat_fastrpc_ioctl_invoke_crc)
 #define COMPAT_FASTRPC_IOCTL_CONTROL \
 		_IOWR('R', 12, struct compat_fastrpc_ioctl_control)
+#define COMPAT_FASTRPC_IOCTL_MMAP_64 \
+		_IOWR('R', 14, struct compat_fastrpc_ioctl_mmap_64)
+#define COMPAT_FASTRPC_IOCTL_MUNMAP_64 \
+		_IOWR('R', 15, struct compat_fastrpc_ioctl_munmap_64)
 
 struct compat_remote_buf {
 	compat_uptr_t pv;	/* buffer pointer */
@@ -82,11 +86,24 @@
 	compat_uptr_t vaddrout;	/* dsps virtual address */
 };
 
+struct compat_fastrpc_ioctl_mmap_64 {
+	compat_int_t fd;	/* ion fd */
+	compat_uint_t flags;	/* flags for dsp to map with */
+	compat_u64 vaddrin;	/* optional virtual address */
+	compat_size_t size;	/* size */
+	compat_u64 vaddrout;	/* dsps virtual address */
+};
+
 struct compat_fastrpc_ioctl_munmap {
 	compat_uptr_t vaddrout;	/* address to unmap */
 	compat_size_t size;	/* size */
 };
 
+struct compat_fastrpc_ioctl_munmap_64 {
+	compat_u64 vaddrout;	/* address to unmap */
+	compat_size_t size;	/* size */
+};
+
 struct compat_fastrpc_ioctl_init {
 	compat_uint_t flags;	/* one of FASTRPC_INIT_* macros */
 	compat_uptr_t file;	/* pointer to elf file */
@@ -206,6 +223,28 @@
 	return err;
 }
 
+static int compat_get_fastrpc_ioctl_mmap_64(
+			struct compat_fastrpc_ioctl_mmap_64 __user *map32,
+			struct fastrpc_ioctl_mmap __user *map)
+{
+	compat_uint_t u;
+	compat_int_t i;
+	compat_size_t s;
+	compat_u64 p;
+	int err;
+
+	err = get_user(i, &map32->fd);
+	err |= put_user(i, &map->fd);
+	err |= get_user(u, &map32->flags);
+	err |= put_user(u, &map->flags);
+	err |= get_user(p, &map32->vaddrin);
+	err |= put_user(p, &map->vaddrin);
+	err |= get_user(s, &map32->size);
+	err |= put_user(s, &map->size);
+
+	return err;
+}
+
 static int compat_put_fastrpc_ioctl_mmap(
 			struct compat_fastrpc_ioctl_mmap __user *map32,
 			struct fastrpc_ioctl_mmap __user *map)
@@ -219,6 +258,19 @@
 	return err;
 }
 
+static int compat_put_fastrpc_ioctl_mmap_64(
+			struct compat_fastrpc_ioctl_mmap_64 __user *map32,
+			struct fastrpc_ioctl_mmap __user *map)
+{
+	compat_u64 p;
+	int err;
+
+	err = get_user(p, &map->vaddrout);
+	err |= put_user(p, &map32->vaddrout);
+
+	return err;
+}
+
 static int compat_get_fastrpc_ioctl_munmap(
 			struct compat_fastrpc_ioctl_munmap __user *unmap32,
 			struct fastrpc_ioctl_munmap __user *unmap)
@@ -235,6 +287,22 @@
 	return err;
 }
 
+static int compat_get_fastrpc_ioctl_munmap_64(
+			struct compat_fastrpc_ioctl_munmap_64 __user *unmap32,
+			struct fastrpc_ioctl_munmap __user *unmap)
+{
+	compat_u64 p;
+	compat_size_t s;
+	int err;
+
+	err = get_user(p, &unmap32->vaddrout);
+	err |= put_user(p, &unmap->vaddrout);
+	err |= get_user(s, &unmap32->size);
+	err |= put_user(s, &unmap->size);
+
+	return err;
+}
+
 static int compat_get_fastrpc_ioctl_perf(
 			struct compat_fastrpc_ioctl_perf __user *perf32,
 			struct fastrpc_ioctl_perf __user *perf)
@@ -355,6 +423,27 @@
 		VERIFY(err, 0 == compat_put_fastrpc_ioctl_mmap(map32, map));
 		return err;
 	}
+	case COMPAT_FASTRPC_IOCTL_MMAP_64:
+	{
+		struct compat_fastrpc_ioctl_mmap_64  __user *map32;
+		struct fastrpc_ioctl_mmap __user *map;
+		long ret;
+
+		map32 = compat_ptr(arg);
+		VERIFY(err, NULL != (map = compat_alloc_user_space(
+							sizeof(*map))));
+		if (err)
+			return -EFAULT;
+		VERIFY(err, 0 == compat_get_fastrpc_ioctl_mmap_64(map32, map));
+		if (err)
+			return err;
+		ret = filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_MMAP_64,
+							(unsigned long)map);
+		if (ret)
+			return ret;
+		VERIFY(err, 0 == compat_put_fastrpc_ioctl_mmap_64(map32, map));
+		return err;
+	}
 	case COMPAT_FASTRPC_IOCTL_MUNMAP:
 	{
 		struct compat_fastrpc_ioctl_munmap __user *unmap32;
@@ -372,6 +461,23 @@
 		return filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_MUNMAP,
 							(unsigned long)unmap);
 	}
+	case COMPAT_FASTRPC_IOCTL_MUNMAP_64:
+	{
+		struct compat_fastrpc_ioctl_munmap_64 __user *unmap32;
+		struct fastrpc_ioctl_munmap __user *unmap;
+
+		unmap32 = compat_ptr(arg);
+		VERIFY(err, NULL != (unmap = compat_alloc_user_space(
+							sizeof(*unmap))));
+		if (err)
+			return -EFAULT;
+		VERIFY(err, 0 == compat_get_fastrpc_ioctl_munmap_64(unmap32,
+							unmap));
+		if (err)
+			return err;
+		return filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_MUNMAP_64,
+							(unsigned long)unmap);
+	}
 	case COMPAT_FASTRPC_IOCTL_INIT:
 		/* fall through */
 	case COMPAT_FASTRPC_IOCTL_INIT_ATTRS:
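
The adsprpc compat shims above translate the 32-bit FASTRPC_IOCTL_MMAP_64/MUNMAP_64 layouts into the native ones field by field with get_user()/put_user(), never with a single copy, because field widths and padding differ between the two ABIs. Below is a userspace sketch of that field-wise conversion; the struct layouts and names are illustrative, not the real compat ABI.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* 32-bit caller's view of the request (sizes chosen for illustration). */
struct mmap_req32 {
	int32_t  fd;
	uint32_t flags;
	uint64_t vaddrin;
	uint32_t size;		/* compat_size_t is 32 bits wide */
	uint64_t vaddrout;
};

/* Native layout used by the handler. */
struct mmap_req {
	int      fd;
	uint32_t flags;
	uint64_t vaddrin;
	size_t   size;
	uint64_t vaddrout;
};

/* Copy input fields one by one; the output field is filled by the
 * handler and copied back separately, as the put side of the shim does. */
static void mmap_req_from_compat(struct mmap_req *req,
				 const struct mmap_req32 *req32)
{
	req->fd       = req32->fd;
	req->flags    = req32->flags;
	req->vaddrin  = req32->vaddrin;
	req->size     = req32->size;
	req->vaddrout = 0;
}

int main(void)
{
	struct mmap_req32 r32 = { .fd = 4, .flags = 0, .vaddrin = 0,
				  .size = 4096, .vaddrout = 0 };
	struct mmap_req r;

	mmap_req_from_compat(&r, &r32);
	printf("fd=%d size=%zu\n", r.fd, r.size);
	return 0;
}
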
diff --git a/drivers/char/adsprpc_shared.h b/drivers/char/adsprpc_shared.h
index de0dd01..952b87c 100644
--- a/drivers/char/adsprpc_shared.h
+++ b/drivers/char/adsprpc_shared.h
@@ -19,6 +19,8 @@
 #define FASTRPC_IOCTL_INVOKE	_IOWR('R', 1, struct fastrpc_ioctl_invoke)
 #define FASTRPC_IOCTL_MMAP	_IOWR('R', 2, struct fastrpc_ioctl_mmap)
 #define FASTRPC_IOCTL_MUNMAP	_IOWR('R', 3, struct fastrpc_ioctl_munmap)
+#define FASTRPC_IOCTL_MMAP_64	_IOWR('R', 14, struct fastrpc_ioctl_mmap_64)
+#define FASTRPC_IOCTL_MUNMAP_64	_IOWR('R', 15, struct fastrpc_ioctl_munmap_64)
 #define FASTRPC_IOCTL_INVOKE_FD	_IOWR('R', 4, struct fastrpc_ioctl_invoke_fd)
 #define FASTRPC_IOCTL_SETMODE	_IOWR('R', 5, uint32_t)
 #define FASTRPC_IOCTL_INIT	_IOWR('R', 6, struct fastrpc_ioctl_init)
@@ -204,6 +206,11 @@
 	size_t size;		/* size */
 };
 
+struct fastrpc_ioctl_munmap_64 {
+	uint64_t vaddrout;	/* address to unmap */
+	size_t size;		/* size */
+};
+
 struct fastrpc_ioctl_mmap {
 	int fd;				/* ion fd */
 	uint32_t flags;			/* flags for dsp to map with */
@@ -212,6 +219,14 @@
 	uintptr_t vaddrout;		/* dsps virtual address */
 };
 
+struct fastrpc_ioctl_mmap_64 {
+	int fd;				/* ion fd */
+	uint32_t flags;			/* flags for dsp to map with */
+	uint64_t vaddrin;		/* optional virtual address */
+	size_t size;			/* size */
+	uint64_t vaddrout;		/* dsps virtual address */
+};
+
 struct fastrpc_ioctl_munmap_fd {
 	int     fd;				/* fd */
 	uint32_t  flags;		/* control flags */
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 286418f..a089e7c 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -2713,7 +2713,7 @@
 	}
 }
 
-static int diag_dci_init_remote(void)
+int diag_dci_init_remote(void)
 {
 	int i;
 	struct dci_ops_tbl_t *temp = NULL;
@@ -2740,11 +2740,6 @@
 
 	return 0;
 }
-#else
-static int diag_dci_init_remote(void)
-{
-	return 0;
-}
 #endif
 
 static int diag_dci_init_ops_tbl(void)
@@ -2754,10 +2749,6 @@
 	err = diag_dci_init_local();
 	if (err)
 		goto err;
-	err = diag_dci_init_remote();
-	if (err)
-		goto err;
-
 	return 0;
 
 err:
diff --git a/drivers/char/diag/diag_dci.h b/drivers/char/diag/diag_dci.h
index 61eb3f5..2fb0e3f 100644
--- a/drivers/char/diag/diag_dci.h
+++ b/drivers/char/diag/diag_dci.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, 2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -323,6 +323,7 @@
 int diag_dci_write_bridge(int token, unsigned char *buf, int len);
 int diag_dci_write_done_bridge(int index, unsigned char *buf, int len);
 int diag_dci_send_handshake_pkt(int index);
+int diag_dci_init_remote(void);
 #endif
 
 #endif
diff --git a/drivers/char/diag/diag_ipc_logging.h b/drivers/char/diag/diag_ipc_logging.h
index b9958a4..4b8dd1b 100644
--- a/drivers/char/diag/diag_ipc_logging.h
+++ b/drivers/char/diag/diag_ipc_logging.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -24,6 +24,7 @@
 #define DIAG_DEBUG_MASKS	0x0010
 #define DIAG_DEBUG_POWER	0x0020
 #define DIAG_DEBUG_BRIDGE	0x0040
+#define DIAG_DEBUG_CONTROL	0x0080
 
 #define DIAG_DEBUG
 
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index b7d56a9..24763fe 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -922,10 +922,12 @@
 	mask_info->status = (req->rt_mask) ? DIAG_CTRL_MASK_ALL_ENABLED :
 					   DIAG_CTRL_MASK_ALL_DISABLED;
 	for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
-		mutex_lock(&mask->lock);
-		memset(mask->ptr, req->rt_mask,
-		       mask->range * sizeof(uint32_t));
-		mutex_unlock(&mask->lock);
+		if (mask && mask->ptr) {
+			mutex_lock(&mask->lock);
+			memset(mask->ptr, req->rt_mask,
+			       mask->range * sizeof(uint32_t));
+			mutex_unlock(&mask->lock);
+		}
 	}
 	mutex_unlock(&driver->msg_mask_lock);
 	mutex_unlock(&mask_info->lock);
@@ -1337,6 +1339,8 @@
 
 	mutex_lock(&mask_info->lock);
 	for (i = 0; i < MAX_EQUIP_ID && !status; i++, mask++) {
+		if (!mask || !mask->ptr)
+			continue;
 		if (mask->equip_id != req->equip_id)
 			continue;
 		mutex_lock(&mask->lock);
@@ -1464,9 +1468,11 @@
 		return -EINVAL;
 	}
 	for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
-		mutex_lock(&mask->lock);
-		memset(mask->ptr, 0, mask->range);
-		mutex_unlock(&mask->lock);
+		if (mask && mask->ptr) {
+			mutex_lock(&mask->lock);
+			memset(mask->ptr, 0, mask->range);
+			mutex_unlock(&mask->lock);
+		}
 	}
 	mask_info->status = DIAG_CTRL_MASK_ALL_DISABLED;
 	mutex_unlock(&driver->md_session_lock);
@@ -2189,9 +2195,12 @@
 		if (driver->time_sync_enabled)
 			diag_send_time_sync_update(peripheral);
 		mutex_lock(&driver->md_session_lock);
-		diag_send_msg_mask_update(peripheral, ALL_SSID, ALL_SSID);
-		diag_send_log_mask_update(peripheral, ALL_EQUIP_ID);
-		diag_send_event_mask_update(peripheral);
+		if (driver->set_mask_cmd) {
+			diag_send_msg_mask_update(peripheral,
+				ALL_SSID, ALL_SSID);
+			diag_send_log_mask_update(peripheral, ALL_EQUIP_ID);
+			diag_send_event_mask_update(peripheral);
+		}
 		mutex_unlock(&driver->md_session_lock);
 		diag_send_real_time_update(peripheral,
 				driver->real_time_mode[DIAG_LOCAL_PROC]);
@@ -2228,6 +2237,7 @@
 			break;
 		case DIAG_CMD_OP_SET_LOG_MASK:
 			hdlr = diag_cmd_set_log_mask;
+			driver->set_mask_cmd = 1;
 			break;
 		case DIAG_CMD_OP_GET_LOG_MASK:
 			hdlr = diag_cmd_get_log_mask;
@@ -2247,17 +2257,21 @@
 			break;
 		case DIAG_CMD_OP_SET_MSG_MASK:
 			hdlr = diag_cmd_set_msg_mask;
+			driver->set_mask_cmd = 1;
 			break;
 		case DIAG_CMD_OP_SET_ALL_MSG_MASK:
 			hdlr = diag_cmd_set_all_msg_mask;
+			driver->set_mask_cmd = 1;
 			break;
 		}
 	} else if (*buf == DIAG_CMD_GET_EVENT_MASK) {
 		hdlr = diag_cmd_get_event_mask;
 	} else if (*buf == DIAG_CMD_SET_EVENT_MASK) {
 		hdlr = diag_cmd_update_event_mask;
+		driver->set_mask_cmd = 1;
 	} else if (*buf == DIAG_CMD_EVENT_TOGGLE) {
 		hdlr = diag_cmd_toggle_events;
+		driver->set_mask_cmd = 1;
 	}
 
 	if (hdlr)
diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c
index ce0c7bb..c00fbfc 100644
--- a/drivers/char/diag/diag_memorydevice.c
+++ b/drivers/char/diag/diag_memorydevice.c
@@ -37,6 +37,7 @@
 		.ctx = 0,
 		.mempool = POOL_TYPE_MUX_APPS,
 		.num_tbl_entries = 0,
+		.md_info_inited = 0,
 		.tbl = NULL,
 		.ops = NULL,
 	},
@@ -46,6 +47,7 @@
 		.ctx = 0,
 		.mempool = POOL_TYPE_MDM_MUX,
 		.num_tbl_entries = 0,
+		.md_info_inited = 0,
 		.tbl = NULL,
 		.ops = NULL,
 	},
@@ -54,6 +56,7 @@
 		.ctx = 0,
 		.mempool = POOL_TYPE_MDM2_MUX,
 		.num_tbl_entries = 0,
+		.md_info_inited = 0,
 		.tbl = NULL,
 		.ops = NULL,
 	},
@@ -62,6 +65,7 @@
 		.ctx = 0,
 		.mempool = POOL_TYPE_QSC_MUX,
 		.num_tbl_entries = 0,
+		.md_info_inited = 0,
 		.tbl = NULL,
 		.ops = NULL,
 	}
@@ -85,6 +89,8 @@
 
 	for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
 		ch = &diag_md[i];
+		if (!ch->md_info_inited)
+			continue;
 		if (ch->ops && ch->ops->open)
 			ch->ops->open(ch->ctx, DIAG_MEMORY_DEVICE_MODE);
 	}
@@ -99,6 +105,8 @@
 
 	for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
 		ch = &diag_md[i];
+		if (!ch->md_info_inited)
+			continue;
 
 		if (ch->ops && ch->ops->close)
 			ch->ops->close(ch->ctx, DIAG_MEMORY_DEVICE_MODE);
@@ -155,7 +163,7 @@
 	mutex_unlock(&driver->md_session_lock);
 
 	ch = &diag_md[id];
-	if (!ch)
+	if (!ch || !ch->md_info_inited)
 		return -EINVAL;
 
 	spin_lock_irqsave(&ch->lock, flags);
@@ -232,6 +240,8 @@
 
 	for (i = 0; i < NUM_DIAG_MD_DEV && !err; i++) {
 		ch = &diag_md[i];
+		if (!ch->md_info_inited)
+			continue;
 		for (j = 0; j < ch->num_tbl_entries && !err; j++) {
 			entry = &ch->tbl[j];
 			if (entry->len <= 0 || entry->buf == NULL)
@@ -352,6 +362,8 @@
 		return -EINVAL;
 
 	ch = &diag_md[id];
+	if (!ch || !ch->md_info_inited)
+		return -EINVAL;
 
 	spin_lock_irqsave(&ch->lock, flags);
 	for (i = 0; i < ch->num_tbl_entries && !found; i++) {
@@ -384,7 +396,7 @@
 	int i, j;
 	struct diag_md_info *ch = NULL;
 
-	for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
+	for (i = 0; i < DIAG_MD_LOCAL_LAST; i++) {
 		ch = &diag_md[i];
 		ch->num_tbl_entries = diag_mempools[ch->mempool].poolsize;
 		ch->tbl = kzalloc(ch->num_tbl_entries *
@@ -399,6 +411,7 @@
 			ch->tbl[j].ctx = 0;
 		}
 		spin_lock_init(&(ch->lock));
+		ch->md_info_inited = 1;
 	}
 
 	return 0;
@@ -408,12 +421,54 @@
 	return -ENOMEM;
 }
 
+int diag_md_mdm_init(void)
+{
+	int i, j;
+	struct diag_md_info *ch = NULL;
+
+	for (i = DIAG_MD_BRIDGE_BASE; i < NUM_DIAG_MD_DEV; i++) {
+		ch = &diag_md[i];
+		ch->num_tbl_entries = diag_mempools[ch->mempool].poolsize;
+		ch->tbl = kcalloc(ch->num_tbl_entries, sizeof(*ch->tbl),
+				GFP_KERNEL);
+		if (!ch->tbl)
+			goto fail;
+
+		for (j = 0; j < ch->num_tbl_entries; j++) {
+			ch->tbl[j].buf = NULL;
+			ch->tbl[j].len = 0;
+			ch->tbl[j].ctx = 0;
+		}
+		spin_lock_init(&(ch->lock));
+		ch->md_info_inited = 1;
+	}
+
+	return 0;
+
+fail:
+	diag_md_mdm_exit();
+	return -ENOMEM;
+}
+
 void diag_md_exit(void)
 {
 	int i;
 	struct diag_md_info *ch = NULL;
 
-	for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
+	for (i = 0; i < DIAG_MD_LOCAL_LAST; i++) {
+		ch = &diag_md[i];
+		kfree(ch->tbl);
+		ch->num_tbl_entries = 0;
+		ch->ops = NULL;
+	}
+}
+
+void diag_md_mdm_exit(void)
+{
+	int i;
+	struct diag_md_info *ch = NULL;
+
+	for (i = DIAG_MD_BRIDGE_BASE; i < NUM_DIAG_MD_DEV; i++) {
 		ch = &diag_md[i];
 		kfree(ch->tbl);
 		ch->num_tbl_entries = 0;
diff --git a/drivers/char/diag/diag_memorydevice.h b/drivers/char/diag/diag_memorydevice.h
index 35a1ee3..4d65ded 100644
--- a/drivers/char/diag/diag_memorydevice.h
+++ b/drivers/char/diag/diag_memorydevice.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, 2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -38,6 +38,7 @@
 	int ctx;
 	int mempool;
 	int num_tbl_entries;
+	int md_info_inited;
 	spinlock_t lock;
 	struct diag_buf_tbl_t *tbl;
 	struct diag_mux_ops *ops;
@@ -46,7 +47,9 @@
 extern struct diag_md_info diag_md[NUM_DIAG_MD_DEV];
 
 int diag_md_init(void);
+int diag_md_mdm_init(void);
 void diag_md_exit(void);
+void diag_md_mdm_exit(void);
 void diag_md_open_all(void);
 void diag_md_close_all(void);
 int diag_md_register(int id, int ctx, struct diag_mux_ops *ops);
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index fc18776..9bbfb82 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -673,6 +673,7 @@
 	struct diag_mask_info *log_mask;
 	struct diag_mask_info *event_mask;
 	struct diag_mask_info *build_time_mask;
+	uint8_t set_mask_cmd;
 	uint8_t msg_mask_tbl_count;
 	uint8_t bt_msg_mask_tbl_count;
 	uint16_t event_mask_size;
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 0d3389a..a169230 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -969,6 +969,9 @@
 			poolsize_mdm_dci_write);
 	diagmem_setsize(POOL_TYPE_QSC_MUX, itemsize_qsc_usb,
 			poolsize_qsc_usb);
+	diag_md_mdm_init();
+	if (diag_dci_init_remote())
+		return -ENOMEM;
 	driver->hdlc_encode_buf = kzalloc(DIAG_MAX_HDLC_BUF_SIZE, GFP_KERNEL);
 	if (!driver->hdlc_encode_buf)
 		return -ENOMEM;
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 6247503..f1bc1c5 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -48,6 +48,7 @@
 #define STM_RSP_SUPPORTED_INDEX		7
 #define STM_RSP_STATUS_INDEX		8
 #define STM_RSP_NUM_BYTES		9
+#define RETRY_MAX_COUNT		1000
 
 struct diag_md_hdlc_reset_work {
 	int pid;
@@ -277,28 +278,22 @@
 	 * if its supporting qshrink4 feature.
 	 */
 	if (info && info->peripheral_mask) {
-		if (info->peripheral_mask == DIAG_CON_ALL ||
-			(info->peripheral_mask & (1 << APPS_DATA)) ||
-			(info->peripheral_mask & (1 << PERIPHERAL_MODEM))) {
-			rsp_ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_CMD, 1);
-		} else {
-			for (i = 0; i < NUM_MD_SESSIONS; i++) {
-				if (info->peripheral_mask & (1 << i))
-					break;
-			}
-		rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, 1);
+		for (i = 0; i < NUM_MD_SESSIONS; i++) {
+			if (info->peripheral_mask & (1 << i))
+				break;
 		}
+		rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, TYPE_CMD);
 	} else
 		rsp_ctxt = driver->rsp_buf_ctxt;
 	mutex_unlock(&driver->md_session_lock);
 
 	/*
 	 * Keep trying till we get the buffer back. It should probably
-	 * take one or two iterations. When this loops till UINT_MAX, it
+	 * take one or two iterations. When this loops till RETRY_MAX_COUNT, it
 	 * means we did not get a write complete for the previous
 	 * response.
 	 */
-	while (retry_count < UINT_MAX) {
+	while (retry_count < RETRY_MAX_COUNT) {
 		if (!driver->rsp_buf_busy)
 			break;
 		/*
@@ -376,27 +371,21 @@
 	 * if its supporting qshrink4 feature.
 	 */
 	if (info && info->peripheral_mask) {
-		if (info->peripheral_mask == DIAG_CON_ALL ||
-			(info->peripheral_mask & (1 << APPS_DATA)) ||
-			(info->peripheral_mask & (1 << PERIPHERAL_MODEM))) {
-			rsp_ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_CMD, 1);
-		} else {
-			for (i = 0; i < NUM_MD_SESSIONS; i++) {
-				if (info->peripheral_mask & (1 << i))
-					break;
-			}
-		rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, 1);
+		for (i = 0; i < NUM_MD_SESSIONS; i++) {
+			if (info->peripheral_mask & (1 << i))
+				break;
 		}
+		rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, TYPE_CMD);
 	} else
 		rsp_ctxt = driver->rsp_buf_ctxt;
 	mutex_unlock(&driver->md_session_lock);
 	/*
 	 * Keep trying till we get the buffer back. It should probably
-	 * take one or two iterations. When this loops till UINT_MAX, it
+	 * take one or two iterations. When this loops till RETRY_MAX_COUNT, it
 	 * means we did not get a write complete for the previous
 	 * response.
 	 */
-	while (retry_count < UINT_MAX) {
+	while (retry_count < RETRY_MAX_COUNT) {
 		if (!driver->rsp_buf_busy)
 			break;
 		/*
@@ -1843,14 +1832,18 @@
 		}
 		break;
 	case TYPE_CMD:
-		if (peripheral >= 0 && peripheral < NUM_PERIPHERALS) {
+		if (peripheral >= 0 && peripheral < NUM_PERIPHERALS &&
+			num != TYPE_CMD) {
 			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
 			"Marking buffer as free after write done p: %d, t: %d, buf_num: %d\n",
-				peripheral, type, num);
+			peripheral, type, num);
 			diagfwd_write_done(peripheral, type, num);
-		}
-		if (peripheral == APPS_DATA ||
-				ctxt == DIAG_MEMORY_DEVICE_MODE) {
+		} else if (peripheral == APPS_DATA ||
+			(peripheral >= 0 && peripheral < NUM_PERIPHERALS &&
+			num == TYPE_CMD)) {
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"Marking APPS response buffer free after write done for p: %d, t: %d, buf_num: %d\n",
+			peripheral, type, num);
 			spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
 			driver->rsp_buf_busy = 0;
 			driver->encoded_rsp_len = 0;
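
The diagfwd.c hunks above bound the response-buffer wait at RETRY_MAX_COUNT instead of UINT_MAX and tag APPS response buffers with TYPE_CMD so their write-done handling can be told apart. A minimal, hypothetical sketch of the bounded-wait idiom the change relies on; the names mirror the driver's, but this is illustrative, not the driver code:

#include <linux/delay.h>
#include <linux/errno.h>

#define RETRY_MAX_COUNT 1000	/* mirrors the define added above */

/* Sketch: wait for a shared response buffer to be released, giving up
 * after RETRY_MAX_COUNT iterations instead of spinning indefinitely.
 */
static int wait_for_rsp_buf(const volatile int *rsp_buf_busy)
{
	unsigned int retry_count = 0;

	while (retry_count < RETRY_MAX_COUNT) {
		if (!*rsp_buf_busy)
			return 0;		/* buffer came back */
		usleep_range(10000, 10100);	/* let the write complete */
		retry_count++;
	}
	return -EAGAIN;	/* no write-complete for the previous response */
}
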
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index 8d47ee38..ac4394b 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -45,8 +45,11 @@
 
 void diag_cntl_channel_open(struct diagfwd_info *p_info)
 {
-	if (!p_info)
+	if (!p_info) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid fwd_info structure\n");
 		return;
+	}
 	driver->mask_update |= PERIPHERAL_MASK(p_info->peripheral);
 	queue_work(driver->cntl_wq, &driver->mask_update_work);
 	diag_notify_md_client(p_info->peripheral, DIAG_STATUS_OPEN);
@@ -56,12 +59,18 @@
 {
 	uint8_t peripheral;
 
-	if (!p_info)
+	if (!p_info) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid fwd_info structure\n");
 		return;
+	}
 
 	peripheral = p_info->peripheral;
-	if (peripheral >= NUM_PERIPHERALS)
+	if (peripheral >= NUM_PERIPHERALS) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: Invalid peripheral (%d)\n", peripheral);
 		return;
+	}
 
 	driver->feature[peripheral].sent_feature_mask = 0;
 	driver->feature[peripheral].rcvd_feature_mask = 0;
@@ -87,8 +96,11 @@
 	driver->stm_peripheral = 0;
 	mutex_unlock(&driver->cntl_lock);
 
-	if (peripheral_mask == 0)
+	if (peripheral_mask == 0) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: Empty Peripheral mask\n");
 		return;
+	}
 
 	for (i = 0; i < NUM_PERIPHERALS; i++) {
 		if (!driver->feature[i].stm_support)
@@ -111,11 +123,18 @@
 	struct pid *pid_struct;
 	struct task_struct *result;
 
-	if (peripheral > NUM_PERIPHERALS)
+	if (peripheral > NUM_PERIPHERALS) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: Invalid peripheral (%d)\n", peripheral);
 		return;
+	}
 
-	if (driver->logging_mode != DIAG_MEMORY_DEVICE_MODE)
+	if (driver->logging_mode != DIAG_MEMORY_DEVICE_MODE) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: Invalid logging_mode (%d)\n",
+			driver->logging_mode);
 		return;
+	}
 
 	mutex_lock(&driver->md_session_lock);
 	memset(&info, 0, sizeof(struct siginfo));
@@ -171,8 +190,12 @@
 	uint32_t pd;
 	int status = DIAG_STATUS_CLOSED;
 
-	if (!buf || peripheral >= NUM_PERIPHERALS || len < sizeof(*pd_msg))
+	if (!buf || peripheral >= NUM_PERIPHERALS || len < sizeof(*pd_msg)) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d, pd_msg_len = %d\n",
+		!buf, peripheral, len, (int)sizeof(*pd_msg));
 		return;
+	}
 
 	pd_msg = (struct diag_ctrl_msg_pd_status *)buf;
 	pd = pd_msg->pd_id;
@@ -182,8 +205,11 @@
 
 static void enable_stm_feature(uint8_t peripheral)
 {
-	if (peripheral >= NUM_PERIPHERALS)
+	if (peripheral >= NUM_PERIPHERALS) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid peripheral (%d)\n", peripheral);
 		return;
+	}
 
 	mutex_lock(&driver->cntl_lock);
 	driver->feature[peripheral].stm_support = ENABLE_STM;
@@ -195,8 +221,11 @@
 
 static void enable_socket_feature(uint8_t peripheral)
 {
-	if (peripheral >= NUM_PERIPHERALS)
+	if (peripheral >= NUM_PERIPHERALS) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid peripheral (%d)\n", peripheral);
 		return;
+	}
 
 	if (driver->supports_sockets)
 		driver->feature[peripheral].sockets_enabled = 1;
@@ -206,8 +235,11 @@
 
 static void process_hdlc_encoding_feature(uint8_t peripheral)
 {
-	if (peripheral >= NUM_PERIPHERALS)
+	if (peripheral >= NUM_PERIPHERALS) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid peripheral (%d)\n", peripheral);
 		return;
+	}
 
 	if (driver->supports_apps_hdlc_encoding) {
 		driver->feature[peripheral].encode_hdlc =
@@ -220,8 +252,11 @@
 
 static void process_upd_header_untagging_feature(uint8_t peripheral)
 {
-	if (peripheral >= NUM_PERIPHERALS)
+	if (peripheral >= NUM_PERIPHERALS) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid peripheral (%d)\n", peripheral);
 		return;
+	}
 
 	if (driver->supports_apps_header_untagging) {
 		driver->feature[peripheral].untag_header =
@@ -247,8 +282,16 @@
 	 * Perform Basic sanity. The len field is the size of the data payload.
 	 * This doesn't include the header size.
 	 */
-	if (!buf || peripheral >= NUM_PERIPHERALS || len == 0)
+	if (!buf || peripheral >= NUM_PERIPHERALS || len == 0) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d\n",
+		!buf, peripheral, len);
 		return;
+	}
+
+	DIAG_LOG(DIAG_DEBUG_CONTROL,
+		"diag:peripheral(%d) command deregistration packet processing started\n",
+		peripheral);
 
 	dereg = (struct diag_ctrl_cmd_dereg *)ptr;
 	ptr += header_len;
@@ -256,8 +299,8 @@
 	read_len += header_len - (2 * sizeof(uint32_t));
 
 	if (dereg->count_entries == 0) {
-		pr_debug("diag: In %s, received reg tbl with no entries\n",
-			 __func__);
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: received reg tbl with no entries\n");
 		return;
 	}
 
@@ -276,6 +319,9 @@
 		pr_err("diag: In %s, reading less than available, read_len: %d, len: %d count: %d\n",
 		       __func__, read_len, len, dereg->count_entries);
 	}
+	DIAG_LOG(DIAG_DEBUG_CONTROL,
+		"diag:peripheral(%d) command deregistration packet processing complete\n",
+		peripheral);
 }
 static void process_command_registration(uint8_t *buf, uint32_t len,
 					 uint8_t peripheral)
@@ -292,8 +338,15 @@
 	 * Perform Basic sanity. The len field is the size of the data payload.
 	 * This doesn't include the header size.
 	 */
-	if (!buf || peripheral >= NUM_PERIPHERALS || len == 0)
+	if (!buf || peripheral >= NUM_PERIPHERALS || len == 0) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d\n",
+		!buf, peripheral, len);
 		return;
+	}
+	DIAG_LOG(DIAG_DEBUG_CONTROL,
+		"diag: peripheral(%d) command registration packet processing started\n",
+		peripheral);
 
 	reg = (struct diag_ctrl_cmd_reg *)ptr;
 	ptr += header_len;
@@ -301,7 +354,8 @@
 	read_len += header_len - (2 * sizeof(uint32_t));
 
 	if (reg->count_entries == 0) {
-		pr_debug("diag: In %s, received reg tbl with no entries\n",
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: In %s, received reg tbl with no entries\n",
 			 __func__);
 		return;
 	}
@@ -321,6 +375,9 @@
 		pr_err("diag: In %s, reading less than available, read_len: %d, len: %d count: %d\n",
 		       __func__, read_len, len, reg->count_entries);
 	}
+	DIAG_LOG(DIAG_DEBUG_CONTROL,
+		"diag: peripheral(%d) command registration packet processing complete\n",
+		peripheral);
 }
 
 static void diag_close_transport_work_fn(struct work_struct *work)
@@ -347,8 +404,11 @@
 
 static void process_socket_feature(uint8_t peripheral)
 {
-	if (peripheral >= NUM_PERIPHERALS)
+	if (peripheral >= NUM_PERIPHERALS) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid peripheral (%d)\n", peripheral);
 		return;
+	}
 
 	mutex_lock(&driver->cntl_lock);
 	driver->close_transport |= PERIPHERAL_MASK(peripheral);
@@ -379,15 +439,20 @@
 	uint32_t feature_mask = 0;
 	uint8_t *ptr = buf;
 
-	if (!buf || peripheral >= NUM_PERIPHERALS || len == 0)
+	if (!buf || peripheral >= NUM_PERIPHERALS || len == 0) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d\n",
+		!buf, peripheral, len);
 		return;
+	}
 
 	header = (struct diag_ctrl_feature_mask *)ptr;
 	ptr += header_len;
 	feature_mask_len = header->feature_mask_len;
 
 	if (feature_mask_len == 0) {
-		pr_debug("diag: In %s, received invalid feature mask from peripheral %d\n",
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: In %s, received invalid feature mask from peripheral %d\n",
 			 __func__, peripheral);
 		return;
 	}
@@ -400,6 +465,8 @@
 	diag_cmd_remove_reg_by_proc(peripheral);
 
 	driver->feature[peripheral].rcvd_feature_mask = 1;
+	DIAG_LOG(DIAG_DEBUG_CONTROL,
+	"diag: Received feature mask for peripheral %d\n", peripheral);
 
 	for (i = 0; i < feature_mask_len && read_len < len; i++) {
 		feature_mask = *(uint8_t *)ptr;
@@ -431,6 +498,10 @@
 
 	process_socket_feature(peripheral);
 	process_log_on_demand_feature(peripheral);
+
+	DIAG_LOG(DIAG_DEBUG_CONTROL,
+		"diag: Peripheral(%d) feature mask is processed\n",
+		peripheral);
 }
 
 static void process_last_event_report(uint8_t *buf, uint32_t len,
@@ -442,15 +513,23 @@
 	uint32_t pkt_len = sizeof(uint32_t) + sizeof(uint16_t);
 	uint16_t event_size = 0;
 
-	if (!buf || peripheral >= NUM_PERIPHERALS || len != pkt_len)
+	if (!buf || peripheral >= NUM_PERIPHERALS || len != pkt_len) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d, pkt_len = %d\n",
+		!buf, peripheral, len, pkt_len);
 		return;
+	}
+
+	DIAG_LOG(DIAG_DEBUG_CONTROL,
+		"diag:started processing last event report for peripheral (%d)\n",
+		peripheral);
 
 	mutex_lock(&event_mask.lock);
 	header = (struct diag_ctrl_last_event_report *)ptr;
 	event_size = ((header->event_last_id / 8) + 1);
 	if (event_size >= driver->event_mask_size) {
-		pr_debug("diag: In %s, receiving event mask size more that Apps can handle\n",
-			 __func__);
+		DIAG_LOG(DIAG_DEBUG_MASKS,
+		"diag: receiving event mask size more that Apps can handle\n");
 		temp = krealloc(driver->event_mask->ptr, event_size,
 				GFP_KERNEL);
 		if (!temp) {
@@ -467,6 +546,9 @@
 		driver->last_event_id = header->event_last_id;
 err:
 	mutex_unlock(&event_mask.lock);
+	DIAG_LOG(DIAG_DEBUG_CONTROL,
+		"diag: last event report processed for peripheral (%d)\n",
+		peripheral);
 }
 
 static void process_log_range_report(uint8_t *buf, uint32_t len,
@@ -480,8 +562,15 @@
 	struct diag_ctrl_log_range *log_range = NULL;
 	struct diag_log_mask_t *mask_ptr = NULL;
 
-	if (!buf || peripheral >= NUM_PERIPHERALS || len < 0)
+	if (!buf || peripheral >= NUM_PERIPHERALS || len < 0) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d\n",
+		!buf, peripheral, len);
 		return;
+	}
+	DIAG_LOG(DIAG_DEBUG_CONTROL,
+		"diag:started processing log range report for peripheral(%d)\n",
+		peripheral);
 
 	header = (struct diag_ctrl_log_range_report *)ptr;
 	ptr += header_len;
@@ -507,6 +596,9 @@
 		mask_ptr->range = LOG_ITEMS_TO_SIZE(log_range->num_items);
 		mutex_unlock(&(mask_ptr->lock));
 	}
+	DIAG_LOG(DIAG_DEBUG_CONTROL,
+		"diag: log range report processed for peripheral (%d)\n",
+		peripheral);
 }
 
 static int update_msg_mask_tbl_entry(struct diag_msg_mask_t *mask,
@@ -514,8 +606,12 @@
 {
 	uint32_t temp_range;
 
-	if (!mask || !range)
+	if (!mask || !range) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid %s\n",
+		(!mask ? "mask" : (!range ? "range" : " ")));
 		return -EIO;
+	}
 	if (range->ssid_last < range->ssid_first) {
 		pr_err("diag: In %s, invalid ssid range, first: %d, last: %d\n",
 		       __func__, range->ssid_first, range->ssid_last);
@@ -547,8 +643,16 @@
 	uint8_t *temp = NULL;
 	uint32_t min_len = header_len - sizeof(struct diag_ctrl_pkt_header_t);
 
-	if (!buf || peripheral >= NUM_PERIPHERALS || len < min_len)
+	if (!buf || peripheral >= NUM_PERIPHERALS || len < min_len) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d, min_len = %d\n",
+		!buf, peripheral, len, min_len);
 		return;
+	}
+
+	DIAG_LOG(DIAG_DEBUG_CONTROL,
+		"diag: started processing ssid range for peripheral (%d)\n",
+		peripheral);
 
 	header = (struct diag_ctrl_ssid_range_report *)ptr;
 	ptr += header_len;
@@ -564,6 +668,10 @@
 		mask_ptr = (struct diag_msg_mask_t *)msg_mask.ptr;
 		found = 0;
 		for (j = 0; j < driver->msg_mask_tbl_count; j++, mask_ptr++) {
+			if (!mask_ptr || !ssid_range) {
+				found = 1;
+				break;
+			}
 			if (mask_ptr->ssid_first != ssid_range->ssid_first)
 				continue;
 			mutex_lock(&mask_ptr->lock);
@@ -582,6 +690,8 @@
 
 		new_size = (driver->msg_mask_tbl_count + 1) *
 			   sizeof(struct diag_msg_mask_t);
+		DIAG_LOG(DIAG_DEBUG_MASKS,
+			"diag: receiving msg mask size more that Apps can handle\n");
 		temp = krealloc(msg_mask.ptr, new_size, GFP_KERNEL);
 		if (!temp) {
 			pr_err("diag: In %s, Unable to add new ssid table to msg mask, ssid first: %d, last: %d\n",
@@ -590,6 +700,7 @@
 			continue;
 		}
 		msg_mask.ptr = temp;
+		mask_ptr = (struct diag_msg_mask_t *)msg_mask.ptr;
 		err = diag_create_msg_mask_table_entry(mask_ptr, ssid_range);
 		if (err) {
 			pr_err("diag: In %s, Unable to create a new msg mask table entry, first: %d last: %d err: %d\n",
@@ -600,6 +711,9 @@
 		driver->msg_mask_tbl_count += 1;
 	}
 	mutex_unlock(&driver->msg_mask_lock);
+	DIAG_LOG(DIAG_DEBUG_CONTROL,
+		"diag: processed ssid range for peripheral(%d)\n",
+		peripheral);
 }
 
 static void diag_build_time_mask_update(uint8_t *buf,
@@ -616,8 +730,12 @@
 	uint32_t *dest_ptr = NULL;
 	struct diag_msg_mask_t *build_mask = NULL;
 
-	if (!range || !buf)
+	if (!range || !buf) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid %s\n",
+		(!range ? "range" : (!buf ? "buf" : " ")));
 		return;
+	}
 
 	if (range->ssid_last < range->ssid_first) {
 		pr_err("diag: In %s, invalid ssid range, first: %d, last: %d\n",
@@ -629,6 +747,10 @@
 	num_items = range->ssid_last - range->ssid_first + 1;
 
 	for (i = 0; i < driver->bt_msg_mask_tbl_count; i++, build_mask++) {
+		if (!build_mask) {
+			found = 1;
+			break;
+		}
 		if (build_mask->ssid_first != range->ssid_first)
 			continue;
 		found = 1;
@@ -639,7 +761,8 @@
 			       __func__);
 		}
 		dest_ptr = build_mask->ptr;
-		for (j = 0; j < build_mask->range; j++, mask_ptr++, dest_ptr++)
+		for (j = 0; (j < build_mask->range) && mask_ptr && dest_ptr;
+			j++, mask_ptr++, dest_ptr++)
 			*(uint32_t *)dest_ptr |= *mask_ptr;
 		mutex_unlock(&build_mask->lock);
 		break;
@@ -647,8 +770,12 @@
 
 	if (found)
 		goto end;
+
 	new_size = (driver->bt_msg_mask_tbl_count + 1) *
 		   sizeof(struct diag_msg_mask_t);
+	DIAG_LOG(DIAG_DEBUG_MASKS,
+		"diag: receiving build time mask size more that Apps can handle\n");
+
 	temp = krealloc(driver->build_time_mask->ptr, new_size, GFP_KERNEL);
 	if (!temp) {
 		pr_err("diag: In %s, unable to create a new entry for build time mask\n",
@@ -656,6 +783,7 @@
 		goto end;
 	}
 	driver->build_time_mask->ptr = temp;
+	build_mask = (struct diag_msg_mask_t *)driver->build_time_mask->ptr;
 	err = diag_create_msg_mask_table_entry(build_mask, range);
 	if (err) {
 		pr_err("diag: In %s, Unable to create a new msg mask table entry, err: %d\n",
@@ -679,8 +807,16 @@
 	struct diag_ctrl_build_mask_report *header = NULL;
 	struct diag_ssid_range_t *range = NULL;
 
-	if (!buf || peripheral >= NUM_PERIPHERALS || len < header_len)
+	if (!buf || peripheral >= NUM_PERIPHERALS || len < header_len) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d, header_len = %d\n",
+		!buf, peripheral, len, header_len);
 		return;
+	}
+
+	DIAG_LOG(DIAG_DEBUG_CONTROL,
+		"diag: started processing build mask for peripheral(%d)\n",
+		peripheral);
 
 	header = (struct diag_ctrl_build_mask_report *)ptr;
 	ptr += header_len;
@@ -696,6 +832,8 @@
 		ptr += num_items * sizeof(uint32_t);
 		read_len += num_items * sizeof(uint32_t);
 	}
+	DIAG_LOG(DIAG_DEBUG_CONTROL,
+		"diag: processing build mask complete (%d)\n", peripheral);
 }
 
 int diag_add_diag_id_to_list(uint8_t diag_id, char *process_name,
@@ -703,8 +841,12 @@
 {
 	struct diag_id_tbl_t *new_item = NULL;
 
-	if (!process_name || diag_id == 0)
+	if (!process_name || diag_id == 0) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid parameters: !process_name = %d, diag_id = %d\n",
+		!process_name, diag_id);
 		return -EINVAL;
+	}
 
 	new_item = kzalloc(sizeof(struct diag_id_tbl_t), GFP_KERNEL);
 	if (!new_item)
@@ -734,8 +876,10 @@
 	struct list_head *temp;
 	struct diag_id_tbl_t *item = NULL;
 
-	if (!process_name || !diag_id)
+	if (!process_name || !diag_id) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "diag: Invalid parameters\n");
 		return -EINVAL;
+	}
 
 	mutex_lock(&driver->diag_id_mutex);
 	list_for_each_safe(start, temp, &driver->diag_id_list) {
@@ -762,8 +906,12 @@
 	uint8_t local_diag_id = 0;
 	uint8_t new_request = 0, i = 0, ch_type = 0;
 
-	if (!buf || len == 0 || peripheral >= NUM_PERIPHERALS)
+	if (!buf || len == 0 || peripheral >= NUM_PERIPHERALS) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid parameters: !buf = %d, len = %d, peripheral = %d\n",
+		!buf, len, peripheral);
 		return;
+	}
 
 	header = (struct diag_ctrl_diagid *)buf;
 	process_name = (char *)&header->process_name;
@@ -841,7 +989,7 @@
 		fwd_info = &peripheral_info[TYPE_DATA][peripheral];
 		diagfwd_buffers_init(fwd_info);
 		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
-		"diag: diag_id sent = %d to peripheral = %d with diag_id = %d for %s :\n",
+		"diag: diag_id sent = %d to peripheral = %d with diag_id = %d for %s\n",
 			driver->diag_id_sent[peripheral], peripheral,
 			ctrl_pkt.diag_id, process_name);
 	}
@@ -855,8 +1003,10 @@
 	uint8_t *ptr = buf;
 	struct diag_ctrl_pkt_header_t *ctrl_pkt = NULL;
 
-	if (!buf || len <= 0 || !p_info)
+	if (!buf || len <= 0 || !p_info) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "diag: Invalid parameters\n");
 		return;
+	}
 
 	if (reg_dirty & PERIPHERAL_MASK(p_info->peripheral)) {
 		pr_err_ratelimited("diag: dropping command registration from peripheral %d\n",
@@ -866,6 +1016,9 @@
 
 	while (read_len + header_len < len) {
 		ctrl_pkt = (struct diag_ctrl_pkt_header_t *)ptr;
+		DIAG_LOG(DIAG_DEBUG_CONTROL,
+			"diag:peripheral: %d: pkt_id: %d\n",
+			p_info->peripheral, ctrl_pkt->pkt_id);
 		switch (ctrl_pkt->pkt_id) {
 		case DIAG_CTRL_MSG_REG:
 			process_command_registration(ptr, ctrl_pkt->len,
@@ -904,12 +1057,15 @@
 						   p_info->peripheral);
 			break;
 		default:
-			pr_debug("diag: Control packet %d not supported\n",
-				 ctrl_pkt->pkt_id);
+			DIAG_LOG(DIAG_DEBUG_CONTROL,
+			"diag: Control packet %d not supported\n",
+			 ctrl_pkt->pkt_id);
 		}
 		ptr += header_len + ctrl_pkt->len;
 		read_len += header_len + ctrl_pkt->len;
 	}
+	DIAG_LOG(DIAG_DEBUG_CONTROL,
+	"diag: control packet processing complete\n");
 }
 
 static int diag_compute_real_time(int idx)
@@ -1127,15 +1283,16 @@
 	for (i = 0; i < DIAG_NUM_PROC; i++) {
 		temp_real_time = diag_compute_real_time(i);
 		if (temp_real_time == driver->real_time_mode[i]) {
-			pr_debug("diag: did not update real time mode on proc %d, already in the req mode %d",
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+				"diag: did not update real time mode on proc %d, already in the req mode %d\n",
 				i, temp_real_time);
 			continue;
 		}
 
 		if (i == DIAG_LOCAL_PROC) {
 			if (!send_update) {
-				pr_debug("diag: In %s, cannot send real time mode pkt since one of the periperhal is in buffering mode\n",
-					 __func__);
+				DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+				"diag: cannot send real time mode pkt since one of the periperhal is in buffering mode\n");
 				break;
 			}
 			for (j = 0; j < NUM_PERIPHERALS; j++)
@@ -1169,7 +1326,8 @@
 			temp_real_time = MODE_NONREALTIME;
 		}
 		if (temp_real_time == driver->real_time_mode[i]) {
-			pr_debug("diag: did not update real time mode on proc %d, already in the req mode %d",
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+				"diag: did not update real time mode on proc %d, already in the req mode %d\n",
 				i, temp_real_time);
 			continue;
 		}
@@ -1204,8 +1362,8 @@
 
 	if (!driver->diagfwd_cntl[peripheral] ||
 	    !driver->diagfwd_cntl[peripheral]->ch_open) {
-		pr_debug("diag: In %s, control channel is not open, p: %d\n",
-			 __func__, peripheral);
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: control channel is not open, p: %d\n", peripheral);
 		return err;
 	}
 
@@ -1317,8 +1475,9 @@
 	}
 
 	if (!driver->feature[peripheral].peripheral_buffering) {
-		pr_debug("diag: In %s, peripheral %d doesn't support buffering\n",
-			 __func__, peripheral);
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: peripheral %d doesn't support buffering\n",
+			 peripheral);
 		driver->buffering_flag[params->peripheral] = 0;
 		return -EIO;
 	}
@@ -1383,8 +1542,9 @@
 
 	if (!driver->diagfwd_cntl[peripheral] ||
 	    !driver->diagfwd_cntl[peripheral]->ch_open) {
-		pr_debug("diag: In %s, control channel is not open, p: %d\n",
-			 __func__, peripheral);
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: control channel is not open, p: %d\n",
+			 peripheral);
 		return -ENODEV;
 	}
 
@@ -1413,15 +1573,17 @@
 	struct diag_ctrl_drain_immediate_v2 ctrl_pkt_v2;
 
 	if (!driver->feature[peripheral].peripheral_buffering) {
-		pr_debug("diag: In %s, peripheral  %d doesn't support buffering\n",
-			 __func__, peripheral);
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: peripheral  %d doesn't support buffering\n",
+			 peripheral);
 		return -EINVAL;
 	}
 
 	if (!driver->diagfwd_cntl[peripheral] ||
 	    !driver->diagfwd_cntl[peripheral]->ch_open) {
-		pr_debug("diag: In %s, control channel is not open, p: %d\n",
-			 __func__, peripheral);
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: control channel is not open, p: %d\n",
+			 peripheral);
 		return -ENODEV;
 	}
 
@@ -1478,8 +1640,9 @@
 	}
 
 	if (!driver->feature[peripheral].peripheral_buffering) {
-		pr_debug("diag: In %s, peripheral  %d doesn't support buffering\n",
-			 __func__, peripheral);
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: peripheral  %d doesn't support buffering\n",
+			 peripheral);
 		return -EINVAL;
 	}
 
@@ -1557,15 +1720,17 @@
 	}
 
 	if (!driver->feature[peripheral].peripheral_buffering) {
-		pr_debug("diag: In %s, peripheral  %d doesn't support buffering\n",
-			 __func__, peripheral);
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: peripheral  %d doesn't support buffering\n",
+			 peripheral);
 		return -EINVAL;
 	}
 
 	if (!driver->diagfwd_cntl[peripheral] ||
 	    !driver->diagfwd_cntl[peripheral]->ch_open) {
-		pr_debug("diag: In %s, control channel is not open, p: %d\n",
-			 __func__, peripheral);
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: control channel is not open, p: %d\n",
+			 peripheral);
 		return -ENODEV;
 	}
 
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
index 7225dc2..2022e7b 100644
--- a/drivers/char/diag/diagfwd_peripheral.c
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -723,6 +723,7 @@
 				   unsigned char *buf, int len)
 {
 	if (!fwd_info) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "diag: Invalid fwd_info\n");
 		diag_ws_release();
 		return;
 	}
@@ -743,8 +744,12 @@
 	 */
 	diag_ws_on_copy_fail(DIAG_WS_MUX);
 	/* Reset the buffer in_busy value after processing the data */
-	if (fwd_info->buf_1)
+	if (fwd_info->buf_1) {
 		atomic_set(&fwd_info->buf_1->in_busy, 0);
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"Buffer 1 for core PD is marked free, p: %d, t: %d\n",
+			fwd_info->peripheral, fwd_info->type);
+	}
 
 	diagfwd_queue_read(fwd_info);
 	diagfwd_queue_read(&peripheral_info[TYPE_DATA][fwd_info->peripheral]);
@@ -769,8 +774,12 @@
 
 	diag_dci_process_peripheral_data(fwd_info, (void *)buf, len);
 	/* Reset the buffer in_busy value after processing the data */
-	if (fwd_info->buf_1)
+	if (fwd_info->buf_1) {
 		atomic_set(&fwd_info->buf_1->in_busy, 0);
+		DIAG_LOG(DIAG_DEBUG_DCI,
+		"Buffer 1 for core PD is marked free, p: %d, t: %d\n",
+			fwd_info->peripheral, fwd_info->type);
+	}
 
 	diagfwd_queue_read(fwd_info);
 }
@@ -1638,13 +1647,15 @@
 	struct diagfwd_buf_t *temp_buf = NULL;
 
 	if (!fwd_info) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "diag: Invalid fwd_info\n");
 		diag_ws_release();
 		return;
 	}
 
 	if (!fwd_info->inited || !atomic_read(&fwd_info->opened)) {
-		pr_debug("diag: In %s, p: %d, t: %d, inited: %d, opened: %d  ch_open: %d\n",
-			 __func__, fwd_info->peripheral, fwd_info->type,
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: p: %d, t: %d, inited: %d, opened: %d, ch_open: %d\n",
+			 fwd_info->peripheral, fwd_info->type,
 			 fwd_info->inited, atomic_read(&fwd_info->opened),
 			 fwd_info->ch_open);
 		diag_ws_release();
@@ -1680,8 +1691,9 @@
 			atomic_set(&temp_buf->in_busy, 1);
 		}
 	} else {
-		pr_debug("diag: In %s, both buffers are empty for p: %d, t: %d\n",
-			 __func__, fwd_info->peripheral, fwd_info->type);
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: both buffers are busy for p: %d, t: %d\n",
+			 fwd_info->peripheral, fwd_info->type);
 	}
 
 	if (!read_buf) {
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
index 63d84e6..83c6959 100644
--- a/drivers/char/hw_random/stm32-rng.c
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -21,6 +21,7 @@
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/pm_runtime.h>
+#include <linux/reset.h>
 #include <linux/slab.h>
 
 #define RNG_CR 0x00
@@ -46,6 +47,7 @@
 	struct hwrng rng;
 	void __iomem *base;
 	struct clk *clk;
+	struct reset_control *rst;
 };
 
 static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
@@ -140,6 +142,13 @@
 	if (IS_ERR(priv->clk))
 		return PTR_ERR(priv->clk);
 
+	priv->rst = devm_reset_control_get(&ofdev->dev, NULL);
+	if (!IS_ERR(priv->rst)) {
+		reset_control_assert(priv->rst);
+		udelay(2);
+		reset_control_deassert(priv->rst);
+	}
+
 	dev_set_drvdata(dev, priv);
 
 	priv->rng.name = dev_driver_string(dev),
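
The stm32-rng probe change above requests an optional reset line and pulses it before the RNG is used. A sketch of that optional-reset pattern in isolation; the helper name is made up, and the 2 µs assert time simply copies the value in the hunk above:

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

/* Sketch: pulse a reset line if the device tree provides one; if not,
 * devm_reset_control_get() returns an error pointer and the pulse is
 * simply skipped.
 */
static void pulse_optional_reset(struct device *dev)
{
	struct reset_control *rst = devm_reset_control_get(dev, NULL);

	if (!IS_ERR(rst)) {
		reset_control_assert(rst);
		udelay(2);
		reset_control_deassert(rst);
	}
}
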
diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c
index 6e658aa..a70518a 100644
--- a/drivers/char/ipmi/ipmi_powernv.c
+++ b/drivers/char/ipmi/ipmi_powernv.c
@@ -251,8 +251,9 @@
 		ipmi->irq = opal_event_request(prop);
 	}
 
-	if (request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH,
-				"opal-ipmi", ipmi)) {
+	rc = request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH,
+			 "opal-ipmi", ipmi);
+	if (rc) {
 		dev_warn(dev, "Unable to request irq\n");
 		goto err_dispose;
 	}
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 510fc10..1213191 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -409,6 +409,7 @@
 	msg = ipmi_alloc_smi_msg();
 	if (!msg) {
 		ssif_info->ssif_state = SSIF_NORMAL;
+		ipmi_ssif_unlock_cond(ssif_info, flags);
 		return;
 	}
 
@@ -431,6 +432,7 @@
 	msg = ipmi_alloc_smi_msg();
 	if (!msg) {
 		ssif_info->ssif_state = SSIF_NORMAL;
+		ipmi_ssif_unlock_cond(ssif_info, flags);
 		return;
 	}
 
@@ -759,7 +761,7 @@
 			ssif_info->ssif_state = SSIF_NORMAL;
 			ipmi_ssif_unlock_cond(ssif_info, flags);
 			pr_warn(PFX "Error getting flags: %d %d, %x\n",
-			       result, len, data[2]);
+			       result, len, (len >= 3) ? data[2] : 0);
 		} else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
 			   || data[1] != IPMI_GET_MSG_FLAGS_CMD) {
 			/*
@@ -781,7 +783,7 @@
 		if ((result < 0) || (len < 3) || (data[2] != 0)) {
 			/* Error clearing flags */
 			pr_warn(PFX "Error clearing flags: %d %d, %x\n",
-			       result, len, data[2]);
+			       result, len, (len >= 3) ? data[2] : 0);
 		} else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
 			   || data[1] != IPMI_CLEAR_MSG_FLAGS_CMD) {
 			pr_warn(PFX "Invalid response clearing flags: %x %x\n",
diff --git a/drivers/char/random.c b/drivers/char/random.c
index ee737ef..1b3c731 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -259,6 +259,7 @@
 #include <linux/kmemcheck.h>
 #include <linux/workqueue.h>
 #include <linux/irq.h>
+#include <linux/ratelimit.h>
 #include <linux/syscalls.h>
 #include <linux/completion.h>
 #include <linux/uuid.h>
@@ -434,8 +435,9 @@
  * its value (from 0->1->2).
  */
 static int crng_init = 0;
-#define crng_ready() (likely(crng_init > 0))
+#define crng_ready() (likely(crng_init > 1))
 static int crng_init_cnt = 0;
+static unsigned long crng_global_init_time = 0;
 #define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE)
 static void _extract_crng(struct crng_state *crng,
 			  __u8 out[CHACHA20_BLOCK_SIZE]);
@@ -443,6 +445,16 @@
 				    __u8 tmp[CHACHA20_BLOCK_SIZE], int used);
 static void process_random_ready_list(void);
 
+static struct ratelimit_state unseeded_warning =
+	RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
+static struct ratelimit_state urandom_warning =
+	RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
+
+static int ratelimit_disable __read_mostly;
+
+module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
+MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
+
 /**********************************************************************
  *
  * OS independent entropy store.   Here are the functions which handle
@@ -741,7 +753,7 @@
 
 static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
 {
-	const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1));
+	const int nbits_max = r->poolinfo->poolwords * 32;
 
 	if (nbits < 0)
 		return -EINVAL;
@@ -800,7 +812,7 @@
 
 	if (!spin_trylock_irqsave(&primary_crng.lock, flags))
 		return 0;
-	if (crng_ready()) {
+	if (crng_init != 0) {
 		spin_unlock_irqrestore(&primary_crng.lock, flags);
 		return 0;
 	}
@@ -818,6 +830,39 @@
 	return 1;
 }
 
+#ifdef CONFIG_NUMA
+static void do_numa_crng_init(struct work_struct *work)
+{
+	int i;
+	struct crng_state *crng;
+	struct crng_state **pool;
+
+	pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
+	for_each_online_node(i) {
+		crng = kmalloc_node(sizeof(struct crng_state),
+				    GFP_KERNEL | __GFP_NOFAIL, i);
+		spin_lock_init(&crng->lock);
+		crng_initialize(crng);
+		pool[i] = crng;
+	}
+	mb();
+	if (cmpxchg(&crng_node_pool, NULL, pool)) {
+		for_each_node(i)
+			kfree(pool[i]);
+		kfree(pool);
+	}
+}
+
+static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);
+
+static void numa_crng_init(void)
+{
+	schedule_work(&numa_crng_init_work);
+}
+#else
+static void numa_crng_init(void) {}
+#endif
+
 static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
 {
 	unsigned long	flags;
@@ -836,7 +881,7 @@
 		_crng_backtrack_protect(&primary_crng, buf.block,
 					CHACHA20_KEY_SIZE);
 	}
-	spin_lock_irqsave(&primary_crng.lock, flags);
+	spin_lock_irqsave(&crng->lock, flags);
 	for (i = 0; i < 8; i++) {
 		unsigned long	rv;
 		if (!arch_get_random_seed_long(&rv) &&
@@ -847,12 +892,25 @@
 	memzero_explicit(&buf, sizeof(buf));
 	crng->init_time = jiffies;
 	if (crng == &primary_crng && crng_init < 2) {
+		numa_crng_init();
 		crng_init = 2;
 		process_random_ready_list();
 		wake_up_interruptible(&crng_init_wait);
 		pr_notice("random: crng init done\n");
+		if (unseeded_warning.missed) {
+			pr_notice("random: %d get_random_xx warning(s) missed "
+				  "due to ratelimiting\n",
+				  unseeded_warning.missed);
+			unseeded_warning.missed = 0;
+		}
+		if (urandom_warning.missed) {
+			pr_notice("random: %d urandom warning(s) missed "
+				  "due to ratelimiting\n",
+				  urandom_warning.missed);
+			urandom_warning.missed = 0;
+		}
 	}
-	spin_unlock_irqrestore(&primary_crng.lock, flags);
+	spin_unlock_irqrestore(&crng->lock, flags);
 }
 
 static inline void maybe_reseed_primary_crng(void)
@@ -872,8 +930,9 @@
 {
 	unsigned long v, flags;
 
-	if (crng_init > 1 &&
-	    time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL))
+	if (crng_ready() &&
+	    (time_after(crng_global_init_time, crng->init_time) ||
+	     time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)))
 		crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
 	spin_lock_irqsave(&crng->lock, flags);
 	if (arch_get_random_long(&v))
@@ -1115,12 +1174,16 @@
 static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
 {
 	__u32 *ptr = (__u32 *) regs;
+	unsigned int idx;
 
 	if (regs == NULL)
 		return 0;
-	if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32))
-		f->reg_idx = 0;
-	return *(ptr + f->reg_idx++);
+	idx = READ_ONCE(f->reg_idx);
+	if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
+		idx = 0;
+	ptr += idx++;
+	WRITE_ONCE(f->reg_idx, idx);
+	return *ptr;
 }
 
 void add_interrupt_randomness(int irq, int irq_flags)
@@ -1149,7 +1212,7 @@
 	fast_mix(fast_pool);
 	add_interrupt_bench(cycles);
 
-	if (!crng_ready()) {
+	if (unlikely(crng_init == 0)) {
 		if ((fast_pool->count >= 64) &&
 		    crng_fast_load((char *) fast_pool->pool,
 				   sizeof(fast_pool->pool))) {
@@ -1655,28 +1718,14 @@
  */
 static int rand_initialize(void)
 {
-#ifdef CONFIG_NUMA
-	int i;
-	struct crng_state *crng;
-	struct crng_state **pool;
-#endif
-
 	init_std_data(&input_pool);
 	init_std_data(&blocking_pool);
 	crng_initialize(&primary_crng);
-
-#ifdef CONFIG_NUMA
-	pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
-	for_each_online_node(i) {
-		crng = kmalloc_node(sizeof(struct crng_state),
-				    GFP_KERNEL | __GFP_NOFAIL, i);
-		spin_lock_init(&crng->lock);
-		crng_initialize(crng);
-		pool[i] = crng;
+	crng_global_init_time = jiffies;
+	if (ratelimit_disable) {
+		urandom_warning.interval = 0;
+		unseeded_warning.interval = 0;
 	}
-	mb();
-	crng_node_pool = pool;
-#endif
 	return 0;
 }
 early_initcall(rand_initialize);
@@ -1744,9 +1793,10 @@
 
 	if (!crng_ready() && maxwarn > 0) {
 		maxwarn--;
-		printk(KERN_NOTICE "random: %s: uninitialized urandom read "
-		       "(%zd bytes read)\n",
-		       current->comm, nbytes);
+		if (__ratelimit(&urandom_warning))
+			printk(KERN_NOTICE "random: %s: uninitialized "
+			       "urandom read (%zd bytes read)\n",
+			       current->comm, nbytes);
 		spin_lock_irqsave(&primary_crng.lock, flags);
 		crng_init_cnt = 0;
 		spin_unlock_irqrestore(&primary_crng.lock, flags);
@@ -1850,6 +1900,14 @@
 		input_pool.entropy_count = 0;
 		blocking_pool.entropy_count = 0;
 		return 0;
+	case RNDRESEEDCRNG:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		if (crng_init < 2)
+			return -ENODATA;
+		crng_reseed(&primary_crng, NULL);
+		crng_global_init_time = jiffies - 1;
+		return 0;
 	default:
 		return -EINVAL;
 	}
@@ -2143,7 +2201,7 @@
 {
 	struct entropy_store *poolp = &input_pool;
 
-	if (!crng_ready()) {
+	if (unlikely(crng_init == 0)) {
 		crng_fast_load(buffer, count);
 		return;
 	}
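
The random.c hunks rate-limit the unseeded/urandom warnings and report how many were suppressed once the CRNG is initialized. A small sketch of the printk-ratelimit pattern those hunks use; the state name, message, and helper are illustrative:

#include <linux/kernel.h>
#include <linux/ratelimit.h>

/* Sketch: allow at most 3 messages per second. __ratelimit() returns
 * true when printing is permitted and counts suppressed calls in
 * .missed, which can be reported later in a single summary line.
 */
static DEFINE_RATELIMIT_STATE(example_rs, HZ, 3);

static void warn_uninitialized_read(const char *comm, ssize_t nbytes)
{
	if (__ratelimit(&example_rs))
		pr_notice("random: %s: uninitialized urandom read (%zd bytes read)\n",
			  comm, nbytes);
}
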
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 8f890c1..8c0017d 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1405,7 +1405,6 @@
 {
 	char debugfs_name[16];
 	struct port *port;
-	struct port_buffer *buf;
 	dev_t devt;
 	unsigned int nr_added_bufs;
 	int err;
@@ -1516,8 +1515,6 @@
 	return 0;
 
 free_inbufs:
-	while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
-		free_buf(buf, true);
 free_device:
 	device_destroy(pdrvdata.class, port->dev->devt);
 free_cdev:
@@ -1542,34 +1539,14 @@
 
 static void remove_port_data(struct port *port)
 {
-	struct port_buffer *buf;
-
 	spin_lock_irq(&port->inbuf_lock);
 	/* Remove unused data this port might have received. */
 	discard_port_data(port);
 	spin_unlock_irq(&port->inbuf_lock);
 
-	/* Remove buffers we queued up for the Host to send us data in. */
-	do {
-		spin_lock_irq(&port->inbuf_lock);
-		buf = virtqueue_detach_unused_buf(port->in_vq);
-		spin_unlock_irq(&port->inbuf_lock);
-		if (buf)
-			free_buf(buf, true);
-	} while (buf);
-
 	spin_lock_irq(&port->outvq_lock);
 	reclaim_consumed_buffers(port);
 	spin_unlock_irq(&port->outvq_lock);
-
-	/* Free pending buffers from the out-queue. */
-	do {
-		spin_lock_irq(&port->outvq_lock);
-		buf = virtqueue_detach_unused_buf(port->out_vq);
-		spin_unlock_irq(&port->outvq_lock);
-		if (buf)
-			free_buf(buf, true);
-	} while (buf);
 }
 
 /*
@@ -1794,13 +1771,24 @@
 	spin_unlock(&portdev->c_ivq_lock);
 }
 
+static void flush_bufs(struct virtqueue *vq, bool can_sleep)
+{
+	struct port_buffer *buf;
+	unsigned int len;
+
+	while ((buf = virtqueue_get_buf(vq, &len)))
+		free_buf(buf, can_sleep);
+}
+
 static void out_intr(struct virtqueue *vq)
 {
 	struct port *port;
 
 	port = find_port_by_vq(vq->vdev->priv, vq);
-	if (!port)
+	if (!port) {
+		flush_bufs(vq, false);
 		return;
+	}
 
 	wake_up_interruptible(&port->waitqueue);
 }
@@ -1811,8 +1799,10 @@
 	unsigned long flags;
 
 	port = find_port_by_vq(vq->vdev->priv, vq);
-	if (!port)
+	if (!port) {
+		flush_bufs(vq, false);
 		return;
+	}
 
 	spin_lock_irqsave(&port->inbuf_lock, flags);
 	port->inbuf = get_inbuf(port);
@@ -1987,6 +1977,15 @@
 
 static void remove_vqs(struct ports_device *portdev)
 {
+	struct virtqueue *vq;
+
+	virtio_device_for_each_vq(portdev->vdev, vq) {
+		struct port_buffer *buf;
+
+		flush_bufs(vq, true);
+		while ((buf = virtqueue_detach_unused_buf(vq)))
+			free_buf(buf, true);
+	}
 	portdev->vdev->config->del_vqs(portdev->vdev);
 	kfree(portdev->in_vqs);
 	kfree(portdev->out_vqs);
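
remove_vqs() above now drains each virtqueue in one place: completed buffers are reclaimed with virtqueue_get_buf() and anything the host never consumed is pulled back with virtqueue_detach_unused_buf(), before del_vqs() tears the queues down. A sketch of that drain order; free_buf here is a stand-in for the driver's own release helper:

#include <linux/virtio.h>

/* Sketch: release everything a virtqueue may still hold at teardown. */
static void drain_vq(struct virtqueue *vq, void (*free_buf)(void *buf))
{
	void *buf;
	unsigned int len;

	/* Buffers the device already used but we never collected. */
	while ((buf = virtqueue_get_buf(vq, &len)))
		free_buf(buf);

	/* Buffers still queued that the device never consumed. */
	while ((buf = virtqueue_detach_unused_buf(vq)))
		free_buf(buf);
}
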
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index 4e1cd5a..07c8f70 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -260,13 +260,13 @@
 	gck->lock = lock;
 	gck->range = *range;
 
+	clk_generated_startup(gck);
 	hw = &gck->hw;
 	ret = clk_hw_register(NULL, &gck->hw);
 	if (ret) {
 		kfree(gck);
 		hw = ERR_PTR(ret);
-	} else
-		clk_generated_startup(gck);
+	}
 
 	return hw;
 }
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index abdc149..73aab6e 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -545,9 +545,7 @@
 	const struct bcm2835_pll_data *data = pll->data;
 
 	spin_lock(&cprman->regs_lock);
-	cprman_write(cprman, data->cm_ctrl_reg,
-		     cprman_read(cprman, data->cm_ctrl_reg) |
-		     CM_PLL_ANARST);
+	cprman_write(cprman, data->cm_ctrl_reg, CM_PLL_ANARST);
 	cprman_write(cprman, data->a2w_ctrl_reg,
 		     cprman_read(cprman, data->a2w_ctrl_reg) |
 		     A2W_PLL_CTRL_PWRDN);
@@ -583,6 +581,10 @@
 		cpu_relax();
 	}
 
+	cprman_write(cprman, data->a2w_ctrl_reg,
+		     cprman_read(cprman, data->a2w_ctrl_reg) |
+		     A2W_PLL_CTRL_PRST_DISABLE);
+
 	return 0;
 }
 
diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c
index 674785d..f0290092 100644
--- a/drivers/clk/clk-conf.c
+++ b/drivers/clk/clk-conf.c
@@ -106,7 +106,7 @@
 
 			rc = clk_set_rate(clk, rate);
 			if (rc < 0)
-				pr_err("clk: couldn't set %s clk rate to %d (%d), current rate: %ld\n",
+				pr_err("clk: couldn't set %s clk rate to %u (%d), current rate: %lu\n",
 				       __clk_get_name(clk), rate, rc,
 				       clk_get_rate(clk));
 			clk_put(clk);
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index 96d37175..8ad458b 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -71,15 +71,15 @@
 };
 
 /* find closest match to given frequency in OPP table */
-static int __scpi_dvfs_round_rate(struct scpi_clk *clk, unsigned long rate)
+static long __scpi_dvfs_round_rate(struct scpi_clk *clk, unsigned long rate)
 {
 	int idx;
-	u32 fmin = 0, fmax = ~0, ftmp;
+	unsigned long fmin = 0, fmax = ~0, ftmp;
 	const struct scpi_opp *opp = clk->info->opps;
 
 	for (idx = 0; idx < clk->info->count; idx++, opp++) {
 		ftmp = opp->freq;
-		if (ftmp >= (u32)rate) {
+		if (ftmp >= rate) {
 			if (ftmp <= fmax)
 				fmax = ftmp;
 			break;
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index f41307d..0426ff7 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -2280,6 +2280,9 @@
 	int ret;
 
 	clk_prepare_lock();
+	/* Always try to update cached phase if possible */
+	if (core->ops->get_phase)
+		core->phase = core->ops->get_phase(core->hw);
 	ret = core->phase;
 	clk_prepare_unlock();
 
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index 2f29ee1..5588f75 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -7,9 +7,9 @@
 	bool
 	depends on COMMON_CLK_AMLOGIC
 	help
-	  Support for the clock controller on AmLogic S805 devices, aka
-	  meson8b. Say Y if you want peripherals and CPU frequency scaling to
-	  work.
+	  Support for the clock controller on AmLogic S802 (Meson8),
+	  S805 (Meson8b) and S812 (Meson8m2) devices. Say Y if you
+	  want peripherals and CPU frequency scaling to work.
 
 config COMMON_CLK_GXBB
 	bool
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 3f1be46..70567958 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -1,5 +1,6 @@
 /*
- * AmLogic S805 / Meson8b Clock Controller Driver
+ * AmLogic S802 (Meson8) / S805 (Meson8b) / S812 (Meson8m2) Clock Controller
+ * Driver
  *
  * Copyright (c) 2015 Endless Mobile, Inc.
  * Author: Carlo Caione <carlo@endlessm.com>
@@ -661,7 +662,9 @@
 }
 
 static const struct of_device_id meson8b_clkc_match_table[] = {
+	{ .compatible = "amlogic,meson8-clkc" },
 	{ .compatible = "amlogic,meson8b-clkc" },
+	{ .compatible = "amlogic,meson8m2-clkc" },
 	{ }
 };
 
diff --git a/drivers/clk/msm/clock-cpu-8939.c b/drivers/clk/msm/clock-cpu-8939.c
index 5ba6bd0..5b8b8f3 100644
--- a/drivers/clk/msm/clock-cpu-8939.c
+++ b/drivers/clk/msm/clock-cpu-8939.c
@@ -999,7 +999,7 @@
 
 	return rc;
 }
-late_initcall(clock_cpu_lpm_get_latency);
+late_initcall_sync(clock_cpu_lpm_get_latency);
 
 #define APCS_C0_PLL			0xb116000
 #define C0_PLL_MODE			0x0
diff --git a/drivers/clk/msm/clock-gcc-8952.c b/drivers/clk/msm/clock-gcc-8952.c
index 6d7727f..47619f5 100644
--- a/drivers/clk/msm/clock-gcc-8952.c
+++ b/drivers/clk/msm/clock-gcc-8952.c
@@ -216,6 +216,7 @@
 	.config_reg = (void __iomem *)APCS_C0_PLL_USER_CTL,
 	.status_reg = (void __iomem *)APCS_C0_PLL_STATUS,
 	.freq_tbl = apcs_c0_pll_freq,
+	.config_ctl_reg = (void __iomem *)APCS_C0_PLL_CONFIG_CTL,
 	.masks = {
 		.vco_mask = BM(29, 28),
 		.pre_div_mask = BIT(12),
@@ -283,6 +284,7 @@
 	.config_reg = (void __iomem *)APCS_C1_PLL_USER_CTL,
 	.status_reg = (void __iomem *)APCS_C1_PLL_STATUS,
 	.freq_tbl = apcs_c1_pll_freq,
+	.config_ctl_reg = (void __iomem *)APCS_C1_PLL_CONFIG_CTL,
 	.masks = {
 		.vco_mask = BM(29, 28),
 		.pre_div_mask = BIT(12),
@@ -4407,6 +4409,11 @@
 	if (compat_bin2 || compat_bin4 || compat_bin5)
 		nbases = APCS_C0_PLL_BASE;
 
+	if (compat_bin5 || compat_bin6) {
+		a53ss_c0_pll.c.ops = &clk_ops_acpu_pll;
+		a53ss_c1_pll.c.ops = &clk_ops_acpu_pll;
+	}
+
 	ret = get_mmio_addr(pdev, nbases);
 	if (ret)
 		return ret;
@@ -4498,6 +4505,7 @@
 			vdd_hf_pll.num_levels = VDD_HF_PLL_NUM_439;
 			vdd_hf_pll.cur_level = VDD_HF_PLL_NUM_439;
 
+			gpll3_clk_src.test_ctl_hi_val = 0x400000;
 			gpll3_clk_src.vco_tbl = p_vco;
 			gpll3_clk_src.num_vco = ARRAY_SIZE(p_vco);
 			gpll3_clk_src.c.fmax[VDD_DIG_LOW] = 800000000;
diff --git a/drivers/clk/msm/clock-gcc-8953.c b/drivers/clk/msm/clock-gcc-8953.c
index 57adebf..0e8665c 100644
--- a/drivers/clk/msm/clock-gcc-8953.c
+++ b/drivers/clk/msm/clock-gcc-8953.c
@@ -411,6 +411,27 @@
 	F_END
 };
 
+static struct clk_freq_tbl ftbl_gfx3d_clk_src_sdm632[] = {
+	F_MM(  19200000, FIXED_CLK_SRC,                  xo,    1,    0,     0),
+	F_MM(  50000000, FIXED_CLK_SRC,  gpll0_main_div2_mm,    8,    0,     0),
+	F_MM(  80000000, FIXED_CLK_SRC,  gpll0_main_div2_mm,    5,    0,     0),
+	F_MM( 100000000, FIXED_CLK_SRC,  gpll0_main_div2_mm,    4,    0,     0),
+	F_MM( 133330000, FIXED_CLK_SRC,  gpll0_main_div2_mm,    3,    0,     0),
+	F_MM( 160000000, FIXED_CLK_SRC,  gpll0_main_div2_mm,  2.5,    0,     0),
+	F_MM( 200000000, FIXED_CLK_SRC,  gpll0_main_div2_mm,    2,    0,     0),
+	F_MM( 216000000, FIXED_CLK_SRC, gpll6_main_div2_gfx,  2.5,    0,     0),
+	F_MM( 266670000, FIXED_CLK_SRC,               gpll0,    3,    0,     0),
+	F_MM( 320000000, FIXED_CLK_SRC,               gpll0,  2.5,    0,     0),
+	F_MM( 400000000, FIXED_CLK_SRC,               gpll0,    2,    0,     0),
+	F_MM( 460800000, FIXED_CLK_SRC,       gpll4_out_aux,  2.5,    0,     0),
+	F_MM( 510000000,    1020000000,               gpll3,    1,    0,     0),
+	F_MM( 560000000,    1120000000,               gpll3,    1,    0,     0),
+	F_MM( 650000000,    1300000000,               gpll3,    1,    0,     0),
+	F_MM( 700000000,    1400000000,               gpll3,    1,    0,     0),
+	F_MM( 725000000,    1450000000,               gpll3,    1,    0,     0),
+
+	F_END
+};
 static struct rcg_clk gfx3d_clk_src = {
 	.cmd_rcgr_reg = GFX3D_CMD_RCGR,
 	.set_rate = set_rate_hid,
@@ -4104,6 +4125,11 @@
 	if (compat_bin)
 		gfx3d_clk_src.freq_tbl = ftbl_gfx3d_clk_src_sdm450;
 
+	compat_bin = of_device_is_compatible(pdev->dev.of_node,
+							"qcom,gcc-gfx-sdm632");
+	if (compat_bin)
+		gfx3d_clk_src.freq_tbl = ftbl_gfx3d_clk_src_sdm632;
+
 	ret = of_get_fmax_vdd_class(pdev, &gcc_oxili_gfx3d_clk.c,
 					"qcom,gfxfreq-corner");
 	if (ret) {
diff --git a/drivers/clk/msm/clock-pll.c b/drivers/clk/msm/clock-pll.c
index 26c04e5..381c8db 100644
--- a/drivers/clk/msm/clock-pll.c
+++ b/drivers/clk/msm/clock-pll.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -217,13 +217,46 @@
 	writel_relaxed(regval, pll_config);
 }
 
+static void pll_wait_for_lock(struct pll_clk *pll)
+{
+	int count;
+	u32 mode = readl_relaxed(PLL_MODE_REG(pll));
+	u32 lockmask = pll->masks.lock_mask ?: PLL_LOCKED_BIT;
+	u32 status_reg, user_reg, l_reg, m_reg, n_reg, config_reg;
+
+	/* Wait for pll to lock. */
+	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
+		if (readl_relaxed(PLL_STATUS_REG(pll)) & lockmask)
+			break;
+		udelay(1);
+	}
+
+	if (!(readl_relaxed(PLL_STATUS_REG(pll)) & lockmask)) {
+		mode = readl_relaxed(PLL_MODE_REG(pll));
+		status_reg = readl_relaxed(PLL_STATUS_REG(pll));
+		user_reg = readl_relaxed(PLL_CONFIG_REG(pll));
+		config_reg = readl_relaxed(PLL_CFG_CTL_REG(pll));
+		l_reg = readl_relaxed(PLL_L_REG(pll));
+		m_reg = readl_relaxed(PLL_M_REG(pll));
+		n_reg = readl_relaxed(PLL_N_REG(pll));
+		pr_err("count = %d\n", (int)count);
+		pr_err("mode register is 0x%x\n", mode);
+		pr_err("status register is 0x%x\n", status_reg);
+		pr_err("user control register is 0x%x\n", user_reg);
+		pr_err("config control register is 0x%x\n", config_reg);
+		pr_err("L value register is 0x%x\n", l_reg);
+		pr_err("M value register is 0x%x\n", m_reg);
+		pr_err("N value control register is 0x%x\n", n_reg);
+		panic("PLL %s didn't lock after enabling it!\n",
+				pll->c.dbg_name);
+	}
+}
+
 static int sr2_pll_clk_enable(struct clk *c)
 {
 	unsigned long flags;
 	struct pll_clk *pll = to_pll_clk(c);
-	int ret = 0, count;
 	u32 mode = readl_relaxed(PLL_MODE_REG(pll));
-	u32 lockmask = pll->masks.lock_mask ?: PLL_LOCKED_BIT;
 
 	spin_lock_irqsave(&pll_reg_lock, flags);
 
@@ -245,15 +278,7 @@
 	mode |= PLL_RESET_N;
 	writel_relaxed(mode, PLL_MODE_REG(pll));
 
-	/* Wait for pll to lock. */
-	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
-		if (readl_relaxed(PLL_STATUS_REG(pll)) & lockmask)
-			break;
-		udelay(1);
-	}
-
-	if (!(readl_relaxed(PLL_STATUS_REG(pll)) & lockmask))
-		pr_err("PLL %s didn't lock after enabling it!\n", c->dbg_name);
+	pll_wait_for_lock(pll);
 
 	/* Enable PLL output. */
 	mode |= PLL_OUTCTRL;
@@ -263,7 +288,50 @@
 	mb();
 
 	spin_unlock_irqrestore(&pll_reg_lock, flags);
-	return ret;
+	return 0;
+}
+
+static int acpu_pll_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	struct pll_clk *pll = to_pll_clk(c);
+	u32 mode = readl_relaxed(PLL_MODE_REG(pll));
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+
+	spm_event(pll->spm_ctrl.spm_base, pll->spm_ctrl.offset,
+				pll->spm_ctrl.event_bit, false);
+
+	/* Disable PLL bypass mode. */
+	mode |= PLL_BYPASSNL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/*
+	 * H/W requires a 5us delay between disabling the bypass and
+	 * de-asserting the reset. Delay 10us just to be safe.
+	 */
+	mb();
+	udelay(10);
+
+	/* De-assert active-low PLL reset. */
+	mode |= PLL_RESET_N;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* PLL H/W requires a 50uSec delay before polling lock_detect. */
+	mb();
+	udelay(50);
+
+	pll_wait_for_lock(pll);
+
+	/* Enable PLL output. */
+	mode |= PLL_OUTCTRL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Ensure that the write above goes through before returning. */
+	mb();
+
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+	return 0;
 }
 
 void __variable_rate_pll_init(struct clk *c)
@@ -886,6 +954,15 @@
 	.list_registers = local_pll_clk_list_registers,
 };
 
+const struct clk_ops clk_ops_acpu_pll = {
+	.enable = acpu_pll_clk_enable,
+	.disable = local_pll_clk_disable,
+	.set_rate = local_pll_clk_set_rate,
+	.round_rate = local_pll_clk_round_rate,
+	.handoff = local_pll_clk_handoff,
+	.list_registers = local_pll_clk_list_registers,
+};
+
 const struct clk_ops clk_ops_variable_rate_pll_hwfsm = {
 	.enable = variable_rate_pll_clk_enable_hwfsm,
 	.disable = variable_rate_pll_clk_disable_hwfsm,
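
pll_wait_for_lock() above polls the status register for a bounded number of iterations and dumps the PLL registers before panicking if the lock bit never appears. A condensed, hypothetical sketch of that poll-with-timeout idiom; the register accessors, bound, and names are placeholders:

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/types.h>

#define LOCK_WAIT_MAX_LOOPS 200	/* illustrative bound */

/* Sketch: poll @status for @lockmask, 1 us per iteration, and report
 * whether the bit was observed before the bound expired.
 */
static bool poll_for_lock(void __iomem *status, u32 lockmask)
{
	int count;

	for (count = LOCK_WAIT_MAX_LOOPS; count > 0; count--) {
		if (readl_relaxed(status) & lockmask)
			return true;
		udelay(1);
	}
	return !!(readl_relaxed(status) & lockmask);
}
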
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-12nm-util.c b/drivers/clk/msm/mdss/mdss-dsi-pll-12nm-util.c
index f2ed36c..0bdfeeb 100644
--- a/drivers/clk/msm/mdss/mdss-dsi-pll-12nm-util.c
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll-12nm-util.c
@@ -165,7 +165,8 @@
 	return sel;
 }
 
-static bool pll_is_pll_locked_12nm(struct mdss_pll_resources *pll)
+static bool pll_is_pll_locked_12nm(struct mdss_pll_resources *pll,
+	bool is_handoff)
 {
 	u32 status;
 	bool pll_locked;
@@ -177,8 +178,9 @@
 			((status & BIT(1)) > 0),
 			DSI_PLL_POLL_MAX_READS,
 			DSI_PLL_POLL_TIMEOUT_US)) {
-		pr_err("DSI PLL ndx=%d status=%x failed to Lock\n",
-			pll->index, status);
+		if (!is_handoff)
+			pr_err("DSI PLL ndx=%d status=%x failed to Lock\n",
+				pll->index, status);
 		pll_locked = false;
 	} else {
 		pll_locked = true;
@@ -213,7 +215,7 @@
 	wmb(); /* make sure register committed before enabling branch clocks */
 	udelay(50); /* h/w recommended delay */
 
-	if (!pll_is_pll_locked_12nm(pll)) {
+	if (!pll_is_pll_locked_12nm(pll, false)) {
 		pr_err("DSI PLL ndx=%d lock failed!\n",
 			pll->index);
 		rc = -EINVAL;
@@ -261,7 +263,7 @@
 	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_POWERUP_CTRL, data);
 	ndelay(500); /* h/w recommended delay */
 
-	if (!pll_is_pll_locked_12nm(pll)) {
+	if (!pll_is_pll_locked_12nm(pll, false)) {
 		pr_err("DSI PLL ndx=%d lock failed!\n",
 			pll->index);
 		rc = -EINVAL;
@@ -556,6 +558,142 @@
 	param->gmp_cntrl = 0x1;
 }
 
+static u32 __mdss_dsi_get_multi_intX100(u64 vco_rate, u32 *rem)
+{
+	u32 reminder = 0;
+	u64 temp = 0;
+	const u32 ref_clk_rate = 19200000, quarterX100 = 25;
+
+	temp = div_u64_rem(vco_rate, ref_clk_rate, &reminder);
+	temp *= 100;
+
+	/*
+	 * Multiplication integer needs to be floored in steps of 0.25
+	 * Hence multi_intX100 needs to be rounded off in steps of 25
+	 */
+	if (reminder < (ref_clk_rate / 4)) {
+		*rem = reminder;
+		return temp;
+	} else if ((reminder >= (ref_clk_rate / 4)) &&
+		reminder < (ref_clk_rate / 2)) {
+		*rem = (reminder - (ref_clk_rate / 4));
+		return (temp + quarterX100);
+	} else if ((reminder >= (ref_clk_rate / 2)) &&
+		(reminder < ((3 * ref_clk_rate) / 4))) {
+		*rem = (reminder - (ref_clk_rate / 2));
+		return (temp + (quarterX100 * 2));
+	}
+
+	*rem = (reminder - ((3 * ref_clk_rate) / 4));
+	return (temp + (quarterX100 * 3));
+}
+
+static u32 __calc_gcd(u32 num1, u32 num2)
+{
+	if (num2 != 0)
+		return __calc_gcd(num2, (num1 % num2));
+	else
+		return num1;
+}
+
+static void mdss_dsi_pll_12nm_calc_ssc(struct mdss_pll_resources *pll,
+					struct dsi_pll_db *pdb)
+{
+	struct dsi_pll_param *param = &pdb->param;
+	u64 multi_intX100 = 0, temp = 0;
+	u32 temp_rem1 = 0, temp_rem2 = 0;
+	const u64 power_2_17 = 131072, power_2_10 = 1024;
+	const u32 ref_clk_rate = 19200000;
+
+	multi_intX100 = __mdss_dsi_get_multi_intX100(pll->vco_current_rate,
+		&temp_rem1);
+
+	/* Calculation for mpll_ssc_peak_i */
+	temp = (multi_intX100 * pll->ssc_ppm * power_2_17);
+	temp = div_u64(temp, 100); /* 100 div for multi_intX100 */
+	param->mpll_ssc_peak_i =
+		(u32) div_u64(temp, 1000000); /*10^6 for SSC PPM */
+
+	/* Calculation for mpll_stepsize_i */
+	param->mpll_stepsize_i = (u32) div_u64((param->mpll_ssc_peak_i *
+		pll->ssc_freq * power_2_10), ref_clk_rate);
+
+	/* Calculation for mpll_mint_i */
+	param->mpll_mint_i = (u32) (div_u64((multi_intX100 * 4), 100) - 32);
+
+	/* Calculation for mpll_frac_den */
+	param->mpll_frac_den = (u32) div_u64(ref_clk_rate,
+		__calc_gcd((u32)pll->vco_current_rate, ref_clk_rate));
+
+	/* Calculation for mpll_frac_quot_i */
+	temp = (temp_rem1 * power_2_17);
+	param->mpll_frac_quot_i =
+		(u32) div_u64_rem(temp, ref_clk_rate, &temp_rem2);
+
+	/* Calculation for mpll_frac_rem */
+	param->mpll_frac_rem = (u32) div_u64(((u64)temp_rem2 *
+		param->mpll_frac_den), ref_clk_rate);
+
+	pr_debug("mpll_ssc_peak_i=%d mpll_stepsize_i=%d mpll_mint_i=%d\n",
+		param->mpll_ssc_peak_i, param->mpll_stepsize_i,
+		param->mpll_mint_i);
+	pr_debug("mpll_frac_den=%d mpll_frac_quot_i=%d mpll_frac_rem=%d",
+		param->mpll_frac_den, param->mpll_frac_quot_i,
+		param->mpll_frac_rem);
+}
+
+static void pll_db_commit_12nm_ssc(struct mdss_pll_resources *pll,
+					struct dsi_pll_db *pdb)
+{
+	void __iomem *pll_base = pll->pll_base;
+	struct dsi_pll_param *param = &pdb->param;
+	char data = 0;
+
+	MDSS_PLL_REG_W(pll_base, DSIPHY_SSC0, 0x27);
+
+	data = (param->mpll_mint_i & 0xff);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_SSC7, data);
+
+	data = ((param->mpll_mint_i & 0xff00) >> 8);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_SSC8, data);
+
+	data = (param->mpll_ssc_peak_i & 0xff);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_SSC1, data);
+
+	data = ((param->mpll_ssc_peak_i & 0xff00) >> 8);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_SSC2, data);
+
+	data = ((param->mpll_ssc_peak_i & 0xf0000) >> 16);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_SSC3, data);
+
+	data = (param->mpll_stepsize_i & 0xff);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_SSC4, data);
+
+	data = ((param->mpll_stepsize_i & 0xff00) >> 8);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_SSC5, data);
+
+	data = ((param->mpll_stepsize_i & 0x1f0000) >> 16);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_SSC6, data);
+
+	data = (param->mpll_frac_quot_i & 0xff);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_SSC10, data);
+
+	data = ((param->mpll_frac_quot_i & 0xff00) >> 8);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_SSC11, data);
+
+	data = (param->mpll_frac_rem & 0xff);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_SSC12, data);
+
+	data = ((param->mpll_frac_rem & 0xff00) >> 8);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_SSC13, data);
+
+	data = (param->mpll_frac_den & 0xff);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_SSC14, data);
+
+	data = ((param->mpll_frac_den & 0xff00) >> 8);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_SSC15, data);
+}
+
 static void pll_db_commit_12nm(struct mdss_pll_resources *pll,
 					struct dsi_pll_db *pdb)
 {
@@ -616,6 +754,9 @@
 	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PRO_DLY_RELOCK, 0x0c);
 	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_LOCK_DET_MODE_SEL, 0x02);
 
+	if (pll->ssc_en)
+		pll_db_commit_12nm_ssc(pll, pdb);
+
 	pr_debug("pll:%d\n", pll->index);
 	wmb(); /* make sure register committed before preparing the clocks */
 }
@@ -710,7 +851,7 @@
 		return ret;
 	}
 
-	if (pll_is_pll_locked_12nm(pll)) {
+	if (pll_is_pll_locked_12nm(pll, true)) {
 		pll->handoff_resources = true;
 		pll->pll_on = true;
 		c->rate = pll_vco_get_rate_12nm(c);
@@ -756,18 +897,27 @@
 					rc, pll->index);
 			goto error;
 		}
+	}
 
-		data = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_SYS_CTRL);
-		if (data & BIT(7)) { /* DSI PHY in LP-11 or ULPS */
-			rc = dsi_pll_relock(pll);
-			if (rc)
-				goto error;
-			else
-				goto end;
-		}
+	/*
+	 * For cases where  DSI PHY is already enabled like:
+	 * 1.) LP-11 during static screen
+	 * 2.) ULPS during static screen
+	 * 3.) Boot up with cont splash enabled where PHY is programmed in LK
+	 * Execute the Re-lock sequence to enable the DSI PLL.
+	 */
+	data = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_SYS_CTRL);
+	if (data & BIT(7)) {
+		rc = dsi_pll_relock(pll);
+		if (rc)
+			goto error;
+		else
+			goto end;
 	}
 
 	mdss_dsi_pll_12nm_calc_reg(pll, pdb);
+	if (pll->ssc_en)
+		mdss_dsi_pll_12nm_calc_ssc(pll, pdb);
 
 	/* commit DSI vco  */
 	pll_db_commit_12nm(pll, pdb);
@@ -802,6 +952,7 @@
 {
 	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
 	struct mdss_pll_resources *pll = vco->priv;
+	u32 data = 0;
 
 	if (!pll) {
 		pr_err("Dsi pll resources are not available\n");
@@ -813,7 +964,9 @@
 		return -EINVAL;
 	}
 
-	MDSS_PLL_REG_W(pll->pll_base, DSIPHY_SSC0, 0x40);
+	data = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_SSC0);
+	data |= BIT(6); /* enable GP_CLK_EN */
+	MDSS_PLL_REG_W(pll->pll_base, DSIPHY_SSC0, data);
 	wmb(); /* make sure register committed before enabling branch clocks */
 
 	return 0;
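
A note on the fractional-N math in the SSC calculation above: the leftover of
the VCO/reference division is scaled by 2^17 to form mpll_frac_quot_i, and the
part that still does not divide evenly is re-expressed against mpll_frac_den
(reference clock divided by gcd(vco, ref)) to form mpll_frac_rem. Below is a
minimal standalone sketch of that decomposition, assuming temp_rem1 holds the
remainder of vco_rate / ref_clk computed earlier in the driver; the example
rates and helper names are illustrative only.

/* Sketch of the 12nm PLL SSC fractional decomposition (illustrative only). */
#include <stdint.h>
#include <stdio.h>

static uint64_t gcd_u64(uint64_t a, uint64_t b)
{
	while (b) {
		uint64_t t = a % b;

		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	uint64_t vco_rate = 1500000000ULL;	/* example VCO rate, Hz */
	uint64_t ref_clk  = 19200000ULL;	/* example reference clock, Hz */

	uint64_t mult_int  = vco_rate / ref_clk;	  /* integer multiplier */
	uint64_t rem1      = vco_rate % ref_clk;	  /* assumed temp_rem1 */
	uint64_t frac_den  = ref_clk / gcd_u64(vco_rate, ref_clk);
	uint64_t scaled    = rem1 << 17;		  /* scaled by 2^17 */
	uint64_t frac_quot = scaled / ref_clk;		  /* mpll_frac_quot_i */
	uint64_t rem2      = scaled % ref_clk;
	uint64_t frac_rem  = (rem2 * frac_den) / ref_clk; /* mpll_frac_rem */

	printf("mult_int=%llu frac_den=%llu frac_quot=%llu frac_rem=%llu\n",
	       (unsigned long long)mult_int, (unsigned long long)frac_den,
	       (unsigned long long)frac_quot, (unsigned long long)frac_rem);
	return 0;
}
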
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.c b/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.c
index 210742b..5d2fa9a 100644
--- a/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.c
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.c
@@ -636,11 +636,13 @@
 };
 
 static struct clk_lookup mdss_dsi_pllcc_12nm[] = {
+	CLK_LIST(dsi0pll_vco_clk),
 	CLK_LIST(dsi0pll_byte_clk_src),
 	CLK_LIST(dsi0pll_pixel_clk_src),
 };
 
 static struct clk_lookup mdss_dsi_pllcc_12nm_1[] = {
+	CLK_LIST(dsi1pll_vco_clk),
 	CLK_LIST(dsi1pll_byte_clk_src),
 	CLK_LIST(dsi1pll_pixel_clk_src),
 };
@@ -650,6 +652,9 @@
 {
 	int rc = 0, ndx;
 	struct dsi_pll_db *pdb;
+	int const ssc_freq_min = 30000; /* min. recommended freq. value */
+	int const ssc_freq_max = 33000; /* max. recommended freq. value */
+	int const ssc_ppm_max = 5000; /* max. recommended ppm */
 
 	if (!pdev || !pdev->dev.of_node) {
 		pr_err("Invalid input parameters\n");
@@ -680,6 +685,21 @@
 	pixel_div_clk_src_ops = clk_ops_div;
 	pixel_div_clk_src_ops.prepare = dsi_pll_div_prepare;
 
+	if (pll_res->ssc_en) {
+		if (!pll_res->ssc_freq || (pll_res->ssc_freq < ssc_freq_min) ||
+			(pll_res->ssc_freq > ssc_freq_max)) {
+			pll_res->ssc_freq = ssc_freq_min;
+			pr_debug("SSC frequency out of recommended range. Set to default=%d\n",
+				pll_res->ssc_freq);
+		}
+
+		if (!pll_res->ssc_ppm || (pll_res->ssc_ppm > ssc_ppm_max)) {
+			pll_res->ssc_ppm = ssc_ppm_max;
+			pr_debug("SSC PPM out of recommended range. Set to default=%d\n",
+				pll_res->ssc_ppm);
+		}
+	}
+
 	/* Set client data to mux, div and vco clocks.  */
 	if (pll_res->index == DSI_PLL_1) {
 		dsi1pll_byte_clk_src.priv = pll_res;
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.h b/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.h
index 6912ff4..0974717 100644
--- a/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.h
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.h
@@ -37,6 +37,20 @@
 #define DSIPHY_PLL_LOOP_DIV_RATIO_1			0x2e8
 #define DSIPHY_SLEWRATE_DDL_CYC_FRQ_ADJ_1		0x328
 #define DSIPHY_SSC0					0x394
+#define DSIPHY_SSC7					0x3b0
+#define DSIPHY_SSC8					0x3b4
+#define DSIPHY_SSC1					0x398
+#define DSIPHY_SSC2					0x39c
+#define DSIPHY_SSC3					0x3a0
+#define DSIPHY_SSC4					0x3a4
+#define DSIPHY_SSC5					0x3a8
+#define DSIPHY_SSC6					0x3ac
+#define DSIPHY_SSC10					0x360
+#define DSIPHY_SSC11					0x364
+#define DSIPHY_SSC12					0x368
+#define DSIPHY_SSC13					0x36c
+#define DSIPHY_SSC14					0x370
+#define DSIPHY_SSC15					0x374
 #define DSIPHY_SSC9					0x3b8
 #define DSIPHY_STAT0					0x3e0
 #define DSIPHY_CTRL0					0x3e8
@@ -58,6 +74,14 @@
 	u32 post_div_mux;
 	u32 pixel_divhf;
 	u32 fsm_ovr_ctrl;
+
+	/* ssc_params */
+	u32 mpll_ssc_peak_i;
+	u32 mpll_stepsize_i;
+	u32 mpll_mint_i;
+	u32 mpll_frac_den;
+	u32 mpll_frac_quot_i;
+	u32 mpll_frac_rem;
 };
 
 enum {
diff --git a/drivers/clk/mvebu/armada-38x.c b/drivers/clk/mvebu/armada-38x.c
index 8bccf4e..9ff4ea6 100644
--- a/drivers/clk/mvebu/armada-38x.c
+++ b/drivers/clk/mvebu/armada-38x.c
@@ -46,10 +46,11 @@
 }
 
 static const u32 armada_38x_cpu_frequencies[] __initconst = {
-	0, 0, 0, 0,
-	1066 * 1000 * 1000, 0, 0, 0,
+	666 * 1000 * 1000,  0, 800 * 1000 * 1000, 0,
+	1066 * 1000 * 1000, 0, 1200 * 1000 * 1000, 0,
 	1332 * 1000 * 1000, 0, 0, 0,
-	1600 * 1000 * 1000,
+	1600 * 1000 * 1000, 0, 0, 0,
+	1866 * 1000 * 1000, 0, 0, 2000 * 1000 * 1000,
 };
 
 static u32 __init armada_38x_get_cpu_freq(void __iomem *sar)
@@ -75,11 +76,11 @@
 };
 
 static const int armada_38x_cpu_l2_ratios[32][2] __initconst = {
-	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+	{1, 2}, {0, 1}, {1, 2}, {0, 1},
+	{1, 2}, {0, 1}, {1, 2}, {0, 1},
 	{1, 2}, {0, 1}, {0, 1}, {0, 1},
 	{1, 2}, {0, 1}, {0, 1}, {0, 1},
-	{1, 2}, {0, 1}, {0, 1}, {0, 1},
-	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+	{1, 2}, {0, 1}, {0, 1}, {1, 2},
 	{0, 1}, {0, 1}, {0, 1}, {0, 1},
 	{0, 1}, {0, 1}, {0, 1}, {0, 1},
 	{0, 1}, {0, 1}, {0, 1}, {0, 1},
@@ -90,7 +91,7 @@
 	{1, 2}, {0, 1}, {0, 1}, {0, 1},
 	{1, 2}, {0, 1}, {0, 1}, {0, 1},
 	{1, 2}, {0, 1}, {0, 1}, {0, 1},
-	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+	{1, 2}, {0, 1}, {0, 1}, {7, 15},
 	{0, 1}, {0, 1}, {0, 1}, {0, 1},
 	{0, 1}, {0, 1}, {0, 1}, {0, 1},
 	{0, 1}, {0, 1}, {0, 1}, {0, 1},
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
index 836c25c..7f9ba03 100644
--- a/drivers/clk/qcom/camcc-sdm845.c
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -833,6 +833,7 @@
 };
 
 static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+	F(8000000, P_CAM_CC_PLL2_OUT_EVEN, 10, 1, 6),
 	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(24000000, P_CAM_CC_PLL2_OUT_EVEN, 10, 1, 2),
 	F(33333333, P_CAM_CC_PLL0_OUT_EVEN, 2, 1, 9),
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index aaf2324..f759978 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013, 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -185,6 +185,7 @@
 extern const struct clk_ops clk_pixel_ops;
 extern const struct clk_ops clk_gfx3d_ops;
 extern const struct clk_ops clk_dp_ops;
+extern const struct clk_ops clk_esc_ops;
 
 extern int clk_rcg2_get_dfs_clock_rate(struct clk_rcg2 *clk,
 				struct device *dev, u8 rcg_flags);
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 00989a8..057f0e1 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -1191,6 +1191,76 @@
 };
 EXPORT_SYMBOL_GPL(clk_gfx3d_ops);
 
+static int clk_esc_determine_rate(struct clk_hw *hw,
+				    struct clk_rate_request *req)
+{
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+	unsigned long parent_rate, div;
+	u32 mask = BIT(rcg->hid_width) - 1;
+	struct clk_hw *p;
+	unsigned long rate = req->rate;
+
+	if (rate == 0)
+		return -EINVAL;
+
+	p = req->best_parent_hw;
+	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);
+
+	div = ((2 * parent_rate) / rate) - 1;
+	div = min_t(u32, div, mask);
+
+	req->rate = clk_rcg2_calc_rate(parent_rate, 0, 0, 0, div);
+
+	return 0;
+}
+
+static int clk_esc_set_rate(struct clk_hw *hw, unsigned long rate,
+			 unsigned long parent_rate)
+{
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+	struct freq_tbl f = { 0 };
+	unsigned long div;
+	int i, num_parents = clk_hw_get_num_parents(hw);
+	u32 mask = BIT(rcg->hid_width) - 1;
+	u32 cfg;
+
+	div = ((2 * parent_rate) / rate) - 1;
+	div = min_t(u32, div, mask);
+
+	f.pre_div = div;
+
+	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+	cfg &= CFG_SRC_SEL_MASK;
+	cfg >>= CFG_SRC_SEL_SHIFT;
+
+	for (i = 0; i < num_parents; i++) {
+		if (cfg == rcg->parent_map[i].cfg) {
+			f.src = rcg->parent_map[i].src;
+			return clk_rcg2_configure(rcg, &f);
+		}
+	}
+
+	return -EINVAL;
+}
+
+static int clk_esc_set_rate_and_parent(struct clk_hw *hw,
+		unsigned long rate, unsigned long parent_rate, u8 index)
+{
+	return clk_esc_set_rate(hw, rate, parent_rate);
+}
+
+const struct clk_ops clk_esc_ops = {
+	.is_enabled = clk_rcg2_is_enabled,
+	.get_parent = clk_rcg2_get_parent,
+	.set_parent = clk_rcg2_set_parent,
+	.recalc_rate = clk_rcg2_recalc_rate,
+	.determine_rate = clk_esc_determine_rate,
+	.set_rate = clk_esc_set_rate,
+	.set_rate_and_parent = clk_esc_set_rate_and_parent,
+	.list_registers = clk_rcg2_list_registers,
+};
+EXPORT_SYMBOL(clk_esc_ops);
+
 /* Common APIs to be used for DFS based RCGR */
 static u8 clk_parent_index_pre_div_and_mode(struct clk_hw *hw, u32 offset,
 		u32 *mode, u32 *pre_div)
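
The new clk_esc_determine_rate()/clk_esc_set_rate() pair above selects a
half-integer pre-divider: div = (2 * parent_rate) / rate - 1, clamped to the
width of the RCG's hid field; the rate handed back is then recomputed through
clk_rcg2_calc_rate(), which for a zero m/n works out to
2 * parent_rate / (div + 1). A small standalone sketch of that arithmetic with
made-up rates; the function names below are illustrative, not part of the
driver.

#include <stdint.h>
#include <stdio.h>

/* Pre-divider selection mirroring the esc clk ops (illustrative only). */
static uint32_t esc_pre_div(uint64_t parent_rate, uint64_t rate,
			    unsigned int hid_width)
{
	uint32_t mask = (1U << hid_width) - 1;	/* widest encodable value */
	uint64_t div = (2 * parent_rate) / rate - 1;

	return div > mask ? mask : (uint32_t)div;
}

/* Rate produced by an encoded pre-divider: 2/(div + 1) of the parent. */
static uint64_t esc_calc_rate(uint64_t parent_rate, uint32_t pre_div)
{
	return (parent_rate * 2) / (pre_div + 1);
}

int main(void)
{
	uint64_t parent = 19200000ULL;	/* example parent rate, Hz */
	uint64_t want   = 9600000ULL;	/* example requested rate, Hz */
	uint32_t div    = esc_pre_div(parent, want, 5);

	printf("pre_div=%u gives %llu Hz\n", div,
	       (unsigned long long)esc_calc_rate(parent, div));
	return 0;
}
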
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index d426691..316ac39 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2013-2014, 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2014, 2017-2018,
+ * The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -30,7 +31,9 @@
 struct qcom_cc {
 	struct qcom_reset_controller reset;
 	struct clk_regmap **rclks;
+	struct clk_hw **hwclks;
 	size_t num_rclks;
+	size_t num_hwclks;
 };
 
 const
@@ -182,11 +185,14 @@
 	struct qcom_cc *cc = data;
 	unsigned int idx = clkspec->args[0];
 
-	if (idx >= cc->num_rclks) {
+	if (idx >= cc->num_rclks + cc->num_hwclks) {
 		pr_err("invalid index %u\n", idx);
 		return ERR_PTR(-EINVAL);
 	}
 
+	if (idx < cc->num_hwclks && cc->hwclks[idx])
+		return cc->hwclks[idx];
+
 	return cc->rclks[idx] ? &cc->rclks[idx]->hw : ERR_PTR(-ENOENT);
 }
 
@@ -199,7 +205,9 @@
 	struct qcom_cc *cc;
 	struct gdsc_desc *scd;
 	size_t num_clks = desc->num_clks;
+	size_t num_hwclks = desc->num_hwclks;
 	struct clk_regmap **rclks = desc->clks;
+	struct clk_hw **hwclks = desc->hwclks;
 
 	cc = devm_kzalloc(dev, sizeof(*cc), GFP_KERNEL);
 	if (!cc)
@@ -207,6 +215,17 @@
 
 	cc->rclks = rclks;
 	cc->num_rclks = num_clks;
+	cc->hwclks = hwclks;
+	cc->num_hwclks = num_hwclks;
+
+	for (i = 0; i < num_hwclks; i++) {
+		if (!hwclks[i])
+			continue;
+
+		ret = devm_clk_hw_register(dev, hwclks[i]);
+		if (ret)
+			return ret;
+	}
 
 	for (i = 0; i < num_clks; i++) {
 		if (!rclks[i])
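
With the hwclks table added above, consumers keep a single index space:
qcom_cc_clk_hw_get() first tries the plain clk_hw table at the requested index
and only then falls back to the regmap clock table. A standalone sketch of that
lookup order with made-up table contents; the extra bounds check on the regmap
table exists only to keep this example self-contained.

#include <stdio.h>

/* Illustrative stand-ins for the hw-only and regmap clock tables. */
static const char *hwclk_names[] = { "measure_only_cnoc_clk", NULL };
static const char *rclk_names[]  = { NULL, "gcc_gp1_clk", "gcc_gp2_clk" };

#define NUM_HWCLKS (sizeof(hwclk_names) / sizeof(hwclk_names[0]))
#define NUM_RCLKS  (sizeof(rclk_names) / sizeof(rclk_names[0]))

/* Hw clock at this index wins; otherwise fall back to the regmap clock. */
static const char *lookup(unsigned int idx)
{
	if (idx >= NUM_HWCLKS + NUM_RCLKS)
		return "(invalid index)";
	if (idx < NUM_HWCLKS && hwclk_names[idx])
		return hwclk_names[idx];
	if (idx < NUM_RCLKS && rclk_names[idx])
		return rclk_names[idx];
	return "(not provided)";
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < NUM_HWCLKS + NUM_RCLKS + 1; i++)
		printf("clock index %u -> %s\n", i, lookup(i));
	return 0;
}
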
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index 5e26763..29c4697 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -28,7 +28,9 @@
 struct qcom_cc_desc {
 	const struct regmap_config *config;
 	struct clk_regmap **clks;
+	struct clk_hw **hwclks;
 	size_t num_clks;
+	size_t num_hwclks;
 	const struct qcom_reset_map *resets;
 	size_t num_resets;
 	struct gdsc **gdscs;
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
index d4f27d7..0c49fa4 100644
--- a/drivers/clk/qcom/dispcc-sdm845.c
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -328,13 +328,12 @@
 	.mnd_width = 0,
 	.hid_width = 5,
 	.parent_map = disp_cc_parent_map_0,
-	.freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "disp_cc_mdss_esc0_clk_src",
 		.parent_names = disp_cc_parent_names_0,
 		.num_parents = 4,
 		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_esc_ops,
 		VDD_CX_FMAX_MAP1(
 			MIN, 19200000),
 	},
@@ -345,13 +344,12 @@
 	.mnd_width = 0,
 	.hid_width = 5,
 	.parent_map = disp_cc_parent_map_0,
-	.freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "disp_cc_mdss_esc1_clk_src",
 		.parent_names = disp_cc_parent_names_0,
 		.num_parents = 4,
 		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_esc_ops,
 		VDD_CX_FMAX_MAP1(
 			MIN, 19200000),
 	},
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 555b8bd..7ea5d9d 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,7 @@
 #include "reset.h"
 #include "clk-alpha-pll.h"
 #include "vdd-level-sdm845.h"
+#include "clk-voter.h"
 
 #define GCC_MMSS_MISC				0x09FFC
 #define GCC_GPU_MISC				0x71028
@@ -1505,6 +1506,11 @@
 	},
 };
 
+static DEFINE_CLK_VOTER(ufs_phy_axi_emmc_vote_clk,
+					gcc_aggre_ufs_phy_axi_clk, 0);
+static DEFINE_CLK_VOTER(ufs_phy_axi_ufs_vote_clk,
+					gcc_aggre_ufs_phy_axi_clk, 0);
+
 static struct clk_branch gcc_aggre_ufs_phy_axi_hw_ctl_clk = {
 	.halt_reg = 0x82024,
 	.clkr = {
@@ -3780,6 +3786,8 @@
 	[MEASURE_ONLY_CNOC_CLK] = &measure_only_cnoc_clk.hw,
 	[MEASURE_ONLY_BIMC_CLK] = &measure_only_bimc_clk.hw,
 	[MEASURE_ONLY_IPA_2X_CLK] = &measure_only_ipa_2x_clk.hw,
+	[UFS_PHY_AXI_EMMC_VOTE_CLK] = &ufs_phy_axi_emmc_vote_clk.hw,
+	[UFS_PHY_AXI_UFS_VOTE_CLK] = &ufs_phy_axi_ufs_vote_clk.hw,
 };
 
 static struct clk_regmap *gcc_sdm845_clocks[] = {
@@ -4061,6 +4069,8 @@
 	.config = &gcc_sdm845_regmap_config,
 	.clks = gcc_sdm845_clocks,
 	.num_clks = ARRAY_SIZE(gcc_sdm845_clocks),
+	.hwclks = gcc_sdm845_hws,
+	.num_hwclks = ARRAY_SIZE(gcc_sdm845_hws),
 	.resets = gcc_sdm845_resets,
 	.num_resets = ARRAY_SIZE(gcc_sdm845_resets),
 };
@@ -4279,9 +4289,8 @@
 
 static int gcc_sdm845_probe(struct platform_device *pdev)
 {
-	struct clk *clk;
 	struct regmap *regmap;
-	int i, ret = 0;
+	int ret = 0;
 
 	regmap = qcom_cc_map(pdev, &gcc_sdm845_desc);
 	if (IS_ERR(regmap))
@@ -4307,13 +4316,6 @@
 	if (ret)
 		return ret;
 
-	/* Register the dummy measurement clocks */
-	for (i = 0; i < ARRAY_SIZE(gcc_sdm845_hws); i++) {
-		clk = devm_clk_register(&pdev->dev, gcc_sdm845_hws[i]);
-		if (IS_ERR(clk))
-			return PTR_ERR(clk);
-	}
-
 	ret = qcom_cc_really_probe(pdev, &gcc_sdm845_desc, regmap);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to register GCC clocks\n");
diff --git a/drivers/clk/qcom/gcc-sdxpoorwills.c b/drivers/clk/qcom/gcc-sdxpoorwills.c
index b52002f..4e6599c 100644
--- a/drivers/clk/qcom/gcc-sdxpoorwills.c
+++ b/drivers/clk/qcom/gcc-sdxpoorwills.c
@@ -143,11 +143,6 @@
 			.parent_names = (const char *[]){ "bi_tcxo" },
 			.num_parents = 1,
 			.ops = &clk_trion_fixed_pll_ops,
-			VDD_CX_FMAX_MAP4(
-				MIN, 615000000,
-				LOW, 1066000000,
-				LOW_L1, 1600000000,
-				NOMINAL, 2000000000),
 		},
 	},
 };
diff --git a/drivers/clk/qcom/gdsc-regulator.c b/drivers/clk/qcom/gdsc-regulator.c
index 0899138..0d7ed80 100644
--- a/drivers/clk/qcom/gdsc-regulator.c
+++ b/drivers/clk/qcom/gdsc-regulator.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -34,6 +34,10 @@
 #define PWR_ON_MASK		BIT(31)
 #define CLK_DIS_WAIT_MASK	(0xF << 12)
 #define CLK_DIS_WAIT_SHIFT	(12)
+#define EN_FEW_WAIT_MASK	(0xF << 16)
+#define EN_FEW_WAIT_SHIFT	(16)
+#define EN_REST_WAIT_MASK	(0xF << 20)
+#define EN_REST_WAIT_SHIFT	(20)
 #define SW_OVERRIDE_MASK	BIT(2)
 #define HW_CONTROL_MASK		BIT(1)
 #define SW_COLLAPSE_MASK	BIT(0)
@@ -535,6 +539,7 @@
 	struct resource *res;
 	struct gdsc *sc;
 	uint32_t regval, clk_dis_wait_val = 0;
+	uint32_t en_few_wait_val, en_rest_wait_val;
 	bool retain_mem, retain_periph, support_hw_trigger, prop_val;
 	int i, ret;
 	u32 timeout;
@@ -681,6 +686,22 @@
 		regval |= clk_dis_wait_val;
 	}
 
+	if (!of_property_read_u32(pdev->dev.of_node, "qcom,en-few-wait-val",
+				  &en_few_wait_val)) {
+		en_few_wait_val <<= EN_FEW_WAIT_SHIFT;
+
+		regval &= ~(EN_FEW_WAIT_MASK);
+		regval |= en_few_wait_val;
+	}
+
+	if (!of_property_read_u32(pdev->dev.of_node, "qcom,en-rest-wait-val",
+				  &en_rest_wait_val)) {
+		en_rest_wait_val <<= EN_REST_WAIT_SHIFT;
+
+		regval &= ~(EN_REST_WAIT_MASK);
+		regval |= en_rest_wait_val;
+	}
+
 	regmap_write(sc->regmap, REG_OFFSET, regval);
 
 	sc->no_status_check_on_disable =
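
The new qcom,en-few-wait-val and qcom,en-rest-wait-val properties are packed
into 4-bit GDSCR fields at bit offsets 16 and 20, next to the existing
CLK_DIS_WAIT field. A small sketch of that read-modify-write packing; the
masks and shifts are the ones defined in the patch, while the starting
register value is invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define EN_FEW_WAIT_MASK	(0xF << 16)
#define EN_FEW_WAIT_SHIFT	16
#define EN_REST_WAIT_MASK	(0xF << 20)
#define EN_REST_WAIT_SHIFT	20

/* Insert a 4-bit wait value into its field without disturbing the rest. */
static uint32_t set_field(uint32_t regval, uint32_t val,
			  uint32_t mask, unsigned int shift)
{
	return (regval & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t regval = 0x00222002;	/* made-up GDSCR contents */

	regval = set_field(regval, 0x5, EN_FEW_WAIT_MASK, EN_FEW_WAIT_SHIFT);
	regval = set_field(regval, 0x8, EN_REST_WAIT_MASK, EN_REST_WAIT_SHIFT);

	printf("GDSCR = 0x%08x\n", regval);
	return 0;
}
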
diff --git a/drivers/clk/renesas/clk-rcar-gen2.c b/drivers/clk/renesas/clk-rcar-gen2.c
index 00e6aba..c55d5fe 100644
--- a/drivers/clk/renesas/clk-rcar-gen2.c
+++ b/drivers/clk/renesas/clk-rcar-gen2.c
@@ -271,11 +271,14 @@
 	unsigned int extal_div;
 	unsigned int pll1_mult;
 	unsigned int pll3_mult;
+	unsigned int pll0_mult;		/* For R-Car V2H and E2 only */
 };
 
 static const struct cpg_pll_config cpg_pll_configs[8] __initconst = {
-	{ 1, 208, 106 }, { 1, 208,  88 }, { 1, 156,  80 }, { 1, 156,  66 },
-	{ 2, 240, 122 }, { 2, 240, 102 }, { 2, 208, 106 }, { 2, 208,  88 },
+	{ 1, 208, 106, 200 }, { 1, 208,  88, 200 },
+	{ 1, 156,  80, 150 }, { 1, 156,  66, 150 },
+	{ 2, 240, 122, 230 }, { 2, 240, 102, 230 },
+	{ 2, 208, 106, 200 }, { 2, 208,  88, 200 },
 };
 
 /* SDHI divisors */
@@ -297,6 +300,12 @@
 
 static u32 cpg_mode __initdata;
 
+static const char * const pll0_mult_match[] = {
+	"renesas,r8a7792-cpg-clocks",
+	"renesas,r8a7794-cpg-clocks",
+	NULL
+};
+
 static struct clk * __init
 rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg,
 			     const struct cpg_pll_config *config,
@@ -317,9 +326,15 @@
 		 * clock implementation and we currently have no need to change
 		 * the multiplier value.
 		 */
-		u32 value = clk_readl(cpg->reg + CPG_PLL0CR);
+		if (of_device_compatible_match(np, pll0_mult_match)) {
+			/* R-Car V2H and E2 do not have PLL0CR */
+			mult = config->pll0_mult;
+			div = 3;
+		} else {
+			u32 value = clk_readl(cpg->reg + CPG_PLL0CR);
+			mult = ((value >> 24) & ((1 << 7) - 1)) + 1;
+		}
 		parent_name = "main";
-		mult = ((value >> 24) & ((1 << 7) - 1)) + 1;
 	} else if (!strcmp(name, "pll1")) {
 		parent_name = "main";
 		mult = config->pll1_mult / 2;
diff --git a/drivers/clk/renesas/clk-sh73a0.c b/drivers/clk/renesas/clk-sh73a0.c
index eea38f6..3892346 100644
--- a/drivers/clk/renesas/clk-sh73a0.c
+++ b/drivers/clk/renesas/clk-sh73a0.c
@@ -46,7 +46,7 @@
 	unsigned int shift;
 };
 
-static struct div4_clk div4_clks[] = {
+static const struct div4_clk div4_clks[] = {
 	{ "zg", "pll0", CPG_FRQCRA, 16 },
 	{ "m3", "pll1", CPG_FRQCRA, 12 },
 	{ "b",  "pll1", CPG_FRQCRA,  8 },
@@ -79,7 +79,7 @@
 {
 	const struct clk_div_table *table = NULL;
 	unsigned int shift, reg, width;
-	const char *parent_name;
+	const char *parent_name = NULL;
 	unsigned int mult = 1;
 	unsigned int div = 1;
 
@@ -135,7 +135,7 @@
 		shift = 24;
 		width = 5;
 	} else {
-		struct div4_clk *c;
+		const struct div4_clk *c;
 
 		for (c = div4_clks; c->name; c++) {
 			if (!strcmp(name, c->name)) {
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
index 077fcdc..fe7d9ed 100644
--- a/drivers/clk/rockchip/clk-mmc-phase.c
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -58,6 +58,12 @@
 	u16 degrees;
 	u32 delay_num = 0;
 
+	/* See the comment for rockchip_mmc_set_phase below */
+	if (!rate) {
+		pr_err("%s: invalid clk rate\n", __func__);
+		return -EINVAL;
+	}
+
 	raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
 
 	degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90;
@@ -84,6 +90,23 @@
 	u32 raw_value;
 	u32 delay;
 
+	/*
+	 * The below calculation is based on the output clock from
+	 * MMC host to the card, which expects the phase clock inherits
+	 * the clock rate from its parent, namely the output clock
+	 * provider of MMC host. However, things may go wrong if
+	 * (1) It is orphan.
+	 * (2) It is assigned to the wrong parent.
+	 *
+	 * This check helps debug case (1), which seems to be the
+	 * most likely problem we often face and which makes it difficult
+	 * for people to debug unstable mmc tuning results.
+	 */
+	if (!rate) {
+		pr_err("%s: invalid clk rate\n", __func__);
+		return -EINVAL;
+	}
+
 	nineties = degrees / 90;
 	remainder = (degrees % 90);
 
diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
index db6e5a9..53f16ef 100644
--- a/drivers/clk/rockchip/clk-rk3228.c
+++ b/drivers/clk/rockchip/clk-rk3228.c
@@ -369,7 +369,7 @@
 			RK2928_CLKSEL_CON(23), 5, 2, MFLAGS, 0, 6, DFLAGS,
 			RK2928_CLKGATE_CON(2), 15, GFLAGS),
 
-	COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0,
+	COMPOSITE(SCLK_SDMMC, "sclk_sdmmc", mux_mmc_src_p, 0,
 			RK2928_CLKSEL_CON(11), 8, 2, MFLAGS, 0, 8, DFLAGS,
 			RK2928_CLKGATE_CON(2), 11, GFLAGS),
 
diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
index 1b81e28..ed36728 100644
--- a/drivers/clk/samsung/clk-exynos3250.c
+++ b/drivers/clk/samsung/clk-exynos3250.c
@@ -698,7 +698,7 @@
 	PLL_36XX_RATE(144000000,  96, 2, 3,     0),
 	PLL_36XX_RATE( 96000000, 128, 2, 4,     0),
 	PLL_36XX_RATE( 84000000, 112, 2, 4,     0),
-	PLL_36XX_RATE( 80000004, 106, 2, 4, 43691),
+	PLL_36XX_RATE( 80000003, 106, 2, 4, 43691),
 	PLL_36XX_RATE( 73728000,  98, 2, 4, 19923),
 	PLL_36XX_RATE( 67737598, 270, 3, 5, 62285),
 	PLL_36XX_RATE( 65535999, 174, 2, 5, 49982),
@@ -734,7 +734,7 @@
 	PLL_36XX_RATE(148352005,  98, 2, 3, 59070),
 	PLL_36XX_RATE(108000000, 144, 2, 4,     0),
 	PLL_36XX_RATE( 74250000,  99, 2, 4,     0),
-	PLL_36XX_RATE( 74176002,  98, 3, 4, 59070),
+	PLL_36XX_RATE( 74176002,  98, 2, 4, 59070),
 	PLL_36XX_RATE( 54054000, 216, 3, 5, 14156),
 	PLL_36XX_RATE( 54000000, 144, 2, 5,     0),
 	{ /* sentinel */ }
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 27a227d..6a0cb8a 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -711,13 +711,13 @@
 	/* sorted in descending order */
 	/* PLL_36XX_RATE(rate, m, p, s, k) */
 	PLL_36XX_RATE(192000000, 64, 2, 2, 0),
-	PLL_36XX_RATE(180633600, 90, 3, 2, 20762),
+	PLL_36XX_RATE(180633605, 90, 3, 2, 20762),
 	PLL_36XX_RATE(180000000, 90, 3, 2, 0),
 	PLL_36XX_RATE(73728000, 98, 2, 4, 19923),
-	PLL_36XX_RATE(67737600, 90, 2, 4, 20762),
+	PLL_36XX_RATE(67737602, 90, 2, 4, 20762),
 	PLL_36XX_RATE(49152000, 98, 3, 4, 19923),
-	PLL_36XX_RATE(45158400, 90, 3, 4, 20762),
-	PLL_36XX_RATE(32768000, 131, 3, 5, 4719),
+	PLL_36XX_RATE(45158401, 90, 3, 4, 20762),
+	PLL_36XX_RATE(32768001, 131, 3, 5, 4719),
 	{ },
 };
 
diff --git a/drivers/clk/samsung/clk-exynos5260.c b/drivers/clk/samsung/clk-exynos5260.c
index fd1d9bf..8eae175 100644
--- a/drivers/clk/samsung/clk-exynos5260.c
+++ b/drivers/clk/samsung/clk-exynos5260.c
@@ -65,7 +65,7 @@
 	PLL_36XX_RATE(480000000, 160, 2, 2, 0),
 	PLL_36XX_RATE(432000000, 144, 2, 2, 0),
 	PLL_36XX_RATE(400000000, 200, 3, 2, 0),
-	PLL_36XX_RATE(394073130, 459, 7, 2, 49282),
+	PLL_36XX_RATE(394073128, 459, 7, 2, 49282),
 	PLL_36XX_RATE(333000000, 111, 2, 2, 0),
 	PLL_36XX_RATE(300000000, 100, 2, 2, 0),
 	PLL_36XX_RATE(266000000, 266, 3, 3, 0),
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index 2fe0573..09cdd35 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -725,7 +725,7 @@
 	PLL_35XX_RATE(800000000U,  400, 6,  1),
 	PLL_35XX_RATE(733000000U,  733, 12, 1),
 	PLL_35XX_RATE(700000000U,  175, 3,  1),
-	PLL_35XX_RATE(667000000U,  222, 4,  1),
+	PLL_35XX_RATE(666000000U,  222, 4,  1),
 	PLL_35XX_RATE(633000000U,  211, 4,  1),
 	PLL_35XX_RATE(600000000U,  500, 5,  2),
 	PLL_35XX_RATE(552000000U,  460, 5,  2),
@@ -751,12 +751,12 @@
 /* AUD_PLL */
 static const struct samsung_pll_rate_table exynos5443_aud_pll_rates[] __initconst = {
 	PLL_36XX_RATE(400000000U, 200, 3, 2,      0),
-	PLL_36XX_RATE(393216000U, 197, 3, 2, -25690),
+	PLL_36XX_RATE(393216003U, 197, 3, 2, -25690),
 	PLL_36XX_RATE(384000000U, 128, 2, 2,      0),
-	PLL_36XX_RATE(368640000U, 246, 4, 2, -15729),
-	PLL_36XX_RATE(361507200U, 181, 3, 2, -16148),
-	PLL_36XX_RATE(338688000U, 113, 2, 2,  -6816),
-	PLL_36XX_RATE(294912000U,  98, 1, 3,  19923),
+	PLL_36XX_RATE(368639991U, 246, 4, 2, -15729),
+	PLL_36XX_RATE(361507202U, 181, 3, 2, -16148),
+	PLL_36XX_RATE(338687988U, 113, 2, 2,  -6816),
+	PLL_36XX_RATE(294912002U,  98, 1, 3,  19923),
 	PLL_36XX_RATE(288000000U,  96, 1, 3,      0),
 	PLL_36XX_RATE(252000000U,  84, 1, 3,      0),
 	{ /* sentinel */ }
diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
index 5931a41..bbfa57b 100644
--- a/drivers/clk/samsung/clk-exynos7.c
+++ b/drivers/clk/samsung/clk-exynos7.c
@@ -140,7 +140,7 @@
 };
 
 static const struct samsung_pll_rate_table pll1460x_24mhz_tbl[] __initconst = {
-	PLL_36XX_RATE(491520000, 20, 1, 0, 31457),
+	PLL_36XX_RATE(491519897, 20, 1, 0, 31457),
 	{},
 };
 
diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c
index d7a1e77..5f50037 100644
--- a/drivers/clk/samsung/clk-s3c2410.c
+++ b/drivers/clk/samsung/clk-s3c2410.c
@@ -168,7 +168,7 @@
 	PLL_35XX_RATE(226000000, 105, 1, 1),
 	PLL_35XX_RATE(210000000, 132, 2, 1),
 	/* 2410 common */
-	PLL_35XX_RATE(203000000, 161, 3, 1),
+	PLL_35XX_RATE(202800000, 161, 3, 1),
 	PLL_35XX_RATE(192000000, 88, 1, 1),
 	PLL_35XX_RATE(186000000, 85, 1, 1),
 	PLL_35XX_RATE(180000000, 82, 1, 1),
@@ -178,18 +178,18 @@
 	PLL_35XX_RATE(147000000, 90, 2, 1),
 	PLL_35XX_RATE(135000000, 82, 2, 1),
 	PLL_35XX_RATE(124000000, 116, 1, 2),
-	PLL_35XX_RATE(118000000, 150, 2, 2),
+	PLL_35XX_RATE(118500000, 150, 2, 2),
 	PLL_35XX_RATE(113000000, 105, 1, 2),
-	PLL_35XX_RATE(101000000, 127, 2, 2),
+	PLL_35XX_RATE(101250000, 127, 2, 2),
 	PLL_35XX_RATE(90000000, 112, 2, 2),
-	PLL_35XX_RATE(85000000, 105, 2, 2),
+	PLL_35XX_RATE(84750000, 105, 2, 2),
 	PLL_35XX_RATE(79000000, 71, 1, 2),
-	PLL_35XX_RATE(68000000, 82, 2, 2),
-	PLL_35XX_RATE(56000000, 142, 2, 3),
+	PLL_35XX_RATE(67500000, 82, 2, 2),
+	PLL_35XX_RATE(56250000, 142, 2, 3),
 	PLL_35XX_RATE(48000000, 120, 2, 3),
-	PLL_35XX_RATE(51000000, 161, 3, 3),
+	PLL_35XX_RATE(50700000, 161, 3, 3),
 	PLL_35XX_RATE(45000000, 82, 1, 3),
-	PLL_35XX_RATE(34000000, 82, 2, 3),
+	PLL_35XX_RATE(33750000, 82, 2, 3),
 	{ /* sentinel */ },
 };
 
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index b385536..66d1fc7 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -1145,6 +1145,8 @@
 	.enable = clk_pllu_enable,
 	.disable = clk_pll_disable,
 	.recalc_rate = clk_pll_recalc_rate,
+	.round_rate = clk_pll_round_rate,
+	.set_rate = clk_pll_set_rate,
 };
 
 static int _pll_fixed_mdiv(struct tegra_clk_pll_params *pll_params,
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index cd6d307..91cf857 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -331,6 +331,15 @@
 	  This option enables support for reading the ARM architected timer's
 	  virtual counter in userspace.
 
+config MSM_TIMER_LEAP
+	bool "ARCH TIMER counter rollover"
+	default n
+	depends on ARM_ARCH_TIMER && ARM64
+	help
+	  This option enables a check for rollover of the least significant
+	  32 bits of the counter. On every counter read, if all of the least
+	  significant 32 bits are set, the counter is read again.
+
 config ARM_GLOBAL_TIMER
 	bool "Support for the ARM global timer" if COMPILE_TEST
 	select CLKSRC_OF if OF
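
The MSM_TIMER_LEAP help text above describes re-reading the architected
counter whenever its least significant 32 bits all read back as ones, so a
sample taken exactly at a 32-bit rollover boundary is never returned. A hedged
sketch of that check; the counter accessor below is a placeholder, not the
real arch timer read function.

#include <stdint.h>
#include <stdio.h>

#define LSB_32_MASK	0xffffffffULL

/* Placeholder for the real architected counter read (simulated here). */
static uint64_t raw_counter_read(void)
{
	static uint64_t fake = 0xffffffffULL;	/* starts right at rollover */

	return fake++;
}

/* Re-read while the low 32 bits are all set, per the help text above. */
static uint64_t counter_read_checked(void)
{
	uint64_t cnt;

	do {
		cnt = raw_counter_read();
	} while ((cnt & LSB_32_MASK) == LSB_32_MASK);

	return cnt;
}

int main(void)
{
	printf("counter = 0x%llx\n",
	       (unsigned long long)counter_read_checked());
	return 0;
}
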
diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c
index 738515b..a22c1d7 100644
--- a/drivers/clocksource/fsl_ftm_timer.c
+++ b/drivers/clocksource/fsl_ftm_timer.c
@@ -281,7 +281,7 @@
 
 static unsigned long __init ftm_clk_init(struct device_node *np)
 {
-	unsigned long freq;
+	long freq;
 
 	freq = __ftm_clk_init(np, "ftm-evt-counter-en", "ftm-evt");
 	if (freq <= 0)
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 597aa57..379080c 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -45,6 +45,15 @@
 
 	  If in doubt, say N.
 
+config CPU_FREQ_TIMES
+	bool "CPU frequency time-in-state statistics"
+	default y
+	help
+	  This driver exports CPU time-in-state information through the procfs
+	  file system.
+
+	  If in doubt, say N.
+
 choice
 	prompt "Default CPUFreq governor"
 	default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index bf98b28..ba9f9405 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -4,7 +4,10 @@
 # CPUfreq stats
 obj-$(CONFIG_CPU_FREQ_STAT)             += cpufreq_stats.o
 
-# CPUfreq governors 
+# CPUfreq times
+obj-$(CONFIG_CPU_FREQ_TIMES)		+= cpufreq_times.o
+
+# CPUfreq governors
 obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE)	+= cpufreq_performance.o
 obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE)	+= cpufreq_powersave.o
 obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE)	+= cpufreq_userspace.o
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 4852d9e..9f09752 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -151,9 +151,19 @@
 	policy->cpuinfo.transition_latency = cppc_get_transition_latency(cpu_num);
 	policy->shared_type = cpu->shared_type;
 
-	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
+	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
+		int i;
+
 		cpumask_copy(policy->cpus, cpu->shared_cpu_map);
-	else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
+
+		for_each_cpu(i, policy->cpus) {
+			if (unlikely(i == policy->cpu))
+				continue;
+
+			memcpy(&all_cpu_data[i]->perf_caps, &cpu->perf_caps,
+			       sizeof(cpu->perf_caps));
+		}
+	} else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
 		/* Support only SW_ANY for now. */
 		pr_debug("Unsupported CPU co-ord type\n");
 		return -EFAULT;
@@ -218,8 +228,13 @@
 	return ret;
 
 out:
-	for_each_possible_cpu(i)
-		kfree(all_cpu_data[i]);
+	for_each_possible_cpu(i) {
+		cpu = all_cpu_data[i];
+		if (!cpu)
+			break;
+		free_cpumask_var(cpu->shared_cpu_map);
+		kfree(cpu);
+	}
 
 	kfree(all_cpu_data);
 	return -ENODEV;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 8059ef9..f474e70 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -19,6 +19,7 @@
 
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
+#include <linux/cpufreq_times.h>
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/init.h>
@@ -444,6 +445,7 @@
 			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
 		trace_cpu_frequency(freqs->new, freqs->cpu);
 		cpufreq_stats_record_transition(policy, freqs->new);
+		cpufreq_times_record_transition(freqs);
 		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
 				CPUFREQ_POSTCHANGE, freqs);
 		if (likely(policy) && likely(policy->cpu == freqs->cpu))
@@ -1373,6 +1375,7 @@
 			goto out_exit_policy;
 
 		cpufreq_stats_create_table(policy);
+		cpufreq_times_create_policy(policy);
 		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 				CPUFREQ_CREATE_POLICY, policy);
 
@@ -1406,14 +1409,14 @@
 	return 0;
 
 out_exit_policy:
+	for_each_cpu(j, policy->real_cpus)
+		remove_cpu_dev_symlink(policy, get_cpu_device(j));
+
 	up_write(&policy->rwsem);
 
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(policy);
 
-	for_each_cpu(j, policy->real_cpus)
-		remove_cpu_dev_symlink(policy, get_cpu_device(j));
-
 out_free_policy:
 	cpufreq_policy_free(policy, !new_policy);
 	return ret;
@@ -1503,6 +1506,9 @@
 		policy->freq_table = NULL;
 	}
 
+	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+			CPUFREQ_STOP, policy);
+
 unlock:
 	up_write(&policy->rwsem);
 	return 0;
@@ -1651,7 +1657,10 @@
 
 	if (policy) {
 		down_read(&policy->rwsem);
-		ret_freq = __cpufreq_get(policy);
+
+		if (!policy_is_inactive(policy))
+			ret_freq = __cpufreq_get(policy);
+
 		up_read(&policy->rwsem);
 
 		cpufreq_cpu_put(policy);
@@ -2394,6 +2403,11 @@
 
 	down_write(&policy->rwsem);
 
+	if (policy_is_inactive(policy)) {
+		ret = -ENODEV;
+		goto unlock;
+	}
+
 	pr_debug("updating policy for CPU %u\n", cpu);
 	memcpy(&new_policy, policy, sizeof(*policy));
 	new_policy.min = policy->user_policy.min;
diff --git a/drivers/cpufreq/cpufreq_times.c b/drivers/cpufreq/cpufreq_times.c
new file mode 100644
index 0000000..6254f45
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_times.c
@@ -0,0 +1,461 @@
+/* drivers/cpufreq/cpufreq_times.c
+ *
+ * Copyright (C) 2018 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/cpufreq_times.h>
+#include <linux/cputime.h>
+#include <linux/hashtable.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+
+#define UID_HASH_BITS 10
+
+static DECLARE_HASHTABLE(uid_hash_table, UID_HASH_BITS);
+
+static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */
+static DEFINE_SPINLOCK(uid_lock); /* uid_hash_table */
+
+struct uid_entry {
+	uid_t uid;
+	unsigned int max_state;
+	struct hlist_node hash;
+	struct rcu_head rcu;
+	u64 time_in_state[0];
+};
+
+/**
+ * struct cpu_freqs - per-cpu frequency information
+ * @offset: start of these freqs' stats in task time_in_state array
+ * @max_state: number of entries in freq_table
+ * @last_index: index in freq_table of last frequency switched to
+ * @freq_table: list of available frequencies
+ */
+struct cpu_freqs {
+	unsigned int offset;
+	unsigned int max_state;
+	unsigned int last_index;
+	unsigned int freq_table[0];
+};
+
+static struct cpu_freqs *all_freqs[NR_CPUS];
+
+static unsigned int next_offset;
+
+
+/* Caller must hold rcu_read_lock() */
+static struct uid_entry *find_uid_entry_rcu(uid_t uid)
+{
+	struct uid_entry *uid_entry;
+
+	hash_for_each_possible_rcu(uid_hash_table, uid_entry, hash, uid) {
+		if (uid_entry->uid == uid)
+			return uid_entry;
+	}
+	return NULL;
+}
+
+/* Caller must hold uid lock */
+static struct uid_entry *find_uid_entry_locked(uid_t uid)
+{
+	struct uid_entry *uid_entry;
+
+	hash_for_each_possible(uid_hash_table, uid_entry, hash, uid) {
+		if (uid_entry->uid == uid)
+			return uid_entry;
+	}
+	return NULL;
+}
+
+/* Caller must hold uid lock */
+static struct uid_entry *find_or_register_uid_locked(uid_t uid)
+{
+	struct uid_entry *uid_entry, *temp;
+	unsigned int max_state = READ_ONCE(next_offset);
+	size_t alloc_size = sizeof(*uid_entry) + max_state *
+		sizeof(uid_entry->time_in_state[0]);
+
+	uid_entry = find_uid_entry_locked(uid);
+	if (uid_entry) {
+		if (uid_entry->max_state == max_state)
+			return uid_entry;
+		/* uid_entry->time_in_state is too small to track all freqs, so
+		 * expand it.
+		 */
+		temp = __krealloc(uid_entry, alloc_size, GFP_ATOMIC);
+		if (!temp)
+			return uid_entry;
+		temp->max_state = max_state;
+		memset(temp->time_in_state + uid_entry->max_state, 0,
+		       (max_state - uid_entry->max_state) *
+		       sizeof(uid_entry->time_in_state[0]));
+		if (temp != uid_entry) {
+			hlist_replace_rcu(&uid_entry->hash, &temp->hash);
+			kfree_rcu(uid_entry, rcu);
+		}
+		return temp;
+	}
+
+	uid_entry = kzalloc(alloc_size, GFP_ATOMIC);
+	if (!uid_entry)
+		return NULL;
+
+	uid_entry->uid = uid;
+	uid_entry->max_state = max_state;
+
+	hash_add_rcu(uid_hash_table, &uid_entry->hash, uid);
+
+	return uid_entry;
+}
+
+static bool freq_index_invalid(unsigned int index)
+{
+	unsigned int cpu;
+	struct cpu_freqs *freqs;
+
+	for_each_possible_cpu(cpu) {
+		freqs = all_freqs[cpu];
+		if (!freqs || index < freqs->offset ||
+		    freqs->offset + freqs->max_state <= index)
+			continue;
+		return freqs->freq_table[index - freqs->offset] ==
+			CPUFREQ_ENTRY_INVALID;
+	}
+	return true;
+}
+
+static int single_uid_time_in_state_show(struct seq_file *m, void *ptr)
+{
+	struct uid_entry *uid_entry;
+	unsigned int i;
+	u64 time;
+	uid_t uid = from_kuid_munged(current_user_ns(), *(kuid_t *)m->private);
+
+	if (uid == overflowuid)
+		return -EINVAL;
+
+	rcu_read_lock();
+
+	uid_entry = find_uid_entry_rcu(uid);
+	if (!uid_entry) {
+		rcu_read_unlock();
+		return 0;
+	}
+
+	for (i = 0; i < uid_entry->max_state; ++i) {
+		if (freq_index_invalid(i))
+			continue;
+		time = cputime_to_clock_t(uid_entry->time_in_state[i]);
+		seq_write(m, &time, sizeof(time));
+	}
+
+	rcu_read_unlock();
+
+	return 0;
+}
+
+static void *uid_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	if (*pos >= HASH_SIZE(uid_hash_table))
+		return NULL;
+
+	return &uid_hash_table[*pos];
+}
+
+static void *uid_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	(*pos)++;
+
+	if (*pos >= HASH_SIZE(uid_hash_table))
+		return NULL;
+
+	return &uid_hash_table[*pos];
+}
+
+static void uid_seq_stop(struct seq_file *seq, void *v) { }
+
+static int uid_time_in_state_seq_show(struct seq_file *m, void *v)
+{
+	struct uid_entry *uid_entry;
+	struct cpu_freqs *freqs, *last_freqs = NULL;
+	int i, cpu;
+
+	if (v == uid_hash_table) {
+		seq_puts(m, "uid:");
+		for_each_possible_cpu(cpu) {
+			freqs = all_freqs[cpu];
+			if (!freqs || freqs == last_freqs)
+				continue;
+			last_freqs = freqs;
+			for (i = 0; i < freqs->max_state; i++) {
+				if (freqs->freq_table[i] ==
+				    CPUFREQ_ENTRY_INVALID)
+					continue;
+				seq_printf(m, " %d", freqs->freq_table[i]);
+			}
+		}
+		seq_putc(m, '\n');
+	}
+
+	rcu_read_lock();
+
+	hlist_for_each_entry_rcu(uid_entry, (struct hlist_head *)v, hash) {
+		if (uid_entry->max_state)
+			seq_printf(m, "%d:", uid_entry->uid);
+		for (i = 0; i < uid_entry->max_state; ++i) {
+			if (freq_index_invalid(i))
+				continue;
+			seq_printf(m, " %lu", (unsigned long)cputime_to_clock_t(
+					   uid_entry->time_in_state[i]));
+		}
+		if (uid_entry->max_state)
+			seq_putc(m, '\n');
+	}
+
+	rcu_read_unlock();
+	return 0;
+}
+
+void cpufreq_task_times_init(struct task_struct *p)
+{
+	void *temp;
+	unsigned long flags;
+	unsigned int max_state;
+
+	spin_lock_irqsave(&task_time_in_state_lock, flags);
+	p->time_in_state = NULL;
+	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+	p->max_state = 0;
+
+	max_state = READ_ONCE(next_offset);
+
+	/* We use one array to avoid multiple allocs per task */
+	temp = kcalloc(max_state, sizeof(p->time_in_state[0]), GFP_ATOMIC);
+	if (!temp)
+		return;
+
+	spin_lock_irqsave(&task_time_in_state_lock, flags);
+	p->time_in_state = temp;
+	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+	p->max_state = max_state;
+}
+
+/* Caller must hold task_time_in_state_lock */
+static int cpufreq_task_times_realloc_locked(struct task_struct *p)
+{
+	void *temp;
+	unsigned int max_state = READ_ONCE(next_offset);
+
+	temp = krealloc(p->time_in_state, max_state * sizeof(u64), GFP_ATOMIC);
+	if (!temp)
+		return -ENOMEM;
+	p->time_in_state = temp;
+	memset(p->time_in_state + p->max_state, 0,
+	       (max_state - p->max_state) * sizeof(u64));
+	p->max_state = max_state;
+	return 0;
+}
+
+void cpufreq_task_times_exit(struct task_struct *p)
+{
+	unsigned long flags;
+	void *temp;
+
+	if (!p->time_in_state)
+		return;
+
+	spin_lock_irqsave(&task_time_in_state_lock, flags);
+	temp = p->time_in_state;
+	p->time_in_state = NULL;
+	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+	kfree(temp);
+}
+
+int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns,
+	struct pid *pid, struct task_struct *p)
+{
+	unsigned int cpu, i;
+	cputime_t cputime;
+	unsigned long flags;
+	struct cpu_freqs *freqs;
+	struct cpu_freqs *last_freqs = NULL;
+
+	spin_lock_irqsave(&task_time_in_state_lock, flags);
+	for_each_possible_cpu(cpu) {
+		freqs = all_freqs[cpu];
+		if (!freqs || freqs == last_freqs)
+			continue;
+		last_freqs = freqs;
+
+		seq_printf(m, "cpu%u\n", cpu);
+		for (i = 0; i < freqs->max_state; i++) {
+			if (freqs->freq_table[i] == CPUFREQ_ENTRY_INVALID)
+				continue;
+			cputime = 0;
+			if (freqs->offset + i < p->max_state &&
+			    p->time_in_state)
+				cputime = p->time_in_state[freqs->offset + i];
+			seq_printf(m, "%u %lu\n", freqs->freq_table[i],
+				   (unsigned long)cputime_to_clock_t(cputime));
+		}
+	}
+	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+	return 0;
+}
+
+void cpufreq_acct_update_power(struct task_struct *p, cputime_t cputime)
+{
+	unsigned long flags;
+	unsigned int state;
+	struct uid_entry *uid_entry;
+	struct cpu_freqs *freqs = all_freqs[task_cpu(p)];
+	uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
+
+	if (!freqs || p->flags & PF_EXITING)
+		return;
+
+	state = freqs->offset + READ_ONCE(freqs->last_index);
+
+	spin_lock_irqsave(&task_time_in_state_lock, flags);
+	if ((state < p->max_state || !cpufreq_task_times_realloc_locked(p)) &&
+	    p->time_in_state)
+		p->time_in_state[state] += cputime;
+	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+
+	spin_lock_irqsave(&uid_lock, flags);
+	uid_entry = find_or_register_uid_locked(uid);
+	if (uid_entry && state < uid_entry->max_state)
+		uid_entry->time_in_state[state] += cputime;
+	spin_unlock_irqrestore(&uid_lock, flags);
+}
+
+void cpufreq_times_create_policy(struct cpufreq_policy *policy)
+{
+	int cpu, index;
+	unsigned int count = 0;
+	struct cpufreq_frequency_table *pos, *table;
+	struct cpu_freqs *freqs;
+	void *tmp;
+
+	if (all_freqs[policy->cpu])
+		return;
+
+	table = policy->freq_table;
+	if (!table)
+		return;
+
+	cpufreq_for_each_entry(pos, table)
+		count++;
+
+	tmp = kzalloc(sizeof(*freqs) + sizeof(freqs->freq_table[0]) * count,
+		       GFP_KERNEL);
+	if (!tmp)
+		return;
+
+	freqs = tmp;
+	freqs->max_state = count;
+
+	index = cpufreq_frequency_table_get_index(policy, policy->cur);
+	if (index >= 0)
+		WRITE_ONCE(freqs->last_index, index);
+
+	cpufreq_for_each_entry(pos, table)
+		freqs->freq_table[pos - table] = pos->frequency;
+
+	freqs->offset = next_offset;
+	WRITE_ONCE(next_offset, freqs->offset + count);
+	for_each_cpu(cpu, policy->related_cpus)
+		all_freqs[cpu] = freqs;
+}
+
+void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end)
+{
+	struct uid_entry *uid_entry;
+	struct hlist_node *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&uid_lock, flags);
+
+	for (; uid_start <= uid_end; uid_start++) {
+		hash_for_each_possible_safe(uid_hash_table, uid_entry, tmp,
+			hash, uid_start) {
+			if (uid_start == uid_entry->uid) {
+				hash_del_rcu(&uid_entry->hash);
+				kfree_rcu(uid_entry, rcu);
+			}
+		}
+	}
+
+	spin_unlock_irqrestore(&uid_lock, flags);
+}
+
+void cpufreq_times_record_transition(struct cpufreq_freqs *freq)
+{
+	int index;
+	struct cpu_freqs *freqs = all_freqs[freq->cpu];
+	struct cpufreq_policy *policy;
+
+	if (!freqs)
+		return;
+
+	policy = cpufreq_cpu_get(freq->cpu);
+	if (!policy)
+		return;
+
+	index = cpufreq_frequency_table_get_index(policy, freq->new);
+	if (index >= 0)
+		WRITE_ONCE(freqs->last_index, index);
+
+	cpufreq_cpu_put(policy);
+}
+
+static const struct seq_operations uid_time_in_state_seq_ops = {
+	.start = uid_seq_start,
+	.next = uid_seq_next,
+	.stop = uid_seq_stop,
+	.show = uid_time_in_state_seq_show,
+};
+
+static int uid_time_in_state_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &uid_time_in_state_seq_ops);
+}
+
+int single_uid_time_in_state_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, single_uid_time_in_state_show,
+			&(inode->i_uid));
+}
+
+static const struct file_operations uid_time_in_state_fops = {
+	.open		= uid_time_in_state_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static int __init cpufreq_times_init(void)
+{
+	proc_create_data("uid_time_in_state", 0444, NULL,
+			 &uid_time_in_state_fops, NULL);
+
+	return 0;
+}
+
+early_initcall(cpufreq_times_init);
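
The bookkeeping in cpufreq_times.c above uses one flat index space: each
policy is assigned an offset (the value of next_offset at registration) and
its frequency-table entries occupy slots offset..offset+max_state-1, so
per-task and per-uid accounting can index a single time_in_state array with
offset + last_index. A standalone sketch of that flattening with two made-up
clusters and frequency tables.

#include <stdio.h>

/* Two made-up clusters with their own frequency tables (kHz). */
static const unsigned int little_freqs[] = { 300000, 576000, 1094400 };
static const unsigned int big_freqs[]    = { 825600, 1766400, 2649600 };

struct cluster {
	unsigned int offset;		/* start in the flat array */
	unsigned int max_state;		/* number of table entries */
	const unsigned int *freq_table;
};

int main(void)
{
	unsigned int next_offset = 0;
	struct cluster clusters[2] = {
		{ 0, 3, little_freqs },
		{ 0, 3, big_freqs },
	};
	unsigned long long time_in_state[6] = { 0 };	/* one flat array */
	unsigned int i, j;

	/* Hand out offsets the way cpufreq_times_create_policy() does. */
	for (i = 0; i < 2; i++) {
		clusters[i].offset = next_offset;
		next_offset += clusters[i].max_state;
	}

	/* Charge some time to the big cluster's highest state. */
	time_in_state[clusters[1].offset + 2] += 10;

	for (i = 0; i < 2; i++)
		for (j = 0; j < clusters[i].max_state; j++)
			printf("%u kHz -> slot %u, time %llu\n",
			       clusters[i].freq_table[j],
			       clusters[i].offset + j,
			       time_in_state[clusters[i].offset + j]);
	return 0;
}
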
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 6fb3cd2..a1d7fa4 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -599,6 +599,16 @@
 
 	if (!spin_trylock(&gpstates->gpstate_lock))
 		return;
+	/*
+	 * If the timer has migrated to the different cpu then bring
+	 * it back to one of the policy->cpus
+	 */
+	if (!cpumask_test_cpu(raw_smp_processor_id(), policy->cpus)) {
+		gpstates->timer.expires = jiffies + msecs_to_jiffies(1);
+		add_timer_on(&gpstates->timer, cpumask_first(policy->cpus));
+		spin_unlock(&gpstates->gpstate_lock);
+		return;
+	}
 
 	gpstates->last_sampled_time += time_diff;
 	gpstates->elapsed_time += time_diff;
@@ -626,10 +636,8 @@
 	gpstates->last_gpstate_idx = pstate_to_idx(freq_data.gpstate_id);
 	gpstates->last_lpstate_idx = pstate_to_idx(freq_data.pstate_id);
 
+	set_pstate(&freq_data);
 	spin_unlock(&gpstates->gpstate_lock);
-
-	/* Timer may get migrated to a different cpu on cpu hot unplug */
-	smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
 }
 
 /*
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 21340e0..f521448 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -4,6 +4,7 @@
 config ARM_CPUIDLE
         bool "Generic ARM/ARM64 CPU idle Driver"
         select DT_IDLE_STATES
+	select CPU_IDLE_MULTIPLE_DRIVERS
         help
           Select this to enable generic cpuidle driver for ARM.
           It provides a generic idle driver whose idle states are configured
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index cb3c48a..99ad22e 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -27,4 +27,8 @@
 # POWERPC drivers
 obj-$(CONFIG_PSERIES_CPUIDLE)		+= cpuidle-pseries.o
 obj-$(CONFIG_POWERNV_CPUIDLE)		+= cpuidle-powernv.o
+ifeq ($(CONFIG_MSM_PM_LEGACY), y)
+obj-y += lpm-levels-legacy.o lpm-levels-of-legacy.o lpm-workarounds.o
+else
 obj-$(CONFIG_MSM_PM) += lpm-levels.o lpm-levels-of.o
+endif
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index f440d38..f47c545 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -18,6 +18,7 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/slab.h>
+#include <linux/topology.h>
 
 #include <asm/cpuidle.h>
 
@@ -44,7 +45,7 @@
 	return CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, idx);
 }
 
-static struct cpuidle_driver arm_idle_driver = {
+static struct cpuidle_driver arm_idle_driver __initdata = {
 	.name = "arm_idle",
 	.owner = THIS_MODULE,
 	/*
@@ -80,30 +81,42 @@
 static int __init arm_idle_init(void)
 {
 	int cpu, ret;
-	struct cpuidle_driver *drv = &arm_idle_driver;
+	struct cpuidle_driver *drv;
 	struct cpuidle_device *dev;
 
-	/*
-	 * Initialize idle states data, starting at index 1.
-	 * This driver is DT only, if no DT idle states are detected (ret == 0)
-	 * let the driver initialization fail accordingly since there is no
-	 * reason to initialize the idle driver if only wfi is supported.
-	 */
-	ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
-	if (ret <= 0)
-		return ret ? : -ENODEV;
-
-	ret = cpuidle_register_driver(drv);
-	if (ret) {
-		pr_err("Failed to register cpuidle driver\n");
-		return ret;
-	}
-
-	/*
-	 * Call arch CPU operations in order to initialize
-	 * idle states suspend back-end specific data
-	 */
 	for_each_possible_cpu(cpu) {
+
+		drv = kmemdup(&arm_idle_driver, sizeof(*drv), GFP_KERNEL);
+		if (!drv) {
+			ret = -ENOMEM;
+			goto out_fail;
+		}
+
+		drv->cpumask = (struct cpumask *)cpumask_of(cpu);
+
+		/*
+		 * Initialize idle states data, starting at index 1.  This
+		 * driver is DT only, if no DT idle states are detected (ret
+		 * == 0) let the driver initialization fail accordingly since
+		 * there is no reason to initialize the idle driver if only
+		 * wfi is supported.
+		 */
+		ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
+		if (ret <= 0) {
+			ret = ret ? : -ENODEV;
+			goto out_kfree_drv;
+		}
+
+		ret = cpuidle_register_driver(drv);
+		if (ret) {
+			pr_err("Failed to register cpuidle driver\n");
+			goto out_kfree_drv;
+		}
+
+		/*
+		 * Call arch CPU operations in order to initialize
+		 * idle states suspend back-end specific data
+		 */
 		ret = arm_cpuidle_init(cpu);
 
 		/*
@@ -115,14 +128,14 @@
 
 		if (ret) {
 			pr_err("CPU %d failed to init idle CPU ops\n", cpu);
-			goto out_fail;
+			goto out_unregister_drv;
 		}
 
 		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 		if (!dev) {
 			pr_err("Failed to allocate cpuidle device\n");
 			ret = -ENOMEM;
-			goto out_fail;
+			goto out_unregister_drv;
 		}
 		dev->cpu = cpu;
 
@@ -130,21 +143,28 @@
 		if (ret) {
 			pr_err("Failed to register cpuidle device for CPU %d\n",
 			       cpu);
-			kfree(dev);
-			goto out_fail;
+			goto out_kfree_dev;
 		}
 	}
 
 	return 0;
+
+out_kfree_dev:
+	kfree(dev);
+out_unregister_drv:
+	cpuidle_unregister_driver(drv);
+out_kfree_drv:
+	kfree(drv);
 out_fail:
 	while (--cpu >= 0) {
 		dev = per_cpu(cpuidle_devices, cpu);
+		drv = cpuidle_get_cpu_driver(dev);
 		cpuidle_unregister_device(dev);
+		cpuidle_unregister_driver(drv);
 		kfree(dev);
+		kfree(drv);
 	}
 
-	cpuidle_unregister_driver(drv);
-
 	return ret;
 }
 device_initcall(arm_idle_init);
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index a5c111b..ea11a33 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -174,8 +174,10 @@
 		if (!state_node)
 			break;
 
-		if (!of_device_is_available(state_node))
+		if (!of_device_is_available(state_node)) {
+			of_node_put(state_node);
 			continue;
+		}
 
 		if (!idle_state_valid(state_node, i, cpumask)) {
 			pr_warn("%s idle state not valid, bailing out\n",
diff --git a/drivers/cpuidle/lpm-levels-legacy.c b/drivers/cpuidle/lpm-levels-legacy.c
new file mode 100644
index 0000000..26cb52a
--- /dev/null
+++ b/drivers/cpuidle/lpm-levels-legacy.c
@@ -0,0 +1,1535 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/tick.h>
+#include <linux/suspend.h>
+#include <linux/pm_qos.h>
+#include <linux/of_platform.h>
+#include <linux/smp.h>
+#include <linux/remote_spinlock.h>
+#include <linux/msm_remote_spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/coresight-cti.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/cpu_pm.h>
+#include <soc/qcom/spm.h>
+#include <soc/qcom/pm-legacy.h>
+#include <soc/qcom/rpm-notifier.h>
+#include <soc/qcom/event_timer.h>
+#include <soc/qcom/lpm-stats.h>
+#include <soc/qcom/lpm_levels.h>
+#include <soc/qcom/jtag.h>
+#include <asm/cputype.h>
+#include <asm/arch_timer.h>
+#include <asm/cacheflush.h>
+#include <asm/suspend.h>
+#include "lpm-levels-legacy.h"
+#include "lpm-workarounds.h"
+#include <trace/events/power.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/trace_msm_low_power.h>
+#if defined(CONFIG_COMMON_CLK)
+#include "../clk/clk.h"
+#elif defined(CONFIG_COMMON_CLK_MSM)
+#include "../../drivers/clk/msm/clock.h"
+#endif /* CONFIG_COMMON_CLK */
+#include <soc/qcom/minidump.h>
+
+#define SCLK_HZ (32768)
+#define SCM_HANDOFF_LOCK_ID "S:7"
+#define PSCI_POWER_STATE(reset) (reset << 30)
+#define PSCI_AFFINITY_LEVEL(lvl) ((lvl & 0x3) << 24)
+static remote_spinlock_t scm_handoff_lock;
+
+enum {
+	MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0),
+	MSM_LPM_LVL_DBG_IDLE_LIMITS = BIT(1),
+};
+
+enum debug_event {
+	CPU_ENTER,
+	CPU_EXIT,
+	CLUSTER_ENTER,
+	CLUSTER_EXIT,
+	PRE_PC_CB,
+	CPU_HP_STARTING,
+	CPU_HP_DYING,
+};
+
+struct lpm_debug {
+	cycle_t time;
+	enum debug_event evt;
+	int cpu;
+	uint32_t arg1;
+	uint32_t arg2;
+	uint32_t arg3;
+	uint32_t arg4;
+};
+
+static struct system_pm_ops *sys_pm_ops;
+struct lpm_cluster *lpm_root_node;
+
+static DEFINE_PER_CPU(struct lpm_cluster*, cpu_cluster);
+static bool suspend_in_progress;
+static struct hrtimer lpm_hrtimer;
+static struct lpm_debug *lpm_debug;
+static phys_addr_t lpm_debug_phys;
+
+static const int num_dbg_elements = 0x100;
+
+static void cluster_unprepare(struct lpm_cluster *cluster,
+		const struct cpumask *cpu, int child_idx, bool from_idle,
+		int64_t time);
+static void cluster_prepare(struct lpm_cluster *cluster,
+		const struct cpumask *cpu, int child_idx, bool from_idle,
+		int64_t time);
+
+static bool menu_select;
+module_param_named(
+	menu_select, menu_select, bool, 0664
+);
+
+static bool print_parsed_dt;
+module_param_named(
+	print_parsed_dt, print_parsed_dt, bool, 0664
+);
+
+static bool sleep_disabled;
+module_param_named(sleep_disabled,
+	sleep_disabled, bool, 0664);
+
+s32 msm_cpuidle_get_deep_idle_latency(void)
+{
+	return 10;
+}
+EXPORT_SYMBOL(msm_cpuidle_get_deep_idle_latency);
+
+uint32_t register_system_pm_ops(struct system_pm_ops *pm_ops)
+{
+	if (sys_pm_ops)
+		return -EUSERS;
+
+	sys_pm_ops = pm_ops;
+
+	return 0;
+}
+
+static uint32_t least_cluster_latency(struct lpm_cluster *cluster,
+					struct latency_level *lat_level)
+{
+	struct list_head *list;
+	struct lpm_cluster_level *level;
+	struct lpm_cluster *n;
+	struct power_params *pwr_params;
+	uint32_t latency = 0;
+	int i;
+
+	if (!cluster->list.next) {
+		for (i = 0; i < cluster->nlevels; i++) {
+			level = &cluster->levels[i];
+			pwr_params = &level->pwr;
+			if (lat_level->reset_level == level->reset_level) {
+				if ((latency > pwr_params->latency_us)
+						|| (!latency))
+					latency = pwr_params->latency_us;
+				break;
+			}
+		}
+	} else {
+		list_for_each(list, &cluster->parent->child) {
+			n = list_entry(list, typeof(*n), list);
+			if (lat_level->level_name) {
+				if (strcmp(lat_level->level_name,
+						 n->cluster_name))
+					continue;
+			}
+			for (i = 0; i < n->nlevels; i++) {
+				level = &n->levels[i];
+				pwr_params = &level->pwr;
+				if (lat_level->reset_level ==
+						level->reset_level) {
+					if ((latency > pwr_params->latency_us)
+								|| (!latency))
+						latency =
+						pwr_params->latency_us;
+					break;
+				}
+			}
+		}
+	}
+	return latency;
+}
+
+static uint32_t least_cpu_latency(struct list_head *child,
+				struct latency_level *lat_level)
+{
+	struct list_head *list;
+	struct lpm_cpu_level *level;
+	struct power_params *pwr_params;
+	struct lpm_cpu *cpu;
+	struct lpm_cluster *n;
+	uint32_t latency = 0;
+	int i;
+
+	list_for_each(list, child) {
+		n = list_entry(list, typeof(*n), list);
+		if (lat_level->level_name) {
+			if (strcmp(lat_level->level_name, n->cluster_name))
+				continue;
+		}
+		cpu = n->cpu;
+		for (i = 0; i < cpu->nlevels; i++) {
+			level = &cpu->levels[i];
+			pwr_params = &level->pwr;
+			if (lat_level->reset_level == level->reset_level) {
+				if ((latency > pwr_params->latency_us)
+							|| (!latency))
+					latency = pwr_params->latency_us;
+				break;
+			}
+		}
+	}
+	return latency;
+}
+
+static struct lpm_cluster *cluster_aff_match(struct lpm_cluster *cluster,
+							int affinity_level)
+{
+	struct lpm_cluster *n;
+
+	if ((cluster->aff_level == affinity_level)
+		|| ((cluster->cpu) && (affinity_level == 0)))
+		return cluster;
+	else if (!cluster->cpu) {
+		n =  list_entry(cluster->child.next, typeof(*n), list);
+		return cluster_aff_match(n, affinity_level);
+	} else
+		return NULL;
+}
+
+int lpm_get_latency(struct latency_level *level, uint32_t *latency)
+{
+	struct lpm_cluster *cluster;
+	uint32_t val;
+
+	if (!lpm_root_node) {
+		pr_err("%s: lpm_probe not completed\n", __func__);
+		return -EAGAIN;
+	}
+
+	if ((level->affinity_level < 0)
+		|| (level->affinity_level > lpm_root_node->aff_level)
+		|| (level->reset_level < LPM_RESET_LVL_RET)
+		|| (level->reset_level > LPM_RESET_LVL_PC)
+		|| !latency)
+		return -EINVAL;
+
+	cluster = cluster_aff_match(lpm_root_node, level->affinity_level);
+	if (!cluster) {
+		pr_err("%s:No matching cluster found for affinity_level:%d\n",
+					__func__, level->affinity_level);
+		return -EINVAL;
+	}
+
+	if (level->affinity_level == 0)
+		val = least_cpu_latency(&cluster->parent->child, level);
+	else
+		val = least_cluster_latency(cluster, level);
+
+	if (!val) {
+		pr_err("%s:No mode with affinity_level:%d reset_level:%d\n",
+			__func__, level->affinity_level, level->reset_level);
+		return -EINVAL;
+	}
+
+	*latency = val;
+
+	return 0;
+}
+EXPORT_SYMBOL(lpm_get_latency);
+
+static void update_debug_pc_event(enum debug_event event, uint32_t arg1,
+		uint32_t arg2, uint32_t arg3, uint32_t arg4)
+{
+	struct lpm_debug *dbg;
+	int idx;
+	static DEFINE_SPINLOCK(debug_lock);
+	static int pc_event_index;
+
+	if (!lpm_debug)
+		return;
+
+	spin_lock(&debug_lock);
+	idx = pc_event_index++;
+	dbg = &lpm_debug[idx & (num_dbg_elements - 1)];
+
+	dbg->evt = event;
+	dbg->time = arch_counter_get_cntpct();
+	dbg->cpu = raw_smp_processor_id();
+	dbg->arg1 = arg1;
+	dbg->arg2 = arg2;
+	dbg->arg3 = arg3;
+	dbg->arg4 = arg4;
+	spin_unlock(&debug_lock);
+}
+
+static enum hrtimer_restart lpm_hrtimer_cb(struct hrtimer *h)
+{
+	return HRTIMER_NORESTART;
+}
+
+static void msm_pm_set_timer(uint32_t modified_time_us)
+{
+	u64 modified_time_ns = modified_time_us * NSEC_PER_USEC;
+	ktime_t modified_ktime = ns_to_ktime(modified_time_ns);
+
+	lpm_hrtimer.function = lpm_hrtimer_cb;
+	hrtimer_start(&lpm_hrtimer, modified_ktime, HRTIMER_MODE_REL_PINNED);
+}
+
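+/*
+ * set_l2_mode() - program the L2 SPM for the requested cluster mode.
+ *
+ * Records in tz_flag whether L2 will be powered off, put into GDHS or kept
+ * on (so the secure monitor can be informed later), saves/restores the
+ * coresight CTI context around L2 power-down modes, and configures the SPM,
+ * using the address-based call when the skip-L2-SPM workaround is active.
+ */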
+int set_l2_mode(struct low_power_ops *ops, int mode,
+				struct lpm_cluster_level *level)
+{
+	int lpm = mode;
+	int rc = 0;
+	bool notify_rpm = level->notify_rpm;
+	struct low_power_ops *cpu_ops = per_cpu(cpu_cluster,
+			smp_processor_id())->lpm_dev;
+
+	if (cpu_ops->tz_flag & MSM_SCM_L2_OFF ||
+			cpu_ops->tz_flag & MSM_SCM_L2_GDHS)
+		coresight_cti_ctx_restore();
+
+	switch (mode) {
+	case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE:
+	case MSM_SPM_MODE_POWER_COLLAPSE:
+	case MSM_SPM_MODE_FASTPC:
+		if (level->no_cache_flush)
+			cpu_ops->tz_flag = MSM_SCM_L2_GDHS;
+		else
+			cpu_ops->tz_flag = MSM_SCM_L2_OFF;
+		coresight_cti_ctx_save();
+		break;
+	case MSM_SPM_MODE_GDHS:
+		cpu_ops->tz_flag = MSM_SCM_L2_GDHS;
+		coresight_cti_ctx_save();
+		break;
+	case MSM_SPM_MODE_CLOCK_GATING:
+	case MSM_SPM_MODE_RETENTION:
+	case MSM_SPM_MODE_DISABLED:
+		cpu_ops->tz_flag = MSM_SCM_L2_ON;
+		break;
+	default:
+		cpu_ops->tz_flag = MSM_SCM_L2_ON;
+		lpm = MSM_SPM_MODE_DISABLED;
+		break;
+	}
+
+	if (lpm_wa_get_skip_l2_spm())
+		rc = msm_spm_config_low_power_mode_addr(ops->spm, lpm,
+							notify_rpm);
+	else
+		rc = msm_spm_config_low_power_mode(ops->spm, lpm, notify_rpm);
+
+	if (rc)
+		pr_err("%s: Failed to set L2 low power mode %d, ERR %d",
+				__func__, lpm, rc);
+
+	return rc;
+}
+
+int set_l3_mode(struct low_power_ops *ops, int mode,
+				struct lpm_cluster_level *level)
+{
+	bool notify_rpm = level->notify_rpm;
+	struct low_power_ops *cpu_ops = per_cpu(cpu_cluster,
+			smp_processor_id())->lpm_dev;
+
+	switch (mode) {
+	case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE:
+	case MSM_SPM_MODE_POWER_COLLAPSE:
+	case MSM_SPM_MODE_FASTPC:
+		cpu_ops->tz_flag |= MSM_SCM_L3_PC_OFF;
+		break;
+	default:
+		break;
+	}
+	return msm_spm_config_low_power_mode(ops->spm, mode, notify_rpm);
+}
+
+int set_system_mode(struct low_power_ops *ops, int mode,
+				struct lpm_cluster_level *level)
+{
+	bool notify_rpm = level->notify_rpm;
+
+	return msm_spm_config_low_power_mode(ops->spm, mode, notify_rpm);
+}
+
+static int set_device_mode(struct lpm_cluster *cluster, int ndevice,
+		struct lpm_cluster_level *level)
+{
+	struct low_power_ops *ops;
+
+	if (use_psci)
+		return 0;
+
+	ops = &cluster->lpm_dev[ndevice];
+	if (ops && ops->set_mode)
+		return ops->set_mode(ops, level->mode[ndevice],
+				level);
+	else
+		return -EINVAL;
+}
+
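+/*
+ * cpu_power_select() - choose the CPU low power mode for idle entry.
+ *
+ * Walks the CPU levels from shallowest to deepest and picks the deepest
+ * level that is allowed for idle, whose entry/exit latency fits within the
+ * PM_QOS_CPU_DMA_LATENCY constraint for this CPU, and that is not cut short
+ * by the next timer event. When the next event falls inside the predicted
+ * sleep window (and the mode is deeper than WFI), a pinned hrtimer is armed
+ * via msm_pm_set_timer() so a wakeup is scheduled latency_us before that
+ * event. Returns the chosen level index (0 when sleep is disabled or the
+ * predicted sleep length is negative).
+ */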
+static int cpu_power_select(struct cpuidle_device *dev,
+		struct lpm_cpu *cpu)
+{
+	int best_level = 0;
+	uint32_t latency_us = pm_qos_request_for_cpu(PM_QOS_CPU_DMA_LATENCY,
+							dev->cpu);
+	s64 sleep_us = ktime_to_us(tick_nohz_get_sleep_length());
+	uint32_t modified_time_us = 0;
+	uint32_t next_event_us = 0;
+	int i;
+	uint32_t lvl_latency_us = 0;
+	uint32_t *residency = get_per_cpu_max_residency(dev->cpu);
+
+	if (!cpu)
+		return best_level;
+
+	if ((sleep_disabled && !cpu_isolated(dev->cpu)) || sleep_us < 0)
+		return 0;
+
+	next_event_us = (uint32_t)(ktime_to_us(get_next_event_time(dev->cpu)));
+
+	for (i = 0; i < cpu->nlevels; i++) {
+		struct lpm_cpu_level *level = &cpu->levels[i];
+		struct power_params *pwr_params = &level->pwr;
+		uint32_t next_wakeup_us = (uint32_t)sleep_us;
+		enum msm_pm_sleep_mode mode = level->mode;
+		bool allow;
+
+		allow = lpm_cpu_mode_allow(dev->cpu, i, true);
+
+		if (!allow)
+			continue;
+
+		lvl_latency_us = pwr_params->latency_us;
+
+		if (latency_us < lvl_latency_us)
+			break;
+
+		if (next_event_us) {
+			if (next_event_us < lvl_latency_us)
+				break;
+
+			if (((next_event_us - lvl_latency_us) < sleep_us) ||
+					(next_event_us < sleep_us))
+				next_wakeup_us = next_event_us - lvl_latency_us;
+		}
+
+		best_level = i;
+
+		if (next_event_us && next_event_us < sleep_us &&
+				(mode != MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT))
+			modified_time_us
+				= next_event_us - lvl_latency_us;
+		else
+			modified_time_us = 0;
+
+		if (next_wakeup_us <= residency[i])
+			break;
+	}
+
+	if (modified_time_us)
+		msm_pm_set_timer(modified_time_us);
+
+	trace_cpu_power_select(best_level, sleep_us, latency_us, next_event_us);
+
+	return best_level;
+}
+
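+/*
+ * get_cluster_sleep_time() - time until the earliest wakeup in the cluster.
+ *
+ * Returns the number of microseconds until the earliest next timer event
+ * among the online CPUs of this cluster (or ~0ULL when not entering from
+ * idle). If @mask is provided, it is set to the CPU that owns that earliest
+ * event.
+ */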
+static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster,
+		struct cpumask *mask, bool from_idle)
+{
+	int cpu;
+	int next_cpu = raw_smp_processor_id();
+	ktime_t next_event;
+	struct cpumask online_cpus_in_cluster;
+
+	next_event.tv64 = KTIME_MAX;
+	if (!from_idle) {
+		if (mask)
+			cpumask_copy(mask, cpumask_of(raw_smp_processor_id()));
+		return ~0ULL;
+	}
+
+	cpumask_and(&online_cpus_in_cluster,
+			&cluster->num_children_in_sync, cpu_online_mask);
+
+	for_each_cpu(cpu, &online_cpus_in_cluster) {
+		ktime_t *next_event_c;
+
+		next_event_c = get_next_event_cpu(cpu);
+		if (next_event_c->tv64 < next_event.tv64) {
+			next_event.tv64 = next_event_c->tv64;
+			next_cpu = cpu;
+		}
+	}
+
+	if (mask)
+		cpumask_copy(mask, cpumask_of(next_cpu));
+
+	if (ktime_to_us(next_event) > ktime_to_us(ktime_get()))
+		return ktime_to_us(ktime_sub(next_event, ktime_get()));
+	else
+		return 0;
+}
+
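+/*
+ * cluster_select() - choose the deepest permissible cluster low power level.
+ *
+ * A level is a candidate only if it is allowed for this entry path, all CPUs
+ * in the cluster have voted for at least that level, the last-core-only
+ * restriction (if any) holds, and the aggregated PM QoS latency and expected
+ * cluster sleep time cover the level's latency and overhead. Levels that
+ * notify the RPM are additionally gated by sys_pm_ops->sleep_allowed() and
+ * are skipped from the idle path while a suspend is in progress. Returns the
+ * level index, or a negative value if no level qualifies.
+ */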
+static int cluster_select(struct lpm_cluster *cluster, bool from_idle)
+{
+	int best_level = -1;
+	int i;
+	struct cpumask mask;
+	uint32_t latency_us = ~0U;
+	uint32_t sleep_us;
+
+	if (!cluster)
+		return -EINVAL;
+
+	sleep_us = (uint32_t)get_cluster_sleep_time(cluster, NULL, from_idle);
+
+	if (cpumask_and(&mask, cpu_online_mask, &cluster->child_cpus))
+		latency_us = pm_qos_request_for_cpumask(PM_QOS_CPU_DMA_LATENCY,
+							&mask);
+
+	/*
+	 * If at least one of the cores in the cluster is online, the cluster
+	 * low power modes should be determined by the idle characteristics
+	 * even if the last core enters the low power mode as a part of
+	 * hotplug.
+	 */
+
+	if (!from_idle && num_online_cpus() > 1 &&
+		cpumask_intersects(&cluster->child_cpus, cpu_online_mask))
+		from_idle = true;
+
+	for (i = 0; i < cluster->nlevels; i++) {
+		struct lpm_cluster_level *level = &cluster->levels[i];
+		struct power_params *pwr_params = &level->pwr;
+
+		if (!lpm_cluster_mode_allow(cluster, i, from_idle))
+			continue;
+
+		if (level->last_core_only &&
+			cpumask_weight(cpu_online_mask) > 1)
+			continue;
+
+		if (!cpumask_equal(&cluster->num_children_in_sync,
+					&level->num_cpu_votes))
+			continue;
+
+		if (from_idle && latency_us < pwr_params->latency_us)
+			break;
+
+		if (sleep_us < pwr_params->time_overhead_us)
+			break;
+
+		if (suspend_in_progress && from_idle && level->notify_rpm)
+			continue;
+
+		if (level->notify_rpm) {
+			if (!(sys_pm_ops && sys_pm_ops->sleep_allowed))
+				continue;
+			if (!sys_pm_ops->sleep_allowed())
+				continue;
+		}
+
+		best_level = i;
+
+		if (from_idle && sleep_us <= pwr_params->max_residency)
+			break;
+	}
+
+	return best_level;
+}
+
+static void cluster_notify(struct lpm_cluster *cluster,
+		struct lpm_cluster_level *level, bool enter)
+{
+	if (level->is_reset && enter)
+		cpu_cluster_pm_enter(cluster->aff_level);
+	else if (level->is_reset && !enter)
+		cpu_cluster_pm_exit(cluster->aff_level);
+}
+
+static unsigned int get_next_online_cpu(bool from_idle)
+{
+	unsigned int cpu;
+	ktime_t next_event;
+	unsigned int next_cpu = raw_smp_processor_id();
+
+	if (!from_idle)
+		return next_cpu;
+	next_event.tv64 = KTIME_MAX;
+	for_each_online_cpu(cpu) {
+		ktime_t *next_event_c;
+
+		next_event_c = get_next_event_cpu(cpu);
+		if (next_event_c->tv64 < next_event.tv64) {
+			next_event.tv64 = next_event_c->tv64;
+			next_cpu = cpu;
+		}
+	}
+	return next_cpu;
+}
+
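+/*
+ * cluster_configure() - program the hardware for the selected cluster level.
+ *
+ * Requires every child CPU to be in sync and no IPI to be pending. Sets each
+ * SPM device to the level's mode and, for RPM-notifying levels, invokes
+ * sys_pm_ops->enter() with the CPU expected to wake up first. On failure the
+ * SPM devices are restored to the cluster's default level.
+ */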
+static int cluster_configure(struct lpm_cluster *cluster, int idx,
+		bool from_idle)
+{
+	struct lpm_cluster_level *level = &cluster->levels[idx];
+	struct cpumask cpumask;
+	unsigned int cpu;
+	int ret, i;
+
+	if (!cpumask_equal(&cluster->num_children_in_sync, &cluster->child_cpus)
+			|| is_IPI_pending(&cluster->num_children_in_sync)) {
+		return -EPERM;
+	}
+
+	if (idx != cluster->default_level) {
+		update_debug_pc_event(CLUSTER_ENTER, idx,
+			cluster->num_children_in_sync.bits[0],
+			cluster->child_cpus.bits[0], from_idle);
+		trace_cluster_enter(cluster->cluster_name, idx,
+			cluster->num_children_in_sync.bits[0],
+			cluster->child_cpus.bits[0], from_idle);
+		lpm_stats_cluster_enter(cluster->stats, idx);
+	}
+
+	for (i = 0; i < cluster->ndevices; i++) {
+		ret = set_device_mode(cluster, i, level);
+		if (ret)
+			goto failed_set_mode;
+	}
+
+	if (level->notify_rpm) {
+		struct cpumask *nextcpu;
+
+		cpu = get_next_online_cpu(from_idle);
+		cpumask_copy(&cpumask, cpumask_of(cpu));
+		nextcpu = level->disable_dynamic_routing ? NULL : &cpumask;
+
+		if (sys_pm_ops && sys_pm_ops->enter) {
+			ret = sys_pm_ops->enter(nextcpu);
+			if (ret)
+				goto failed_set_mode;
+		}
+
+		if (cluster->no_saw_devices && !use_psci)
+			msm_spm_set_rpm_hs(true);
+	}
+
+	/* Notify cluster enter event after successful configuration */
+	cluster_notify(cluster, level, true);
+
+	cluster->last_level = idx;
+	return 0;
+
+failed_set_mode:
+
+	for (i = 0; i < cluster->ndevices; i++) {
+		int rc = 0;
+
+		level = &cluster->levels[cluster->default_level];
+		rc = set_device_mode(cluster, i, level);
+		WARN_ON(rc);
+	}
+	return ret;
+}
+
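+/*
+ * cluster_prepare() - record a CPU's vote and, if last, enter a cluster LPM.
+ *
+ * Adds @cpu to the cluster's in-sync mask and to the per-level vote masks.
+ * Once every child CPU is in sync, selects and configures a cluster level
+ * and recursively prepares the parent cluster. On the legacy (non-PSCI)
+ * path, RPM-notifying levels also program the wakeup time through
+ * sys_pm_ops->update_wakeup().
+ */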
+static void cluster_prepare(struct lpm_cluster *cluster,
+		const struct cpumask *cpu, int child_idx, bool from_idle,
+		int64_t start_time)
+{
+	int i;
+
+	if (!cluster)
+		return;
+
+	if (cluster->min_child_level > child_idx)
+		return;
+
+	spin_lock(&cluster->sync_lock);
+	cpumask_or(&cluster->num_children_in_sync, cpu,
+			&cluster->num_children_in_sync);
+
+	for (i = 0; i < cluster->nlevels; i++) {
+		struct lpm_cluster_level *lvl = &cluster->levels[i];
+
+		if (child_idx >= lvl->min_child_level)
+			cpumask_or(&lvl->num_cpu_votes, cpu,
+					&lvl->num_cpu_votes);
+	}
+
+	/*
+	 * cluster_select() does not make any configuration changes, so it is
+	 * OK to release the lock here. If a core wakes up for a rude request,
+	 * it need not wait for another core to finish its cluster selection
+	 * and configuration process.
+	 */
+
+	if (!cpumask_equal(&cluster->num_children_in_sync,
+				&cluster->child_cpus))
+		goto failed;
+
+	i = cluster_select(cluster, from_idle);
+
+	if (i < 0)
+		goto failed;
+
+	if (cluster_configure(cluster, i, from_idle))
+		goto failed;
+
+	cluster->stats->sleep_time = start_time;
+	cluster_prepare(cluster->parent, &cluster->num_children_in_sync, i,
+			from_idle, start_time);
+
+	spin_unlock(&cluster->sync_lock);
+
+	if (!use_psci) {
+		struct lpm_cluster_level *level = &cluster->levels[i];
+
+		if (level->notify_rpm)
+			if (sys_pm_ops && sys_pm_ops->update_wakeup)
+				sys_pm_ops->update_wakeup(from_idle);
+	}
+
+	return;
+failed:
+	spin_unlock(&cluster->sync_lock);
+	cluster->stats->sleep_time = 0;
+}
+
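+/*
+ * cluster_unprepare() - undo cluster_prepare() when a CPU wakes up.
+ *
+ * Removes @cpu from the cluster's vote masks. The first CPU to wake from a
+ * non-default level updates the sleep statistics, runs sys_pm_ops->exit()
+ * for RPM-notified levels, restores the SPM devices to the default level,
+ * and recursively unprepares the parent cluster.
+ */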
+static void cluster_unprepare(struct lpm_cluster *cluster,
+		const struct cpumask *cpu, int child_idx, bool from_idle,
+		int64_t end_time)
+{
+	struct lpm_cluster_level *level;
+	bool first_cpu;
+	int last_level, i, ret;
+
+	if (!cluster)
+		return;
+
+	if (cluster->min_child_level > child_idx)
+		return;
+
+	spin_lock(&cluster->sync_lock);
+	last_level = cluster->default_level;
+	first_cpu = cpumask_equal(&cluster->num_children_in_sync,
+				&cluster->child_cpus);
+	cpumask_andnot(&cluster->num_children_in_sync,
+			&cluster->num_children_in_sync, cpu);
+
+	for (i = 0; i < cluster->nlevels; i++) {
+		struct lpm_cluster_level *lvl = &cluster->levels[i];
+
+		if (child_idx >= lvl->min_child_level)
+			cpumask_andnot(&lvl->num_cpu_votes,
+					&lvl->num_cpu_votes, cpu);
+	}
+
+	if (!first_cpu || cluster->last_level == cluster->default_level)
+		goto unlock_return;
+
+	if (cluster->stats->sleep_time)
+		cluster->stats->sleep_time = end_time -
+			cluster->stats->sleep_time;
+	lpm_stats_cluster_exit(cluster->stats, cluster->last_level, true);
+
+	level = &cluster->levels[cluster->last_level];
+	if (level->notify_rpm) {
+		if (sys_pm_ops && sys_pm_ops->exit)
+			sys_pm_ops->exit();
+
+		/*
+		 * If RPM bumps CX up to turbo, remove the CX turbo vote
+		 * during exit from RPM-assisted power collapse to reduce
+		 * the power impact.
+		 */
+		lpm_wa_cx_unvote_send();
+
+		if (cluster->no_saw_devices && !use_psci)
+			msm_spm_set_rpm_hs(false);
+
+	}
+
+	update_debug_pc_event(CLUSTER_EXIT, cluster->last_level,
+			cluster->num_children_in_sync.bits[0],
+			cluster->child_cpus.bits[0], from_idle);
+	trace_cluster_exit(cluster->cluster_name, cluster->last_level,
+			cluster->num_children_in_sync.bits[0],
+			cluster->child_cpus.bits[0], from_idle);
+
+	last_level = cluster->last_level;
+	cluster->last_level = cluster->default_level;
+
+	for (i = 0; i < cluster->ndevices; i++) {
+		level = &cluster->levels[cluster->default_level];
+		ret = set_device_mode(cluster, i, level);
+
+		WARN_ON(ret);
+
+	}
+
+	cluster_notify(cluster, &cluster->levels[last_level], false);
+	cluster_unprepare(cluster->parent, &cluster->child_cpus,
+			last_level, from_idle, end_time);
+unlock_return:
+	spin_unlock(&cluster->sync_lock);
+}
+
+static inline void cpu_prepare(struct lpm_cluster *cluster, int cpu_index,
+				bool from_idle)
+{
+	struct lpm_cpu_level *cpu_level = &cluster->cpu->levels[cpu_index];
+	bool jtag_save_restore =
+			cluster->cpu->levels[cpu_index].jtag_save_restore;
+
+	/*
+	 * Use the broadcast timer for aggregating the sleep mode within a
+	 * cluster. A broadcast timer could be used in the following scenarios:
+	 * 1) The architected timer HW gets reset during certain low power
+	 * modes and the core relies on an external (broadcast) timer to wake
+	 * up from sleep. This information is passed through the device tree.
+	 * 2) The CPU low power mode could trigger a system low power mode.
+	 * The low power module relies on the broadcast timer to aggregate the
+	 * next wakeup within a cluster, in which case the CPU switches over
+	 * to the broadcast timer.
+	 */
+	if (from_idle && (cpu_level->use_bc_timer ||
+			(cpu_index >= cluster->min_child_level)))
+		tick_broadcast_enter();
+
+	if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
+		|| (cpu_level->mode ==
+			MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)
+			|| (cpu_level->is_reset)))
+		cpu_pm_enter();
+
+	/*
+	 * Save JTAG registers for 8996v1.0 & 8996v2.x in C4 LPM
+	 */
+	if (jtag_save_restore)
+		msm_jtag_save_state();
+}
+
+static inline void cpu_unprepare(struct lpm_cluster *cluster, int cpu_index,
+				bool from_idle)
+{
+	struct lpm_cpu_level *cpu_level = &cluster->cpu->levels[cpu_index];
+	bool jtag_save_restore =
+			cluster->cpu->levels[cpu_index].jtag_save_restore;
+
+	if (from_idle && (cpu_level->use_bc_timer ||
+			(cpu_index >= cluster->min_child_level)))
+		tick_broadcast_exit();
+
+	if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
+		|| (cpu_level->mode ==
+			MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)
+		|| cpu_level->is_reset))
+		cpu_pm_exit();
+
+	/*
+	 * Restore JTAG registers for 8996v1.0 & 8996v2.x in C4 LPM
+	 */
+	if (jtag_save_restore)
+		msm_jtag_restore_state();
+}
+
+#if defined(CONFIG_ARM_PSCI) || !defined(CONFIG_CPU_V7)
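+/*
+ * get_cluster_id() - accumulate the PSCI state IDs of the entered cluster
+ * levels while walking up the hierarchy, incrementing @aff_lvl once for each
+ * cluster that entered a non-default level. Clusters whose child CPUs are
+ * not all in sync contribute nothing.
+ */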
+static int get_cluster_id(struct lpm_cluster *cluster, int *aff_lvl)
+{
+	int state_id = 0;
+
+	if (!cluster)
+		return 0;
+
+	spin_lock(&cluster->sync_lock);
+
+	if (!cpumask_equal(&cluster->num_children_in_sync,
+				&cluster->child_cpus))
+		goto unlock_and_return;
+
+	state_id |= get_cluster_id(cluster->parent, aff_lvl);
+
+	if (cluster->last_level != cluster->default_level) {
+		struct lpm_cluster_level *level
+			= &cluster->levels[cluster->last_level];
+
+		state_id |= (level->psci_id & cluster->psci_mode_mask)
+					<< cluster->psci_mode_shift;
+		(*aff_lvl)++;
+	}
+unlock_and_return:
+	spin_unlock(&cluster->sync_lock);
+	return state_id;
+}
+#endif
+
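+/*
+ * psci_enter_sleep() - enter the selected level through PSCI.
+ *
+ * Index 0 is a plain WFI. For deeper levels the composite PSCI parameter is
+ * built roughly as:
+ *
+ *	state_id = PSCI_POWER_STATE(is_reset)		(powerdown vs standby)
+ *		 | PSCI_AFFINITY_LEVEL(aff_level)	(deepest affected level)
+ *		 | cpu_level->psci_id			(per-CPU mode)
+ *		 | cluster IDs from get_cluster_id()	(shifted per cluster)
+ *
+ * and handed to arm_cpuidle_suspend(). Returns true if the suspend call
+ * succeeded.
+ */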
+#if !defined(CONFIG_CPU_V7)
+asmlinkage int __invoke_psci_fn_smc(u64, u64, u64, u64);
+static bool psci_enter_sleep(struct lpm_cluster *cluster,
+					int idx, bool from_idle)
+{
+	bool ret;
+	/*
+	 * idx = 0 is the default LPM state
+	 */
+	if (!idx) {
+		stop_critical_timings();
+		wfi();
+		start_critical_timings();
+		ret = true;
+	} else {
+		int affinity_level = 0;
+		int state_id = get_cluster_id(cluster, &affinity_level);
+		int power_state =
+			PSCI_POWER_STATE(cluster->cpu->levels[idx].is_reset);
+		bool success = false;
+
+		if (cluster->cpu->levels[idx].hyp_psci) {
+			stop_critical_timings();
+			__invoke_psci_fn_smc(0xC4000021, 0, 0, 0);
+			start_critical_timings();
+			return 1;
+		}
+
+		affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
+		state_id |= (power_state | affinity_level
+			| cluster->cpu->levels[idx].psci_id);
+
+		update_debug_pc_event(CPU_ENTER, state_id,
+						0xdeaffeed, 0xdeaffeed, true);
+		stop_critical_timings();
+		success = !arm_cpuidle_suspend(state_id);
+		start_critical_timings();
+		update_debug_pc_event(CPU_EXIT, state_id,
+						success, 0xdeaffeed, true);
+		ret = success;
+	}
+	return ret;
+}
+#elif defined(CONFIG_ARM_PSCI)
+static bool psci_enter_sleep(struct lpm_cluster *cluster,
+					int idx, bool from_idle)
+{
+	bool ret;
+
+	if (!idx) {
+		stop_critical_timings();
+		wfi();
+		start_critical_timings();
+		ret = true;
+	} else {
+		int affinity_level = 0;
+		int state_id = get_cluster_id(cluster, &affinity_level);
+		int power_state =
+			PSCI_POWER_STATE(cluster->cpu->levels[idx].is_reset);
+		bool success = false;
+
+		affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
+		state_id |= (power_state | affinity_level
+			| cluster->cpu->levels[idx].psci_id);
+
+		update_debug_pc_event(CPU_ENTER, state_id,
+						0xdeaffeed, 0xdeaffeed, true);
+		stop_critical_timings();
+		success = !arm_cpuidle_suspend(state_id);
+		start_critical_timings();
+		update_debug_pc_event(CPU_EXIT, state_id,
+						success, 0xdeaffeed, true);
+		ret = success;
+	}
+	return ret;
+}
+#else
+static bool psci_enter_sleep(struct lpm_cluster *cluster,
+					int idx, bool from_idle)
+{
+	WARN_ONCE(true, "PSCI cpu_suspend ops not supported\n");
+	return false;
+}
+#endif
+
+static int lpm_cpuidle_select(struct cpuidle_driver *drv,
+		struct cpuidle_device *dev)
+{
+	struct lpm_cluster *cluster = per_cpu(cpu_cluster, dev->cpu);
+	int idx;
+
+	if (!cluster)
+		return 0;
+
+	idx = cpu_power_select(dev, cluster->cpu);
+
+	return idx;
+}
+
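+/*
+ * lpm_cpuidle_enter() - cpuidle .enter callback.
+ *
+ * Prepares the CPU and its cluster hierarchy for the selected level, enters
+ * the mode through the legacy SPM path or PSCI, then unwinds the cluster and
+ * CPU state, updates the LPM statistics and reports the measured residency
+ * back to the cpuidle core.
+ */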
+static int lpm_cpuidle_enter(struct cpuidle_device *dev,
+		struct cpuidle_driver *drv, int idx)
+{
+	struct lpm_cluster *cluster = per_cpu(cpu_cluster, dev->cpu);
+	bool success = true;
+	const struct cpumask *cpumask = get_cpu_mask(dev->cpu);
+	ktime_t start = ktime_get();
+	int64_t start_time = ktime_to_ns(ktime_get()), end_time;
+
+	if (idx < 0)
+		return -EINVAL;
+
+	cpu_prepare(cluster, idx, true);
+	cluster_prepare(cluster, cpumask, idx, true, ktime_to_ns(ktime_get()));
+
+	trace_cpu_idle_enter(idx);
+	lpm_stats_cpu_enter(idx, start_time);
+
+	if (need_resched())
+		goto exit;
+
+	if (!use_psci) {
+		if (idx > 0)
+			update_debug_pc_event(CPU_ENTER, idx, 0xdeaffeed,
+					0xdeaffeed, true);
+		success = msm_cpu_pm_enter_sleep(cluster->cpu->levels[idx].mode,
+				true);
+
+		if (idx > 0)
+			update_debug_pc_event(CPU_EXIT, idx, success,
+							0xdeaffeed, true);
+	} else {
+		success = psci_enter_sleep(cluster, idx, true);
+	}
+
+exit:
+	end_time = ktime_to_ns(ktime_get());
+	lpm_stats_cpu_exit(idx, end_time, success);
+
+	cluster_unprepare(cluster, cpumask, idx, true, end_time);
+	cpu_unprepare(cluster, idx, true);
+
+	trace_cpu_idle_exit(idx, success);
+	dev->last_residency = ktime_us_delta(ktime_get(), start);
+	local_irq_enable();
+
+	return idx;
+}
+
+#ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS
+static int cpuidle_register_cpu(struct cpuidle_driver *drv,
+		struct cpumask *mask)
+{
+	struct cpuidle_device *device;
+	int cpu, ret;
+
+	if (!mask || !drv)
+		return -EINVAL;
+
+	drv->cpumask = mask;
+	ret = cpuidle_register_driver(drv);
+	if (ret) {
+		pr_err("Failed to register cpuidle driver %d\n", ret);
+		goto failed_driver_register;
+	}
+
+	for_each_cpu(cpu, mask) {
+		device = &per_cpu(cpuidle_dev, cpu);
+		device->cpu = cpu;
+
+		ret = cpuidle_register_device(device);
+		if (ret) {
+			pr_err("Failed to register cpuidle driver for cpu:%u\n",
+					cpu);
+			goto failed_driver_register;
+		}
+	}
+	return ret;
+failed_driver_register:
+	for_each_cpu(cpu, mask)
+		cpuidle_unregister_driver(drv);
+	return ret;
+}
+#else
+static int cpuidle_register_cpu(struct cpuidle_driver *drv,
+		struct  cpumask *mask)
+{
+	return cpuidle_register(drv, NULL);
+}
+#endif
+
+static struct cpuidle_governor lpm_governor = {
+	.name =		"qcom",
+	.rating =	30,
+	.select =	lpm_cpuidle_select,
+	.owner =	THIS_MODULE,
+};
+
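+/*
+ * cluster_cpuidle_register() - build and register cpuidle drivers.
+ *
+ * Recurses into child clusters until it reaches a cluster that owns CPUs,
+ * builds a cpuidle driver from that cluster's CPU levels, maps each child
+ * CPU to its cluster, pre-votes CPUs that are currently offline as "in
+ * sync" up the hierarchy, and registers the driver with the cpuidle core.
+ */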
+static int cluster_cpuidle_register(struct lpm_cluster *cl)
+{
+	int i = 0, ret = 0;
+	unsigned int cpu;
+	struct lpm_cluster *p = NULL;
+
+	if (!cl->cpu) {
+		struct lpm_cluster *n;
+
+		list_for_each_entry(n, &cl->child, list) {
+			ret = cluster_cpuidle_register(n);
+			if (ret)
+				break;
+		}
+		return ret;
+	}
+
+	cl->drv = kzalloc(sizeof(*cl->drv), GFP_KERNEL);
+	if (!cl->drv)
+		return -ENOMEM;
+
+	cl->drv->name = "msm_idle";
+
+	for (i = 0; i < cl->cpu->nlevels; i++) {
+		struct cpuidle_state *st = &cl->drv->states[i];
+		struct lpm_cpu_level *cpu_level = &cl->cpu->levels[i];
+
+		snprintf(st->name, CPUIDLE_NAME_LEN, "C%u", i);
+		snprintf(st->desc, CPUIDLE_DESC_LEN, "%s", cpu_level->name);
+		st->flags = 0;
+		st->exit_latency = cpu_level->pwr.latency_us;
+		st->power_usage = cpu_level->pwr.ss_power;
+		st->target_residency = 0;
+		st->enter = lpm_cpuidle_enter;
+	}
+
+	cl->drv->state_count = cl->cpu->nlevels;
+	cl->drv->safe_state_index = 0;
+	for_each_cpu(cpu, &cl->child_cpus)
+		per_cpu(cpu_cluster, cpu) = cl;
+
+	for_each_possible_cpu(cpu) {
+		if (cpu_online(cpu))
+			continue;
+		p = per_cpu(cpu_cluster, cpu);
+		while (p) {
+			int j;
+
+			spin_lock(&p->sync_lock);
+			cpumask_set_cpu(cpu, &p->num_children_in_sync);
+			for (j = 0; j < p->nlevels; j++)
+				cpumask_copy(&p->levels[j].num_cpu_votes,
+						&p->num_children_in_sync);
+			spin_unlock(&p->sync_lock);
+			p = p->parent;
+		}
+	}
+	ret = cpuidle_register_cpu(cl->drv, &cl->child_cpus);
+
+	if (ret) {
+		kfree(cl->drv);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+/**
+ * init_lpm - initializes the governor
+ */
+static int __init init_lpm(void)
+{
+	return cpuidle_register_governor(&lpm_governor);
+}
+
+postcore_initcall(init_lpm);
+
+static void register_cpu_lpm_stats(struct lpm_cpu *cpu,
+		struct lpm_cluster *parent)
+{
+	const char **level_name;
+	int i;
+
+	level_name = kcalloc(cpu->nlevels, sizeof(*level_name), GFP_KERNEL);
+
+	if (!level_name)
+		return;
+
+	for (i = 0; i < cpu->nlevels; i++)
+		level_name[i] = cpu->levels[i].name;
+
+	lpm_stats_config_level("cpu", level_name, cpu->nlevels,
+			parent->stats, &parent->child_cpus);
+
+	kfree(level_name);
+}
+
+static void register_cluster_lpm_stats(struct lpm_cluster *cl,
+		struct lpm_cluster *parent)
+{
+	const char **level_name;
+	int i;
+	struct lpm_cluster *child;
+
+	if (!cl)
+		return;
+
+	level_name = kcalloc(cl->nlevels, sizeof(*level_name), GFP_KERNEL);
+
+	if (!level_name)
+		return;
+
+	for (i = 0; i < cl->nlevels; i++)
+		level_name[i] = cl->levels[i].level_name;
+
+	cl->stats = lpm_stats_config_level(cl->cluster_name, level_name,
+			cl->nlevels, parent ? parent->stats : NULL, NULL);
+
+	kfree(level_name);
+
+	if (cl->cpu) {
+		register_cpu_lpm_stats(cl->cpu, cl);
+		return;
+	}
+
+	list_for_each_entry(child, &cl->child, list)
+		register_cluster_lpm_stats(child, cl);
+}
+
+static int lpm_suspend_prepare(void)
+{
+	suspend_in_progress = true;
+	lpm_stats_suspend_enter();
+
+	return 0;
+}
+
+static void lpm_suspend_wake(void)
+{
+	suspend_in_progress = false;
+	lpm_stats_suspend_exit();
+}
+
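+/*
+ * lpm_suspend_enter() - platform_suspend_ops .enter callback.
+ *
+ * Picks the deepest CPU level that is allowed for suspend, prepares the CPU
+ * and cluster hierarchy with from_idle set to false, prints the clocks that
+ * are still enabled for debugging, enters the mode through SPM or PSCI, and
+ * unwinds the state on wakeup.
+ */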
+static int lpm_suspend_enter(suspend_state_t state)
+{
+	int cpu = raw_smp_processor_id();
+	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
+	struct lpm_cpu *lpm_cpu = cluster->cpu;
+	const struct cpumask *cpumask = get_cpu_mask(cpu);
+	int idx;
+
+	for (idx = lpm_cpu->nlevels - 1; idx >= 0; idx--) {
+
+		if (lpm_cpu_mode_allow(cpu, idx, false))
+			break;
+	}
+	if (idx < 0) {
+		pr_err("Failed suspend\n");
+		return 0;
+	}
+	cpu_prepare(cluster, idx, false);
+	cluster_prepare(cluster, cpumask, idx, false, 0);
+	if (idx > 0)
+		update_debug_pc_event(CPU_ENTER, idx, 0xdeaffeed,
+					0xdeaffeed, false);
+
+	/*
+	 * Print the clocks that are still enabled during system suspend.
+	 * This debug information helps identify which clocks remain enabled
+	 * and prevent the system level LPMs (XO and Vmin).
+	 */
+	clock_debug_print_enabled(true);
+
+	if (!use_psci)
+		msm_cpu_pm_enter_sleep(cluster->cpu->levels[idx].mode, false);
+	else
+		psci_enter_sleep(cluster, idx, true);
+
+	if (idx > 0)
+		update_debug_pc_event(CPU_EXIT, idx, true, 0xdeaffeed,
+					false);
+
+	cluster_unprepare(cluster, cpumask, idx, false, 0);
+	cpu_unprepare(cluster, idx, false);
+	return 0;
+}
+
+static int lpm_dying_cpu(unsigned int cpu)
+{
+	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
+
+	update_debug_pc_event(CPU_HP_DYING, cpu,
+				cluster->num_children_in_sync.bits[0],
+				cluster->child_cpus.bits[0], false);
+	cluster_prepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
+	return 0;
+}
+
+static int lpm_starting_cpu(unsigned int cpu)
+{
+	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
+
+	update_debug_pc_event(CPU_HP_STARTING, cpu,
+				cluster->num_children_in_sync.bits[0],
+				cluster->child_cpus.bits[0], false);
+	cluster_unprepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
+	return 0;
+}
+
+static const struct platform_suspend_ops lpm_suspend_ops = {
+	.enter = lpm_suspend_enter,
+	.valid = suspend_valid_only_mem,
+	.prepare_late = lpm_suspend_prepare,
+	.wake = lpm_suspend_wake,
+};
+
+static int lpm_probe(struct platform_device *pdev)
+{
+	int ret;
+	int size;
+	struct kobject *module_kobj = NULL;
+	struct md_region md_entry;
+
+	get_online_cpus();
+	lpm_root_node = lpm_of_parse_cluster(pdev);
+
+	if (IS_ERR_OR_NULL(lpm_root_node)) {
+		pr_err("%s(): Failed to probe low power modes\n", __func__);
+		put_online_cpus();
+		return PTR_ERR(lpm_root_node);
+	}
+
+	if (print_parsed_dt)
+		cluster_dt_walkthrough(lpm_root_node);
+
+	/*
+	 * Register the hotplug notifier before the broadcast timer to prevent
+	 * a race where a broadcast timer might not be set up for a core.
+	 * This is a bug in the existing code, but there are no known issues,
+	 * possibly because of how late lpm_levels gets initialized.
+	 */
+	suspend_set_ops(&lpm_suspend_ops);
+	hrtimer_init(&lpm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+	ret = remote_spin_lock_init(&scm_handoff_lock, SCM_HANDOFF_LOCK_ID);
+	if (ret) {
+		pr_err("%s: Failed initializing scm_handoff_lock (%d)\n",
+			__func__, ret);
+		put_online_cpus();
+		return ret;
+	}
+	size = num_dbg_elements * sizeof(struct lpm_debug);
+	lpm_debug = dma_alloc_coherent(&pdev->dev, size,
+			&lpm_debug_phys, GFP_KERNEL);
+	register_cluster_lpm_stats(lpm_root_node, NULL);
+
+	ret = cluster_cpuidle_register(lpm_root_node);
+	put_online_cpus();
+	if (ret) {
+		pr_err("%s(): Failed to register with cpuidle framework\n",
+				__func__);
+		goto failed;
+	}
+	ret = cpuhp_setup_state(CPUHP_AP_QCOM_SLEEP_STARTING,
+			"AP_QCOM_SLEEP_STARTING",
+			lpm_starting_cpu, lpm_dying_cpu);
+	if (ret)
+		goto failed;
+
+	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+	if (!module_kobj) {
+		pr_err("%s: cannot find kobject for module %s\n",
+			__func__, KBUILD_MODNAME);
+		ret = -ENOENT;
+		goto failed;
+	}
+
+	ret = create_cluster_lvl_nodes(lpm_root_node, module_kobj);
+	if (ret) {
+		pr_err("%s(): Failed to create cluster level nodes\n",
+				__func__);
+		goto failed;
+	}
+
+	/* Add lpm_debug to Minidump */
+	strlcpy(md_entry.name, "KLPMDEBUG", sizeof(md_entry.name));
+	md_entry.virt_addr = (uintptr_t)lpm_debug;
+	md_entry.phys_addr = lpm_debug_phys;
+	md_entry.size = size;
+	if (msm_minidump_add_region(&md_entry))
+		pr_info("Failed to add lpm_debug in Minidump\n");
+
+	return 0;
+failed:
+	free_cluster_node(lpm_root_node);
+	lpm_root_node = NULL;
+	return ret;
+}
+
+static const struct of_device_id lpm_mtch_tbl[] = {
+	{.compatible = "qcom,lpm-levels"},
+	{},
+};
+
+static struct platform_driver lpm_driver = {
+	.probe = lpm_probe,
+	.driver = {
+		.name = "lpm-levels",
+		.owner = THIS_MODULE,
+		.of_match_table = lpm_mtch_tbl,
+	},
+};
+
+static int __init lpm_levels_module_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&lpm_driver);
+	if (rc) {
+		pr_info("Error registering %s\n", lpm_driver.driver.name);
+		goto fail;
+	}
+
+fail:
+	return rc;
+}
+late_initcall(lpm_levels_module_init);
+
+enum msm_pm_l2_scm_flag lpm_cpu_pre_pc_cb(unsigned int cpu)
+{
+	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
+	enum msm_pm_l2_scm_flag retflag = MSM_SCM_L2_ON;
+
+	/*
+	 * No need to acquire the lock if the probe isn't completed yet.
+	 * In the event of a hotplug happening before the lpm probe, we want
+	 * to make sure that L2 is flushed, since skipping the flush could
+	 * cause incoherencies on a cluster architecture. This doesn't affect
+	 * the idle case, as the idle driver isn't registered before the
+	 * probe function completes.
+	 */
+	if (!cluster)
+		return MSM_SCM_L2_OFF;
+
+	/*
+	 * Assumes L2 only. What/how parameters get passed into TZ will
+	 * determine how this function reports this info back in msm-pm.c.
+	 */
+	spin_lock(&cluster->sync_lock);
+
+	if (!cluster->lpm_dev) {
+		retflag = MSM_SCM_L2_OFF;
+		goto unlock_and_return;
+	}
+
+	if (!cpumask_equal(&cluster->num_children_in_sync,
+						&cluster->child_cpus))
+		goto unlock_and_return;
+
+	if (cluster->lpm_dev)
+		retflag = cluster->lpm_dev->tz_flag;
+	/*
+	 * The scm_handoff_lock will be released by the secure monitor.
+	 * It is used to serialize power-collapses from this point on,
+	 * so that both Linux and the secure context have a consistent
+	 * view regarding the number of running cpus (cpu_count).
+	 *
+	 * It must be acquired before releasing the cluster lock.
+	 */
+unlock_and_return:
+	update_debug_pc_event(PRE_PC_CB, retflag, 0xdeadbeef, 0xdeadbeef,
+			0xdeadbeef);
+	trace_pre_pc_cb(retflag);
+	remote_spin_lock_rlock_id(&scm_handoff_lock,
+				  REMOTE_SPINLOCK_TID_START + cpu);
+	spin_unlock(&cluster->sync_lock);
+	return retflag;
+}
+
+/**
+ * lpm_cpu_hotplug_enter(): Called by dying CPU to terminate in low power mode
+ *
+ * @cpu: cpuid of the dying CPU
+ *
+ * Called from platform_cpu_kill() to terminate hotplug in a low power mode
+ */
+void lpm_cpu_hotplug_enter(unsigned int cpu)
+{
+	enum msm_pm_sleep_mode mode = MSM_PM_SLEEP_MODE_NR;
+	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
+	int i;
+	int idx = -1;
+
+	/*
+	 * If lpm isn't probed yet, try to put the cpu into one of the
+	 * available modes.
+	 */
+	if (!cluster) {
+		if (msm_spm_is_mode_avail(
+					MSM_SPM_MODE_POWER_COLLAPSE)){
+			mode = MSM_PM_SLEEP_MODE_POWER_COLLAPSE;
+		} else if (msm_spm_is_mode_avail(
+				MSM_SPM_MODE_FASTPC)) {
+			mode = MSM_PM_SLEEP_MODE_FASTPC;
+		} else if (msm_spm_is_mode_avail(
+				MSM_SPM_MODE_RETENTION)) {
+			mode = MSM_PM_SLEEP_MODE_RETENTION;
+		} else {
+			pr_err("No mode avail for cpu%d hotplug\n", cpu);
+			WARN_ON(1);
+			return;
+		}
+	} else {
+		struct lpm_cpu *lpm_cpu;
+		uint32_t ss_pwr = ~0U;
+
+		lpm_cpu = cluster->cpu;
+		for (i = 0; i < lpm_cpu->nlevels; i++) {
+			if (ss_pwr < lpm_cpu->levels[i].pwr.ss_power)
+				continue;
+			ss_pwr = lpm_cpu->levels[i].pwr.ss_power;
+			idx = i;
+			mode = lpm_cpu->levels[i].mode;
+		}
+
+		if (mode == MSM_PM_SLEEP_MODE_NR)
+			return;
+
+		WARN_ON(idx < 0);
+		cluster_prepare(cluster, get_cpu_mask(cpu), idx, false, 0);
+	}
+
+	msm_cpu_pm_enter_sleep(mode, false);
+}
diff --git a/drivers/cpuidle/lpm-levels-legacy.h b/drivers/cpuidle/lpm-levels-legacy.h
new file mode 100644
index 0000000..4a07355
--- /dev/null
+++ b/drivers/cpuidle/lpm-levels-legacy.h
@@ -0,0 +1,152 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <soc/qcom/pm-legacy.h>
+#include <soc/qcom/spm.h>
+
+#define NR_LPM_LEVELS 8
+
+extern bool use_psci;
+
+struct lpm_lookup_table {
+	uint32_t modes;
+	const char *mode_name;
+};
+
+struct power_params {
+	uint32_t latency_us;		/* Enter + Exit latency */
+	uint32_t ss_power;		/* Steady state power */
+	uint32_t energy_overhead;	/* Enter + exit over head */
+	uint32_t time_overhead_us;	/* Enter + exit overhead */
+	uint32_t residencies[NR_LPM_LEVELS];
+	uint32_t max_residency;
+};
+
+struct lpm_cpu_level {
+	const char *name;
+	enum msm_pm_sleep_mode mode;
+	bool use_bc_timer;
+	struct power_params pwr;
+	unsigned int psci_id;
+	bool is_reset;
+	bool jtag_save_restore;
+	bool hyp_psci;
+	int reset_level;
+};
+
+struct lpm_cpu {
+	struct lpm_cpu_level levels[NR_LPM_LEVELS];
+	int nlevels;
+	unsigned int psci_mode_shift;
+	unsigned int psci_mode_mask;
+	struct lpm_cluster *parent;
+};
+
+struct lpm_level_avail {
+	bool idle_enabled;
+	bool suspend_enabled;
+	struct kobject *kobj;
+	struct kobj_attribute idle_enabled_attr;
+	struct kobj_attribute suspend_enabled_attr;
+	void *data;
+	int idx;
+	bool cpu_node;
+};
+
+struct lpm_cluster_level {
+	const char *level_name;
+	int *mode;			/* SPM mode to enter */
+	int min_child_level;
+	struct cpumask num_cpu_votes;
+	struct power_params pwr;
+	bool notify_rpm;
+	bool disable_dynamic_routing;
+	bool sync_level;
+	bool last_core_only;
+	struct lpm_level_avail available;
+	unsigned int psci_id;
+	bool is_reset;
+	int reset_level;
+	bool no_cache_flush;
+};
+
+struct low_power_ops {
+	struct msm_spm_device *spm;
+	int (*set_mode)(struct low_power_ops *ops, int mode,
+				struct lpm_cluster_level *level);
+	enum msm_pm_l2_scm_flag tz_flag;
+};
+
+struct lpm_cluster {
+	struct list_head list;
+	struct list_head child;
+	const char *cluster_name;
+	const char **name;
+	unsigned long aff_level; /* Affinity level of the node */
+	struct low_power_ops *lpm_dev;
+	int ndevices;
+	struct lpm_cluster_level levels[NR_LPM_LEVELS];
+	int nlevels;
+	enum msm_pm_l2_scm_flag l2_flag;
+	int min_child_level;
+	int default_level;
+	int last_level;
+	struct lpm_cpu *cpu;
+	struct cpuidle_driver *drv;
+	spinlock_t sync_lock;
+	struct cpumask child_cpus;
+	struct cpumask num_children_in_sync;
+	struct lpm_cluster *parent;
+	struct lpm_stats *stats;
+	unsigned int psci_mode_shift;
+	unsigned int psci_mode_mask;
+	bool no_saw_devices;
+};
+
+int set_l2_mode(struct low_power_ops *ops, int mode,
+				struct lpm_cluster_level *level);
+int set_system_mode(struct low_power_ops *ops, int mode,
+				struct lpm_cluster_level *level);
+int set_l3_mode(struct low_power_ops *ops, int mode,
+				struct lpm_cluster_level *level);
+void lpm_suspend_wake_time(uint64_t wakeup_time);
+
+struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev);
+void free_cluster_node(struct lpm_cluster *cluster);
+void cluster_dt_walkthrough(struct lpm_cluster *cluster);
+
+int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj);
+bool lpm_cpu_mode_allow(unsigned int cpu,
+		unsigned int mode, bool from_idle);
+bool lpm_cluster_mode_allow(struct lpm_cluster *cluster,
+		unsigned int mode, bool from_idle);
+uint32_t *get_per_cpu_max_residency(int cpu);
+extern struct lpm_cluster *lpm_root_node;
+
+#ifdef CONFIG_SMP
+extern DEFINE_PER_CPU(bool, pending_ipi);
+static inline bool is_IPI_pending(const struct cpumask *mask)
+{
+	unsigned int cpu;
+
+	for_each_cpu(cpu, mask) {
+		if (per_cpu(pending_ipi, cpu))
+			return true;
+	}
+	return false;
+}
+#else
+static inline bool is_IPI_pending(const struct cpumask *mask)
+{
+	return false;
+}
+#endif
diff --git a/drivers/cpuidle/lpm-levels-of-legacy.c b/drivers/cpuidle/lpm-levels-of-legacy.c
new file mode 100644
index 0000000..8782850
--- /dev/null
+++ b/drivers/cpuidle/lpm-levels-of-legacy.c
@@ -0,0 +1,1014 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/sysfs.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/moduleparam.h>
+#include "lpm-levels-legacy.h"
+
+bool use_psci;
+enum lpm_type {
+	IDLE = 0,
+	SUSPEND,
+	LPM_TYPE_NR
+};
+
+struct lpm_type_str {
+	enum lpm_type type;
+	char *str;
+};
+
+static const struct lpm_type_str lpm_types[] = {
+	{IDLE, "idle_enabled"},
+	{SUSPEND, "suspend_enabled"},
+};
+
+static DEFINE_PER_CPU(uint32_t *, max_residency);
+static struct lpm_level_avail *cpu_level_available[NR_CPUS];
+static struct platform_device *lpm_pdev;
+
+static void *get_enabled_ptr(struct kobj_attribute *attr,
+					struct lpm_level_avail *avail)
+{
+	void *arg = NULL;
+
+	if (!strcmp(attr->attr.name, lpm_types[IDLE].str))
+		arg = (void *) &avail->idle_enabled;
+	else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str))
+		arg = (void *) &avail->suspend_enabled;
+
+	return arg;
+}
+
+static struct lpm_level_avail *get_avail_ptr(struct kobject *kobj,
+					struct kobj_attribute *attr)
+{
+	struct lpm_level_avail *avail = NULL;
+
+	if (!strcmp(attr->attr.name, lpm_types[IDLE].str))
+		avail = container_of(attr, struct lpm_level_avail,
+					idle_enabled_attr);
+	else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str))
+		avail = container_of(attr, struct lpm_level_avail,
+					suspend_enabled_attr);
+
+	return avail;
+}
+
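+/*
+ * set_optimum_cpu_residency() - refresh the per-CPU max_residency table.
+ *
+ * For every enabled CPU level, max_residency is the smallest non-zero
+ * break-even residency against any deeper enabled level, i.e. the sleep
+ * length beyond which a deeper level becomes the better choice. Disabled
+ * levels get a residency of 0.
+ */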
+static void set_optimum_cpu_residency(struct lpm_cpu *cpu, int cpu_id,
+		bool probe_time)
+{
+	int i, j;
+	bool mode_avail;
+	uint32_t *residency = per_cpu(max_residency, cpu_id);
+
+	for (i = 0; i < cpu->nlevels; i++) {
+		struct power_params *pwr = &cpu->levels[i].pwr;
+
+		mode_avail = probe_time ||
+			lpm_cpu_mode_allow(cpu_id, i, true);
+
+		if (!mode_avail) {
+			residency[i] = 0;
+			continue;
+		}
+
+		residency[i] = ~0;
+		for (j = i + 1; j < cpu->nlevels; j++) {
+			mode_avail = probe_time ||
+					lpm_cpu_mode_allow(cpu_id, j, true);
+
+			if (mode_avail &&
+				(residency[i] > pwr->residencies[j]) &&
+				(pwr->residencies[j] != 0))
+				residency[i] = pwr->residencies[j];
+		}
+	}
+}
+
+static void set_optimum_cluster_residency(struct lpm_cluster *cluster,
+		bool probe_time)
+{
+	int i, j;
+	bool mode_avail;
+
+	for (i = 0; i < cluster->nlevels; i++) {
+		struct power_params *pwr = &cluster->levels[i].pwr;
+
+		mode_avail = probe_time ||
+			lpm_cluster_mode_allow(cluster, i,
+					true);
+
+		if (!mode_avail) {
+			pwr->max_residency = 0;
+			continue;
+		}
+
+		pwr->max_residency = ~0;
+		for (j = i+1; j < cluster->nlevels; j++) {
+			mode_avail = probe_time ||
+					lpm_cluster_mode_allow(cluster, j,
+							true);
+			if (mode_avail &&
+				(pwr->max_residency > pwr->residencies[j]) &&
+				(pwr->residencies[j] != 0))
+				pwr->max_residency = pwr->residencies[j];
+		}
+	}
+}
+
+uint32_t *get_per_cpu_max_residency(int cpu)
+{
+	return per_cpu(max_residency, cpu);
+}
+
+static ssize_t lpm_enable_show(struct kobject *kobj,
+			struct kobj_attribute *attr, char *buf)
+{
+	int ret = 0;
+	struct kernel_param kp;
+
+	kp.arg = get_enabled_ptr(attr, get_avail_ptr(kobj, attr));
+	ret = param_get_bool(buf, &kp);
+	if (ret > 0) {
+		strlcat(buf, "\n", PAGE_SIZE);
+		ret++;
+	}
+
+	return ret;
+}
+
+static ssize_t lpm_enable_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t len)
+{
+	int ret = 0;
+	struct kernel_param kp;
+	struct lpm_level_avail *avail;
+
+	avail = get_avail_ptr(kobj, attr);
+	if (WARN_ON(!avail))
+		return -EINVAL;
+
+	kp.arg = get_enabled_ptr(attr, avail);
+	ret = param_set_bool(buf, &kp);
+
+	if (avail->cpu_node)
+		set_optimum_cpu_residency(avail->data, avail->idx, false);
+	else
+		set_optimum_cluster_residency(avail->data, false);
+
+	return ret ? ret : len;
+}
+
+static int create_lvl_avail_nodes(const char *name,
+			struct kobject *parent, struct lpm_level_avail *avail,
+			void *data, int index, bool cpu_node)
+{
+	struct attribute_group *attr_group = NULL;
+	struct attribute **attr = NULL;
+	struct kobject *kobj = NULL;
+	int ret = 0;
+
+	kobj = kobject_create_and_add(name, parent);
+	if (!kobj)
+		return -ENOMEM;
+
+	attr_group = devm_kzalloc(&lpm_pdev->dev, sizeof(*attr_group),
+					GFP_KERNEL);
+	if (!attr_group) {
+		ret = -ENOMEM;
+		goto failed;
+	}
+
+	attr = devm_kzalloc(&lpm_pdev->dev,
+		sizeof(*attr) * (LPM_TYPE_NR + 1), GFP_KERNEL);
+	if (!attr) {
+		ret = -ENOMEM;
+		goto failed;
+	}
+
+	sysfs_attr_init(&avail->idle_enabled_attr.attr);
+	avail->idle_enabled_attr.attr.name = lpm_types[IDLE].str;
+	avail->idle_enabled_attr.attr.mode = 0644;
+	avail->idle_enabled_attr.show = lpm_enable_show;
+	avail->idle_enabled_attr.store = lpm_enable_store;
+
+	sysfs_attr_init(&avail->suspend_enabled_attr.attr);
+	avail->suspend_enabled_attr.attr.name = lpm_types[SUSPEND].str;
+	avail->suspend_enabled_attr.attr.mode = 0644;
+	avail->suspend_enabled_attr.show = lpm_enable_show;
+	avail->suspend_enabled_attr.store = lpm_enable_store;
+
+	attr[0] = &avail->idle_enabled_attr.attr;
+	attr[1] = &avail->suspend_enabled_attr.attr;
+	attr[2] = NULL;
+	attr_group->attrs = attr;
+
+	ret = sysfs_create_group(kobj, attr_group);
+	if (ret) {
+		ret = -ENOMEM;
+		goto failed;
+	}
+
+	avail->idle_enabled = true;
+	avail->suspend_enabled = true;
+	avail->kobj = kobj;
+	avail->data = data;
+	avail->idx = index;
+	avail->cpu_node = cpu_node;
+
+	return ret;
+
+failed:
+	kobject_put(kobj);
+	return ret;
+}
+
+static int create_cpu_lvl_nodes(struct lpm_cluster *p, struct kobject *parent)
+{
+	int cpu;
+	int i, cpu_idx;
+	struct kobject **cpu_kobj = NULL;
+	struct lpm_level_avail *level_list = NULL;
+	char cpu_name[20] = {0};
+	int ret = 0;
+
+	cpu_kobj = devm_kzalloc(&lpm_pdev->dev, sizeof(*cpu_kobj) *
+			cpumask_weight(&p->child_cpus), GFP_KERNEL);
+	if (!cpu_kobj)
+		return -ENOMEM;
+
+	cpu_idx = 0;
+	for_each_cpu(cpu, &p->child_cpus) {
+		snprintf(cpu_name, sizeof(cpu_name), "cpu%d", cpu);
+		cpu_kobj[cpu_idx] = kobject_create_and_add(cpu_name, parent);
+		if (!cpu_kobj[cpu_idx]) {
+			ret = -ENOMEM;
+			goto release_kobj;
+		}
+
+		level_list = devm_kzalloc(&lpm_pdev->dev,
+				p->cpu->nlevels * sizeof(*level_list),
+				GFP_KERNEL);
+		if (!level_list) {
+			ret = -ENOMEM;
+			goto release_kobj;
+		}
+
+		for (i = 0; i < p->cpu->nlevels; i++) {
+
+			ret = create_lvl_avail_nodes(p->cpu->levels[i].name,
+					cpu_kobj[cpu_idx], &level_list[i],
+					(void *)p->cpu, cpu, true);
+			if (ret)
+				goto release_kobj;
+		}
+
+		cpu_level_available[cpu] = level_list;
+		cpu_idx++;
+	}
+
+	return ret;
+
+release_kobj:
+	for (i = 0; i < cpumask_weight(&p->child_cpus); i++)
+		kobject_put(cpu_kobj[i]);
+
+	return ret;
+}
+
+int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj)
+{
+	int ret = 0;
+	struct lpm_cluster *child = NULL;
+	int i;
+	struct kobject *cluster_kobj = NULL;
+
+	if (!p)
+		return -ENODEV;
+
+	cluster_kobj = kobject_create_and_add(p->cluster_name, kobj);
+	if (!cluster_kobj)
+		return -ENOMEM;
+
+	for (i = 0; i < p->nlevels; i++) {
+		ret = create_lvl_avail_nodes(p->levels[i].level_name,
+				cluster_kobj, &p->levels[i].available,
+				(void *)p, 0, false);
+		if (ret)
+			return ret;
+	}
+
+	list_for_each_entry(child, &p->child, list) {
+		ret = create_cluster_lvl_nodes(child, cluster_kobj);
+		if (ret)
+			return ret;
+	}
+
+	if (p->cpu) {
+		ret = create_cpu_lvl_nodes(p, cluster_kobj);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+bool lpm_cpu_mode_allow(unsigned int cpu,
+		unsigned int index, bool from_idle)
+{
+	struct lpm_level_avail *avail = cpu_level_available[cpu];
+
+	if (!lpm_pdev || !avail)
+		return !from_idle;
+
+	return !!(from_idle ? avail[index].idle_enabled :
+				avail[index].suspend_enabled);
+}
+
+bool lpm_cluster_mode_allow(struct lpm_cluster *cluster,
+		unsigned int mode, bool from_idle)
+{
+	struct lpm_level_avail *avail = &cluster->levels[mode].available;
+
+	if (!lpm_pdev || !avail)
+		return false;
+
+	return !!(from_idle ? avail->idle_enabled :
+				avail->suspend_enabled);
+}
+
+static int parse_legacy_cluster_params(struct device_node *node,
+		struct lpm_cluster *c)
+{
+	int i;
+	char *key;
+	int ret;
+	struct lpm_match {
+		char *devname;
+		int (*set_mode)(struct low_power_ops *, int,
+					struct lpm_cluster_level *);
+	};
+	struct lpm_match match_tbl[] = {
+		{"l2", set_l2_mode},
+		{"cci", set_system_mode},
+		{"l3", set_l3_mode},
+		{"cbf", set_system_mode},
+	};
+
+	key = "qcom,spm-device-names";
+	c->ndevices = of_property_count_strings(node, key);
+
+	if (c->ndevices < 0) {
+		pr_info("%s(): Ignoring cluster params\n", __func__);
+		c->no_saw_devices = true;
+		c->ndevices = 0;
+		return 0;
+	}
+
+	c->name = devm_kzalloc(&lpm_pdev->dev, c->ndevices * sizeof(*c->name),
+				GFP_KERNEL);
+	c->lpm_dev = devm_kzalloc(&lpm_pdev->dev,
+				c->ndevices * sizeof(*c->lpm_dev),
+				GFP_KERNEL);
+	if (!c->name || !c->lpm_dev) {
+		ret = -ENOMEM;
+		goto failed;
+	}
+
+	for (i = 0; i < c->ndevices; i++) {
+		char device_name[20];
+		int j;
+
+		ret = of_property_read_string_index(node, key, i, &c->name[i]);
+		if (ret)
+			goto failed;
+		snprintf(device_name, sizeof(device_name), "%s-%s",
+				c->cluster_name, c->name[i]);
+
+		c->lpm_dev[i].spm = msm_spm_get_device_by_name(device_name);
+
+		if (IS_ERR_OR_NULL(c->lpm_dev[i].spm)) {
+			pr_err("Failed to get spm device by name:%s\n",
+					device_name);
+			ret = PTR_ERR(c->lpm_dev[i].spm);
+			goto failed;
+		}
+		for (j = 0; j < ARRAY_SIZE(match_tbl); j++) {
+			if (!strcmp(c->name[i], match_tbl[j].devname))
+				c->lpm_dev[i].set_mode = match_tbl[j].set_mode;
+		}
+
+		if (!c->lpm_dev[i].set_mode) {
+			ret = -ENODEV;
+			goto failed;
+		}
+	}
+
+	key = "qcom,default-level";
+	if (of_property_read_u32(node, key, &c->default_level))
+		c->default_level = 0;
+	return 0;
+failed:
+	pr_err("%s(): Failed reading %s\n", __func__, key);
+	return ret;
+}
+
+static int parse_cluster_params(struct device_node *node,
+		struct lpm_cluster *c)
+{
+	char *key;
+	int ret;
+
+	key = "label";
+	ret = of_property_read_string(node, key, &c->cluster_name);
+	if (ret) {
+		pr_err("%s(): Cannot read required param %s\n", __func__, key);
+		return ret;
+	}
+
+	if (use_psci) {
+		key = "qcom,psci-mode-shift";
+		ret = of_property_read_u32(node, key,
+				&c->psci_mode_shift);
+		if (ret) {
+			pr_err("%s(): Failed to read param: %s\n",
+							__func__, key);
+			return ret;
+		}
+
+		key = "qcom,psci-mode-mask";
+		ret = of_property_read_u32(node, key,
+				&c->psci_mode_mask);
+		if (ret) {
+			pr_err("%s(): Failed to read param: %s\n",
+							__func__, key);
+			return ret;
+		}
+
+		/* Set ndevices to 1 by default */
+		c->ndevices = 1;
+
+		return 0;
+	} else
+		return parse_legacy_cluster_params(node, c);
+}
+
+static int parse_lpm_mode(const char *str)
+{
+	int i;
+	struct lpm_lookup_table mode_lookup[] = {
+		{MSM_SPM_MODE_POWER_COLLAPSE, "pc"},
+		{MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE, "spc"},
+		{MSM_SPM_MODE_FASTPC, "fpc"},
+		{MSM_SPM_MODE_GDHS, "gdhs"},
+		{MSM_SPM_MODE_RETENTION, "retention"},
+		{MSM_SPM_MODE_CLOCK_GATING, "wfi"},
+		{MSM_SPM_MODE_DISABLED, "active"}
+	};
+
+	for (i = 0; i < ARRAY_SIZE(mode_lookup); i++)
+		if (!strcmp(str, mode_lookup[i].mode_name))
+			return  mode_lookup[i].modes;
+	return -EINVAL;
+}
+
+static int parse_power_params(struct device_node *node,
+		struct power_params *pwr)
+{
+	char *key;
+	int ret;
+
+	key = "qcom,latency-us";
+	ret  = of_property_read_u32(node, key, &pwr->latency_us);
+	if (ret)
+		goto fail;
+
+	key = "qcom,ss-power";
+	ret = of_property_read_u32(node, key, &pwr->ss_power);
+	if (ret)
+		goto fail;
+
+	key = "qcom,energy-overhead";
+	ret = of_property_read_u32(node, key, &pwr->energy_overhead);
+	if (ret)
+		goto fail;
+
+	key = "qcom,time-overhead";
+	ret = of_property_read_u32(node, key, &pwr->time_overhead_us);
+	if (ret)
+		goto fail;
+
+fail:
+	if (ret)
+		pr_err("%s(): %s Error reading %s\n", __func__, node->name,
+				key);
+	return ret;
+}
+
+static int parse_cluster_level(struct device_node *node,
+		struct lpm_cluster *cluster)
+{
+	int i = 0;
+	struct lpm_cluster_level *level = &cluster->levels[cluster->nlevels];
+	int ret = -ENOMEM;
+	char *key;
+
+	key = "label";
+	ret = of_property_read_string(node, key, &level->level_name);
+	if (ret)
+		goto failed;
+
+	if (use_psci) {
+		char *k = "qcom,psci-mode";
+
+		ret = of_property_read_u32(node, k, &level->psci_id);
+		if (ret)
+			goto failed;
+
+		level->is_reset = of_property_read_bool(node, "qcom,is-reset");
+	} else if (!cluster->no_saw_devices) {
+		key = "no saw-devices";
+
+		level->mode = devm_kzalloc(&lpm_pdev->dev,
+				cluster->ndevices * sizeof(*level->mode),
+				GFP_KERNEL);
+		if (!level->mode) {
+			pr_err("Memory allocation failed\n");
+			goto failed;
+		}
+
+		for (i = 0; i < cluster->ndevices; i++) {
+			const char *spm_mode;
+			char key[25] = {0};
+
+			snprintf(key, 25, "qcom,spm-%s-mode", cluster->name[i]);
+			ret = of_property_read_string(node, key, &spm_mode);
+			if (ret)
+				goto failed;
+
+			level->mode[i] = parse_lpm_mode(spm_mode);
+
+			if (level->mode[i] < 0)
+				goto failed;
+
+			if (level->mode[i] == MSM_SPM_MODE_POWER_COLLAPSE
+				|| level->mode[i] ==
+				MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE)
+				level->is_reset |= true;
+		}
+	}
+
+	key = "label";
+	ret = of_property_read_string(node, key, &level->level_name);
+	if (ret)
+		goto failed;
+
+	if (cluster->nlevels != cluster->default_level) {
+		key = "min child idx";
+		ret = of_property_read_u32(node, "qcom,min-child-idx",
+				&level->min_child_level);
+		if (ret)
+			goto failed;
+
+		if (cluster->min_child_level > level->min_child_level)
+			cluster->min_child_level = level->min_child_level;
+	}
+
+	level->notify_rpm = of_property_read_bool(node, "qcom,notify-rpm");
+	level->disable_dynamic_routing = of_property_read_bool(node,
+					"qcom,disable-dynamic-int-routing");
+	level->last_core_only = of_property_read_bool(node,
+					"qcom,last-core-only");
+	level->no_cache_flush = of_property_read_bool(node,
+					"qcom,no-cache-flush");
+
+	key = "parse_power_params";
+	ret = parse_power_params(node, &level->pwr);
+	if (ret)
+		goto failed;
+
+	key = "qcom,reset-level";
+	ret = of_property_read_u32(node, key, &level->reset_level);
+	if (ret == -EINVAL)
+		level->reset_level = LPM_RESET_LVL_NONE;
+	else if (ret)
+		goto failed;
+
+	cluster->nlevels++;
+	return 0;
+failed:
+	pr_err("Failed %s() key = %s ret = %d\n", __func__, key, ret);
+	return ret;
+}
+
+static int parse_cpu_spm_mode(const char *mode_name)
+{
+	struct lpm_lookup_table pm_sm_lookup[] = {
+		{MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT,
+			"wfi"},
+		{MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE,
+			"standalone_pc"},
+		{MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
+			"pc"},
+		{MSM_PM_SLEEP_MODE_RETENTION,
+			"retention"},
+		{MSM_PM_SLEEP_MODE_FASTPC,
+			"fpc"},
+	};
+	int i;
+	int ret = -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(pm_sm_lookup); i++) {
+		if (!strcmp(mode_name, pm_sm_lookup[i].mode_name)) {
+			ret = pm_sm_lookup[i].modes;
+			break;
+		}
+	}
+	return ret;
+}
+
+static int parse_cpu_mode(struct device_node *n, struct lpm_cpu_level *l)
+{
+	char *key;
+	int ret;
+
+	key = "qcom,spm-cpu-mode";
+	ret  =  of_property_read_string(n, key, &l->name);
+	if (ret) {
+		pr_err("Failed %s %d\n", n->name, __LINE__);
+		return ret;
+	}
+
+	if (use_psci) {
+		key = "qcom,psci-cpu-mode";
+
+		ret = of_property_read_u32(n, key, &l->psci_id);
+		if (ret) {
+			pr_err("Failed reading %s on device %s\n", key,
+					n->name);
+			return ret;
+		}
+		key = "qcom,hyp-psci";
+
+		l->hyp_psci = of_property_read_bool(n, key);
+	} else {
+		l->mode = parse_cpu_spm_mode(l->name);
+
+		if (l->mode < 0)
+			return l->mode;
+	}
+	return 0;
+
+}
+
+static int get_cpumask_for_node(struct device_node *node, struct cpumask *mask)
+{
+	struct device_node *cpu_node;
+	int cpu;
+	int idx = 0;
+
+	cpu_node = of_parse_phandle(node, "qcom,cpu", idx++);
+	if (!cpu_node) {
+		pr_info("%s: No CPU phandle, assuming single cluster\n",
+				node->full_name);
+		/*
+		 * Not all targets have the cpu node populated in the device
+		 * tree. If the cpu node is not populated, assume all possible
+		 * CPUs belong to this cluster.
+		 */
+		cpumask_copy(mask, cpu_possible_mask);
+		return 0;
+	}
+
+	while (cpu_node) {
+		for_each_possible_cpu(cpu) {
+			if (of_get_cpu_node(cpu, NULL) == cpu_node) {
+				cpumask_set_cpu(cpu, mask);
+				break;
+			}
+		}
+		of_node_put(cpu_node);
+		cpu_node = of_parse_phandle(node, "qcom,cpu", idx++);
+	}
+
+	return 0;
+}
+
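+/*
+ * calculate_residency() - break-even residency between two levels, derived
+ * from the device tree power parameters:
+ *
+ *	residency = (dE_overhead - (P_next * T_next - P_base * T_base))
+ *			/ (P_base - P_next)
+ *
+ * i.e. the sleep length at which the deeper level's extra overhead is paid
+ * back by its lower steady-state power. With purely illustrative numbers:
+ * base {ss_power 100, time_overhead 50us, energy_overhead 1000} and next
+ * {ss_power 60, time_overhead 300us, energy_overhead 20000} give
+ * (19000 - 13000) / 40 = 150us, which is then clamped up to the deeper
+ * level's 300us time overhead. Negative results also fall back to the
+ * deeper level's time overhead.
+ */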
+static int calculate_residency(struct power_params *base_pwr,
+					struct power_params *next_pwr)
+{
+	int32_t residency = (int32_t)(next_pwr->energy_overhead -
+						base_pwr->energy_overhead) -
+		((int32_t)(next_pwr->ss_power * next_pwr->time_overhead_us)
+		- (int32_t)(base_pwr->ss_power * base_pwr->time_overhead_us));
+
+	residency /= (int32_t)(base_pwr->ss_power - next_pwr->ss_power);
+
+	if (residency < 0) {
+		pr_err("%s: residency < 0 for LPM\n",
+				__func__);
+		return next_pwr->time_overhead_us;
+	}
+
+	return residency < next_pwr->time_overhead_us ?
+				next_pwr->time_overhead_us : residency;
+}
+
+static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c)
+{
+	struct device_node *n;
+	int ret = -ENOMEM;
+	int i, j;
+	char *key;
+
+	c->cpu = devm_kzalloc(&lpm_pdev->dev, sizeof(*c->cpu), GFP_KERNEL);
+	if (!c->cpu)
+		return ret;
+
+	c->cpu->parent = c;
+	if (use_psci) {
+
+		key = "qcom,psci-mode-shift";
+
+		ret = of_property_read_u32(node, key, &c->cpu->psci_mode_shift);
+		if (ret) {
+			pr_err("Failed reading %s on device %s\n", key,
+					node->name);
+			return ret;
+		}
+		key = "qcom,psci-mode-mask";
+
+		ret = of_property_read_u32(node, key, &c->cpu->psci_mode_mask);
+		if (ret) {
+			pr_err("Failed reading %s on device %s\n", key,
+					node->name);
+			return ret;
+		}
+	}
+	for_each_child_of_node(node, n) {
+		struct lpm_cpu_level *l = &c->cpu->levels[c->cpu->nlevels];
+
+		c->cpu->nlevels++;
+
+		ret = parse_cpu_mode(n, l);
+		if (ret < 0) {
+			pr_info("Failed %s\n", l->name);
+			goto failed;
+		}
+
+		ret = parse_power_params(n, &l->pwr);
+		if (ret)
+			goto failed;
+
+		key = "qcom,use-broadcast-timer";
+		l->use_bc_timer = of_property_read_bool(n, key);
+
+		l->is_reset = of_property_read_bool(n, "qcom,is-reset");
+
+		key = "qcom,jtag-save-restore";
+		l->jtag_save_restore = of_property_read_bool(n, key);
+
+		key = "qcom,reset-level";
+		ret = of_property_read_u32(n, key, &l->reset_level);
+		if (ret == -EINVAL)
+			l->reset_level = LPM_RESET_LVL_NONE;
+		else if (ret)
+			goto failed;
+		of_node_put(n);
+	}
+	for (i = 0; i < c->cpu->nlevels; i++) {
+		for (j = 0; j < c->cpu->nlevels; j++) {
+			if (i >= j) {
+				c->cpu->levels[i].pwr.residencies[j] = 0;
+				continue;
+			}
+
+			c->cpu->levels[i].pwr.residencies[j] =
+				calculate_residency(&c->cpu->levels[i].pwr,
+					&c->cpu->levels[j].pwr);
+
+			pr_err("%s: idx %d %u\n", __func__, j,
+					c->cpu->levels[i].pwr.residencies[j]);
+		}
+	}
+
+	return 0;
+failed:
+	of_node_put(n);
+	pr_err("%s(): Failed with error code:%d\n", __func__, ret);
+	return ret;
+}
+
+void free_cluster_node(struct lpm_cluster *cluster)
+{
+	struct lpm_cluster *cl, *m;
+
+	list_for_each_entry_safe(cl, m, &cluster->child, list) {
+		list_del(&cl->list);
+		free_cluster_node(cl);
+	}
+
+	cluster->ndevices = 0;
+}
+
+/*
+ * TODO:
+ * Expects a CPU or a cluster only. This ensures that affinity
+ * level of a cluster is consistent with reference to its
+ * child nodes.
+ */
+static struct lpm_cluster *parse_cluster(struct device_node *node,
+		struct lpm_cluster *parent)
+{
+	struct lpm_cluster *c;
+	struct device_node *n;
+	char *key;
+	int ret = 0;
+	int i, j;
+
+	c = devm_kzalloc(&lpm_pdev->dev, sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	ret = parse_cluster_params(node, c);
+
+	if (ret)
+		goto failed_parse_params;
+
+	INIT_LIST_HEAD(&c->child);
+	c->parent = parent;
+	spin_lock_init(&c->sync_lock);
+	c->min_child_level = NR_LPM_LEVELS;
+
+	for_each_child_of_node(node, n) {
+
+		if (!n->name)
+			continue;
+		key = "qcom,pm-cluster-level";
+		if (!of_node_cmp(n->name, key)) {
+			if (parse_cluster_level(n, c)) {
+				of_node_put(n);
+				goto failed_parse_cluster;
+			}
+			of_node_put(n);
+			continue;
+		}
+
+		key = "qcom,pm-cluster";
+		if (!of_node_cmp(n->name, key)) {
+			struct lpm_cluster *child;
+
+			if (c->no_saw_devices)
+				pr_info("%s: SAW device not provided.\n",
+					__func__);
+
+			child = parse_cluster(n, c);
+			if (!child) {
+				of_node_put(n);
+				goto failed_parse_cluster;
+			}
+
+			list_add(&child->list, &c->child);
+			cpumask_or(&c->child_cpus, &c->child_cpus,
+					&child->child_cpus);
+			c->aff_level = child->aff_level + 1;
+			of_node_put(n);
+			continue;
+		}
+
+		key = "qcom,pm-cpu";
+		if (!of_node_cmp(n->name, key)) {
+			/*
+			 * Parse the cpu node only if a pm-cpu node is
+			 * available, though the mask is defined at the
+			 * cluster level
+			 */
+			if (get_cpumask_for_node(node, &c->child_cpus))
+				goto failed_parse_cluster;
+
+			if (parse_cpu_levels(n, c)) {
+				of_node_put(n);
+				goto failed_parse_cluster;
+			}
+
+			c->aff_level = 1;
+			of_node_put(n);
+
+			for_each_cpu(i, &c->child_cpus) {
+				per_cpu(max_residency, i) = devm_kzalloc(
+					&lpm_pdev->dev,
+					sizeof(uint32_t) * c->cpu->nlevels,
+					GFP_KERNEL);
+				if (!per_cpu(max_residency, i))
+					return ERR_PTR(-ENOMEM);
+				set_optimum_cpu_residency(c->cpu, i, true);
+			}
+		}
+	}
+
+	if (cpumask_intersects(&c->child_cpus, cpu_online_mask))
+		c->last_level = c->default_level;
+	else
+		c->last_level = c->nlevels-1;
+
+	for (i = 0; i < c->nlevels; i++) {
+		for (j = 0; j < c->nlevels; j++) {
+			if (i >= j) {
+				c->levels[i].pwr.residencies[j] = 0;
+				continue;
+			}
+			c->levels[i].pwr.residencies[j] = calculate_residency(
+				&c->levels[i].pwr, &c->levels[j].pwr);
+		}
+	}
+	set_optimum_cluster_residency(c, true);
+	return c;
+
+failed_parse_cluster:
+	pr_err("Failed parse cluster:%s\n", key);
+	if (parent)
+		list_del(&c->list);
+	free_cluster_node(c);
+failed_parse_params:
+	pr_err("Failed parse params\n");
+	return NULL;
+}
+struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev)
+{
+	struct device_node *top = NULL;
+	struct lpm_cluster *c;
+
+	use_psci = of_property_read_bool(pdev->dev.of_node, "qcom,use-psci");
+
+	top = of_find_node_by_name(pdev->dev.of_node, "qcom,pm-cluster");
+	if (!top) {
+		pr_err("Failed to find root node\n");
+		return ERR_PTR(-ENODEV);
+	}
+
+	lpm_pdev = pdev;
+	c = parse_cluster(top, NULL);
+	of_node_put(top);
+	return c;
+}
+
+void cluster_dt_walkthrough(struct lpm_cluster *cluster)
+{
+	struct list_head *list;
+	int i, j;
+	static int id;
+	char str[10] = {0};
+
+	if (!cluster)
+		return;
+
+	for (i = 0; i < id; i++)
+		snprintf(str+i, 10 - i, "\t");
+	pr_info("%d\n", __LINE__);
+
+	for (i = 0; i < cluster->nlevels; i++) {
+		struct lpm_cluster_level *l = &cluster->levels[i];
+
+		pr_info("%d ndevices:%d\n", __LINE__, cluster->ndevices);
+		for (j = 0; j < cluster->ndevices; j++)
+			pr_info("%sDevice: %pK id:%pK\n", str,
+					&cluster->name[j], &l->mode[i]);
+	}
+
+	if (cluster->cpu) {
+		pr_info("%d\n", __LINE__);
+		for (j = 0; j < cluster->cpu->nlevels; j++)
+			pr_info("%s\tCPU mode: %s id:%d\n", str,
+					cluster->cpu->levels[j].name,
+					cluster->cpu->levels[j].mode);
+	}
+
+	id++;
+
+
+	list_for_each(list, &cluster->child) {
+		struct lpm_cluster *n;
+
+		pr_info("%d\n", __LINE__);
+		n = list_entry(list, typeof(*n), list);
+		cluster_dt_walkthrough(n);
+	}
+	id--;
+}
diff --git a/drivers/cpuidle/lpm-levels-of.c b/drivers/cpuidle/lpm-levels-of.c
index fe3ef48..1d1d7e7 100644
--- a/drivers/cpuidle/lpm-levels-of.c
+++ b/drivers/cpuidle/lpm-levels-of.c
@@ -614,6 +614,7 @@
 				break;
 			}
 		}
+		of_node_put(cpu_node);
 		cpu_node = of_parse_phandle(node, "qcom,cpu", idx++);
 	}
 
@@ -651,13 +652,16 @@
 		cpu->nlevels++;
 
 		ret = parse_cpu_mode(n, l);
-		if (ret)
+		if (ret) {
+			of_node_put(n);
 			return ret;
+		}
 
 		ret = parse_power_params(n, &l->pwr);
-		if (ret)
+		if (ret) {
+			of_node_put(n);
 			return ret;
-
+		}
 		key = "qcom,use-broadcast-timer";
 		l->use_bc_timer = of_property_read_bool(n, key);
 
@@ -670,6 +674,7 @@
 			l->reset_level = LPM_RESET_LVL_NONE;
 		else if (ret)
 			return ret;
+		of_node_put(n);
 	}
 
 	for (i = 0; i < cpu->nlevels; i++) {
@@ -708,7 +713,7 @@
 
 static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c)
 {
-	int ret, i;
+	int ret;
 	char *key;
 	struct lpm_cpu *cpu;
 
@@ -724,12 +729,12 @@
 	key = "qcom,psci-mode-shift";
 	ret = of_property_read_u32(node, key, &cpu->psci_mode_shift);
 	if (ret)
-		goto failed_parse_params;
+		goto failed;
 
 	key = "qcom,psci-mode-mask";
 	ret = of_property_read_u32(node, key, &cpu->psci_mode_mask);
 	if (ret)
-		goto failed_parse_params;
+		goto failed;
 
 	key = "qcom,disable-prediction";
 	cpu->lpm_prediction = !(of_property_read_bool(node, key));
@@ -757,20 +762,14 @@
 	key = "parse_cpu";
 	ret = parse_cpu(node, cpu);
 	if (ret)
-		goto failed_parse_cpu;
+		goto failed;
 
 	cpumask_or(&c->child_cpus, &c->child_cpus, &cpu->related_cpus);
 	list_add(&cpu->list, &c->cpu);
 
 	return ret;
 
-failed_parse_cpu:
-	for (i = 0; i < cpu->nlevels; i++) {
-		kfree(cpu->levels[i].name);
-		cpu->levels[i].name = NULL;
-	}
-
-failed_parse_params:
+failed:
 	pr_err("Failed to read key: %s node: %s\n", key, node->name);
 	return ret;
 }
@@ -785,15 +784,8 @@
 		free_cluster_node(cl);
 	};
 
-	list_for_each_entry_safe(cpu, n, &cluster->cpu, list) {
-		int i;
-
+	list_for_each_entry_safe(cpu, n, &cluster->cpu, list)
 		list_del(&cpu->list);
-		for (i = 0; i < cpu->nlevels; i++) {
-			kfree(cpu->levels[i].name);
-			cpu->levels[i].name = NULL;
-		}
-	}
 }
 
 /*
@@ -833,8 +825,11 @@
 
 		key = "qcom,pm-cluster-level";
 		if (!of_node_cmp(n->name, key)) {
-			if (parse_cluster_level(n, c))
+			if (parse_cluster_level(n, c)) {
+				of_node_put(n);
 				goto failed_parse_cluster;
+			}
+			of_node_put(n);
 			continue;
 		}
 
@@ -843,22 +838,28 @@
 			struct lpm_cluster *child;
 
 			child = parse_cluster(n, c);
-			if (!child)
+			if (!child) {
+				of_node_put(n);
 				goto failed_parse_cluster;
+			}
 
 			list_add(&child->list, &c->child);
 			cpumask_or(&c->child_cpus, &c->child_cpus,
 					&child->child_cpus);
 			c->aff_level = child->aff_level + 1;
+			of_node_put(n);
 			continue;
 		}
 
 		key = "qcom,pm-cpu";
 		if (!of_node_cmp(n->name, key)) {
-			if (parse_cpu_levels(n, c))
+			if (parse_cpu_levels(n, c)) {
+				of_node_put(n);
 				goto failed_parse_cluster;
+			}
 
 			c->aff_level = 1;
+			of_node_put(n);
 		}
 	}
 
@@ -886,13 +887,13 @@
 		list_del(&c->list);
 	free_cluster_node(c);
 failed_parse_params:
-	c->parent = NULL;
 	pr_err("Failed parse params\n");
 	return NULL;
 }
 struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev)
 {
 	struct device_node *top = NULL;
+	struct lpm_cluster *c;
 
 	top = of_find_node_by_name(pdev->dev.of_node, "qcom,pm-cluster");
 	if (!top) {
@@ -901,7 +902,9 @@
 	}
 
 	lpm_pdev = pdev;
-	return parse_cluster(top, NULL);
+	c = parse_cluster(top, NULL);
+	of_node_put(top);
+	return c;
 }
 
 void cluster_dt_walkthrough(struct lpm_cluster *cluster)
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index 9ae640d..c7ec868 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -159,7 +159,7 @@
 	uint32_t latency = 0;
 	int i;
 
-	if (!cluster->list.next) {
+	if (list_empty(&cluster->list)) {
 		for (i = 0; i < cluster->nlevels; i++) {
 			level = &cluster->levels[i];
 			pwr_params = &level->pwr;
@@ -341,6 +341,11 @@
 {
 	unsigned int cpu = raw_smp_processor_id();
 	struct hrtimer *cpu_histtimer = &per_cpu(histtimer, cpu);
+	ktime_t time_rem;
+
+	time_rem = hrtimer_get_remaining(cpu_histtimer);
+	if (ktime_to_us(time_rem) <= 0)
+		return;
 
 	hrtimer_try_to_cancel(cpu_histtimer);
 }
@@ -386,11 +391,21 @@
 {
 	int cpu = raw_smp_processor_id();
 	struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;
+	ktime_t time_rem;
 
-	hrtimer_try_to_cancel(&cluster->histtimer);
+	time_rem = hrtimer_get_remaining(&cluster->histtimer);
+	if (ktime_to_us(time_rem) > 0)
+		hrtimer_try_to_cancel(&cluster->histtimer);
 
-	if (cluster->parent)
+	if (cluster->parent) {
+		time_rem = hrtimer_get_remaining(
+			&cluster->parent->histtimer);
+
+		if (ktime_to_us(time_rem) <= 0)
+			return;
+
 		hrtimer_try_to_cancel(&cluster->parent->histtimer);
+	}
 }
 
 static enum hrtimer_restart clusttimer_fn(struct hrtimer *h)
@@ -1394,11 +1409,11 @@
 	dev->last_residency = ktime_us_delta(ktime_get(), start);
 	update_history(dev, idx);
 	trace_cpu_idle_exit(idx, success);
-	local_irq_enable();
 	if (lpm_prediction && cpu->lpm_prediction) {
 		histtimer_cancel();
 		clusttimer_cancel();
 	}
+	local_irq_enable();
 	return idx;
 }
 
@@ -1783,12 +1798,10 @@
 #endif
 
 	rc = platform_driver_register(&lpm_driver);
-	if (rc) {
-		pr_info("Error registering %s\n", lpm_driver.driver.name);
-		goto fail;
-	}
+	if (rc)
+		pr_info("Error registering %s rc=%d\n", lpm_driver.driver.name,
+									rc);
 
-fail:
 	return rc;
 }
 late_initcall(lpm_levels_module_init);
diff --git a/drivers/cpuidle/lpm-workarounds.c b/drivers/cpuidle/lpm-workarounds.c
new file mode 100644
index 0000000..657e2b9
--- /dev/null
+++ b/drivers/cpuidle/lpm-workarounds.c
@@ -0,0 +1,147 @@
+/* Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+#include <lpm-workarounds.h>
+
+static struct regulator *lpm_cx_reg;
+static struct work_struct dummy_vote_work;
+static struct workqueue_struct *lpm_wa_wq;
+static bool lpm_wa_cx_turbo_unvote;
+static bool skip_l2_spm;
+
+/* While exiting from RPM-assisted power collapse on some targets like MSM8939,
+ * the CX is bumped to turbo mode by the RPM. To reduce the power impact, the
+ * APSS low-power driver needs to remove the CX turbo vote.
+ */
+static void send_dummy_cx_vote(struct work_struct *w)
+{
+	if (lpm_cx_reg) {
+		regulator_set_voltage(lpm_cx_reg,
+			RPM_REGULATOR_CORNER_SUPER_TURBO,
+			RPM_REGULATOR_CORNER_SUPER_TURBO);
+
+		regulator_set_voltage(lpm_cx_reg,
+			RPM_REGULATOR_CORNER_NONE,
+			RPM_REGULATOR_CORNER_SUPER_TURBO);
+	}
+}
+
+/*
+ * lpm_wa_cx_unvote_send(): Unvote for CX turbo mode
+ */
+void lpm_wa_cx_unvote_send(void)
+{
+	if (lpm_wa_cx_turbo_unvote)
+		queue_work(lpm_wa_wq, &dummy_vote_work);
+}
+EXPORT_SYMBOL(lpm_wa_cx_unvote_send);
+
+static int lpm_wa_cx_unvote_init(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	lpm_cx_reg = devm_regulator_get(&pdev->dev, "lpm-cx");
+	if (IS_ERR(lpm_cx_reg)) {
+		ret = PTR_ERR(lpm_cx_reg);
+		if (ret != -EPROBE_DEFER)
+			pr_err("Unable to get the CX regulator\n");
+		return ret;
+	}
+
+	INIT_WORK(&dummy_vote_work, send_dummy_cx_vote);
+
+	lpm_wa_wq = alloc_workqueue("lpm-wa",
+				WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
+
+	return ret;
+}
+
+static int lpm_wa_cx_unvote_exit(void)
+{
+	if (lpm_wa_wq)
+		destroy_workqueue(lpm_wa_wq);
+
+	return 0;
+}
+
+bool lpm_wa_get_skip_l2_spm(void)
+{
+	return skip_l2_spm;
+}
+EXPORT_SYMBOL(lpm_wa_get_skip_l2_spm);
+
+static int lpm_wa_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	lpm_wa_cx_turbo_unvote = of_property_read_bool(pdev->dev.of_node,
+					"qcom,lpm-wa-cx-turbo-unvote");
+	if (lpm_wa_cx_turbo_unvote) {
+		ret = lpm_wa_cx_unvote_init(pdev);
+		if (ret) {
+			pr_err("%s: Failed to initialize lpm_wa_cx_unvote (%d)\n",
+				__func__, ret);
+			return ret;
+		}
+	}
+
+	skip_l2_spm = of_property_read_bool(pdev->dev.of_node,
+						"qcom,lpm-wa-skip-l2-spm");
+
+	return ret;
+}
+
+static int lpm_wa_remove(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	if (lpm_wa_cx_turbo_unvote)
+		ret = lpm_wa_cx_unvote_exit();
+
+	return ret;
+}
+
+static const struct of_device_id lpm_wa_mtch_tbl[] = {
+	{.compatible = "qcom,lpm-workarounds"},
+	{},
+};
+
+static struct platform_driver lpm_wa_driver = {
+	.probe = lpm_wa_probe,
+	.remove = lpm_wa_remove,
+	.driver = {
+		.name = "lpm-workarounds",
+		.owner = THIS_MODULE,
+		.of_match_table = lpm_wa_mtch_tbl,
+	},
+};
+
+static int __init lpm_wa_module_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&lpm_wa_driver);
+	if (ret)
+		pr_info("Error registering %s\n", lpm_wa_driver.driver.name);
+
+	return ret;
+}
+late_initcall(lpm_wa_module_init);
diff --git a/drivers/cpuidle/lpm-workarounds.h b/drivers/cpuidle/lpm-workarounds.h
new file mode 100644
index 0000000..a290dcb
--- /dev/null
+++ b/drivers/cpuidle/lpm-workarounds.h
@@ -0,0 +1,20 @@
+/* Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LPM_WA_H
+#define __LPM_WA_H
+
+void lpm_wa_cx_unvote_send(void);
+bool lpm_wa_get_skip_l2_spm(void);
+
+#endif  /* __LPM_WA_H */
diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c
index 3aa75aa..199d573 100644
--- a/drivers/crypto/msm/ice.c
+++ b/drivers/crypto/msm/ice.c
@@ -77,6 +77,8 @@
 #define QCOM_ICE_ENCRYPT	0x1
 #define QCOM_ICE_DECRYPT	0x2
 #define QCOM_SECT_LEN_IN_BYTE	512
+#define QCOM_UD_FOOTER_SIZE	0x4000
+#define QCOM_UD_FOOTER_SECS	(QCOM_UD_FOOTER_SIZE / QCOM_SECT_LEN_IN_BYTE)
 
 struct ice_clk_info {
 	struct list_head list;
@@ -127,8 +129,6 @@
 };
 
 static int ice_fde_flag;
-static unsigned long userdata_start;
-static unsigned long userdata_end;
 static struct ice_crypto_setting ice_data;
 
 static int qti_ice_setting_config(struct request *req,
@@ -152,22 +152,29 @@
 		return -EPERM;
 	}
 
+	if (!setting)
+		return -EINVAL;
+
 	if ((short)(crypto_data->key_index) >= 0) {
 
 		memcpy(&setting->crypto_data, crypto_data,
 				sizeof(setting->crypto_data));
 
-		if (rq_data_dir(req) == WRITE &&
-				(ice_fde_flag & QCOM_ICE_ENCRYPT))
-			setting->encr_bypass = false;
-		else if (rq_data_dir(req) == READ &&
-				(ice_fde_flag & QCOM_ICE_DECRYPT))
-			setting->decr_bypass = false;
-		else {
+		switch (rq_data_dir(req)) {
+		case WRITE:
+			if (!ice_fde_flag || (ice_fde_flag & QCOM_ICE_ENCRYPT))
+				setting->encr_bypass = false;
+			break;
+		case READ:
+			if (!ice_fde_flag || (ice_fde_flag & QCOM_ICE_DECRYPT))
+				setting->decr_bypass = false;
+			break;
+		default:
 			/* Should I say BUG_ON */
 			setting->encr_bypass = true;
 			setting->decr_bypass = true;
-			pr_debug("%s direction unknown", __func__);
+			pr_debug("%s(): direction unknown\n", __func__);
+			break;
 		}
 	}
 
@@ -181,26 +188,6 @@
 }
 EXPORT_SYMBOL(qcom_ice_set_fde_flag);
 
-int qcom_ice_set_fde_conf(sector_t s_sector, sector_t size,
-					int index, int mode)
-{
-	userdata_start = s_sector;
-	userdata_end = s_sector + size;
-	if (INT_MAX - s_sector < size) {
-		WARN_ON(1);
-		return -EINVAL;
-	}
-	ice_data.key_index = index;
-	ice_data.algo_mode = mode;
-	ice_data.key_size = ICE_CRYPTO_KEY_SIZE_256;
-	ice_data.key_mode = ICE_CRYPTO_USE_LUT_SW_KEY;
-
-	pr_debug("%s sector info set start %lu end %lu\n", __func__,
-		userdata_start, userdata_end);
-	return 0;
-}
-EXPORT_SYMBOL(qcom_ice_set_fde_conf);
-
 static int qcom_ice_enable_clocks(struct ice_device *, bool);
 
 #ifdef CONFIG_MSM_BUS_SCALING
@@ -1483,12 +1470,15 @@
 		struct request *req,
 		struct ice_data_setting *setting, bool async)
 {
+	struct ice_crypto_setting *crypto_data;
 	struct ice_crypto_setting pfk_crypto_data = {0};
 	int ret = 0;
 	bool is_pfe = false;
 	sector_t data_size;
+	union map_info *info;
+	unsigned long sec_end = 0;
 
-	if (!pdev || !req || !setting) {
+	if (!pdev || !req) {
 		pr_err("%s: Invalid params passed\n", __func__);
 		return -EINVAL;
 	}
@@ -1507,6 +1497,7 @@
 		/* It is not an error to have a request with no  bio */
 		return 0;
 	}
+    //pr_err("%s bio is %pK\n", __func__, req->bio);
 
 	ret = pfk_load_key_start(req->bio, &pfk_crypto_data, &is_pfe, async);
 	if (is_pfe) {
@@ -1521,22 +1512,46 @@
 				&pfk_crypto_data, setting);
 	}
 
-	if (ice_fde_flag == 0)
-		return 0;
-
-	if ((req->__sector >= userdata_start) &&
-			(req->__sector < userdata_end)) {
-	/*
-	 * Ugly hack to address non-block-size aligned userdata end address in
-	 * eMMC based devices.
-	 */
-		data_size = req->__data_len/QCOM_SECT_LEN_IN_BYTE;
-
-		if ((req->__sector + data_size) > userdata_end)
-			return 0;
-		else
+	if (!ice_fde_flag) {
+		if (bio_flagged(req->bio, BIO_INLINECRYPT)) {
+			info = dm_get_rq_mapinfo(req);
+			if (!info) {
+				pr_debug("%s info not available in request\n",
+							__func__);
+				return 0;
+			}
+			crypto_data = (struct ice_crypto_setting *)info->ptr;
+			if (!crypto_data) {
+				pr_err("%s crypto_data not available in req\n",
+							__func__);
+				return -EINVAL;
+			}
 			return qti_ice_setting_config(req, pdev,
-				&ice_data, setting);
+						crypto_data, setting);
+		}
+		return 0;
+	}
+
+	if (req->part && req->part->info && req->part->info->volname[0]) {
+		if (!strcmp(req->part->info->volname, "userdata")) {
+			sec_end = req->part->start_sect + req->part->nr_sects -
+					QCOM_UD_FOOTER_SECS;
+			if ((req->__sector >= req->part->start_sect) &&
+				(req->__sector < sec_end)) {
+				/*
+				 * Ugly hack to address non-block-size aligned
+				 * userdata end address in eMMC based devices.
+				 */
+				data_size = req->__data_len /
+						QCOM_SECT_LEN_IN_BYTE;
+
+				if ((req->__sector + data_size) > sec_end)
+					return 0;
+				else
+					return qti_ice_setting_config(req, pdev,
+						&ice_data, setting);
+			}
+		}
 	}
 
 	/*
@@ -1664,7 +1679,7 @@
 
 	list_for_each_entry(ice_dev, &ice_devices, list) {
 		if (!strcmp(ice_dev->ice_instance_type, storage_type)) {
-			pr_info("%s: found ice device %p\n", __func__, ice_dev);
+			pr_debug("%s: ice device %pK\n", __func__, ice_dev);
 			return ice_dev;
 		}
 	}
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index d0b16e5..d8305dd 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -750,7 +750,10 @@
 	if (final)
 		new_len = DIV_ROUND_UP(new_len, bs) * bs;
 	else
-		new_len = new_len / bs * bs;
+		new_len = (new_len - 1) / bs * bs;
+
+	if (nbytes != new_len)
+		list_ok = false;
 
 	while (nbytes > 0 && sg_tmp) {
 		n++;
@@ -846,6 +849,8 @@
 			xmit_len = DIV_ROUND_UP(xmit_len, bs) * bs;
 		else
 			xmit_len = xmit_len / bs * bs;
+	} else if (!final) {
+		xmit_len -= bs;
 	}
 
 	hash_later = rctx->total - xmit_len;
@@ -873,14 +878,21 @@
 	}
 
 	if (hash_later) {
-		if (req->nbytes) {
-			scatterwalk_map_and_copy(rctx->buffer, req->src,
-						 req->nbytes - hash_later,
-						 hash_later, 0);
-		} else {
+		int offset = 0;
+
+		if (hash_later > req->nbytes) {
 			memcpy(rctx->buffer, rctx->buffer + xmit_len,
-			       hash_later);
+			       hash_later - req->nbytes);
+			offset = hash_later - req->nbytes;
 		}
+
+		if (req->nbytes) {
+			scatterwalk_map_and_copy(rctx->buffer + offset,
+						 req->src,
+						 offset + req->nbytes -
+						 hash_later, hash_later, 0);
+		}
+
 		rctx->bufcnt = hash_later;
 	} else {
 		rctx->bufcnt = 0;
@@ -1130,7 +1142,7 @@
 	ctx = ahash_request_ctx(req);
 
 	err = omap_sham_prepare_request(req, ctx->op == OP_UPDATE);
-	if (err)
+	if (err || !ctx->total)
 		goto err1;
 
 	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
@@ -1189,11 +1201,10 @@
 	if (!req->nbytes)
 		return 0;
 
-	if (ctx->total + req->nbytes < ctx->buflen) {
+	if (ctx->bufcnt + req->nbytes <= ctx->buflen) {
 		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
 					 0, req->nbytes, 0);
 		ctx->bufcnt += req->nbytes;
-		ctx->total += req->nbytes;
 		return 0;
 	}
 
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
index 3ac6c6c..16bb660 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
@@ -422,6 +422,7 @@
 
 module_platform_driver(sun4i_ss_driver);
 
+MODULE_ALIAS("platform:sun4i-ss");
 MODULE_DESCRIPTION("Allwinner Security System cryptographic accelerator");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Corentin LABBE <clabbe.montjoie@gmail.com>");
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 42c060c..7c71722 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1116,10 +1116,10 @@
 	return count;
 }
 
-int talitos_sg_map(struct device *dev, struct scatterlist *src,
-		   unsigned int len, struct talitos_edesc *edesc,
-		   struct talitos_ptr *ptr,
-		   int sg_count, unsigned int offset, int tbl_off)
+static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
+			      unsigned int len, struct talitos_edesc *edesc,
+			      struct talitos_ptr *ptr, int sg_count,
+			      unsigned int offset, int tbl_off, int elen)
 {
 	struct talitos_private *priv = dev_get_drvdata(dev);
 	bool is_sec1 = has_ftr_sec1(priv);
@@ -1130,7 +1130,7 @@
 	}
 
 	to_talitos_ptr_len(ptr, len, is_sec1);
-	to_talitos_ptr_ext_set(ptr, 0, is_sec1);
+	to_talitos_ptr_ext_set(ptr, elen, is_sec1);
 
 	if (sg_count == 1) {
 		to_talitos_ptr(ptr, sg_dma_address(src) + offset, is_sec1);
@@ -1140,7 +1140,7 @@
 		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, is_sec1);
 		return sg_count;
 	}
-	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len,
+	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
 					 &edesc->link_tbl[tbl_off]);
 	if (sg_count == 1) {
 		/* Only one segment now, so no link tbl needed*/
@@ -1154,6 +1154,15 @@
 	return sg_count;
 }
 
+static int talitos_sg_map(struct device *dev, struct scatterlist *src,
+			  unsigned int len, struct talitos_edesc *edesc,
+			  struct talitos_ptr *ptr, int sg_count,
+			  unsigned int offset, int tbl_off)
+{
+	return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
+				  tbl_off, 0);
+}
+
 /*
  * fill in and submit ipsec_esp descriptor
  */
@@ -1171,7 +1180,7 @@
 	unsigned int ivsize = crypto_aead_ivsize(aead);
 	int tbl_off = 0;
 	int sg_count, ret;
-	int sg_link_tbl_len;
+	int elen = 0;
 	bool sync_needed = false;
 	struct talitos_private *priv = dev_get_drvdata(dev);
 	bool is_sec1 = has_ftr_sec1(priv);
@@ -1225,20 +1234,12 @@
 	 * extent is bytes of HMAC postpended to ciphertext,
 	 * typically 12 for ipsec
 	 */
-	to_talitos_ptr_len(&desc->ptr[4], cryptlen, is_sec1);
-	to_talitos_ptr_ext_set(&desc->ptr[4], 0, is_sec1);
+	if ((desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
+	    (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
+		elen = authsize;
 
-	sg_link_tbl_len = cryptlen;
-
-	if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
-		to_talitos_ptr_ext_set(&desc->ptr[4], authsize, is_sec1);
-
-		if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
-			sg_link_tbl_len += authsize;
-	}
-
-	ret = talitos_sg_map(dev, areq->src, sg_link_tbl_len, edesc,
-			     &desc->ptr[4], sg_count, areq->assoclen, tbl_off);
+	ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
+				 sg_count, areq->assoclen, tbl_off, elen);
 
 	if (ret > 1) {
 		tbl_off += ret;
diff --git a/drivers/devfreq/arm-memlat-mon.c b/drivers/devfreq/arm-memlat-mon.c
index 1dca479..fe249e7 100644
--- a/drivers/devfreq/arm-memlat-mon.c
+++ b/drivers/devfreq/arm-memlat-mon.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -82,7 +82,7 @@
 {
 	ktime_t ts;
 	unsigned int diff;
-	unsigned long freq = 0;
+	uint64_t freq = 0;
 
 	ts = ktime_get();
 	diff = ktime_to_us(ktime_sub(ts, cpustats->prev_ts));
@@ -420,6 +420,7 @@
 	.driver = {
 		.name = "arm-memlat-mon",
 		.of_match_table = memlat_match_table,
+		.suppress_bind_attrs = true,
 	},
 };
 
diff --git a/drivers/devfreq/bimc-bwmon.c b/drivers/devfreq/bimc-bwmon.c
index 33e16261..d2aaffb 100644
--- a/drivers/devfreq/bimc-bwmon.c
+++ b/drivers/devfreq/bimc-bwmon.c
@@ -1128,6 +1128,7 @@
 	.driver = {
 		.name = "bimc-bwmon",
 		.of_match_table = bimc_bwmon_match_table,
+		.suppress_bind_attrs = true,
 	},
 };
 
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 3357aaf..2298de2 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -969,7 +969,8 @@
 	if (df->governor == governor) {
 		ret = 0;
 		goto out;
-	} else if (df->governor->immutable || governor->immutable) {
+	} else if ((df->governor && df->governor->immutable) ||
+					governor->immutable) {
 		ret = -EINVAL;
 		goto out;
 	}
diff --git a/drivers/devfreq/governor_cpufreq.c b/drivers/devfreq/governor_cpufreq.c
index 03ec792..3c7fc8b 100644
--- a/drivers/devfreq/governor_cpufreq.c
+++ b/drivers/devfreq/governor_cpufreq.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015, 2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -182,14 +182,14 @@
 	struct cpufreq_policy *policy = data;
 
 	switch (event) {
-	case CPUFREQ_CREATE_POLICY:
+	case CPUFREQ_START:
 		mutex_lock(&state_lock);
 		add_policy(policy);
 		update_all_devfreqs();
 		mutex_unlock(&state_lock);
 		break;
 
-	case CPUFREQ_REMOVE_POLICY:
+	case CPUFREQ_STOP:
 		mutex_lock(&state_lock);
 		if (state[policy->cpu]) {
 			state[policy->cpu]->on = false;
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 7053bb4..1936383 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -305,7 +305,8 @@
 
 	poll_wait(file, &sync_file->wq, wait);
 
-	if (!test_and_set_bit(POLL_ENABLED, &sync_file->flags)) {
+	if (list_empty(&sync_file->cb.node) &&
+	    !test_and_set_bit(POLL_ENABLED, &sync_file->flags)) {
 		if (fence_add_callback(sync_file->fence, &sync_file->cb,
 					   fence_check_cb_func) < 0)
 			wake_up_all(&sync_file->wq);
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index b7d7f2d..ee7b48d 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1473,10 +1473,10 @@
 	for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
 		check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
 		rmb();
-		initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
-		rmb();
 		cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
 		rmb();
+		initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
+		rmb();
 		cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
 		rmb();
 
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 21726a2..b9c2972 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1755,19 +1755,26 @@
 	if (IS_ERR(sdma->clk_ahb))
 		return PTR_ERR(sdma->clk_ahb);
 
-	clk_prepare(sdma->clk_ipg);
-	clk_prepare(sdma->clk_ahb);
+	ret = clk_prepare(sdma->clk_ipg);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare(sdma->clk_ahb);
+	if (ret)
+		goto err_clk;
 
 	ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
 			       sdma);
 	if (ret)
-		return ret;
+		goto err_irq;
 
 	sdma->irq = irq;
 
 	sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
-	if (!sdma->script_addrs)
-		return -ENOMEM;
+	if (!sdma->script_addrs) {
+		ret = -ENOMEM;
+		goto err_irq;
+	}
 
 	/* initially no scripts available */
 	saddr_arr = (s32 *)sdma->script_addrs;
@@ -1882,6 +1889,10 @@
 	dma_async_device_unregister(&sdma->dma_device);
 err_init:
 	kfree(sdma->script_addrs);
+err_irq:
+	clk_unprepare(sdma->clk_ahb);
+err_clk:
+	clk_unprepare(sdma->clk_ipg);
 	return ret;
 }
 
@@ -1893,6 +1904,8 @@
 	devm_free_irq(&pdev->dev, sdma->irq, sdma);
 	dma_async_device_unregister(&sdma->dma_device);
 	kfree(sdma->script_addrs);
+	clk_unprepare(sdma->clk_ahb);
+	clk_unprepare(sdma->clk_ipg);
 	/* Kill the tasklet */
 	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
 		struct sdma_channel *sdmac = &sdma->channel[i];
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index f3e211f..7186664 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -152,6 +152,7 @@
 	void __iomem *dma_base;
 	void __iomem *glob_base;
 	struct clk *clk;
+	struct clk *reg_clk;
 	struct tasklet_struct irq_tasklet;
 	struct list_head free_sw_desc;
 	struct dma_device dmadev;
@@ -697,13 +698,26 @@
 	if (ret)
 		return ret;
 
+	xor_dev->reg_clk = devm_clk_get(&pdev->dev, "reg");
+	if (PTR_ERR(xor_dev->reg_clk) != -ENOENT) {
+		if (!IS_ERR(xor_dev->reg_clk)) {
+			ret = clk_prepare_enable(xor_dev->reg_clk);
+			if (ret)
+				return ret;
+		} else {
+			return PTR_ERR(xor_dev->reg_clk);
+		}
+	}
+
 	xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
-		return -EPROBE_DEFER;
+	if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
+		ret = -EPROBE_DEFER;
+		goto disable_reg_clk;
+	}
 	if (!IS_ERR(xor_dev->clk)) {
 		ret = clk_prepare_enable(xor_dev->clk);
 		if (ret)
-			return ret;
+			goto disable_reg_clk;
 	}
 
 	ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
@@ -812,8 +826,9 @@
 free_msi_irqs:
 	platform_msi_domain_free_irqs(&pdev->dev);
 disable_clk:
-	if (!IS_ERR(xor_dev->clk))
-		clk_disable_unprepare(xor_dev->clk);
+	clk_disable_unprepare(xor_dev->clk);
+disable_reg_clk:
+	clk_disable_unprepare(xor_dev->reg_clk);
 	return ret;
 }
 
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index fb2e747..2c449bd 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1570,7 +1570,7 @@
 /* Returns 1 if state was updated, 0 otherwise */
 static int pl330_update(struct pl330_dmac *pl330)
 {
-	struct dma_pl330_desc *descdone, *tmp;
+	struct dma_pl330_desc *descdone;
 	unsigned long flags;
 	void __iomem *regs;
 	u32 val;
@@ -1648,7 +1648,9 @@
 	}
 
 	/* Now that we are in no hurry, do the callbacks */
-	list_for_each_entry_safe(descdone, tmp, &pl330->req_done, rqd) {
+	while (!list_empty(&pl330->req_done)) {
+		descdone = list_first_entry(&pl330->req_done,
+					    struct dma_pl330_desc, rqd);
 		list_del(&descdone->rqd);
 		spin_unlock_irqrestore(&pl330->lock, flags);
 		dma_pl330_rqcb(descdone, PL330_ERR_NONE);
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 03c4eb3..6497f52 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -387,6 +387,7 @@
 	struct device_dma_parameters dma_parms;
 	struct bam_chan *channels;
 	u32 num_channels;
+	u32 num_ees;
 
 	/* execution environment ID, from DT */
 	u32 ee;
@@ -1076,15 +1077,19 @@
 	u32 val;
 
 	/* read revision and configuration information */
-	val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION)) >> NUM_EES_SHIFT;
-	val &= NUM_EES_MASK;
+	if (!bdev->num_ees) {
+		val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION));
+		bdev->num_ees = (val >> NUM_EES_SHIFT) & NUM_EES_MASK;
+	}
 
 	/* check that configured EE is within range */
-	if (bdev->ee >= val)
+	if (bdev->ee >= bdev->num_ees)
 		return -EINVAL;
 
-	val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
-	bdev->num_channels = val & BAM_NUM_PIPES_MASK;
+	if (!bdev->num_channels) {
+		val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
+		bdev->num_channels = val & BAM_NUM_PIPES_MASK;
+	}
 
 	if (bdev->controlled_remotely)
 		return 0;
@@ -1179,6 +1184,18 @@
 	bdev->controlled_remotely = of_property_read_bool(pdev->dev.of_node,
 						"qcom,controlled-remotely");
 
+	if (bdev->controlled_remotely) {
+		ret = of_property_read_u32(pdev->dev.of_node, "num-channels",
+					   &bdev->num_channels);
+		if (ret)
+			dev_err(bdev->dev, "num-channels unspecified in dt\n");
+
+		ret = of_property_read_u32(pdev->dev.of_node, "qcom,num-ees",
+					   &bdev->num_ees);
+		if (ret)
+			dev_err(bdev->dev, "num-ees unspecified in dt\n");
+	}
+
 	bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
 	if (IS_ERR(bdev->bamclk))
 		return PTR_ERR(bdev->bamclk);
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 4c357d4..d032032 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -870,7 +870,7 @@
 
 	rcar_dmac_chan_configure_desc(chan, desc);
 
-	max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift;
+	max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift;
 
 	/*
 	 * Allocate and fill the transfer chunk descriptors. We own the only
@@ -1246,8 +1246,17 @@
 	 * If the cookie doesn't correspond to the currently running transfer
 	 * then the descriptor hasn't been processed yet, and the residue is
 	 * equal to the full descriptor size.
+	 * Also, a client driver may call this function before
+	 * rcar_dmac_isr_channel_thread() runs. In this case, "desc.running"
+	 * will point to the next descriptor and the completed one will be on
+	 * the done list. So, if the argument cookie matches a descriptor on
+	 * the done list, we can assume the residue is zero.
 	 */
 	if (cookie != desc->async_tx.cookie) {
+		list_for_each_entry(desc, &chan->desc.done, node) {
+			if (cookie == desc->async_tx.cookie)
+				return 0;
+		}
 		list_for_each_entry(desc, &chan->desc.pending, node) {
 			if (cookie == desc->async_tx.cookie)
 				return desc->size;
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index cb9b857..61c19b8 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -759,7 +759,7 @@
 		/* Non-ECC RAM? */
 		printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
 		res = -ENODEV;
-		goto err2;
+		goto err;
 	}
 
 	edac_dbg(3, "init mci\n");
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
index 2e093c3..2b108fa 100644
--- a/drivers/extcon/extcon-usb-gpio.c
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -277,9 +277,8 @@
 	if (info->vbus_gpiod)
 		enable_irq(info->vbus_irq);
 
-	if (!device_may_wakeup(dev))
-		queue_delayed_work(system_power_efficient_wq,
-				   &info->wq_detcable, 0);
+	queue_delayed_work(system_power_efficient_wq,
+			   &info->wq_detcable, 0);
 
 	return ret;
 }
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 8bf8926..d731b41 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -1130,7 +1130,13 @@
 		return -ENOMEM;
 
 	offset = (void *)&desc->buffer - (void *)desc;
-	desc->buffer_size = PAGE_SIZE - offset;
+	/*
+	 * Some controllers, like JMicron ones, always issue 0x20-byte DMA reads
+	 * for descriptors, even 0x10-byte ones. This can cause page faults when
+	 * an IOMMU is in use and the oversized read crosses a page boundary.
+	 * Work around this by always leaving at least 0x10 bytes of padding.
+	 */
+	desc->buffer_size = PAGE_SIZE - offset - 0x10;
 	desc->buffer_bus = bus_addr + offset;
 	desc->used = 0;
 
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 88bebe1..42844c3 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -18,7 +18,7 @@
  * of and an antecedent to, SMBIOS, which stands for System
  * Management BIOS.  See further: http://www.dmtf.org/standards
  */
-static const char dmi_empty_string[] = "        ";
+static const char dmi_empty_string[] = "";
 
 static u32 dmi_ver __initdata;
 static u32 dmi_len;
@@ -44,25 +44,21 @@
 static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s)
 {
 	const u8 *bp = ((u8 *) dm) + dm->length;
+	const u8 *nsp;
 
 	if (s) {
-		s--;
-		while (s > 0 && *bp) {
+		while (--s > 0 && *bp)
 			bp += strlen(bp) + 1;
-			s--;
-		}
 
-		if (*bp != 0) {
-			size_t len = strlen(bp)+1;
-			size_t cmp_len = len > 8 ? 8 : len;
-
-			if (!memcmp(bp, dmi_empty_string, cmp_len))
-				return dmi_empty_string;
+		/* Strings containing only spaces are considered empty */
+		nsp = bp;
+		while (*nsp == ' ')
+			nsp++;
+		if (*nsp != '\0')
 			return bp;
-		}
 	}
 
-	return "";
+	return dmi_empty_string;
 }
 
 static const char * __init dmi_string(const struct dmi_header *dm, u8 s)
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index 3d50bae..1f81e63 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -59,7 +59,10 @@
 	return cpu == resident_cpu;
 }
 
-struct psci_operations psci_ops;
+struct psci_operations psci_ops = {
+	.conduit = PSCI_CONDUIT_NONE,
+	.smccc_version = SMCCC_VERSION_1_0,
+};
 
 typedef unsigned long (psci_fn)(unsigned long, unsigned long,
 				unsigned long, unsigned long);
@@ -210,6 +213,22 @@
 			      0, 0, 0);
 }
 
+static void set_conduit(enum psci_conduit conduit)
+{
+	switch (conduit) {
+	case PSCI_CONDUIT_HVC:
+		invoke_psci_fn = __invoke_psci_fn_hvc;
+		break;
+	case PSCI_CONDUIT_SMC:
+		invoke_psci_fn = __invoke_psci_fn_smc;
+		break;
+	default:
+		WARN(1, "Unexpected PSCI conduit %d\n", conduit);
+	}
+
+	psci_ops.conduit = conduit;
+}
+
 static int get_set_conduit_method(struct device_node *np)
 {
 	const char *method;
@@ -222,9 +241,9 @@
 	}
 
 	if (!strcmp("hvc", method)) {
-		invoke_psci_fn = __invoke_psci_fn_hvc;
+		set_conduit(PSCI_CONDUIT_HVC);
 	} else if (!strcmp("smc", method)) {
-		invoke_psci_fn = __invoke_psci_fn_smc;
+		set_conduit(PSCI_CONDUIT_SMC);
 	} else {
 		pr_warn("invalid \"method\" property: %s\n", method);
 		return -EINVAL;
@@ -495,6 +514,31 @@
 	pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
 }
 
+static void __init psci_init_smccc(void)
+{
+	u32 ver = ARM_SMCCC_VERSION_1_0;
+	int feature;
+
+	feature = psci_features(ARM_SMCCC_VERSION_FUNC_ID);
+
+	if (feature != PSCI_RET_NOT_SUPPORTED) {
+		u32 ret;
+		ret = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0);
+		if (ret == ARM_SMCCC_VERSION_1_1) {
+			psci_ops.smccc_version = SMCCC_VERSION_1_1;
+			ver = ret;
+		}
+	}
+
+	/*
+	 * Conveniently, the SMCCC and PSCI versions are encoded the
+	 * same way. No, this isn't accidental.
+	 */
+	pr_info("SMC Calling Convention v%d.%d\n",
+		PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver));
+
+}
+
 static void __init psci_0_2_set_functions(void)
 {
 	pr_info("Using standard PSCI v0.2 function IDs\n");
@@ -543,6 +587,7 @@
 	psci_init_migrate();
 
 	if (PSCI_VERSION_MAJOR(ver) >= 1) {
+		psci_init_smccc();
 		psci_init_cpu_suspend();
 		psci_init_system_suspend();
 	}
@@ -656,9 +701,9 @@
 	pr_info("probing for conduit method from ACPI.\n");
 
 	if (acpi_psci_use_hvc())
-		invoke_psci_fn = __invoke_psci_fn_hvc;
+		set_conduit(PSCI_CONDUIT_HVC);
 	else
-		invoke_psci_fn = __invoke_psci_fn_smc;
+		set_conduit(PSCI_CONDUIT_SMC);
 
 	return psci_probe();
 }
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 03a5925..a9daf71 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -256,7 +256,7 @@
 	if (set)
 		reg |= bit;
 	else
-		reg &= bit;
+		reg &= ~bit;
 	iowrite32(reg, addr);
 
 	spin_unlock_irqrestore(&gpio->lock, flags);
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index 7c446d1..1d87b07 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -90,8 +90,18 @@
 {
 	int reg;
 
-	if (gpio == 94)
-		return GPIOPANELCTL;
+	if (gpio >= CRYSTALCOVE_GPIO_NUM) {
+		/*
+		 * Virtual GPIO called from ACPI, for now we only support
+		 * the panel ctl.
+		 */
+		switch (gpio) {
+		case 0x5e:
+			return GPIOPANELCTL;
+		default:
+			return -EOPNOTSUPP;
+		}
+	}
 
 	if (reg_type == CTRL_IN) {
 		if (gpio < 8)
@@ -130,36 +140,36 @@
 static int crystalcove_gpio_dir_in(struct gpio_chip *chip, unsigned gpio)
 {
 	struct crystalcove_gpio *cg = gpiochip_get_data(chip);
+	int reg = to_reg(gpio, CTRL_OUT);
 
-	if (gpio > CRYSTALCOVE_VGPIO_NUM)
+	if (reg < 0)
 		return 0;
 
-	return regmap_write(cg->regmap, to_reg(gpio, CTRL_OUT),
-			    CTLO_INPUT_SET);
+	return regmap_write(cg->regmap, reg, CTLO_INPUT_SET);
 }
 
 static int crystalcove_gpio_dir_out(struct gpio_chip *chip, unsigned gpio,
 				    int value)
 {
 	struct crystalcove_gpio *cg = gpiochip_get_data(chip);
+	int reg = to_reg(gpio, CTRL_OUT);
 
-	if (gpio > CRYSTALCOVE_VGPIO_NUM)
+	if (reg < 0)
 		return 0;
 
-	return regmap_write(cg->regmap, to_reg(gpio, CTRL_OUT),
-			    CTLO_OUTPUT_SET | value);
+	return regmap_write(cg->regmap, reg, CTLO_OUTPUT_SET | value);
 }
 
 static int crystalcove_gpio_get(struct gpio_chip *chip, unsigned gpio)
 {
 	struct crystalcove_gpio *cg = gpiochip_get_data(chip);
-	int ret;
 	unsigned int val;
+	int ret, reg = to_reg(gpio, CTRL_IN);
 
-	if (gpio > CRYSTALCOVE_VGPIO_NUM)
+	if (reg < 0)
 		return 0;
 
-	ret = regmap_read(cg->regmap, to_reg(gpio, CTRL_IN), &val);
+	ret = regmap_read(cg->regmap, reg, &val);
 	if (ret)
 		return ret;
 
@@ -170,14 +180,15 @@
 				 unsigned gpio, int value)
 {
 	struct crystalcove_gpio *cg = gpiochip_get_data(chip);
+	int reg = to_reg(gpio, CTRL_OUT);
 
-	if (gpio > CRYSTALCOVE_VGPIO_NUM)
+	if (reg < 0)
 		return;
 
 	if (value)
-		regmap_update_bits(cg->regmap, to_reg(gpio, CTRL_OUT), 1, 1);
+		regmap_update_bits(cg->regmap, reg, 1, 1);
 	else
-		regmap_update_bits(cg->regmap, to_reg(gpio, CTRL_OUT), 1, 0);
+		regmap_update_bits(cg->regmap, reg, 1, 0);
 }
 
 static int crystalcove_irq_type(struct irq_data *data, unsigned type)
@@ -185,6 +196,9 @@
 	struct crystalcove_gpio *cg =
 		gpiochip_get_data(irq_data_get_irq_chip_data(data));
 
+	if (data->hwirq >= CRYSTALCOVE_GPIO_NUM)
+		return 0;
+
 	switch (type) {
 	case IRQ_TYPE_NONE:
 		cg->intcnt_value = CTLI_INTCNT_DIS;
@@ -235,8 +249,10 @@
 	struct crystalcove_gpio *cg =
 		gpiochip_get_data(irq_data_get_irq_chip_data(data));
 
-	cg->set_irq_mask = false;
-	cg->update |= UPDATE_IRQ_MASK;
+	if (data->hwirq < CRYSTALCOVE_GPIO_NUM) {
+		cg->set_irq_mask = false;
+		cg->update |= UPDATE_IRQ_MASK;
+	}
 }
 
 static void crystalcove_irq_mask(struct irq_data *data)
@@ -244,8 +260,10 @@
 	struct crystalcove_gpio *cg =
 		gpiochip_get_data(irq_data_get_irq_chip_data(data));
 
-	cg->set_irq_mask = true;
-	cg->update |= UPDATE_IRQ_MASK;
+	if (data->hwirq < CRYSTALCOVE_GPIO_NUM) {
+		cg->set_irq_mask = true;
+		cg->update |= UPDATE_IRQ_MASK;
+	}
 }
 
 static struct irq_chip crystalcove_irqchip = {
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index f3c3680..56b2419 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -425,7 +425,7 @@
 	struct gpiohandle_request handlereq;
 	struct linehandle_state *lh;
 	struct file *file;
-	int fd, i, ret;
+	int fd, i, count = 0, ret;
 
 	if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
 		return -EFAULT;
@@ -471,6 +471,7 @@
 		if (ret)
 			goto out_free_descs;
 		lh->descs[i] = desc;
+		count = i;
 
 		if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
 			set_bit(FLAG_ACTIVE_LOW, &desc->flags);
@@ -537,7 +538,7 @@
 out_put_unused_fd:
 	put_unused_fd(fd);
 out_free_descs:
-	for (; i >= 0; i--)
+	for (i = 0; i < count; i++)
 		gpiod_free(lh->descs[i]);
 	kfree(lh->label);
 out_free_lh:
@@ -794,7 +795,7 @@
 	desc = &gdev->descs[offset];
 	ret = gpiod_request(desc, le->label);
 	if (ret)
-		goto out_free_desc;
+		goto out_free_label;
 	le->desc = desc;
 	le->eflags = eflags;
 
@@ -3231,7 +3232,8 @@
 		return desc;
 	}
 
-	status = gpiod_request(desc, con_id);
+	/* If a connection label was passed use that, else use the device name as label */
+	status = gpiod_request(desc, con_id ? con_id : dev_name(dev));
 	if (status < 0)
 		return ERR_PTR(status);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 0e8f897..0217f5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -569,6 +569,7 @@
 	{ 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
+	{ 0x1002, 0x67DF, 0x1028, 0x0774, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0, 0, 0, 0, 0 },
 };
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index c02db01f6..fe011c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -201,8 +201,10 @@
 	for (i = 0; i < list->num_entries; i++) {
 		unsigned priority = list->array[i].priority;
 
-		list_add_tail(&list->array[i].tv.head,
-			      &bucket[priority]);
+		if (!list->array[i].robj->parent)
+			list_add_tail(&list->array[i].tv.head,
+				      &bucket[priority]);
+
 		list->array[i].user_pages = NULL;
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index cb505f6..c801624 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -519,7 +519,7 @@
 	INIT_LIST_HEAD(&duplicates);
 	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
 
-	if (p->uf_entry.robj)
+	if (p->uf_entry.robj && !p->uf_entry.robj->parent)
 		list_add(&p->uf_entry.tv.head, &p->validated);
 
 	if (need_mmap_lock)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index a88d365..564362e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1484,10 +1484,11 @@
 static const u32 vgpr_init_regs[] =
 {
 	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff,
-	mmCOMPUTE_RESOURCE_LIMITS, 0,
+	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
 	mmCOMPUTE_NUM_THREAD_X, 256*4,
 	mmCOMPUTE_NUM_THREAD_Y, 1,
 	mmCOMPUTE_NUM_THREAD_Z, 1,
+	mmCOMPUTE_PGM_RSRC1, 0x100004f, /* VGPRS=15 (64 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */
 	mmCOMPUTE_PGM_RSRC2, 20,
 	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
 	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
@@ -1504,10 +1505,11 @@
 static const u32 sgpr1_init_regs[] =
 {
 	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f,
-	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000,
+	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
 	mmCOMPUTE_NUM_THREAD_X, 256*5,
 	mmCOMPUTE_NUM_THREAD_Y, 1,
 	mmCOMPUTE_NUM_THREAD_Z, 1,
+	mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
 	mmCOMPUTE_PGM_RSRC2, 20,
 	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
 	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
@@ -1528,6 +1530,7 @@
 	mmCOMPUTE_NUM_THREAD_X, 256*5,
 	mmCOMPUTE_NUM_THREAD_Y, 1,
 	mmCOMPUTE_NUM_THREAD_Z, 1,
+	mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
 	mmCOMPUTE_PGM_RSRC2, 20,
 	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
 	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 002862b..3fa8320 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -6449,9 +6449,9 @@
 {
 	u32 lane_width;
 	u32 new_lane_width =
-		(amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+		((amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
 	u32 current_lane_width =
-		(amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+		((amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
 
 	if (new_lane_width != current_lane_width) {
 		amdgpu_set_pcie_lanes(adev, new_lane_width);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index ef7c8de..171480b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -317,7 +317,8 @@
 
 	/* init process apertures*/
 	process->is_32bit_user_mode = in_compat_syscall();
-	if (kfd_init_apertures(process) != 0)
+	err = kfd_init_apertures(process);
+	if (err != 0)
 		goto err_init_apretures;
 
 	return process;
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
index afec232..cfd80bc 100644
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c
@@ -53,7 +53,9 @@
 	}
 
 	drm_mode_connector_update_edid_property(connector, edid);
-	return drm_add_edid_modes(connector, edid);
+	ret = drm_add_edid_modes(connector, edid);
+	kfree(edid);
+	return ret;
 
 fallback:
 	/*
diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
index a7b2a75..cdb5358 100644
--- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
@@ -322,19 +322,44 @@
 {
 	uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE;
 	ssize_t ret;
+	int retry;
 
 	if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
 		return 0;
 
-	ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
-				     &tmds_oen, sizeof(tmds_oen));
-	if (ret) {
-		DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n",
-			      enable ? "enable" : "disable");
-		return ret;
+	/*
+	 * LSPCON adapters in low-power state may ignore the first write, so
+	 * read back and verify the written value a few times.
+	 */
+	for (retry = 0; retry < 3; retry++) {
+		uint8_t tmp;
+
+		ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
+					     &tmds_oen, sizeof(tmds_oen));
+		if (ret) {
+			DRM_DEBUG_KMS("Failed to %s TMDS output buffers (%d attempts)\n",
+				      enable ? "enable" : "disable",
+				      retry + 1);
+			return ret;
+		}
+
+		ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN,
+					    &tmp, sizeof(tmp));
+		if (ret) {
+			DRM_DEBUG_KMS("I2C read failed during TMDS output buffer %s (%d attempts)\n",
+				      enable ? "enabling" : "disabling",
+				      retry + 1);
+			return ret;
+		}
+
+		if (tmp == tmds_oen)
+			return 0;
 	}
 
-	return 0;
+	DRM_DEBUG_KMS("I2C write value mismatch during TMDS output buffer %s\n",
+		      enable ? "enabling" : "disabling");
+
+	return -EIO;
 }
 EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output);
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 603d842..699db13 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -926,7 +926,7 @@
 	struct drm_device *drm_dev = g2d->subdrv.drm_dev;
 	struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
 	struct drm_exynos_pending_g2d_event *e;
-	struct timeval now;
+	struct timespec64 now;
 
 	if (list_empty(&runqueue_node->event_list))
 		return;
@@ -934,9 +934,9 @@
 	e = list_first_entry(&runqueue_node->event_list,
 			     struct drm_exynos_pending_g2d_event, base.link);
 
-	do_gettimeofday(&now);
+	ktime_get_ts64(&now);
 	e->event.tv_sec = now.tv_sec;
-	e->event.tv_usec = now.tv_usec;
+	e->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;
 	e->event.cmdlist_no = cmdlist_no;
 
 	drm_send_event(drm_dev, &e->base);
diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h
index 3049613..d7cbe53 100644
--- a/drivers/gpu/drm/exynos/regs-fimc.h
+++ b/drivers/gpu/drm/exynos/regs-fimc.h
@@ -569,7 +569,7 @@
 #define EXYNOS_CIIMGEFF_FIN_EMBOSSING		(4 << 26)
 #define EXYNOS_CIIMGEFF_FIN_SILHOUETTE		(5 << 26)
 #define EXYNOS_CIIMGEFF_FIN_MASK			(7 << 26)
-#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK		((0xff < 13) | (0xff < 0))
+#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK		((0xff << 13) | (0xff << 0))
 
 /* Real input DMA size register */
 #define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE	(1 << 31)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 36a665f..e23748c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3681,7 +3681,11 @@
 					    struct intel_display_error_state *error);
 
 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
+int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, u32 mbox,
+				    u32 val, int timeout_us);
+#define sandybridge_pcode_write(dev_priv, mbox, val)	\
+	sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500)
+
 int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
 		      u32 reply_mask, u32 reply, int timeout_base_ms);
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index ce32303..c185625 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -6012,8 +6012,8 @@
 
 	/* Inform power controller of upcoming frequency change */
 	mutex_lock(&dev_priv->rps.hw_lock);
-	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
-				      0x80000000);
+	ret = sandybridge_pcode_write_timeout(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
+					      0x80000000, 2000);
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	if (ret) {
@@ -6044,8 +6044,9 @@
 	I915_WRITE(CDCLK_CTL, val);
 
 	mutex_lock(&dev_priv->rps.hw_lock);
-	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
-				      DIV_ROUND_UP(cdclk, 25000));
+	ret = sandybridge_pcode_write_timeout(dev_priv,
+					      HSW_PCODE_DE_WRITE_FREQ_REQ,
+					      DIV_ROUND_UP(cdclk, 25000), 2000);
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	if (ret) {
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index e1d47d5..3517c0e 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -321,7 +321,8 @@
 
 	I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON);
 	POSTING_READ(lvds_encoder->reg);
-	if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 1000))
+
+	if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 5000))
 		DRM_ERROR("timed out waiting for panel to power on\n");
 
 	intel_panel_enable_backlight(intel_connector);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 49de476..05427d2 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -7913,8 +7913,8 @@
 	return 0;
 }
 
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
-			    u32 mbox, u32 val)
+int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
+				    u32 mbox, u32 val, int timeout_us)
 {
 	int status;
 
@@ -7935,7 +7935,7 @@
 
 	if (intel_wait_for_register_fw(dev_priv,
 				       GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
-				       500)) {
+				       timeout_us)) {
 		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
 		return -ETIMEDOUT;
 	}
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 6be515a..8dbba61 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -189,7 +189,11 @@
 				  struct drm_crtc_state *old_crtc_state)
 {
 	drm_crtc_vblank_on(crtc);
+}
 
+static void ipu_crtc_atomic_flush(struct drm_crtc *crtc,
+				  struct drm_crtc_state *old_crtc_state)
+{
 	spin_lock_irq(&crtc->dev->event_lock);
 	if (crtc->state->event) {
 		WARN_ON(drm_crtc_vblank_get(crtc));
@@ -257,6 +261,7 @@
 	.mode_set_nofb = ipu_crtc_mode_set_nofb,
 	.atomic_check = ipu_crtc_atomic_check,
 	.atomic_begin = ipu_crtc_atomic_begin,
+	.atomic_flush = ipu_crtc_atomic_flush,
 	.atomic_disable = ipu_crtc_atomic_disable,
 	.enable = ipu_crtc_enable,
 };
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index 5e73dfa..bfe98b5 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -772,8 +772,6 @@
 	catalog = dp_catalog_get_priv(ctrl);
 	io_data = catalog->io.dp_link;
 
-	misc_val = dp_read(catalog, io_data, DP_MISC1_MISC0);
-	misc_val |= cc;
 	misc_val |= (tb << 5);
 	misc_val |= BIT(0); /* Configure clock to synchronous mode */
 
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 3e5cdc6..d9839e7 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -1182,7 +1182,8 @@
 			dp->hdcp.ops->off(dp->hdcp.data);
 	}
 
-	if (dp->usbpd->hpd_high && dp->usbpd->alt_mode_cfg_done) {
+	if (dp->usbpd->hpd_high && !dp_display_is_sink_count_zero(dp) &&
+		dp->usbpd->alt_mode_cfg_done) {
 		if (dp->audio_supported)
 			dp->audio->off(dp->audio);
 
@@ -1235,7 +1236,8 @@
 	 * any notification from driver. Initialize post_open callback to notify
 	 * DP connection once framework restarts.
 	 */
-	if (dp->usbpd->hpd_high && dp->usbpd->alt_mode_cfg_done)
+	if (dp->usbpd->hpd_high && !dp_display_is_sink_count_zero(dp) &&
+		dp->usbpd->alt_mode_cfg_done)
 		dp_display->post_open = dp_display_post_open;
 
 	dp->power_on = false;
@@ -1486,6 +1488,7 @@
 	.driver = {
 		.name = "msm-dp-display",
 		.of_match_table = dp_dt_match,
+		.suppress_bind_attrs = true,
 	},
 };
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
index d083e72..a457070 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
@@ -214,6 +214,7 @@
 	phy->ops.ulps_ops.is_lanes_in_ulps =
 		dsi_phy_hw_v3_0_is_lanes_in_ulps;
 	phy->ops.phy_timing_val = dsi_phy_hw_timing_val_v3_0;
+	phy->ops.clamp_ctrl = dsi_phy_hw_v3_0_clamp_ctrl;
 	phy->ops.phy_lane_reset = dsi_phy_hw_v3_0_lane_reset;
 	phy->ops.toggle_resync_fifo = dsi_phy_hw_v3_0_toggle_resync_fifo;
 }
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
index 0e2db430..9a923aa 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
@@ -102,12 +102,14 @@
 bool dsi_phy_hw_v3_0_is_lanes_in_ulps(u32 lanes, u32 ulps_lanes);
 int dsi_phy_hw_timing_val_v3_0(struct dsi_phy_per_lane_cfgs *timing_cfg,
 		u32 *timing_val, u32 size);
+void dsi_phy_hw_v3_0_clamp_ctrl(struct dsi_phy_hw *phy, bool enable);
 int dsi_phy_hw_v3_0_lane_reset(struct dsi_phy_hw *phy);
 void dsi_phy_hw_v3_0_toggle_resync_fifo(struct dsi_phy_hw *phy);
 
 /* DSI controller common ops */
 u32 dsi_ctrl_hw_cmn_get_interrupt_status(struct dsi_ctrl_hw *ctrl);
-void dsi_ctrl_hw_cmn_debug_bus(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_cmn_debug_bus(struct dsi_ctrl_hw *ctrl, u32 *entries,
+			       u32 size);
 void dsi_ctrl_hw_cmn_clear_interrupt_status(struct dsi_ctrl_hw *ctrl, u32 ints);
 void dsi_ctrl_hw_cmn_enable_status_interrupts(struct dsi_ctrl_hw *ctrl,
 					     u32 ints);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h b/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h
index d89760e..bdc60d2 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h
@@ -43,6 +43,13 @@
 	DSI_LINK_CLK_MAX,
 };
 
+enum dsi_link_clk_op_type {
+	DSI_LINK_CLK_SET_RATE = BIT(0),
+	DSI_LINK_CLK_PREPARE = BIT(1),
+	DSI_LINK_CLK_ENABLE = BIT(2),
+	DSI_LINK_CLK_START = BIT(0) | BIT(1) | BIT(2),
+};
+
 enum dsi_clk_type {
 	DSI_CORE_CLK = BIT(0),
 	DSI_LINK_CLK = BIT(1),
@@ -50,6 +57,12 @@
 	DSI_CLKS_MAX = BIT(2),
 };
 
+enum dsi_lclk_type {
+	DSI_LINK_NONE = 0,
+	DSI_LINK_LP_CLK = BIT(0),
+	DSI_LINK_HS_CLK = BIT(1),
+};
+
 struct dsi_clk_ctrl_info {
 	enum dsi_clk_type clk_type;
 	enum dsi_clk_state clk_state;
@@ -82,23 +95,29 @@
 };
 
 /**
- * struct dsi_link_clk_info - Link clock information for DSI hardware.
- * @byte_clk:        Handle to DSI byte clock.
- * @pixel_clk:       Handle to DSI pixel clock.
- * @esc_clk:         Handle to DSI escape clock.
+ * struct dsi_link_hs_clk_info - Set of high speed link clocks for DSI HW
+ * @byte_clk:        Handle to DSI byte_clk.
+ * @pixel_clk:       Handle to DSI pixel_clk.
  * @byte_intf_clk:   Handle to DSI byte intf. clock.
  */
-struct dsi_link_clk_info {
+struct dsi_link_hs_clk_info {
 	struct clk *byte_clk;
 	struct clk *pixel_clk;
-	struct clk *esc_clk;
 	struct clk *byte_intf_clk;
 };
 
 /**
+ * struct dsi_link_lp_clk_info - Set of low power link clocks for DSI HW.
+ * @esc_clk:         Handle to DSI escape clock.
+ */
+struct dsi_link_lp_clk_info {
+	struct clk *esc_clk;
+};
+
+/**
  * struct link_clk_freq - Clock frequency information for Link clocks
- * @byte_clk_rate:   Frequency of DSI byte clock in KHz.
- * @pixel_clk_rate:  Frequency of DSI pixel clock in KHz.
+ * @byte_clk_rate:   Frequency of DSI byte_clk in KHz.
+ * @pixel_clk_rate:  Frequency of DSI pixel_clk in KHz.
  * @esc_clk_rate:    Frequency of DSI escape clock in KHz.
  */
 struct link_clk_freq {
@@ -111,48 +130,56 @@
  * typedef *pre_clockoff_cb() - Callback before clock is turned off
  * @priv: private data pointer.
  * @clk_type: clock which is being turned off.
+ * @l_type: specifies if the clock is HS or LP type. Valid only for link clocks.
  * @new_state: next state for the clock.
  *
  * @return: error code.
  */
 typedef int (*pre_clockoff_cb)(void *priv,
 			       enum dsi_clk_type clk_type,
+			       enum dsi_lclk_type l_type,
 			       enum dsi_clk_state new_state);
 
 /**
  * typedef *post_clockoff_cb() - Callback after clock is turned off
  * @priv: private data pointer.
  * @clk_type: clock which was turned off.
+ * @l_type: specifies if the clock is HS or LP type. Valid only for link clocks.
  * @curr_state: current state for the clock.
  *
  * @return: error code.
  */
 typedef int (*post_clockoff_cb)(void *priv,
 				enum dsi_clk_type clk_type,
+				enum dsi_lclk_type l_type,
 				enum dsi_clk_state curr_state);
 
 /**
  * typedef *post_clockon_cb() - Callback after clock is turned on
  * @priv: private data pointer.
  * @clk_type: clock which was turned on.
+ * @l_type: specifies if the clock is HS or LP type. Valid only for link clocks.
  * @curr_state: current state for the clock.
  *
  * @return: error code.
  */
 typedef int (*post_clockon_cb)(void *priv,
 			       enum dsi_clk_type clk_type,
+			       enum dsi_lclk_type l_type,
 			       enum dsi_clk_state curr_state);
 
 /**
  * typedef *pre_clockon_cb() - Callback before clock is turned on
  * @priv: private data pointer.
  * @clk_type: clock which is being turned on.
+ * @l_type: specifies if the clock is HS or LP type. Valid only for link clocks.
  * @new_state: next state for the clock.
  *
  * @return: error code.
  */
 typedef int (*pre_clockon_cb)(void *priv,
 			      enum dsi_clk_type clk_type,
+			      enum dsi_lclk_type l_type,
 			      enum dsi_clk_state new_state);
 
 
@@ -160,7 +187,8 @@
  * struct dsi_clk_info - clock information for DSI hardware.
  * @name:                    client name.
  * @c_clks[MAX_DSI_CTRL]     array of core clock configurations
- * @l_clks[MAX_DSI_CTRL]     array of link clock configurations
+ * @l_lp_clks[MAX_DSI_CTRL]  array of low power (esc) clock configurations
+ * @l_hs_clks[MAX_DSI_CTRL]  array of high speed clock configurations
  * @bus_handle[MAX_DSI_CTRL] array of bus handles
  * @ctrl_index[MAX_DSI_CTRL] array of DSI controller indexes mapped
  *                           to core and link clock configurations
@@ -175,7 +203,8 @@
 struct dsi_clk_info {
 	char name[MAX_STRING_LEN];
 	struct dsi_core_clk_info c_clks[MAX_DSI_CTRL];
-	struct dsi_link_clk_info l_clks[MAX_DSI_CTRL];
+	struct dsi_link_lp_clk_info l_lp_clks[MAX_DSI_CTRL];
+	struct dsi_link_hs_clk_info l_hs_clks[MAX_DSI_CTRL];
 	u32 bus_handle[MAX_DSI_CTRL];
 	u32 ctrl_index[MAX_DSI_CTRL];
 	pre_clockoff_cb pre_clkoff_cb;
@@ -189,8 +218,8 @@
 
 /**
  * struct dsi_clk_link_set - Pair of clock handles to describe link clocks
- * @byte_clk:     Handle to DSi byte clock.
- * @pixel_clk:    Handle to DSI pixel clock.
+ * @byte_clk:     Handle to DSI byte_clk.
+ * @pixel_clk:    Handle to DSI pixel_clk.
  */
 struct dsi_clk_link_set {
 	struct clk *byte_clk;
@@ -263,10 +292,10 @@
 
 
 /**
- * dsi_clk_set_pixel_clk_rate() - set frequency for pixel clock
+ * dsi_clk_set_pixel_clk_rate() - set frequency for pixel_clk
  * @client:       DSI clock client pointer.
- * @pixel_clk: Pixel clock rate in Hz.
- * @index:      Index of the DSI controller.
+ * @pixel_clk:    Pixel_clk rate in Hz.
+ * @index:        Index of the DSI controller.
  * return: error code in case of failure or 0 for success.
  */
 int dsi_clk_set_pixel_clk_rate(void *client, u64 pixel_clk, u32 index);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
index 434d383..2e3828e 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
@@ -24,7 +24,8 @@
 };
 
 struct dsi_link_clks {
-	struct dsi_link_clk_info clks;
+	struct dsi_link_hs_clk_info hs_clks;
+	struct dsi_link_lp_clk_info lp_clks;
 	struct link_clk_freq freq;
 };
 
@@ -124,7 +125,7 @@
 	struct dsi_clk_mngr *mngr;
 
 	mngr = c->mngr;
-	rc = clk_set_rate(mngr->link_clks[index].clks.pixel_clk, pixel_clk);
+	rc = clk_set_rate(mngr->link_clks[index].hs_clks.pixel_clk, pixel_clk);
 	if (rc)
 		pr_err("failed to set clk rate for pixel clk, rc=%d\n", rc);
 	else
@@ -147,7 +148,7 @@
 	struct dsi_clk_mngr *mngr;
 
 	mngr = c->mngr;
-	rc = clk_set_rate(mngr->link_clks[index].clks.byte_clk, byte_clk);
+	rc = clk_set_rate(mngr->link_clks[index].hs_clks.byte_clk, byte_clk);
 	if (rc)
 		pr_err("failed to set clk rate for byte clk, rc=%d\n", rc);
 	else
@@ -285,38 +286,39 @@
 	return rc;
 }
 
-static int dsi_link_clk_set_rate(struct dsi_link_clks *l_clks, int index)
+static int dsi_link_hs_clk_set_rate(struct dsi_link_hs_clk_info *link_hs_clks,
+		int index)
 {
 	int rc = 0;
 	struct dsi_clk_mngr *mngr;
+	struct dsi_link_clks *l_clks;
 
 	if (index >= MAX_DSI_CTRL) {
 		pr_err("Invalid DSI ctrl index\n");
 		return -EINVAL;
 	}
 
+	l_clks = container_of(link_hs_clks, struct dsi_link_clks, hs_clks);
 	mngr = container_of(l_clks, struct dsi_clk_mngr, link_clks[index]);
-	if (mngr->is_cont_splash_enabled)
-		return 0;
+
 	/*
 	 * In an ideal world, cont_splash_enabled should not be required inside
 	 * the clock manager. But, in the current driver cont_splash_enabled
 	 * flag is set inside mdp driver and there is no interface event
 	 * associated with this flag setting.
 	 */
-	rc = clk_set_rate(l_clks->clks.esc_clk, l_clks->freq.esc_clk_rate);
-	if (rc) {
-		pr_err("clk_set_rate failed for esc_clk rc = %d\n", rc);
-		goto error;
-	}
+	if (mngr->is_cont_splash_enabled)
+		return 0;
 
-	rc = clk_set_rate(l_clks->clks.byte_clk, l_clks->freq.byte_clk_rate);
+	rc = clk_set_rate(link_hs_clks->byte_clk,
+		l_clks->freq.byte_clk_rate);
 	if (rc) {
 		pr_err("clk_set_rate failed for byte_clk rc = %d\n", rc);
 		goto error;
 	}
 
-	rc = clk_set_rate(l_clks->clks.pixel_clk, l_clks->freq.pix_clk_rate);
+	rc = clk_set_rate(link_hs_clks->pixel_clk,
+		l_clks->freq.pix_clk_rate);
 	if (rc) {
 		pr_err("clk_set_rate failed for pixel_clk rc = %d\n", rc);
 		goto error;
@@ -327,8 +329,8 @@
 	 * For DPHY: byte_intf_clk_rate = byte_clk_rate / 2
 	 * todo: this needs to be revisited when support for CPHY is added
 	 */
-	if (l_clks->clks.byte_intf_clk) {
-		rc = clk_set_rate(l_clks->clks.byte_intf_clk,
+	if (link_hs_clks->byte_intf_clk) {
+		rc = clk_set_rate(link_hs_clks->byte_intf_clk,
 			(l_clks->freq.byte_clk_rate / 2));
 		if (rc) {
 			pr_err("set_rate failed for byte_intf_clk rc = %d\n",
@@ -340,30 +342,24 @@
 	return rc;
 }
 
-static int dsi_link_clk_prepare(struct dsi_link_clks *l_clks)
+static int dsi_link_hs_clk_prepare(struct dsi_link_hs_clk_info *link_hs_clks)
 {
 	int rc = 0;
 
-	rc = clk_prepare(l_clks->clks.esc_clk);
-	if (rc) {
-		pr_err("Failed to prepare dsi esc clk, rc=%d\n", rc);
-		goto esc_clk_err;
-	}
-
-	rc = clk_prepare(l_clks->clks.byte_clk);
+	rc = clk_prepare(link_hs_clks->byte_clk);
 	if (rc) {
 		pr_err("Failed to prepare dsi byte clk, rc=%d\n", rc);
 		goto byte_clk_err;
 	}
 
-	rc = clk_prepare(l_clks->clks.pixel_clk);
+	rc = clk_prepare(link_hs_clks->pixel_clk);
 	if (rc) {
 		pr_err("Failed to prepare dsi pixel clk, rc=%d\n", rc);
 		goto pixel_clk_err;
 	}
 
-	if (l_clks->clks.byte_intf_clk) {
-		rc = clk_prepare(l_clks->clks.byte_intf_clk);
+	if (link_hs_clks->byte_intf_clk) {
+		rc = clk_prepare(link_hs_clks->byte_intf_clk);
 		if (rc) {
 			pr_err("Failed to prepare dsi byte intf clk, rc=%d\n",
 				rc);
@@ -374,48 +370,39 @@
 	return rc;
 
 byte_intf_clk_err:
-	clk_unprepare(l_clks->clks.pixel_clk);
+	clk_unprepare(link_hs_clks->pixel_clk);
 pixel_clk_err:
-	clk_unprepare(l_clks->clks.byte_clk);
+	clk_unprepare(link_hs_clks->byte_clk);
 byte_clk_err:
-	clk_unprepare(l_clks->clks.esc_clk);
-esc_clk_err:
 	return rc;
 }
 
-static void dsi_link_clk_unprepare(struct dsi_link_clks *l_clks)
+static void dsi_link_hs_clk_unprepare(struct dsi_link_hs_clk_info *link_hs_clks)
 {
-	if (l_clks->clks.byte_intf_clk)
-		clk_unprepare(l_clks->clks.byte_intf_clk);
-	clk_unprepare(l_clks->clks.pixel_clk);
-	clk_unprepare(l_clks->clks.byte_clk);
-	clk_unprepare(l_clks->clks.esc_clk);
+	if (link_hs_clks->byte_intf_clk)
+		clk_unprepare(link_hs_clks->byte_intf_clk);
+	clk_unprepare(link_hs_clks->pixel_clk);
+	clk_unprepare(link_hs_clks->byte_clk);
 }
 
-static int dsi_link_clk_enable(struct dsi_link_clks *l_clks)
+static int dsi_link_hs_clk_enable(struct dsi_link_hs_clk_info *link_hs_clks)
 {
 	int rc = 0;
 
-	rc = clk_enable(l_clks->clks.esc_clk);
-	if (rc) {
-		pr_err("Failed to enable dsi esc clk, rc=%d\n", rc);
-		goto esc_clk_err;
-	}
-
-	rc = clk_enable(l_clks->clks.byte_clk);
+	rc = clk_enable(link_hs_clks->byte_clk);
 	if (rc) {
 		pr_err("Failed to enable dsi byte clk, rc=%d\n", rc);
 		goto byte_clk_err;
 	}
 
-	rc = clk_enable(l_clks->clks.pixel_clk);
+	rc = clk_enable(link_hs_clks->pixel_clk);
 	if (rc) {
 		pr_err("Failed to enable dsi pixel clk, rc=%d\n", rc);
 		goto pixel_clk_err;
 	}
 
-	if (l_clks->clks.byte_intf_clk) {
-		rc = clk_enable(l_clks->clks.byte_intf_clk);
+	if (link_hs_clks->byte_intf_clk) {
+		rc = clk_enable(link_hs_clks->byte_intf_clk);
 		if (rc) {
 			pr_err("Failed to enable dsi byte intf clk, rc=%d\n",
 				rc);
@@ -426,28 +413,26 @@
 	return rc;
 
 byte_intf_clk_err:
-	clk_disable(l_clks->clks.pixel_clk);
+	clk_disable(link_hs_clks->pixel_clk);
 pixel_clk_err:
-	clk_disable(l_clks->clks.byte_clk);
+	clk_disable(link_hs_clks->byte_clk);
 byte_clk_err:
-	clk_disable(l_clks->clks.esc_clk);
-esc_clk_err:
 	return rc;
 }
 
-static void dsi_link_clk_disable(struct dsi_link_clks *l_clks)
+static void dsi_link_hs_clk_disable(struct dsi_link_hs_clk_info *link_hs_clks)
 {
-	if (l_clks->clks.byte_intf_clk)
-		clk_disable(l_clks->clks.byte_intf_clk);
-	clk_disable(l_clks->clks.esc_clk);
-	clk_disable(l_clks->clks.pixel_clk);
-	clk_disable(l_clks->clks.byte_clk);
+	if (link_hs_clks->byte_intf_clk)
+		clk_disable(link_hs_clks->byte_intf_clk);
+	clk_disable(link_hs_clks->pixel_clk);
+	clk_disable(link_hs_clks->byte_clk);
 }
 
 /**
  * dsi_link_clk_start() - enable dsi link clocks
  */
-static int dsi_link_clk_start(struct dsi_link_clks *clks, int index)
+static int dsi_link_hs_clk_start(struct dsi_link_hs_clk_info *link_hs_clks,
+	enum dsi_link_clk_op_type op_type, int index)
 {
 	int rc = 0;
 
@@ -456,28 +441,34 @@
 		return -EINVAL;
 	}
 
-	rc = dsi_link_clk_set_rate(clks, index);
-	if (rc) {
-		pr_err("failed to set clk rates, rc = %d\n", rc);
-		goto error;
+	if (op_type & DSI_LINK_CLK_SET_RATE) {
+		rc = dsi_link_hs_clk_set_rate(link_hs_clks, index);
+		if (rc) {
+			pr_err("failed to set HS clk rates, rc = %d\n", rc);
+			goto error;
+		}
 	}
 
-	rc = dsi_link_clk_prepare(clks);
-	if (rc) {
-		pr_err("failed to prepare link clks, rc = %d\n", rc);
-		goto error;
+	if (op_type & DSI_LINK_CLK_PREPARE) {
+		rc = dsi_link_hs_clk_prepare(link_hs_clks);
+		if (rc) {
+			pr_err("failed to prepare link HS clks, rc = %d\n", rc);
+			goto error;
+		}
 	}
 
-	rc = dsi_link_clk_enable(clks);
-	if (rc) {
-		pr_err("failed to enable link clks, rc = %d\n", rc);
-		goto error_unprepare;
+	if (op_type & DSI_LINK_CLK_ENABLE) {
+		rc = dsi_link_hs_clk_enable(link_hs_clks);
+		if (rc) {
+			pr_err("failed to enable link HS clks, rc = %d\n", rc);
+			goto error_unprepare;
+		}
 	}
 
-	pr_debug("Link clocks are enabled\n");
+	pr_debug("HS Link clocks are enabled\n");
 	return rc;
 error_unprepare:
-	dsi_link_clk_unprepare(clks);
+	dsi_link_hs_clk_unprepare(link_hs_clks);
 error:
 	return rc;
 }
@@ -485,13 +476,69 @@
 /**
  * dsi_link_clk_stop() - Stop DSI link clocks.
  */
-int dsi_link_clk_stop(struct dsi_link_clks *clks)
+static int dsi_link_hs_clk_stop(struct dsi_link_hs_clk_info *link_hs_clks)
 {
-	dsi_link_clk_disable(clks);
-	dsi_link_clk_unprepare(clks);
+	struct dsi_link_clks *l_clks;
 
-	pr_debug("Link clocks disabled\n");
+	l_clks = container_of(link_hs_clks, struct dsi_link_clks, hs_clks);
 
+	dsi_link_hs_clk_disable(link_hs_clks);
+	dsi_link_hs_clk_unprepare(link_hs_clks);
+
+	pr_debug("HS Link clocks disabled\n");
+
+	return 0;
+}
+
+static int dsi_link_lp_clk_start(struct dsi_link_lp_clk_info *link_lp_clks)
+{
+	int rc = 0;
+	struct dsi_clk_mngr *mngr;
+	struct dsi_link_clks *l_clks;
+
+	l_clks = container_of(link_lp_clks, struct dsi_link_clks, lp_clks);
+
+	mngr = container_of(l_clks, struct dsi_clk_mngr, link_clks[0]);
+	if (!mngr)
+		return -EINVAL;
+	/*
+	 * In an ideal world, cont_splash_enabled should not be required inside
+	 * the clock manager. But, in the current driver cont_splash_enabled
+	 * flag is set inside mdp driver and there is no interface event
+	 * associated with this flag setting. Also, the clock rate need not be
+	 * set on every enable call; it only needs to be set once when coming
+	 * out of suspend.
+	 */
+	if (mngr->is_cont_splash_enabled)
+		goto prepare;
+
+	rc = clk_set_rate(link_lp_clks->esc_clk, l_clks->freq.esc_clk_rate);
+	if (rc) {
+		pr_err("clk_set_rate failed for esc_clk rc = %d\n", rc);
+		goto error;
+	}
+
+prepare:
+	rc = clk_prepare_enable(link_lp_clks->esc_clk);
+	if (rc) {
+		pr_err("Failed to enable dsi esc clk, rc=%d\n", rc);
+		goto error;
+	}
+	pr_debug("LP Link clocks are enabled\n");
+error:
+	return rc;
+}
+
+static int dsi_link_lp_clk_stop(
+	struct dsi_link_lp_clk_info *link_lp_clks)
+{
+	struct dsi_link_clks *l_clks;
+
+	l_clks = container_of(link_lp_clks, struct dsi_link_clks, lp_clks);
+
+	clk_disable_unprepare(l_clks->lp_clks.esc_clk);
+
+	pr_debug("LP Link clocks are disabled\n");
 	return 0;
 }
 
@@ -556,7 +603,7 @@
 }
 
 static int dsi_display_link_clk_enable(struct dsi_link_clks *clks,
-	u32 ctrl_count, u32 master_ndx)
+	enum dsi_lclk_type l_type, u32 ctrl_count, u32 master_ndx)
 {
 	int rc = 0;
 	int i;
@@ -570,27 +617,56 @@
 
 	m_clks = &clks[master_ndx];
 
-	rc = dsi_link_clk_start(m_clks, master_ndx);
-	if (rc) {
-		pr_err("failed to turn on master clocks, rc=%d\n", rc);
-		goto error;
+	if (l_type & DSI_LINK_LP_CLK) {
+		rc = dsi_link_lp_clk_start(&m_clks->lp_clks);
+		if (rc) {
+			pr_err("failed to turn on master lp link clocks, rc=%d\n",
+				rc);
+			goto error;
+		}
 	}
 
-	/* Turn on rest of the core clocks */
+	if (l_type & DSI_LINK_HS_CLK) {
+		rc = dsi_link_hs_clk_start(&m_clks->hs_clks,
+			DSI_LINK_CLK_START, master_ndx);
+		if (rc) {
+			pr_err("failed to turn on master hs link clocks, rc=%d\n",
+				rc);
+			goto error;
+		}
+	}
+
 	for (i = 0; i < ctrl_count; i++) {
 		clk = &clks[i];
 		if (!clk || (clk == m_clks))
 			continue;
 
-		rc = dsi_link_clk_start(clk, i);
-		if (rc) {
-			pr_err("failed to turn on clocks, rc=%d\n", rc);
-			goto error_disable_master;
+		if (l_type & DSI_LINK_LP_CLK) {
+			rc = dsi_link_lp_clk_start(&clk->lp_clks);
+			if (rc) {
+				pr_err("failed to turn on lp link clocks, rc=%d\n",
+					rc);
+				goto error_disable_master;
+			}
+		}
+
+		if (l_type & DSI_LINK_HS_CLK) {
+			rc = dsi_link_hs_clk_start(&clk->hs_clks,
+				DSI_LINK_CLK_START, i);
+			if (rc) {
+				pr_err("failed to turn on hs link clocks, rc=%d\n",
+					rc);
+				goto error_disable_master;
+			}
 		}
 	}
 	return rc;
+
 error_disable_master:
-	(void)dsi_link_clk_stop(m_clks);
+	if (l_type & DSI_LINK_LP_CLK)
+		(void)dsi_link_lp_clk_stop(&m_clks->lp_clks);
+	if (l_type & DSI_LINK_HS_CLK)
+		(void)dsi_link_hs_clk_stop(&m_clks->hs_clks);
 error:
 	return rc;
 }
@@ -646,7 +722,7 @@
 }
 
 static int dsi_display_link_clk_disable(struct dsi_link_clks *clks,
-	u32 ctrl_count, u32 master_ndx)
+	enum dsi_lclk_type l_type, u32 ctrl_count, u32 master_ndx)
 {
 	int rc = 0;
 	int i;
@@ -667,15 +743,100 @@
 		if (!clk || (clk == m_clks))
 			continue;
 
-		rc = dsi_link_clk_stop(clk);
-		if (rc)
-			pr_err("failed to turn off clocks, rc=%d\n", rc);
+		if (l_type & DSI_LINK_LP_CLK) {
+			rc = dsi_link_lp_clk_stop(&clk->lp_clks);
+			if (rc)
+				pr_err("failed to turn off lp link clocks, rc=%d\n",
+					rc);
+		}
+
+		if (l_type & DSI_LINK_HS_CLK) {
+			rc = dsi_link_hs_clk_stop(&clk->hs_clks);
+			if (rc)
+				pr_err("failed to turn off hs link clocks, rc=%d\n",
+					rc);
+		}
 	}
 
-	rc = dsi_link_clk_stop(m_clks);
-	if (rc)
-		pr_err("failed to turn off master clocks, rc=%d\n", rc);
+	if (l_type & DSI_LINK_LP_CLK) {
+		rc = dsi_link_lp_clk_stop(&m_clks->lp_clks);
+		if (rc)
+			pr_err("failed to turn off master lp link clocks, rc=%d\n",
+				rc);
+	}
 
+	if (l_type & DSI_LINK_HS_CLK) {
+		rc = dsi_link_hs_clk_stop(&m_clks->hs_clks);
+		if (rc)
+			pr_err("failed to turn off master hs link clocks, rc=%d\n",
+				rc);
+	}
+
+	return rc;
+}
+
+static int dsi_clk_update_link_clk_state(struct dsi_link_clks *l_clks,
+	enum dsi_lclk_type l_type, u32 l_state, bool enable)
+{
+	int rc = 0;
+	struct dsi_clk_mngr *mngr;
+
+	mngr = container_of(l_clks, struct dsi_clk_mngr, link_clks[0]);
+	if (!mngr)
+		return -EINVAL;
+
+	if (enable) {
+		if (mngr->pre_clkon_cb) {
+			rc = mngr->pre_clkon_cb(mngr->priv_data, DSI_LINK_CLK,
+				l_type, l_state);
+			if (rc) {
+				pr_err("pre link clk on cb failed for type %d\n",
+					l_type);
+				goto error;
+			}
+		}
+		rc = dsi_display_link_clk_enable(l_clks, l_type,
+				mngr->dsi_ctrl_count, mngr->master_ndx);
+		if (rc) {
+			pr_err("failed to start link clk type %d rc=%d\n",
+				l_type, rc);
+			goto error;
+		}
+
+		if (mngr->post_clkon_cb) {
+			rc = mngr->post_clkon_cb(mngr->priv_data, DSI_LINK_CLK,
+				l_type, l_state);
+			if (rc) {
+				pr_err("post link clk on cb failed for type %d\n",
+					l_type);
+				goto error;
+			}
+		}
+	} else {
+		if (mngr->pre_clkoff_cb) {
+			rc = mngr->pre_clkoff_cb(mngr->priv_data,
+				DSI_LINK_CLK, l_type, l_state);
+			if (rc)
+				pr_err("pre link clk off cb failed\n");
+		}
+
+		rc = dsi_display_link_clk_disable(l_clks, l_type,
+			mngr->dsi_ctrl_count, mngr->master_ndx);
+		if (rc) {
+			pr_err("failed to stop link clk type %d, rc = %d\n",
+			       l_type, rc);
+			goto error;
+		}
+
+		if (mngr->post_clkoff_cb) {
+			rc = mngr->post_clkoff_cb(mngr->priv_data,
+				DSI_LINK_CLK, l_type, l_state);
+			if (rc)
+				pr_err("post link clk off cb failed\n");
+		}
+	}
+
+error:
 	return rc;
 }
 
@@ -710,6 +871,7 @@
 		if (mngr->core_clk_state == DSI_CLK_OFF) {
 			rc = mngr->pre_clkon_cb(mngr->priv_data,
 						DSI_CORE_CLK,
+						DSI_LINK_NONE,
 						DSI_CLK_ON);
 			if (rc) {
 				pr_err("failed to turn on MDP FS rc= %d\n", rc);
@@ -726,6 +888,7 @@
 		if (mngr->post_clkon_cb) {
 			rc = mngr->post_clkon_cb(mngr->priv_data,
 						 DSI_CORE_CLK,
+						 DSI_LINK_NONE,
 						 DSI_CLK_ON);
 			if (rc)
 				pr_err("post clk on cb failed, rc = %d\n", rc);
@@ -735,25 +898,15 @@
 
 	if (l_clks) {
 		if (l_state == DSI_CLK_ON) {
-			if (mngr->pre_clkon_cb) {
-				rc = mngr->pre_clkon_cb(mngr->priv_data,
-					DSI_LINK_CLK, l_state);
-				if (rc)
-					pr_err("pre link clk on cb failed\n");
-			}
-			rc = dsi_display_link_clk_enable(l_clks,
-					mngr->dsi_ctrl_count, mngr->master_ndx);
-			if (rc) {
-				pr_err("failed to start link clk rc= %d\n", rc);
+			rc = dsi_clk_update_link_clk_state(l_clks,
+				DSI_LINK_LP_CLK, l_state, true);
+			if (rc)
 				goto error;
-			}
-			if (mngr->post_clkon_cb) {
-				rc = mngr->post_clkon_cb(mngr->priv_data,
-							DSI_LINK_CLK,
-							l_state);
-				if (rc)
-					pr_err("post link clk on cb failed\n");
-			}
+
+			rc = dsi_clk_update_link_clk_state(l_clks,
+				DSI_LINK_HS_CLK, l_state, true);
+			if (rc)
+				goto error;
 		} else {
 			/*
 			 * Two conditions that need to be checked for Link
@@ -784,36 +937,26 @@
 				}
 
 				rc = dsi_display_link_clk_enable(l_clks,
+					(DSI_LINK_LP_CLK | DSI_LINK_HS_CLK),
 					mngr->dsi_ctrl_count, mngr->master_ndx);
 				if (rc) {
-					pr_err("Link clks did not start\n");
+					pr_err("LP Link clks did not start\n");
 					goto error;
 				}
 				l_c_on = true;
 				pr_debug("ECG: core and Link_on\n");
 			}
 
-			if (mngr->pre_clkoff_cb) {
-				rc = mngr->pre_clkoff_cb(mngr->priv_data,
-					DSI_LINK_CLK, l_state);
-				if (rc)
-					pr_err("pre link clk off cb failed\n");
-			}
-
-			rc = dsi_display_link_clk_disable(l_clks,
-				mngr->dsi_ctrl_count, mngr->master_ndx);
-			if (rc) {
-				pr_err("failed to stop link clk, rc = %d\n",
-				       rc);
+			rc = dsi_clk_update_link_clk_state(l_clks,
+				DSI_LINK_HS_CLK, l_state, false);
+			if (rc)
 				goto error;
-			}
 
-			if (mngr->post_clkoff_cb) {
-				rc = mngr->post_clkoff_cb(mngr->priv_data,
-					DSI_LINK_CLK, l_state);
-				if (rc)
-					pr_err("post link clk off cb failed\n");
-			}
+			rc = dsi_clk_update_link_clk_state(l_clks,
+				DSI_LINK_LP_CLK, l_state, false);
+			if (rc)
+				goto error;
+
 			/*
 			 * This check is to save unnecessary clock state
 			 * change when going from EARLY_GATE to OFF. In the
@@ -872,6 +1015,7 @@
 		if (mngr->pre_clkoff_cb) {
 			rc = mngr->pre_clkoff_cb(mngr->priv_data,
 						 DSI_CORE_CLK,
+						 DSI_LINK_NONE,
 						 c_state);
 			if (rc)
 				pr_err("pre core clk off cb failed\n");
@@ -888,6 +1032,7 @@
 			if (mngr->post_clkoff_cb) {
 				rc = mngr->post_clkoff_cb(mngr->priv_data,
 						DSI_CORE_CLK,
+						DSI_LINK_NONE,
 						DSI_CLK_OFF);
 				if (rc)
 					pr_err("post clkoff cb fail, rc = %d\n",
@@ -1095,7 +1240,8 @@
 	}
 
 	rc = dsi_display_link_clk_disable(l_clks,
-		mngr->dsi_ctrl_count, mngr->master_ndx);
+			(DSI_LINK_LP_CLK | DSI_LINK_HS_CLK),
+			mngr->dsi_ctrl_count, mngr->master_ndx);
 	if (rc) {
 		pr_err("%s, failed to stop link clk, rc = %d\n",
 			__func__, rc);
@@ -1103,7 +1249,8 @@
 	}
 
 	rc = dsi_display_link_clk_enable(l_clks,
-		mngr->dsi_ctrl_count, mngr->master_ndx);
+			(DSI_LINK_LP_CLK | DSI_LINK_HS_CLK),
+			mngr->dsi_ctrl_count, mngr->master_ndx);
 	if (rc) {
 		pr_err("%s, failed to start link clk rc= %d\n",
 			__func__, rc);
@@ -1267,8 +1414,10 @@
 	for (i = 0; i < mngr->dsi_ctrl_count; i++) {
 		memcpy(&mngr->core_clks[i].clks, &info->c_clks[i],
 			sizeof(struct dsi_core_clk_info));
-		memcpy(&mngr->link_clks[i].clks, &info->l_clks[i],
-			sizeof(struct dsi_link_clk_info));
+		memcpy(&mngr->link_clks[i].hs_clks, &info->l_hs_clks[i],
+			sizeof(struct dsi_link_hs_clk_info));
+		memcpy(&mngr->link_clks[i].lp_clks, &info->l_lp_clks[i],
+			sizeof(struct dsi_link_lp_clk_info));
 		mngr->core_clks[i].bus_handle = info->bus_handle[i];
 		mngr->ctrl_index[i] = info->ctrl_index[i];
 	}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index c7c640f5..a4af58a 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -243,6 +243,8 @@
 						dsi_ctrl->cell_index);
 	sde_dbg_reg_register_base(dbg_name, dsi_ctrl->hw.base,
 				msm_iomap_size(dsi_ctrl->pdev, "dsi_ctrl"));
+	sde_dbg_reg_register_dump_range(dbg_name, dbg_name, 0,
+				msm_iomap_size(dsi_ctrl->pdev, "dsi_ctrl"), 0);
 error_remove_dir:
 	debugfs_remove(dir);
 error:
@@ -272,6 +274,8 @@
 	int rc = 0;
 	struct dsi_ctrl_state_info *state = &dsi_ctrl->current_state;
 
+	SDE_EVT32(dsi_ctrl->cell_index, op);
+
 	switch (op) {
 	case DSI_CTRL_OP_POWER_STATE_CHANGE:
 		if (state->power_state == op_state) {
@@ -461,7 +465,7 @@
 	}
 
 	ctrl->hw.base = ptr;
-	pr_debug("[%s] map dsi_ctrl registers to %p\n", ctrl->name,
+	pr_debug("[%s] map dsi_ctrl registers to %pK\n", ctrl->name,
 		 ctrl->hw.base);
 
 	switch (ctrl->version) {
@@ -498,7 +502,8 @@
 static int dsi_ctrl_clocks_deinit(struct dsi_ctrl *ctrl)
 {
 	struct dsi_core_clk_info *core = &ctrl->clk_info.core_clks;
-	struct dsi_link_clk_info *link = &ctrl->clk_info.link_clks;
+	struct dsi_link_lp_clk_info *lp_link = &ctrl->clk_info.lp_link_clks;
+	struct dsi_link_hs_clk_info *hs_link = &ctrl->clk_info.hs_link_clks;
 	struct dsi_clk_link_set *rcg = &ctrl->clk_info.rcg_clks;
 
 	if (core->mdp_core_clk)
@@ -514,16 +519,17 @@
 
 	memset(core, 0x0, sizeof(*core));
 
-	if (link->byte_clk)
-		devm_clk_put(&ctrl->pdev->dev, link->byte_clk);
-	if (link->pixel_clk)
-		devm_clk_put(&ctrl->pdev->dev, link->pixel_clk);
-	if (link->esc_clk)
-		devm_clk_put(&ctrl->pdev->dev, link->esc_clk);
-	if (link->byte_intf_clk)
-		devm_clk_put(&ctrl->pdev->dev, link->byte_intf_clk);
+	if (hs_link->byte_clk)
+		devm_clk_put(&ctrl->pdev->dev, hs_link->byte_clk);
+	if (hs_link->pixel_clk)
+		devm_clk_put(&ctrl->pdev->dev, hs_link->pixel_clk);
+	if (lp_link->esc_clk)
+		devm_clk_put(&ctrl->pdev->dev, lp_link->esc_clk);
+	if (hs_link->byte_intf_clk)
+		devm_clk_put(&ctrl->pdev->dev, hs_link->byte_intf_clk);
 
-	memset(link, 0x0, sizeof(*link));
+	memset(hs_link, 0x0, sizeof(*hs_link));
+	memset(lp_link, 0x0, sizeof(*lp_link));
 
 	if (rcg->byte_clk)
 		devm_clk_put(&ctrl->pdev->dev, rcg->byte_clk);
@@ -540,7 +546,8 @@
 {
 	int rc = 0;
 	struct dsi_core_clk_info *core = &ctrl->clk_info.core_clks;
-	struct dsi_link_clk_info *link = &ctrl->clk_info.link_clks;
+	struct dsi_link_lp_clk_info *lp_link = &ctrl->clk_info.lp_link_clks;
+	struct dsi_link_hs_clk_info *hs_link = &ctrl->clk_info.hs_link_clks;
 	struct dsi_clk_link_set *rcg = &ctrl->clk_info.rcg_clks;
 
 	core->mdp_core_clk = devm_clk_get(&pdev->dev, "mdp_core_clk");
@@ -573,30 +580,30 @@
 		pr_debug("can't get mnoc clock, rc=%d\n", rc);
 	}
 
-	link->byte_clk = devm_clk_get(&pdev->dev, "byte_clk");
-	if (IS_ERR(link->byte_clk)) {
-		rc = PTR_ERR(link->byte_clk);
+	hs_link->byte_clk = devm_clk_get(&pdev->dev, "byte_clk");
+	if (IS_ERR(hs_link->byte_clk)) {
+		rc = PTR_ERR(hs_link->byte_clk);
 		pr_err("failed to get byte_clk, rc=%d\n", rc);
 		goto fail;
 	}
 
-	link->pixel_clk = devm_clk_get(&pdev->dev, "pixel_clk");
-	if (IS_ERR(link->pixel_clk)) {
-		rc = PTR_ERR(link->pixel_clk);
+	hs_link->pixel_clk = devm_clk_get(&pdev->dev, "pixel_clk");
+	if (IS_ERR(hs_link->pixel_clk)) {
+		rc = PTR_ERR(hs_link->pixel_clk);
 		pr_err("failed to get pixel_clk, rc=%d\n", rc);
 		goto fail;
 	}
 
-	link->esc_clk = devm_clk_get(&pdev->dev, "esc_clk");
-	if (IS_ERR(link->esc_clk)) {
-		rc = PTR_ERR(link->esc_clk);
+	lp_link->esc_clk = devm_clk_get(&pdev->dev, "esc_clk");
+	if (IS_ERR(lp_link->esc_clk)) {
+		rc = PTR_ERR(lp_link->esc_clk);
 		pr_err("failed to get esc_clk, rc=%d\n", rc);
 		goto fail;
 	}
 
-	link->byte_intf_clk = devm_clk_get(&pdev->dev, "byte_intf_clk");
-	if (IS_ERR(link->byte_intf_clk)) {
-		link->byte_intf_clk = NULL;
+	hs_link->byte_intf_clk = devm_clk_get(&pdev->dev, "byte_intf_clk");
+	if (IS_ERR(hs_link->byte_intf_clk)) {
+		hs_link->byte_intf_clk = NULL;
 		pr_debug("can't find byte intf clk, rc=%d\n", rc);
 	}
 
@@ -1265,6 +1272,7 @@
 		.type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
 		.tx_len = 2,
 		.tx_buf = tx,
+		.flags = rx_msg->flags,
 	};
 
 	rc = dsi_message_tx(dsi_ctrl, &msg, flags);
@@ -1335,13 +1343,20 @@
 	u32 current_read_len = 0, total_bytes_read = 0;
 	bool short_resp = false;
 	bool read_done = false;
-	u32 dlen, diff, rlen = msg->rx_len;
+	u32 dlen, diff, rlen;
 	unsigned char *buff;
 	char cmd;
 	struct dsi_cmd_desc *of_cmd;
 
+	if (!msg) {
+		pr_err("Invalid msg\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
 	of_cmd = container_of(msg, struct dsi_cmd_desc, msg);
 
+	rlen = msg->rx_len;
 	if (msg->rx_len <= 2) {
 		short_resp = true;
 		rd_pkt_size = msg->rx_len;
@@ -1378,9 +1393,9 @@
 		 * wait before reading rdbk_data register, if any delay is
 		 * required after sending the read command.
 		 */
-		if (of_cmd && of_cmd->post_wait_ms)
-			usleep_range(of_cmd->post_wait_ms * 1000,
-				     ((of_cmd->post_wait_ms * 1000) + 10));
+		if (msg->wait_ms)
+			usleep_range(msg->wait_ms * 1000,
+				     ((msg->wait_ms * 1000) + 10));
 
 		dlen = dsi_ctrl->hw.ops.get_cmd_read_data(&dsi_ctrl->hw,
 					buff, total_bytes_read,
@@ -1783,16 +1798,20 @@
 	.driver = {
 		.name = "drm_dsi_ctrl",
 		.of_match_table = msm_dsi_of_match,
+		.suppress_bind_attrs = true,
 	},
 };
 
 #if defined(CONFIG_DEBUG_FS)
 
-void dsi_ctrl_debug_dump(void)
+void dsi_ctrl_debug_dump(u32 *entries, u32 size)
 {
 	struct list_head *pos, *tmp;
 	struct dsi_ctrl *ctrl = NULL;
 
+	if (!entries || !size)
+		return;
+
 	mutex_lock(&dsi_ctrl_list_lock);
 	list_for_each_safe(pos, tmp, &dsi_ctrl_list) {
 		struct dsi_ctrl_list_item *n;
@@ -1800,7 +1819,7 @@
 		n = list_entry(pos, struct dsi_ctrl_list_item, list);
 		ctrl = n->ctrl;
 		pr_err("dsi ctrl:%d\n", ctrl->cell_index);
-		ctrl->hw.ops.debug_bus(&ctrl->hw);
+		ctrl->hw.ops.debug_bus(&ctrl->hw, entries, size);
 	}
 	mutex_unlock(&dsi_ctrl_list_lock);
 }
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
index e08ef99..6ac7dd7 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
@@ -60,6 +60,18 @@
 #define DSI_CTRL_MAX_CMD_FIFO_STORE_SIZE 64
 
 /**
+ * enum dsi_channel_id - defines dsi channel id.
+ * @DSI_CTRL_LEFT:    DSI 0 channel
+ * @DSI_CTRL_RIGHT:   DSI 1 channel
+ * @DSI_CTRL_MAX:  Maximum value.
+ */
+enum dsi_channel_id {
+	DSI_CTRL_LEFT = 0,
+	DSI_CTRL_RIGHT,
+	DSI_CTRL_MAX,
+};
+
+/**
  * enum dsi_power_state - defines power states for dsi controller.
  * @DSI_CTRL_POWER_VREG_OFF:    Digital and analog supplies for DSI controller
 				turned off
@@ -100,7 +112,8 @@
 /**
  * struct dsi_ctrl_clk_info - clock information for DSI controller
  * @core_clks:          Core clocks needed to access DSI controller registers.
- * @link_clks:          Link clocks required to transmit data over DSI link.
+ * @hs_link_clks:       Clocks required to transmit high speed data over DSI
+ * @lp_link_clks:       Clocks required to perform low power ops over DSI
  * @rcg_clks:           Root clock generation clocks generated in MMSS_CC. The
  *			output of the PLL is set as parent for these root
  *			clocks. These clocks are specific to controller
@@ -114,7 +127,8 @@
 struct dsi_ctrl_clk_info {
 	/* Clocks parsed from DT */
 	struct dsi_core_clk_info core_clks;
-	struct dsi_link_clk_info link_clks;
+	struct dsi_link_hs_clk_info hs_link_clks;
+	struct dsi_link_lp_clk_info lp_link_clks;
 	struct dsi_clk_link_set rcg_clks;
 
 	/* Clocks set by DSI Manager */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
index d6fab59..73aab3f 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
@@ -457,8 +457,10 @@
 	/**
 	 * debug_bus() - get dsi debug bus status.
 	 * @ctrl:        Pointer to the controller host hardware.
+	 * @entries:     Array of dsi debug bus control values.
+	 * @size:        Size of dsi debug bus control array.
 	 */
-	void (*debug_bus)(struct dsi_ctrl_hw *ctrl);
+	void (*debug_bus)(struct dsi_ctrl_hw *ctrl, u32 *entries, u32 size);
 
 	/**
 	 * soft_reset() - perform a soft reset on DSI controller
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
index c1af52f..6dde454 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
@@ -459,18 +459,20 @@
 	pr_debug("[DSI_%d] Video engine setup done\n", ctrl->index);
 }
 
-void dsi_ctrl_hw_cmn_debug_bus(struct dsi_ctrl_hw *ctrl)
+void dsi_ctrl_hw_cmn_debug_bus(struct dsi_ctrl_hw *ctrl, u32 *entries, u32 size)
 {
-	u32 reg = 0;
+	u32 reg = 0, i = 0;
 
-	DSI_W32(ctrl, DSI_DEBUG_BUS_CTL, 0x181);
-
-	/* make sure that debug test point is enabled */
-	wmb();
-	reg = DSI_R32(ctrl, DSI_DEBUG_BUS_STATUS);
-
-	pr_err("[DSI_%d] debug bus status:0x%x\n", ctrl->index, reg);
+	for (i = 0; i < size; i++) {
+		DSI_W32(ctrl, DSI_DEBUG_BUS_CTL, entries[i]);
+		/* make sure that debug test point is enabled */
+		wmb();
+		reg = DSI_R32(ctrl, DSI_DEBUG_BUS_STATUS);
+		pr_err("[DSI_%d] debug bus ctrl: 0x%x status:0x%x\n",
+				ctrl->index, entries[i], reg);
+	}
 }
+
 /**
  * cmd_engine_setup() - setup dsi host controller for command mode
  * @ctrl:          Pointer to the controller host hardware.
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index 5c34b8e..9a75de6 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -48,13 +48,16 @@
 static char dsi_display_primary[MAX_CMDLINE_PARAM_LEN];
 static char dsi_display_secondary[MAX_CMDLINE_PARAM_LEN];
 static struct dsi_display_boot_param boot_displays[MAX_DSI_ACTIVE_DISPLAY];
-static struct device_node *default_active_node;
+static struct device_node *primary_active_node;
+static struct device_node *secondary_active_node;
+
 static const struct of_device_id dsi_display_dt_match[] = {
 	{.compatible = "qcom,dsi-display"},
 	{}
 };
 
-static struct dsi_display *main_display;
+static struct dsi_display *primary_display;
+static struct dsi_display *secondary_display;
 
 static void dsi_display_mask_ctrl_error_interrupts(struct dsi_display *display)
 {
@@ -568,6 +571,8 @@
 
 	for (i = 0; i < count; ++i) {
 		memset(config->status_buf, 0x0, SZ_4K);
+		if (config->status_cmd.state == DSI_CMD_SET_STATE_LP)
+			cmds[i].msg.flags |= MIPI_DSI_MSG_USE_LPM;
 		if (cmds[i].last_command) {
 			cmds[i].msg.flags |= MIPI_DSI_MSG_LASTCOMMAND;
 			flags |= DSI_CTRL_CMD_LAST_COMMAND;
@@ -614,12 +619,20 @@
 
 static int dsi_display_status_reg_read(struct dsi_display *display)
 {
-	int rc = 0, i;
+	int rc = 0, i, cmd_channel_idx = DSI_CTRL_LEFT;
 	struct dsi_display_ctrl *m_ctrl, *ctrl;
 
 	pr_debug(" ++\n");
 
-	m_ctrl = &display->ctrl[display->cmd_master_idx];
+	/*
+	 * Check the panel's DSI command channel.
+	 * If cmd_channel is set, send the command over the right
+	 * (DSI1) controller; otherwise use the left (DSI0)
+	 * controller.
+	 */
+	if (display->panel->esd_config.cmd_channel)
+		cmd_channel_idx = DSI_CTRL_RIGHT;
+	m_ctrl = &display->ctrl[cmd_channel_idx];
 
 	if (display->tx_cmd_buf == NULL) {
 		rc = dsi_host_alloc_cmd_tx_buffer(display);
@@ -715,6 +728,7 @@
 		dsi_panel_release_panel_lock(panel);
 		return rc;
 	}
+	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);
 
 	if (te_check_override && gpio_is_valid(dsi_display->disp_te_gpio))
 		status_mode = ESD_MODE_PANEL_TE;
@@ -738,6 +752,7 @@
 	dsi_display_clk_ctrl(dsi_display->dsi_clk_handle,
 		DSI_ALL_CLKS, DSI_CLK_OFF);
 	dsi_panel_release_panel_lock(panel);
+	SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
 
 	return rc;
 }
@@ -753,7 +768,7 @@
 	cmd->msg.channel = cmd_buf[2];
 	cmd->msg.flags = cmd_buf[3];
 	cmd->msg.ctrl = 0;
-	cmd->post_wait_ms = cmd_buf[4];
+	cmd->post_wait_ms = cmd->msg.wait_ms = cmd_buf[4];
 	cmd->msg.tx_len = ((cmd_buf[5] << 8) | (cmd_buf[6]));
 
 	if (cmd->msg.tx_len > payload_len) {
@@ -1153,7 +1168,7 @@
 		rc = dsi_panel_trigger_esd_attack(display->panel);
 		if (rc) {
 			pr_err("Failed to trigger ESD attack\n");
-			return rc;
+			goto error;
 		}
 	}
 
@@ -1577,9 +1592,19 @@
 	m_ctrl = &display->ctrl[display->cmd_master_idx];
 	ulps_enabled = display->ulps_enabled;
 
+	/*
+	 * Clamp control can be done either through the DSI controller
+	 * or the DSI PHY, depending on the hardware variant.
+	 */
 	rc = dsi_ctrl_set_clamp_state(m_ctrl->ctrl, enable, ulps_enabled);
 	if (rc) {
-		pr_err("DSI Clamp state change(%d) failed\n", enable);
+		pr_err("DSI ctrl clamp state change(%d) failed\n", enable);
+		return rc;
+	}
+
+	rc = dsi_phy_set_clamp_state(m_ctrl->phy, enable);
+	if (rc) {
+		pr_err("DSI phy clamp state change(%d) failed\n", enable);
 		return rc;
 	}
 
@@ -1593,7 +1618,18 @@
 			pr_err("DSI Clamp state change(%d) failed\n", enable);
 			return rc;
 		}
+
+		rc = dsi_phy_set_clamp_state(ctrl->phy, enable);
+		if (rc) {
+			pr_err("DSI phy clamp state change(%d) failed\n",
+				enable);
+			return rc;
+		}
+
+		pr_debug("Clamps %s for ctrl%d\n",
+			enable ? "enabled" : "disabled", i);
 	}
+
 	display->clamp_enabled = enable;
 	return 0;
 }
@@ -1986,11 +2022,9 @@
 			boot_displays[i].name[j] = *(disp_buf + j);
 		boot_displays[i].name[j] = '\0';
 
-		if (i == DSI_PRIMARY) {
+		if (i == DSI_PRIMARY)
 			boot_displays[i].is_primary = true;
-			/* Currently, secondary DSI display is not supported */
-			boot_displays[i].boot_disp_en = true;
-		}
+		boot_displays[i].boot_disp_en = true;
 	}
 	return 0;
 }
@@ -2013,6 +2047,8 @@
 
 	for (i = 0; i < MAX_DSI_ACTIVE_DISPLAY; i++) {
 		node = boot_displays[i].node;
+		if (!node)
+			continue;
 		ctrl_count = of_count_phandle_with_args(node, "qcom,dsi-ctrl",
 								NULL);
 
@@ -2054,11 +2090,12 @@
 
 	pr_err("index = %d\n", index);
 
-	if (boot_displays[index].node)
-		return boot_displays[index].node;
-	else if ((index == (MAX_DSI_ACTIVE_DISPLAY - 1))
-			&& (default_active_node))
-		return default_active_node;
+	if ((index == DSI_PRIMARY)
+			&& (primary_active_node))
+		return primary_active_node;
+	else if ((index == DSI_SECONDARY)
+			&& (secondary_active_node))
+		return secondary_active_node;
 	else
 		return NULL;
 }
@@ -2998,12 +3035,14 @@
 
 int dsi_pre_clkoff_cb(void *priv,
 			   enum dsi_clk_type clk,
+			   enum dsi_lclk_type l_type,
 			   enum dsi_clk_state new_state)
 {
 	int rc = 0;
 	struct dsi_display *display = priv;
 
-	if ((clk & DSI_LINK_CLK) && (new_state == DSI_CLK_OFF)) {
+	if ((clk & DSI_LINK_CLK) && (new_state == DSI_CLK_OFF) &&
+		(l_type & DSI_LINK_LP_CLK)) {
 		/*
 		 * If ULPS feature is enabled, enter ULPS first.
 		 * However, when blanking the panel, we should enter ULPS
@@ -3055,13 +3094,14 @@
 
 int dsi_post_clkon_cb(void *priv,
 			   enum dsi_clk_type clk,
+			   enum dsi_lclk_type l_type,
 			   enum dsi_clk_state curr_state)
 {
 	int rc = 0;
 	struct dsi_display *display = priv;
 	bool mmss_clamp = false;
 
-	if (clk & DSI_CORE_CLK) {
+	if ((clk & DSI_LINK_CLK) && (l_type & DSI_LINK_LP_CLK)) {
 		mmss_clamp = display->clamp_enabled;
 		/*
 		 * controller setup is needed if coming out of idle
@@ -3070,6 +3110,13 @@
 		if (mmss_clamp)
 			dsi_display_ctrl_setup(display);
 
+		/*
+		 * Phy setup is needed if coming out of idle
+		 * power collapse with clamps enabled.
+		 */
+		if (display->phy_idle_power_off || mmss_clamp)
+			dsi_display_phy_idle_on(display, mmss_clamp);
+
 		if (display->ulps_enabled && mmss_clamp) {
 			/*
 			 * ULPS Entry Request. This is needed if the lanes were
@@ -3107,18 +3154,9 @@
 				__func__, rc);
 			goto error;
 		}
-
-		/*
-		 * Phy setup is needed if coming out of idle
-		 * power collapse with clamps enabled.
-		 */
-		if (display->phy_idle_power_off || mmss_clamp)
-			dsi_display_phy_idle_on(display, mmss_clamp);
-
-		/* enable dsi to serve irqs */
-		dsi_display_ctrl_irq_update(display, true);
 	}
-	if (clk & DSI_LINK_CLK) {
+
+	if ((clk & DSI_LINK_CLK) && (l_type & DSI_LINK_HS_CLK)) {
 		/*
 		 * Toggle the resync FIFO everytime clock changes, except
 		 * when cont-splash screen transition is going on.
@@ -3137,12 +3175,18 @@
 			}
 		}
 	}
+
+	/* enable dsi to serve irqs */
+	if (clk & DSI_CORE_CLK)
+		dsi_display_ctrl_irq_update(display, true);
+
 error:
 	return rc;
 }
 
 int dsi_post_clkoff_cb(void *priv,
 			    enum dsi_clk_type clk_type,
+			    enum dsi_lclk_type l_type,
 			    enum dsi_clk_state curr_state)
 {
 	int rc = 0;
@@ -3170,6 +3214,7 @@
 
 int dsi_pre_clkon_cb(void *priv,
 			  enum dsi_clk_type clk_type,
+			  enum dsi_lclk_type l_type,
 			  enum dsi_clk_state new_state)
 {
 	int rc = 0;
@@ -3638,6 +3683,7 @@
 	/* For split DSI, update the clock master first */
 
 	pr_debug("configuring seamless dynamic fps\n\n");
+	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);
 
 	m_ctrl = &display->ctrl[display->clk_master_idx];
 	rc = dsi_ctrl_async_timing_update(m_ctrl->ctrl, timing);
@@ -3672,6 +3718,7 @@
 	panel_mode->dsi_mode_flags = 0;
 
 error:
+	SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
 	return rc;
 }
 
@@ -4386,10 +4433,16 @@
 			goto error_ctrl_deinit;
 		}
 
-		memcpy(&info.c_clks[i], &display_ctrl->ctrl->clk_info.core_clks,
-			sizeof(struct dsi_core_clk_info));
-		memcpy(&info.l_clks[i], &display_ctrl->ctrl->clk_info.link_clks,
-			sizeof(struct dsi_link_clk_info));
+		memcpy(&info.c_clks[i],
+				(&display_ctrl->ctrl->clk_info.core_clks),
+				sizeof(struct dsi_core_clk_info));
+		memcpy(&info.l_hs_clks[i],
+				(&display_ctrl->ctrl->clk_info.hs_link_clks),
+				sizeof(struct dsi_link_hs_clk_info));
+		memcpy(&info.l_lp_clks[i],
+				(&display_ctrl->ctrl->clk_info.lp_link_clks),
+				sizeof(struct dsi_link_lp_clk_info));
+
 		info.c_clks[i].phandle = &priv->phandle;
 		info.bus_handle[i] =
 			display_ctrl->ctrl->axi_bus_info.bus_handle;
@@ -4592,6 +4645,7 @@
 	.driver = {
 		.name = "msm-dsi-display",
 		.of_match_table = dsi_display_dt_match,
+		.suppress_bind_attrs = true,
 	},
 };
 
@@ -4599,8 +4653,7 @@
 {
 	int rc = 0;
 	struct dsi_display *display;
-	static bool display_from_cmdline, boot_displays_parsed;
-	static bool comp_add_success;
+	static bool boot_displays_parsed;
 	static struct device_node *primary_np, *secondary_np;
 
 	if (!pdev || !pdev->dev.of_node) {
@@ -4629,55 +4682,53 @@
 	display->cmdline_topology = NO_OVERRIDE;
 	display->cmdline_timing = 0;
 
-	if ((!display_from_cmdline) &&
-			(boot_displays[DSI_PRIMARY].boot_disp_en)) {
-		display->is_active = dsi_display_name_compare(pdev->dev.of_node,
-						display->name, DSI_PRIMARY);
-		if (display->is_active) {
-			if (comp_add_success) {
-				(void)_dsi_display_dev_deinit(main_display);
-				component_del(&main_display->pdev->dev,
-					      &dsi_display_comp_ops);
-				mutex_lock(&dsi_display_list_lock);
-				list_del(&main_display->list);
-				mutex_unlock(&dsi_display_list_lock);
-				comp_add_success = false;
-				default_active_node = NULL;
-				pr_debug("removed the existing comp ops\n");
-			}
-			/*
-			 * Need to add component for
-			 * the secondary DSI display
-			 * when more than one DSI display
-			 * is supported.
-			 */
-			pr_debug("cmdline primary dsi: %s\n",
-						display->name);
-			display_from_cmdline = true;
-			dsi_display_parse_cmdline_topology(display,
-					DSI_PRIMARY);
-			primary_np = pdev->dev.of_node;
+	if (boot_displays[DSI_PRIMARY].boot_disp_en && !primary_np &&
+		dsi_display_name_compare(pdev->dev.of_node,
+			display->name, DSI_PRIMARY)) {
+		if (primary_display) {
+			(void)_dsi_display_dev_deinit(primary_display);
+			component_del(&primary_display->pdev->dev,
+					&dsi_display_comp_ops);
+			mutex_lock(&dsi_display_list_lock);
+			list_del(&primary_display->list);
+			mutex_unlock(&dsi_display_list_lock);
+			primary_active_node = NULL;
+			pr_debug("removed the existing comp ops\n");
 		}
+		/*
+		 * Need to add component for
+		 * the secondary DSI display
+		 * when more than one DSI display
+		 * is supported.
+		 */
+		pr_debug("cmdline primary dsi: %s\n", display->name);
+		display->is_active = true;
+		dsi_display_parse_cmdline_topology(display, DSI_PRIMARY);
+		primary_np = pdev->dev.of_node;
 	}
 
-	if (boot_displays[DSI_SECONDARY].boot_disp_en) {
-		if (!secondary_np) {
-			if (dsi_display_name_compare(pdev->dev.of_node,
-				display->name, DSI_SECONDARY)) {
-				pr_debug("cmdline secondary dsi: %s\n",
-							display->name);
-				secondary_np = pdev->dev.of_node;
-				if (primary_np) {
-					if (validate_dsi_display_selection()) {
-					display->is_active = true;
-					dsi_display_parse_cmdline_topology
-						(display, DSI_SECONDARY);
-					} else {
-						boot_displays[DSI_SECONDARY]
-							.boot_disp_en = false;
-					}
-				}
+	if (boot_displays[DSI_SECONDARY].boot_disp_en && !secondary_np &&
+		dsi_display_name_compare(pdev->dev.of_node,
+			display->name, DSI_SECONDARY)) {
+		pr_debug("cmdline secondary dsi: %s\n", display->name);
+		if (validate_dsi_display_selection()) {
+			if (secondary_display) {
+				(void)_dsi_display_dev_deinit(
+						secondary_display);
+				component_del(&secondary_display->pdev->dev,
+						&dsi_display_comp_ops);
+				mutex_lock(&dsi_display_list_lock);
+				list_del(&secondary_display->list);
+				mutex_unlock(&dsi_display_list_lock);
+				secondary_active_node = NULL;
+				pr_debug("removed the existing comp ops\n");
 			}
+			display->is_active = true;
+			dsi_display_parse_cmdline_topology(display,
+					DSI_SECONDARY);
+			secondary_np = pdev->dev.of_node;
+		} else {
+			boot_displays[DSI_SECONDARY].boot_disp_en = false;
 		}
 	}
 	display->display_type = of_get_property(pdev->dev.of_node,
@@ -4692,12 +4743,18 @@
 	list_add(&display->list, &dsi_display_list);
 	mutex_unlock(&dsi_display_list_lock);
 
-	if (!display_from_cmdline)
+	if (!strcmp(display->display_type, "primary") && !primary_np)
+		display->is_active = of_property_read_bool(pdev->dev.of_node,
+						"qcom,dsi-display-active");
+	else if (strcmp(display->display_type, "primary") && !secondary_np)
 		display->is_active = of_property_read_bool(pdev->dev.of_node,
 						"qcom,dsi-display-active");
 
 	if (display->is_active) {
-		main_display = display;
+		if (!strcmp(display->display_type, "primary"))
+			primary_display = display;
+		else
+			secondary_display = display;
 		rc = _dsi_display_dev_init(display);
 		if (rc) {
 			pr_err("device init failed, rc=%d\n", rc);
@@ -4708,10 +4765,11 @@
 		if (rc)
 			pr_err("component add failed, rc=%d\n", rc);
 
-		comp_add_success = true;
 		pr_debug("Component_add success: %s\n", display->name);
-		if (!display_from_cmdline)
-			default_active_node = pdev->dev.of_node;
+		if (!strcmp(display->display_type, "primary"))
+			primary_active_node = pdev->dev.of_node;
+		else
+			secondary_active_node = pdev->dev.of_node;
 	}
 	return rc;
 }
@@ -4950,7 +5008,10 @@
 		info->h_tile_instance[i] = display->ctrl[i].ctrl->cell_index;
 
 	info->is_connected = true;
-	info->is_primary = true;
+	info->is_primary = false;
+	if (!strcmp(display->display_type, "primary"))
+		info->is_primary = true;
+
 	info->width_mm = phy_props.panel_width_mm;
 	info->height_mm = phy_props.panel_height_mm;
 	info->max_width = 1920;
@@ -5787,6 +5848,7 @@
 		return -EINVAL;
 	}
 
+	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);
 	mutex_lock(&display->display_lock);
 
 	mode = display->panel->cur_mode;
@@ -5921,6 +5983,7 @@
 	(void)dsi_panel_post_unprepare(display->panel);
 error:
 	mutex_unlock(&display->display_lock);
+	SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
 	return rc;
 }
 
@@ -6144,6 +6207,7 @@
 		pr_err("no valid mode set for the display");
 		return -EINVAL;
 	}
+	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);
 
 	/* Engine states and panel states are populated during splash
 	 * resource init and hence we return early
@@ -6229,6 +6293,7 @@
 	(void)dsi_panel_disable(display->panel);
 error:
 	mutex_unlock(&display->display_lock);
+	SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
 	return rc;
 }
 
@@ -6291,6 +6356,7 @@
 		return -EINVAL;
 	}
 
+	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);
 	mutex_lock(&display->display_lock);
 
 	rc = dsi_display_wake_up(display);
@@ -6319,6 +6385,7 @@
 		       display->name, rc);
 
 	mutex_unlock(&display->display_lock);
+	SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
 	return rc;
 }
 
@@ -6348,6 +6415,7 @@
 		return -EINVAL;
 	}
 
+	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);
 	mutex_lock(&display->display_lock);
 
 	rc = dsi_display_wake_up(display);
@@ -6401,6 +6469,7 @@
 		       display->name, rc);
 
 	mutex_unlock(&display->display_lock);
+	SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
 	return rc;
 }
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
index 6b1c029..5612016 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
@@ -497,12 +497,14 @@
  * dsi_pre_clkoff_cb() - Callback before clock is turned off
  * @priv: private data pointer.
  * @clk_type: clock which is being turned on.
+ * @l_type: specifies if the clock is HS or LP type. Valid only for link clocks.
  * @new_state: next state for the clock.
  *
  * @return: error code.
  */
 int dsi_pre_clkoff_cb(void *priv, enum dsi_clk_type clk_type,
-	enum dsi_clk_state new_state);
+		enum dsi_lclk_type l_type,
+		enum dsi_clk_state new_state);
 
 /**
  * dsi_display_update_pps() - update PPS buffer.
@@ -519,35 +521,40 @@
  * dsi_post_clkoff_cb() - Callback after clock is turned off
  * @priv: private data pointer.
  * @clk_type: clock which is being turned on.
+ * @l_type: specifies if the clock is HS or LP type. Valid only for link clocks.
  * @curr_state: current state for the clock.
  *
  * @return: error code.
  */
 int dsi_post_clkoff_cb(void *priv, enum dsi_clk_type clk_type,
-	enum dsi_clk_state curr_state);
+		enum dsi_lclk_type l_type,
+		enum dsi_clk_state curr_state);
 
 /**
  * dsi_post_clkon_cb() - Callback after clock is turned on
  * @priv: private data pointer.
  * @clk_type: clock which is being turned on.
+ * @l_type: specifies if the clock is HS or LP type. Valid only for link clocks.
  * @curr_state: current state for the clock.
  *
  * @return: error code.
  */
 int dsi_post_clkon_cb(void *priv, enum dsi_clk_type clk_type,
-	enum dsi_clk_state curr_state);
-
+		enum dsi_lclk_type l_type,
+		enum dsi_clk_state curr_state);
 
 /**
  * dsi_pre_clkon_cb() - Callback before clock is turned on
  * @priv: private data pointer.
  * @clk_type: clock which is being turned on.
+ * @l_type: specifies if the clock is HS or LP type. Valid only for link clocks.
  * @new_state: next state for the clock.
  *
  * @return: error code.
  */
 int dsi_pre_clkon_cb(void *priv, enum dsi_clk_type clk_type,
-	enum dsi_clk_state new_state);
+		enum dsi_lclk_type l_type,
+		enum dsi_clk_state new_state);
 
 /**
  * dsi_display_unprepare() - power off display hardware.
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index 79df631..dab85f4 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -1536,7 +1536,7 @@
 		cmd[i].msg.channel = data[2];
 		cmd[i].msg.flags |= (data[3] == 1 ? MIPI_DSI_MSG_REQ_ACK : 0);
 		cmd[i].msg.ctrl = 0;
-		cmd[i].post_wait_ms = data[4];
+		cmd[i].post_wait_ms = cmd[i].msg.wait_ms = data[4];
 		cmd[i].msg.tx_len = ((data[5] << 8) | (data[6]));
 
 		size = cmd[i].msg.tx_len * sizeof(u8);
@@ -1771,6 +1771,9 @@
 
 	panel->sync_broadcast_en = of_property_read_bool(of_node,
 			"qcom,cmd-sync-wait-broadcast");
+
+	panel->lp11_init = of_property_read_bool(of_node,
+			"qcom,mdss-dsi-lp11-init");
 	return 0;
 }
 
@@ -2773,6 +2776,9 @@
 				esd_config->groups * status_len);
 	}
 
+	esd_config->cmd_channel = of_property_read_bool(of_node,
+		"qcom,mdss-dsi-panel-cmds-only-by-right");
+
 	return 0;
 
 error4:
@@ -3511,6 +3517,7 @@
 	set->cmds[0].msg.tx_buf = caset;
 	set->cmds[0].msg.rx_len = 0;
 	set->cmds[0].msg.rx_buf = 0;
+	set->cmds[0].msg.wait_ms = 0;
 	set->cmds[0].last_command = 0;
 	set->cmds[0].post_wait_ms = 0;
 
@@ -3522,6 +3529,7 @@
 	set->cmds[1].msg.tx_buf = paset;
 	set->cmds[1].msg.rx_len = 0;
 	set->cmds[1].msg.rx_buf = 0;
+	set->cmds[1].msg.wait_ms = 0;
 	set->cmds[1].last_command = 1;
 	set->cmds[1].post_wait_ms = 0;
 
@@ -3746,14 +3754,6 @@
 		goto error;
 	}
 
-	if (panel->lp11_init) {
-		rc = dsi_panel_power_off(panel);
-		if (rc) {
-			pr_err("[%s] panel power_Off failed, rc=%d\n",
-			       panel->name, rc);
-			goto error;
-		}
-	}
 error:
 	mutex_unlock(&panel->panel_lock);
 	return rc;
@@ -3773,13 +3773,11 @@
 
 	mutex_lock(&panel->panel_lock);
 
-	if (!panel->lp11_init) {
-		rc = dsi_panel_power_off(panel);
-		if (rc) {
-			pr_err("[%s] panel power_Off failed, rc=%d\n",
-			       panel->name, rc);
-			goto error;
-		}
+	rc = dsi_panel_power_off(panel);
+	if (rc) {
+		pr_err("[%s] panel power_Off failed, rc=%d\n",
+		       panel->name, rc);
+		goto error;
 	}
 error:
 	mutex_unlock(&panel->panel_lock);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index f8b65ab..c0ecb7f 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -129,6 +129,7 @@
 
 struct drm_panel_esd_config {
 	bool esd_enabled;
+	bool cmd_channel;
 
 	enum esd_check_status_mode status_mode;
 	struct dsi_panel_cmd_set status_cmd;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
index 6a7a84c..3d6711f 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
@@ -107,7 +107,8 @@
 
 	phy->hw.base = ptr;
 
-	pr_debug("[%s] map dsi_phy registers to %p\n", phy->name, phy->hw.base);
+	pr_debug("[%s] map dsi_phy registers to %pK\n",
+		phy->name, phy->hw.base);
 
 	return rc;
 }
@@ -564,6 +565,8 @@
 	snprintf(dbg_name, DSI_DEBUG_NAME_LEN, "dsi%d_phy", dsi_phy->index);
 	sde_dbg_reg_register_base(dbg_name, dsi_phy->hw.base,
 				msm_iomap_size(dsi_phy->pdev, "dsi_phy"));
+	sde_dbg_reg_register_dump_range(dbg_name, dbg_name, 0,
+				msm_iomap_size(dsi_phy->pdev, "dsi_phy"), 0);
 	return 0;
 }
 
@@ -902,6 +905,26 @@
 }
 
 /**
+ * dsi_phy_set_clamp_state() - configure clamps for DSI lanes
+ * @phy:        DSI PHY handle.
+ * @enable:     boolean to specify clamp enable/disable.
+ *
+ * Return: error code.
+ */
+int dsi_phy_set_clamp_state(struct msm_dsi_phy *phy, bool enable)
+{
+	if (!phy)
+		return -EINVAL;
+
+	pr_debug("[%s] enable=%d\n", phy->name, enable);
+
+	if (phy->hw.ops.clamp_ctrl)
+		phy->hw.ops.clamp_ctrl(&phy->hw, enable);
+
+	return 0;
+}
+
+/**
  * dsi_phy_idle_ctrl() - enable/disable DSI PHY during idle screen
  * @phy:          DSI PHY handle
  * @enable:       boolean to specify PHY enable/disable.
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
index 56d5ee3..4163411 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
@@ -218,6 +218,15 @@
 int dsi_phy_idle_ctrl(struct msm_dsi_phy *phy, bool enable);
 
 /**
+ * dsi_phy_set_clamp_state() - configure clamps for DSI lanes
+ * @phy:        DSI PHY handle.
+ * @enable:     boolean to specify clamp enable/disable.
+ *
+ * Return: error code.
+ */
+int dsi_phy_set_clamp_state(struct msm_dsi_phy *phy, bool enable);
+
+/**
  * dsi_phy_set_clk_freq() - set DSI PHY clock frequency setting
  * @phy:          DSI PHY handle
  * @clk_freq:     link clock frequency
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
index e31899d4..d24a613 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
@@ -234,6 +234,14 @@
 				u32 *timing, u32 size);
 
 	/**
+	 * clamp_ctrl() - configure clamps for DSI lanes
+	 * @phy:        DSI PHY handle.
+	 * @enable:     boolean to specify clamp enable/disable.
+	 * Return:    none.
+	 */
+	void (*clamp_ctrl)(struct dsi_phy_hw *phy, bool enable);
+
+	/**
 	 * phy_lane_reset() - Reset dsi phy lanes in case of error.
 	 * @phy:      Pointer to DSI PHY hardware object.
 	 * Return:    error code.
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c
index b078231..5015806 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c
@@ -196,10 +196,31 @@
 		DSI_W32(phy, DSIPHY_LNX_OFFSET_BOT_CTRL(i), 0x0);
 		DSI_W32(phy, DSIPHY_LNX_TX_DCTRL(i), tx_dctrl[i]);
 	}
+}
 
-	/* Toggle BIT 0 to release freeze I/0 */
-	DSI_W32(phy, DSIPHY_LNX_TX_DCTRL(3), 0x05);
-	DSI_W32(phy, DSIPHY_LNX_TX_DCTRL(3), 0x04);
+void dsi_phy_hw_v3_0_clamp_ctrl(struct dsi_phy_hw *phy, bool enable)
+{
+	u32 reg;
+
+	pr_debug("enable=%s\n", enable ? "true" : "false");
+
+	/*
+	 * DSI PHY lane clamps, also referred to as PHY FreezeIO, are
+	 * enabled by default as part of the initialization sequence.
+	 * They are triggered whenever the chip FreezeIO is asserted.
+	 */
+	if (enable)
+		return;
+
+	/*
+	 * Toggle BIT 0 to explicitly release the PHY freeze I/O and disable
+	 * the clamps.
+	 */
+	reg = DSI_R32(phy, DSIPHY_LNX_TX_DCTRL(3));
+	DSI_W32(phy, DSIPHY_LNX_TX_DCTRL(3), reg | BIT(0));
+	wmb(); /* Ensure that the freezeio bit is toggled */
+	DSI_W32(phy, DSIPHY_LNX_TX_DCTRL(3), reg & ~BIT(0));
+	wmb(); /* Ensure that the freezeio bit is toggled */
 }
 
 /**
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index ec572f8..e457322 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, 2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -169,6 +169,7 @@
 	.driver = {
 		.name = "msm_dsi",
 		.of_match_table = dt_match,
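+		/* disallow driver bind/unbind through sysfs */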
+		.suppress_bind_attrs = true,
 	},
 };
 
diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c
index 0940e84..3a0f180 100644
--- a/drivers/gpu/drm/msm/edp/edp.c
+++ b/drivers/gpu/drm/msm/edp/edp.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015,2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -54,7 +54,7 @@
 		ret = -ENOMEM;
 		goto fail;
 	}
-	DBG("eDP probed=%p", edp);
+	DBG("eDP probed=%pK", edp);
 
 	edp->pdev = pdev;
 	platform_set_drvdata(pdev, edp);
@@ -128,6 +128,7 @@
 	.driver = {
 		.name = "msm_edp",
 		.of_match_table = dt_match,
+		.suppress_bind_attrs = true,
 	},
 };
 
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 6279084..b8f5469 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016, 2018 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -646,6 +646,7 @@
 	.driver = {
 		.name = "hdmi_msm",
 		.of_match_table = msm_hdmi_dt_match,
+		.suppress_bind_attrs = true,
 	},
 };
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index f05d760..46b60b1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016-2018 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -827,6 +827,7 @@
 	.driver = {
 		.name = "msm_mdp",
 		.of_match_table = mdp5_dt_match,
+		.suppress_bind_attrs = true,
 	},
 };
 
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 95bdc36..e445098 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2014 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -25,6 +25,8 @@
 #include "msm_fence.h"
 #include "sde_trace.h"
 
+#define MULTIPLE_CONN_DETECTED(x) (x > 1)
+
 struct msm_commit {
 	struct drm_device *dev;
 	struct drm_atomic_state *state;
@@ -111,6 +113,66 @@
 		kfree(c);
 }
 
+static inline bool _msm_seamless_for_crtc(struct drm_atomic_state *state,
+			struct drm_crtc_state *crtc_state, bool enable)
+{
+	struct drm_connector *connector = NULL;
+	struct drm_connector_state  *conn_state = NULL;
+	int i = 0;
+	int conn_cnt = 0;
+
+	if (msm_is_mode_seamless(&crtc_state->mode) ||
+		msm_is_mode_seamless_vrr(&crtc_state->adjusted_mode))
+		return true;
+
+	if (msm_is_mode_seamless_dms(&crtc_state->adjusted_mode) && !enable)
+		return true;
+
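+	/*
+	 * Only the attached connector set changed for this crtc; if more
+	 * than one connector maps to it, treat the transition as seamless.
+	 */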
+	if (!crtc_state->mode_changed && crtc_state->connectors_changed) {
+		for_each_connector_in_state(state, connector, conn_state, i) {
+			if ((conn_state->crtc == crtc_state->crtc) ||
+					(connector->state->crtc ==
+					 crtc_state->crtc))
+				conn_cnt++;
+
+			if (MULTIPLE_CONN_DETECTED(conn_cnt))
+				return true;
+		}
+	}
+
+	return false;
+}
+
+static inline bool _msm_seamless_for_conn(struct drm_connector *connector,
+		struct drm_connector_state *old_conn_state, bool enable)
+{
+	if (!old_conn_state || !old_conn_state->crtc)
+		return false;
+
+	if (!old_conn_state->crtc->state->mode_changed &&
+			!old_conn_state->crtc->state->active_changed &&
+			old_conn_state->crtc->state->connectors_changed) {
+		if (old_conn_state->crtc == connector->state->crtc)
+			return true;
+	}
+
+	if (enable)
+		return false;
+
+	if (msm_is_mode_seamless(&connector->encoder->crtc->state->mode))
+		return true;
+
+	if (msm_is_mode_seamless_vrr(
+			&connector->encoder->crtc->state->adjusted_mode))
+		return true;
+
+	if (msm_is_mode_seamless_dms(
+			&connector->encoder->crtc->state->adjusted_mode))
+		return true;
+
+	return false;
+}
+
 static void msm_atomic_wait_for_commit_done(
 		struct drm_device *dev,
 		struct drm_atomic_state *old_state)
@@ -174,14 +236,7 @@
 		if (WARN_ON(!encoder))
 			continue;
 
-		if (msm_is_mode_seamless(
-			&connector->encoder->crtc->state->mode) ||
-			msm_is_mode_seamless_vrr(
-			&connector->encoder->crtc->state->adjusted_mode))
-			continue;
-
-		if (msm_is_mode_seamless_dms(
-			&connector->encoder->crtc->state->adjusted_mode))
+		if (_msm_seamless_for_conn(connector, old_conn_state, false))
 			continue;
 
 		funcs = encoder->helper_private;
@@ -223,11 +278,7 @@
 		if (!old_crtc_state->active)
 			continue;
 
-		if (msm_is_mode_seamless(&crtc->state->mode) ||
-			msm_is_mode_seamless_vrr(&crtc->state->adjusted_mode))
-			continue;
-
-		if (msm_is_mode_seamless_dms(&crtc->state->adjusted_mode))
+		if (_msm_seamless_for_crtc(old_state, crtc->state, false))
 			continue;
 
 		funcs = crtc->helper_private;
@@ -286,8 +337,14 @@
 		mode = &new_crtc_state->mode;
 		adjusted_mode = &new_crtc_state->adjusted_mode;
 
-		if (!new_crtc_state->mode_changed)
+		if (!new_crtc_state->mode_changed &&
+				new_crtc_state->connectors_changed) {
+			if (_msm_seamless_for_conn(connector,
+					old_conn_state, false))
+				continue;
+		} else if (!new_crtc_state->mode_changed) {
 			continue;
+		}
 
 		DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n",
 				 encoder->base.id, encoder->name);
@@ -365,8 +422,7 @@
 		if (!crtc->state->active)
 			continue;
 
-		if (msm_is_mode_seamless(&crtc->state->mode) ||
-			msm_is_mode_seamless_vrr(&crtc->state->adjusted_mode))
+		if (_msm_seamless_for_crtc(old_state, crtc->state, true))
 			continue;
 
 		funcs = crtc->helper_private;
@@ -397,18 +453,24 @@
 				    connector->state->crtc->state))
 			continue;
 
+		if (_msm_seamless_for_conn(connector, old_conn_state, true))
+			continue;
+
 		encoder = connector->state->best_encoder;
 		funcs = encoder->helper_private;
 
 		DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n",
 				 encoder->base.id, encoder->name);
 
-		blank = MSM_DRM_BLANK_UNBLANK;
-		notifier_data.data = &blank;
-		notifier_data.id =
-			connector->state->crtc->index;
-		msm_drm_notifier_call_chain(MSM_DRM_EARLY_EVENT_BLANK,
+		if (connector->state->crtc->state->active_changed) {
+			blank = MSM_DRM_BLANK_UNBLANK;
+			notifier_data.data = &blank;
+			notifier_data.id =
+				connector->state->crtc->index;
+			DRM_DEBUG_ATOMIC("Notify early unblank\n");
+			msm_drm_notifier_call_chain(MSM_DRM_EARLY_EVENT_BLANK,
 					    &notifier_data);
+		}
 		/*
 		 * Each encoder has at most one connector (since we always steal
 		 * it away), so we won't call enable hooks twice.
@@ -444,14 +506,20 @@
 				    connector->state->crtc->state))
 			continue;
 
+		if (_msm_seamless_for_conn(connector, old_conn_state, true))
+			continue;
+
 		encoder = connector->state->best_encoder;
 
 		DRM_DEBUG_ATOMIC("bridge enable enabling [ENCODER:%d:%s]\n",
 				 encoder->base.id, encoder->name);
 
 		drm_bridge_enable(encoder->bridge);
-		msm_drm_notifier_call_chain(MSM_DRM_EVENT_BLANK,
+		if (connector->state->crtc->state->active_changed) {
+			DRM_DEBUG_ATOMIC("Notify unblank\n");
+			msm_drm_notifier_call_chain(MSM_DRM_EVENT_BLANK,
 					    &notifier_data);
+		}
 	}
 	SDE_ATRACE_END("msm_enable");
 }
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 0697db8..0f565d3 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -156,7 +156,8 @@
 	}
 
 	if (reglog)
-		printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);
+		dev_dbg(&pdev->dev, "IO:region %s %pK %08lx\n",
+			dbgname, ptr, size);
 
 	return ptr;
 }
@@ -187,7 +188,7 @@
 void msm_writel(u32 data, void __iomem *addr)
 {
 	if (reglog)
-		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
+		pr_debug("IO:W %pK %08x\n", addr, data);
 	writel(data, addr);
 }
 
@@ -196,7 +197,7 @@
 	u32 val = readl(addr);
 
 	if (reglog)
-		printk(KERN_ERR "IO:R %p %08x\n", addr, val);
+		pr_err("IO:R %pK %08x\n", addr, val);
 	return val;
 }
 
@@ -1024,7 +1025,7 @@
 
 	if (!kms)
 		return -ENXIO;
-	DBG("dev=%p, crtc=%u", dev, pipe);
+	DBG("dev=%pK, crtc=%u", dev, pipe);
 	return vblank_ctrl_queue_work(priv, pipe, true);
 }
 
@@ -1035,7 +1036,7 @@
 
 	if (!kms)
 		return;
-	DBG("dev=%p, crtc=%u", dev, pipe);
+	DBG("dev=%pK, crtc=%u", dev, pipe);
 	vblank_ctrl_queue_work(priv, pipe, false);
 }
 
@@ -1993,6 +1994,7 @@
 		.name   = "msm_drm",
 		.of_match_table = dt_match,
 		.pm     = &msm_pm_ops,
+		.suppress_bind_attrs = true,
 	},
 };
 
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index ce4197b..fcdddb3 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -155,6 +155,7 @@
 	CRTC_PROP_SECURITY_LEVEL,
 	CRTC_PROP_IDLE_TIMEOUT,
 	CRTC_PROP_DEST_SCALER,
+	CRTC_PROP_CAPTURE_OUTPUT,
 
 	CRTC_PROP_ENABLE_SUI_ENHANCEMENT,
 
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index e8bf244..a1c9d82 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -68,7 +68,7 @@
 	msm_fb = to_msm_framebuffer(fb);
 	n = drm_format_num_planes(fb->pixel_format);
 
-	DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
+	DBG("destroy: FB ID: %d (%pK)", fb->base.id, fb);
 
 	drm_framebuffer_cleanup(fb);
 
@@ -336,7 +336,7 @@
 	unsigned int hsub, vsub;
 	bool is_modified = false;
 
-	DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
+	DBG("create framebuffer: dev=%pK, mode_cmd=%pK (%dx%d@%4.4s)",
 			dev, mode_cmd, mode_cmd->width, mode_cmd->height,
 			(char *)&mode_cmd->pixel_format);
 
@@ -420,7 +420,7 @@
 		goto fail;
 	}
 
-	DBG("create: FB ID: %d (%p)", fb->base.id, fb);
+	DBG("create: FB ID: %d (%pK)", fb->base.id, fb);
 
 	return fb;
 
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index ffd4a33..5b886d0 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -142,7 +142,7 @@
 		goto fail_unlock;
 	}
 
-	DBG("fbi=%p, dev=%p", fbi, dev);
+	DBG("fbi=%pK, dev=%pK", fbi, dev);
 
 	fbdev->fb = fb;
 	helper->fb = fb;
@@ -167,7 +167,7 @@
 	fbi->fix.smem_start = paddr;
 	fbi->fix.smem_len = fbdev->bo->size;
 
-	DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
+	DBG("par=%pK, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
 	DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
 
 	mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 277b421..ddd4607 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -253,7 +253,7 @@
 
 	pfn = page_to_pfn(pages[pgoff]);
 
-	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+	VERB("Inserting %pK pfn %lx, pa %lx", vmf->virtual_address,
 			pfn, pfn << PAGE_SHIFT);
 
 	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
@@ -428,7 +428,7 @@
 
 	if (!ret && domain) {
 		*iova = domain->iova;
-		if (aspace && aspace->domain_attached)
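+		/* add to the aspace active list only if not already on it */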
+		if (aspace && !msm_obj->in_active_list)
 			msm_gem_add_obj_to_aspace_active_list(aspace, obj);
 	} else {
 		obj_remove_domain(domain);
@@ -799,7 +799,7 @@
 		break;
 	}
 
-	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n",
+	seq_printf(m, "%08x: %c %2d (%2d) %08llx %pK %zu%s\n",
 
 			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
 			obj->name, obj->refcount.refcount.counter,
@@ -968,6 +968,7 @@
 	INIT_LIST_HEAD(&msm_obj->domains);
 	INIT_LIST_HEAD(&msm_obj->iova_list);
 	msm_obj->aspace = NULL;
+	msm_obj->in_active_list = false;
 
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
 
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 8521bea..ba01ffb 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -124,6 +124,7 @@
 	struct list_head iova_list;
 
 	struct msm_gem_address_space *aspace;
+	bool in_active_list;
 };
 #define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
 
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index d02228a..e5b1cc37 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -71,6 +71,7 @@
 {
 	WARN_ON(!mutex_is_locked(&aspace->dev->struct_mutex));
 	list_move_tail(&msm_obj->iova_list, &aspace->active_list);
+	msm_obj->in_active_list = true;
 }
 
 static void smmu_aspace_remove_from_active(
@@ -84,6 +85,7 @@
 	list_for_each_entry_safe(msm_obj, next, &aspace->active_list,
 			iova_list) {
 		if (msm_obj == obj) {
+			msm_obj->in_active_list = false;
 			list_del(&msm_obj->iova_list);
 			break;
 		}
diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
index 85867b2..ccd5e20 100644
--- a/drivers/gpu/drm/msm/msm_smmu.c
+++ b/drivers/gpu/drm/msm/msm_smmu.c
@@ -636,6 +636,7 @@
 	.driver = {
 		.name = "msmdrm_smmu",
 		.of_match_table = msm_smmu_dt_match,
+		.suppress_bind_attrs = true,
 	},
 };
 
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 15b5465..ab1d86b 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -1236,7 +1236,7 @@
 }
 
 void sde_connector_complete_commit(struct drm_connector *connector,
-		ktime_t ts)
+		ktime_t ts, enum sde_fence_event fence_event)
 {
 	if (!connector) {
 		SDE_ERROR("invalid connector\n");
@@ -1244,7 +1244,8 @@
 	}
 
 	/* signal connector's retire fence */
-	sde_fence_signal(&to_sde_connector(connector)->retire_fence, ts, false);
+	sde_fence_signal(&to_sde_connector(connector)->retire_fence,
+			ts, fence_event);
 }
 
 void sde_connector_commit_reset(struct drm_connector *connector, ktime_t ts)
@@ -1255,7 +1256,8 @@
 	}
 
 	/* signal connector's retire fence */
-	sde_fence_signal(&to_sde_connector(connector)->retire_fence, ts, true);
+	sde_fence_signal(&to_sde_connector(connector)->retire_fence,
+			ts, SDE_FENCE_RESET_TIMELINE);
 }
 
 static void sde_connector_update_hdr_props(struct drm_connector *connector)
@@ -1767,6 +1769,14 @@
 	if (!conn)
 		return;
 
+	/* Panel dead notification can come from:
+	 * 1) the ESD thread
+	 * 2) the commit thread (if TE stops coming)
+	 * In such a case, avoid sending the failure notification twice.
+	 */
+	if (conn->panel_dead)
+		return;
+
 	conn->panel_dead = true;
 	event.type = DRM_EVENT_PANEL_DEAD;
 	event.length = sizeof(bool);
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 0c24454..51dc92d 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -585,8 +585,10 @@
  * sde_connector_complete_commit - signal completion of current commit
  * @connector: Pointer to drm connector object
  * @ts: timestamp to be updated in the fence signalling
+ * @fence_event: enum value to indicate nature of fence event
  */
-void sde_connector_complete_commit(struct drm_connector *connector, ktime_t ts);
+void sde_connector_complete_commit(struct drm_connector *connector,
+		ktime_t ts, enum sde_fence_event fence_event);
 
 /**
  * sde_connector_commit_reset - reset the completion signal
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 7df1cc9..34a3d5f 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -79,6 +79,12 @@
 
 #define MISR_BUFF_SIZE			256
 
+/*
+ * Time period for fps calculation in microseconds.
+ * Default value is set to 1 sec.
+ */
+#define CRTC_TIME_PERIOD_CALC_FPS_US	1000000
+
 static inline struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc)
 {
 	struct msm_drm_private *priv;
@@ -125,6 +131,37 @@
 									enable);
 }
 
+/*
+ * sde_crtc_calc_fps() - Calculates fps value.
+ * @sde_crtc   : CRTC structure
+ *
+ * This function is called at frame done. It counts the number of
+ * frames completed in every one-second window and stores the result
+ * in measured_fps. measured_fps is 10 times the calculated fps value;
+ * for example, measured_fps = 594 corresponds to 59.4 fps.
+ */
+static void sde_crtc_calc_fps(struct sde_crtc *sde_crtc)
+{
+	ktime_t current_time_us;
+	u64 fps, diff_us;
+
+	current_time_us = ktime_get();
+	diff_us = (u64)ktime_us_delta(current_time_us,
+			sde_crtc->fps_info.last_sampled_time_us);
+	sde_crtc->fps_info.frame_count++;
+
+	if (diff_us >= CRTC_TIME_PERIOD_CALC_FPS_US) {
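+		/*
+		 * frames per microsecond scaled by 10^7 (rather than 10^6)
+		 * yields the fps value multiplied by 10, preserving one
+		 * decimal digit in measured_fps.
+		 */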
+		fps = ((u64)sde_crtc->fps_info.frame_count) * 10000000;
+		do_div(fps, diff_us);
+		sde_crtc->fps_info.measured_fps = (unsigned int)fps;
+		SDE_DEBUG(" FPS for crtc%d is %d.%d\n",
+				sde_crtc->base.base.id, (unsigned int)fps/10,
+				(unsigned int)fps%10);
+		sde_crtc->fps_info.last_sampled_time_us = current_time_us;
+		sde_crtc->fps_info.frame_count = 0;
+	}
+}
+
 /**
  * _sde_crtc_rp_to_crtc - get crtc from resource pool object
  * @rp: Pointer to resource pool
@@ -601,6 +638,50 @@
 		return;
 }
 
+static int _sde_debugfs_fps_status_show(struct seq_file *s, void *data)
+{
+	struct sde_crtc *sde_crtc;
+	unsigned int fps_int, fps_float;
+	ktime_t current_time_us;
+	u64 fps, diff_us;
+
+	if (!s || !s->private) {
+		SDE_ERROR("invalid input param(s)\n");
+		return -EAGAIN;
+	}
+
+	sde_crtc = s->private;
+
+	current_time_us = ktime_get();
+	diff_us = (u64)ktime_us_delta(current_time_us,
+			sde_crtc->fps_info.last_sampled_time_us);
+
+	if (diff_us >= CRTC_TIME_PERIOD_CALC_FPS_US) {
+		fps = ((u64)sde_crtc->fps_info.frame_count) * 10000000;
+		do_div(fps, diff_us);
+		sde_crtc->fps_info.measured_fps = (unsigned int)fps;
+		sde_crtc->fps_info.last_sampled_time_us = current_time_us;
+		sde_crtc->fps_info.frame_count = 0;
+		SDE_DEBUG("Measured FPS for crtc%d is %d.%d\n",
+				sde_crtc->base.base.id, (unsigned int)fps/10,
+				(unsigned int)fps%10);
+	}
+
+	fps_int = (unsigned int) sde_crtc->fps_info.measured_fps;
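+	/* do_div() leaves the quotient in fps_int and returns the remainder */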
+	fps_float = do_div(fps_int, 10);
+
+	seq_printf(s, "fps: %d.%d\n", fps_int, fps_float);
+
+	return 0;
+}
+
+
+static int _sde_debugfs_fps_status(struct inode *inode, struct file *file)
+{
+	return single_open(file, _sde_debugfs_fps_status_show,
+			inode->i_private);
+}
+
 static ssize_t vsync_event_show(struct device *device,
 	struct device_attribute *attr, char *buf)
 {
@@ -936,7 +1017,9 @@
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *crtc_state;
 	struct sde_rect *crtc_roi;
-	int i, num_attached_conns = 0;
+	struct msm_mode_info mode_info;
+	int i = 0;
+	int rc;
 	bool is_crtc_roi_dirty;
 	bool is_any_conn_roi_dirty;
 
@@ -958,13 +1041,14 @@
 		if (!conn_state || conn_state->crtc != crtc)
 			continue;
 
-		if (num_attached_conns) {
-			SDE_ERROR(
-				"crtc%d: unsupported: roi on crtc w/ >1 connectors\n",
-					DRMID(crtc));
+		rc = sde_connector_get_mode_info(conn_state, &mode_info);
+		if (rc) {
+			SDE_ERROR("failed to get mode info\n");
 			return -EINVAL;
 		}
-		++num_attached_conns;
+
+		if (!mode_info.roi_caps.enabled)
+			continue;
 
 		sde_conn = to_sde_connector(conn_state->connector);
 		sde_conn_state = to_sde_connector_state(conn_state);
@@ -1285,13 +1369,6 @@
 	sde_crtc = to_sde_crtc(crtc);
 	sde_crtc_state = to_sde_crtc_state(state);
 
-	if (hweight_long(state->connector_mask) != 1) {
-		SDE_ERROR("invalid connector count(%d) for crtc: %d\n",
-			(int)hweight_long(state->connector_mask),
-			crtc->base.id);
-		return -EINVAL;
-	}
-
 	/*
 	 * check connector array cached at modeset time since incoming atomic
 	 * state may not include any connectors if they aren't modified
@@ -1307,42 +1384,41 @@
 			SDE_ERROR("failed to get mode info\n");
 			return -EINVAL;
 		}
-		break;
-	}
 
-	if (!mode_info.roi_caps.enabled)
-		return 0;
+		if (!mode_info.roi_caps.enabled)
+			continue;
 
-	if (sde_crtc_state->user_roi_list.num_rects >
-					mode_info.roi_caps.num_roi) {
-		SDE_ERROR("roi count is more than supported limit, %d > %d\n",
-				sde_crtc_state->user_roi_list.num_rects,
-				mode_info.roi_caps.num_roi);
-		return -E2BIG;
-	}
+		if (sde_crtc_state->user_roi_list.num_rects >
+				mode_info.roi_caps.num_roi) {
+			SDE_ERROR("roi count is exceeding limit, %d > %d\n",
+					sde_crtc_state->user_roi_list.num_rects,
+					mode_info.roi_caps.num_roi);
+			return -E2BIG;
+		}
 
-	rc = _sde_crtc_set_crtc_roi(crtc, state);
-	if (rc)
-		return rc;
+		rc = _sde_crtc_set_crtc_roi(crtc, state);
+		if (rc)
+			return rc;
 
-	rc = _sde_crtc_check_autorefresh(crtc, state);
-	if (rc)
-		return rc;
+		rc = _sde_crtc_check_autorefresh(crtc, state);
+		if (rc)
+			return rc;
 
-	for (lm_idx = 0; lm_idx < sde_crtc->num_mixers; lm_idx++) {
-		rc = _sde_crtc_set_lm_roi(crtc, state, lm_idx);
+		for (lm_idx = 0; lm_idx < sde_crtc->num_mixers; lm_idx++) {
+			rc = _sde_crtc_set_lm_roi(crtc, state, lm_idx);
+			if (rc)
+				return rc;
+		}
+
+		rc = _sde_crtc_check_rois_centered_and_symmetric(crtc, state);
+		if (rc)
+			return rc;
+
+		rc = _sde_crtc_check_planes_within_crtc_roi(crtc, state);
 		if (rc)
 			return rc;
 	}
 
-	rc = _sde_crtc_check_rois_centered_and_symmetric(crtc, state);
-	if (rc)
-		return rc;
-
-	rc = _sde_crtc_check_planes_within_crtc_roi(crtc, state);
-	if (rc)
-		return rc;
-
 	return 0;
 }
 
@@ -2116,15 +2192,62 @@
 	}
 }
 
+static void sde_crtc_frame_event_cb(void *data, u32 event)
+{
+	struct drm_crtc *crtc;
+	struct sde_crtc *sde_crtc;
+	struct msm_drm_private *priv;
+	struct sde_crtc_frame_event *fevent;
+	struct sde_crtc_frame_event_cb_data *cb_data;
+	unsigned long flags;
+	u32 crtc_id;
+
+	cb_data = (struct sde_crtc_frame_event_cb_data *)data;
+	if (!data) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+
+	crtc = cb_data->crtc;
+	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+	sde_crtc = to_sde_crtc(crtc);
+	priv = crtc->dev->dev_private;
+	crtc_id = drm_crtc_index(crtc);
+
+	SDE_DEBUG("crtc%d\n", crtc->base.id);
+	SDE_EVT32_VERBOSE(DRMID(crtc), event);
+
+	spin_lock_irqsave(&sde_crtc->spin_lock, flags);
+	fevent = list_first_entry_or_null(&sde_crtc->frame_event_list,
+			struct sde_crtc_frame_event, list);
+	if (fevent)
+		list_del_init(&fevent->list);
+	spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
+
+	if (!fevent) {
+		SDE_ERROR("crtc%d event %d overflow\n",
+				crtc->base.id, event);
+		SDE_EVT32(DRMID(crtc), event);
+		return;
+	}
+
+	fevent->event = event;
+	fevent->crtc = crtc;
+	fevent->connector = cb_data->connector;
+	fevent->ts = ktime_get();
+	kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
+}
+
 void sde_crtc_prepare_commit(struct drm_crtc *crtc,
 		struct drm_crtc_state *old_state)
 {
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *cstate;
 	struct drm_connector *conn;
-	struct sde_crtc_retire_event *retire_event = NULL;
-	unsigned long flags;
-	int i;
+	struct drm_encoder *encoder;
 
 	if (!crtc || !crtc->state) {
 		SDE_ERROR("invalid crtc\n");
@@ -2141,31 +2264,17 @@
 	drm_for_each_connector(conn, crtc->dev)
 		if (conn->state && conn->state->crtc == crtc &&
 				cstate->num_connectors < MAX_CONNECTORS) {
+			encoder = conn->state->best_encoder;
+			if (encoder)
+				sde_encoder_register_frame_event_callback(
+						encoder,
+						sde_crtc_frame_event_cb,
+						crtc);
+
 			cstate->connectors[cstate->num_connectors++] = conn;
 			sde_connector_prepare_fence(conn);
 		}
 
-	for (i = 0; i < SDE_CRTC_FRAME_EVENT_SIZE; i++) {
-		retire_event = &sde_crtc->retire_events[i];
-		if (list_empty(&retire_event->list))
-			break;
-		retire_event = NULL;
-	}
-
-	if (retire_event) {
-		retire_event->num_connectors = cstate->num_connectors;
-		for (i = 0; i < cstate->num_connectors; i++)
-			retire_event->connectors[i] = cstate->connectors[i];
-
-		spin_lock_irqsave(&sde_crtc->spin_lock, flags);
-		list_add_tail(&retire_event->list,
-						&sde_crtc->retire_event_list);
-		spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
-	} else {
-		SDE_ERROR("crtc%d retire event overflow\n", crtc->base.id);
-		SDE_EVT32(DRMID(crtc), SDE_EVTLOG_ERROR);
-	}
-
 	/* prepare main output fence */
 	sde_fence_prepare(&sde_crtc->output_fence);
 }
@@ -2216,9 +2325,16 @@
 		return INTF_MODE_NONE;
 	}
 
-	drm_for_each_encoder(encoder, crtc->dev)
-		if (encoder->crtc == crtc)
-			return sde_encoder_get_intf_mode(encoder);
+	drm_for_each_encoder(encoder, crtc->dev) {
+		if (encoder->crtc != crtc)
+			continue;
+
+		/* continue if copy encoder is encountered */
+		if (sde_encoder_in_clone_mode(encoder))
+			continue;
+
+		return sde_encoder_get_intf_mode(encoder);
+	}
 
 	return INTF_MODE_NONE;
 }
@@ -2243,38 +2359,16 @@
 	SDE_EVT32_VERBOSE(DRMID(crtc));
 }
 
-static void _sde_crtc_retire_event(struct drm_crtc *crtc, ktime_t ts)
+static void _sde_crtc_retire_event(struct drm_connector *connector,
+		ktime_t ts, enum sde_fence_event fence_event)
 {
-	struct sde_crtc_retire_event *retire_event;
-	struct sde_crtc *sde_crtc;
-	unsigned long flags;
-	int i;
-
-	if (!crtc) {
+	if (!connector) {
 		SDE_ERROR("invalid param\n");
 		return;
 	}
 
-	sde_crtc = to_sde_crtc(crtc);
-	spin_lock_irqsave(&sde_crtc->spin_lock, flags);
-	retire_event = list_first_entry_or_null(&sde_crtc->retire_event_list,
-				struct sde_crtc_retire_event, list);
-	if (retire_event)
-		list_del_init(&retire_event->list);
-	spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
-
-	if (!retire_event) {
-		SDE_ERROR("crtc%d retire event without kickoff\n",
-								crtc->base.id);
-		SDE_EVT32(DRMID(crtc), SDE_EVTLOG_ERROR);
-		return;
-	}
-
 	SDE_ATRACE_BEGIN("signal_retire_fence");
-	for (i = 0; (i < retire_event->num_connectors) &&
-					retire_event->connectors[i]; ++i)
-		sde_connector_complete_commit(
-					retire_event->connectors[i], ts);
+	sde_connector_complete_commit(connector, ts, fence_event);
 	SDE_ATRACE_END("signal_retire_fence");
 }
 
@@ -2286,7 +2380,7 @@
 	struct sde_crtc *sde_crtc;
 	struct sde_kms *sde_kms;
 	unsigned long flags;
-	bool frame_done = false;
+	bool in_clone_mode = false;
 
 	if (!work) {
 		SDE_ERROR("invalid work handle\n");
@@ -2315,10 +2409,11 @@
 
 	SDE_EVT32_VERBOSE(DRMID(crtc), fevent->event, SDE_EVTLOG_FUNC_ENTRY);
 
-	if (fevent->event & (SDE_ENCODER_FRAME_EVENT_DONE
-				| SDE_ENCODER_FRAME_EVENT_ERROR
-				| SDE_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
+	in_clone_mode = sde_encoder_in_clone_mode(fevent->connector->encoder);
 
+	if (!in_clone_mode && (fevent->event & (SDE_ENCODER_FRAME_EVENT_ERROR
+					| SDE_ENCODER_FRAME_EVENT_PANEL_DEAD
+					| SDE_ENCODER_FRAME_EVENT_DONE))) {
 		if (atomic_read(&sde_crtc->frame_pending) < 1) {
 			/* this should not happen */
 			SDE_ERROR("crtc%d ts:%lld invalid frame_pending:%d\n",
@@ -2339,75 +2434,32 @@
 			SDE_EVT32_VERBOSE(DRMID(crtc), fevent->event,
 							SDE_EVTLOG_FUNC_CASE3);
 		}
-
-		if (fevent->event & (SDE_ENCODER_FRAME_EVENT_DONE
-					| SDE_ENCODER_FRAME_EVENT_ERROR))
-			frame_done = true;
 	}
 
 	if (fevent->event & SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE) {
 		SDE_ATRACE_BEGIN("signal_release_fence");
-		sde_fence_signal(&sde_crtc->output_fence, fevent->ts, false);
+		sde_fence_signal(&sde_crtc->output_fence, fevent->ts,
+				(fevent->event & SDE_ENCODER_FRAME_EVENT_ERROR)
+				? SDE_FENCE_SIGNAL_ERROR : SDE_FENCE_SIGNAL);
 		SDE_ATRACE_END("signal_release_fence");
 	}
 
 	if (fevent->event & SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE)
 		/* this api should be called without spin_lock */
-		_sde_crtc_retire_event(crtc, fevent->ts);
+		_sde_crtc_retire_event(fevent->connector, fevent->ts,
+				(fevent->event & SDE_ENCODER_FRAME_EVENT_ERROR)
+				? SDE_FENCE_SIGNAL_ERROR : SDE_FENCE_SIGNAL);
 
 	if (fevent->event & SDE_ENCODER_FRAME_EVENT_PANEL_DEAD)
 		SDE_ERROR("crtc%d ts:%lld received panel dead event\n",
 				crtc->base.id, ktime_to_ns(fevent->ts));
 
-	if (frame_done)
-		complete_all(&sde_crtc->frame_done_comp);
-
 	spin_lock_irqsave(&sde_crtc->spin_lock, flags);
 	list_add_tail(&fevent->list, &sde_crtc->frame_event_list);
 	spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
 	SDE_ATRACE_END("crtc_frame_event");
 }
 
-static void sde_crtc_frame_event_cb(void *data, u32 event)
-{
-	struct drm_crtc *crtc = (struct drm_crtc *)data;
-	struct sde_crtc *sde_crtc;
-	struct msm_drm_private *priv;
-	struct sde_crtc_frame_event *fevent;
-	unsigned long flags;
-	u32 crtc_id;
-
-	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
-		SDE_ERROR("invalid parameters\n");
-		return;
-	}
-	sde_crtc = to_sde_crtc(crtc);
-	priv = crtc->dev->dev_private;
-	crtc_id = drm_crtc_index(crtc);
-
-	SDE_DEBUG("crtc%d\n", crtc->base.id);
-	SDE_EVT32_VERBOSE(DRMID(crtc), event);
-
-	spin_lock_irqsave(&sde_crtc->spin_lock, flags);
-	fevent = list_first_entry_or_null(&sde_crtc->frame_event_list,
-			struct sde_crtc_frame_event, list);
-	if (fevent)
-		list_del_init(&fevent->list);
-	spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
-
-	if (!fevent) {
-		SDE_ERROR("crtc%d event %d overflow\n",
-				crtc->base.id, event);
-		SDE_EVT32(DRMID(crtc), event);
-		return;
-	}
-
-	fevent->event = event;
-	fevent->crtc = crtc;
-	fevent->ts = ktime_get();
-	kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
-}
-
 void sde_crtc_complete_commit(struct drm_crtc *crtc,
 		struct drm_crtc_state *old_state)
 {
@@ -2977,6 +3029,10 @@
 		if (enc->crtc != crtc)
 			continue;
 
+		/* avoid overwriting mixers info from a copy encoder */
+		if (sde_encoder_in_clone_mode(enc))
+			continue;
+
 		_sde_crtc_setup_mixer_for_encoder(crtc, enc);
 	}
 
@@ -3277,10 +3333,10 @@
 			&cstate->property_state);
 }
 
-static int _sde_crtc_wait_for_frame_done(struct drm_crtc *crtc)
+static int _sde_crtc_flush_event_thread(struct drm_crtc *crtc)
 {
 	struct sde_crtc *sde_crtc;
-	int ret, rc = 0, i;
+	int i;
 
 	if (!crtc) {
 		SDE_ERROR("invalid argument\n");
@@ -3304,17 +3360,9 @@
 			kthread_flush_work(&sde_crtc->frame_events[i].work);
 	}
 
-	ret = wait_for_completion_timeout(&sde_crtc->frame_done_comp,
-			msecs_to_jiffies(SDE_FRAME_DONE_TIMEOUT));
-	if (!ret) {
-		SDE_ERROR("frame done completion wait timed out, ret:%d\n",
-				ret);
-		SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FATAL);
-		rc = -ETIMEDOUT;
-	}
 	SDE_EVT32_VERBOSE(DRMID(crtc), SDE_EVTLOG_FUNC_EXIT);
 
-	return rc;
+	return 0;
 }
 
 static int _sde_crtc_commit_kickoff_rot(struct drm_crtc *crtc,
@@ -3641,7 +3689,6 @@
 	struct sde_kms *sde_kms;
 	struct sde_crtc_state *cstate;
 	bool is_error, reset_req;
-	int ret;
 
 	if (!crtc) {
 		SDE_ERROR("invalid argument\n");
@@ -3700,20 +3747,10 @@
 	}
 	sde_crtc->reset_request = reset_req;
 
-	/* wait for frame_event_done completion */
-	SDE_ATRACE_BEGIN("wait_for_frame_done_event");
-	ret = _sde_crtc_wait_for_frame_done(crtc);
-	SDE_ATRACE_END("wait_for_frame_done_event");
-	if (ret) {
-		SDE_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
-				crtc->base.id,
-				atomic_read(&sde_crtc->frame_pending));
-
-		is_error = true;
-
-		/* force offline rotation mode since the commit has no pipes */
-		cstate->sbuf_cfg.rot_op_mode = SDE_CTL_ROT_OP_MODE_OFFLINE;
-	}
+	SDE_ATRACE_BEGIN("flush_event_thread");
+	_sde_crtc_flush_event_thread(crtc);
+	SDE_ATRACE_END("flush_event_thread");
+	sde_crtc_calc_fps(sde_crtc);
 
 	if (atomic_inc_return(&sde_crtc->frame_pending) == 1) {
 		/* acquire bandwidth and other resources */
@@ -3752,7 +3789,6 @@
 		sde_encoder_kickoff(encoder, false);
 	}
 
-	reinit_completion(&sde_crtc->frame_done_comp);
 	SDE_ATRACE_END("crtc_commit");
 	return;
 }
@@ -4153,11 +4189,7 @@
 	if (cstate->num_ds_enabled)
 		sde_crtc->ds_reconfig = true;
 
-	/* wait for frame_event_done completion */
-	if (_sde_crtc_wait_for_frame_done(crtc))
-		SDE_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
-				crtc->base.id,
-				atomic_read(&sde_crtc->frame_pending));
+	_sde_crtc_flush_event_thread(crtc);
 
 	SDE_EVT32(DRMID(crtc), sde_crtc->enabled, sde_crtc->suspend,
 			sde_crtc->vblank_requested,
@@ -4172,6 +4204,8 @@
 	sde_crtc->enabled = false;
 
 	if (atomic_read(&sde_crtc->frame_pending)) {
+		SDE_ERROR("crtc%d frame_pending%d\n", crtc->base.id,
+				atomic_read(&sde_crtc->frame_pending));
 		SDE_EVT32(DRMID(crtc), atomic_read(&sde_crtc->frame_pending),
 							SDE_EVTLOG_FUNC_CASE2);
 		sde_core_perf_crtc_release_bw(crtc);
@@ -4211,7 +4245,8 @@
 	 * reset the fence timeline if crtc will not be enabled for this commit
 	 */
 	if (!crtc->state->active || !crtc->state->enable) {
-		sde_fence_signal(&sde_crtc->output_fence, ktime_get(), true);
+		sde_fence_signal(&sde_crtc->output_fence,
+				ktime_get(), SDE_FENCE_RESET_TIMELINE);
 		for (i = 0; i < cstate->num_connectors; ++i)
 			sde_connector_commit_reset(cstate->connectors[i],
 					ktime_get());
@@ -4276,7 +4311,7 @@
 		if (encoder->crtc != crtc)
 			continue;
 		sde_encoder_register_frame_event_callback(encoder,
-				sde_crtc_frame_event_cb, (void *)crtc);
+				sde_crtc_frame_event_cb, crtc);
 	}
 
 	if (!sde_crtc->enabled && !sde_crtc->suspend &&
@@ -4664,8 +4699,6 @@
 
 	for (i = 1; i < SSPP_MAX; i++) {
 		if (pipe_staged[i]) {
-			sde_plane_clear_multirect(pipe_staged[i]);
-
 			if (is_sde_plane_virtual(pipe_staged[i]->plane)) {
 				SDE_ERROR(
 					"r1 only virt plane:%d not supported\n",
@@ -4673,6 +4706,7 @@
 				rc  = -EINVAL;
 				goto end;
 			}
+			sde_plane_clear_multirect(pipe_staged[i]);
 		}
 	}
 
@@ -4864,7 +4898,7 @@
 {
 	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
 
-	SDE_DEBUG("%s: cancel: %p\n", sde_crtc->name, file);
+	SDE_DEBUG("%s: cancel: %pK\n", sde_crtc->name, file);
 	_sde_crtc_complete_flip(crtc, file);
 }
 
@@ -4935,6 +4969,11 @@
 		{SDE_DRM_SEC_ONLY, "sec_only"},
 	};
 
+	static const struct drm_prop_enum_list e_cwb_data_points[] = {
+		{CAPTURE_MIXER_OUT, "capture_mixer_out"},
+		{CAPTURE_DSPP_OUT, "capture_pp_out"},
+	};
+
 	SDE_DEBUG("\n");
 
 	if (!crtc || !catalog) {
@@ -5014,6 +5053,12 @@
 		"enable_sui_enhancement", 0, 0, U64_MAX, 0,
 		CRTC_PROP_ENABLE_SUI_ENHANCEMENT);
 
+	if (catalog->has_cwb_support)
+		msm_property_install_enum(&sde_crtc->property_info,
+				"capture_mode", 0, 0, e_cwb_data_points,
+				ARRAY_SIZE(e_cwb_data_points),
+				CRTC_PROP_CAPTURE_OUTPUT);
+
 	msm_property_install_blob(&sde_crtc->property_info, "capabilities",
 		DRM_MODE_PROP_IMMUTABLE, CRTC_PROP_INFO);
 
@@ -5697,6 +5742,73 @@
 }
 DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_crtc_debugfs_state);
 
+static int _sde_debugfs_fence_status_show(struct seq_file *s, void *data)
+{
+	struct drm_crtc *crtc;
+	struct drm_plane *plane;
+	struct drm_connector *conn;
+	struct drm_mode_object *drm_obj;
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+	struct sde_fence_context *ctx;
+
+	if (!s || !s->private)
+		return -EINVAL;
+
+	sde_crtc = s->private;
+	crtc = &sde_crtc->base;
+	cstate = to_sde_crtc_state(crtc->state);
+
+	/* Dump input fence info */
+	seq_puts(s, "===Input fence===\n");
+	drm_atomic_crtc_for_each_plane(plane, crtc) {
+		struct sde_plane_state *pstate;
+		struct fence *fence;
+
+		pstate = to_sde_plane_state(plane->state);
+		if (!pstate)
+			continue;
+
+		seq_printf(s, "plane:%u stage:%d\n", plane->base.id,
+			pstate->stage);
+
+		fence = pstate->input_fence;
+		if (fence)
+			sde_fence_list_dump(fence, &s);
+	}
+
+	/* Dump release fence info */
+	seq_puts(s, "\n");
+	seq_puts(s, "===Release fence===\n");
+	ctx = &sde_crtc->output_fence;
+	drm_obj = &crtc->base;
+	sde_debugfs_timeline_dump(ctx, drm_obj, &s);
+	seq_puts(s, "\n");
+
+	/* Dump retire fence info */
+	seq_puts(s, "===Retire fence===\n");
+	drm_for_each_connector(conn, crtc->dev)
+		if (conn->state && conn->state->crtc == crtc &&
+				cstate->num_connectors < MAX_CONNECTORS) {
+			struct sde_connector *c_conn;
+
+			c_conn = to_sde_connector(conn);
+			ctx = &c_conn->retire_fence;
+			drm_obj = &conn->base;
+			sde_debugfs_timeline_dump(ctx, drm_obj, &s);
+		}
+
+	seq_puts(s, "\n");
+
+	return 0;
+}
+
+static int _sde_debugfs_fence_status(struct inode *inode, struct file *file)
+{
+	return single_open(file, _sde_debugfs_fence_status_show,
+				inode->i_private);
+}
+
 static int _sde_crtc_init_debugfs(struct drm_crtc *crtc)
 {
 	struct sde_crtc *sde_crtc;
@@ -5713,6 +5825,14 @@
 		.read =		_sde_crtc_misr_read,
 		.write =	_sde_crtc_misr_setup,
 	};
+	static const struct file_operations debugfs_fence_fops = {
+		.open =		_sde_debugfs_fence_status,
+		.read =		seq_read,
+	};
+	static const struct file_operations debugfs_fps_fops = {
+		.open =		_sde_debugfs_fps_status,
+		.read =		seq_read,
+	};
 
 	if (!crtc)
 		return -EINVAL;
@@ -5737,6 +5857,10 @@
 			&sde_crtc_debugfs_state_fops);
 	debugfs_create_file("misr_data", 0600, sde_crtc->debugfs_root,
 					sde_crtc, &debugfs_misr_fops);
+	debugfs_create_file("fence_status", 0400, sde_crtc->debugfs_root,
+					sde_crtc, &debugfs_fence_fops);
+	debugfs_create_file("fps", 0400, sde_crtc->debugfs_root,
+					sde_crtc, &debugfs_fps_fops);
 
 	return 0;
 }
@@ -5887,10 +6011,6 @@
 		list_add_tail(&sde_crtc->event_cache[i].list,
 				&sde_crtc->event_free_list);
 
-	INIT_LIST_HEAD(&sde_crtc->retire_event_list);
-	for (i = 0; i < ARRAY_SIZE(sde_crtc->retire_events); i++)
-		INIT_LIST_HEAD(&sde_crtc->retire_events[i].list);
-
 	return rc;
 }
 
@@ -5944,7 +6064,6 @@
 	mutex_init(&sde_crtc->rp_lock);
 	INIT_LIST_HEAD(&sde_crtc->rp_head);
 
-	init_completion(&sde_crtc->frame_done_comp);
 	sde_crtc->enabled = false;
 
 	INIT_LIST_HEAD(&sde_crtc->frame_event_list);
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 116af84..99177b1 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -31,7 +31,8 @@
 #define SDE_CRTC_NAME_SIZE	12
 
 /* define the maximum number of in-flight frame events */
-#define SDE_CRTC_FRAME_EVENT_SIZE	4
+/* Expand it to 2x for safely handling at least 2 connectors */
+#define SDE_CRTC_FRAME_EVENT_SIZE	(4 * 2)
 
 /**
  * enum sde_crtc_client_type: crtc client type
@@ -48,6 +49,16 @@
 };
 
 /**
+ * enum sde_crtc_output_capture_point
+ * @CAPTURE_MIXER_OUT : capture mixer output
+ * @CAPTURE_DSPP_OUT : capture output of dspp
+ */
+enum sde_crtc_output_capture_point {
+	CAPTURE_MIXER_OUT,
+	CAPTURE_DSPP_OUT
+};
+
+/**
  * @connectors    : Currently associated drm connectors for retire event
  * @num_connectors: Number of associated drm connectors for retire event
  * @list:	event list
@@ -81,9 +92,20 @@
 };
 
 /**
+ * struct sde_crtc_frame_event_cb_data : drm objects tied to a frame event
+ * @crtc:       pointer to drm crtc object registered for frame event
+ * @connector:  pointer to drm connector which is source of frame event
+ */
+struct sde_crtc_frame_event_cb_data {
+	struct drm_crtc *crtc;
+	struct drm_connector *connector;
+};
+
+/**
  * struct sde_crtc_frame_event: stores crtc frame event for crtc processing
  * @work:	base work structure
  * @crtc:	Pointer to crtc handling this event
+ * @connector:  pointer to drm connector which is source of frame event
  * @list:	event list
  * @ts:		timestamp at queue entry
  * @event:	event identifier
@@ -91,6 +113,7 @@
 struct sde_crtc_frame_event {
 	struct kthread_work work;
 	struct drm_crtc *crtc;
+	struct drm_connector *connector;
 	struct list_head list;
 	ktime_t ts;
 	u32 event;
@@ -113,6 +136,12 @@
 	void *usr;
 };
 
+struct sde_crtc_fps_info {
+	u32 frame_count;
+	ktime_t last_sampled_time_us;
+	u32 measured_fps;
+};
+
 /*
  * Maximum number of free event structures to cache
  */
@@ -161,9 +190,6 @@
  * @frame_events  : static allocation of in-flight frame events
  * @frame_event_list : available frame event list
  * @spin_lock     : spin lock for frame event, transaction status, etc...
- * @retire_events  : static allocation of retire fence connector
- * @retire_event_list : available retire fence connector list
- * @frame_done_comp    : for frame_event_done synchronization
  * @event_thread  : Pointer to event handler thread
  * @event_worker  : Event worker queue
  * @event_cache   : Local cache of event worker structures
@@ -211,6 +237,7 @@
 	u64 play_count;
 	ktime_t vblank_cb_time;
 	ktime_t vblank_last_cb_time;
+	struct sde_crtc_fps_info fps_info;
 	struct device *sysfs_dev;
 	struct kernfs_node *vsync_event_sf;
 	bool vblank_requested;
@@ -234,9 +261,6 @@
 	struct sde_crtc_frame_event frame_events[SDE_CRTC_FRAME_EVENT_SIZE];
 	struct list_head frame_event_list;
 	spinlock_t spin_lock;
-	struct sde_crtc_retire_event retire_events[SDE_CRTC_FRAME_EVENT_SIZE];
-	struct list_head retire_event_list;
-	struct completion frame_done_comp;
 
 	/* for handling internal event thread */
 	struct sde_crtc_event event_cache[SDE_CRTC_MAX_EVENT_COUNT];
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index d287b71..73864b6 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -240,7 +240,7 @@
 	struct mutex enc_lock;
 	DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
 	void (*crtc_frame_event_cb)(void *, u32 event);
-	void *crtc_frame_event_cb_data;
+	struct sde_crtc_frame_event_cb_data crtc_frame_event_cb_data;
 
 	struct timer_list vsync_event_timer;
 
@@ -422,6 +422,14 @@
 	return false;
 }
 
+int sde_encoder_in_clone_mode(struct drm_encoder *drm_enc)
+{
+	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
+
+	return sde_enc && sde_enc->cur_master &&
+		sde_enc->cur_master->in_clone_mode;
+}
+
 static inline int _sde_encoder_power_enable(struct sde_encoder_virt *sde_enc,
 								bool enable)
 {
@@ -730,8 +738,8 @@
 	mutex_destroy(&sde_enc->enc_lock);
 
 	if (sde_enc->input_handler) {
-		input_unregister_handler(sde_enc->input_handler);
 		kfree(sde_enc->input_handler);
+		sde_enc->input_handler = NULL;
 	}
 
 	kfree(sde_enc);
@@ -1481,12 +1489,13 @@
 
 		vsync_cfg.pp_count = sde_enc->num_phys_encs;
 		vsync_cfg.frame_rate = mode_info.frame_rate;
+		vsync_cfg.vsync_source =
+			sde_enc->cur_master->hw_pp->caps->te_source;
 		if (is_dummy)
 			vsync_cfg.vsync_source = SDE_VSYNC_SOURCE_WD_TIMER_1;
 		else if (disp_info->is_te_using_watchdog_timer)
 			vsync_cfg.vsync_source = SDE_VSYNC_SOURCE_WD_TIMER_0;
-		else
-			vsync_cfg.vsync_source = SDE_VSYNC0_SOURCE_GPIO;
+
 		vsync_cfg.is_dummy = is_dummy;
 
 		hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
@@ -1592,11 +1601,16 @@
 	 * only primary command mode panel can request CMD state.
 	 * all other panels/displays can request for VID state including
 	 * secondary command mode panel.
+	 * Clone mode encoder can request CLK STATE only.
 	 */
-	rsc_state = enable ?
-		(((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) &&
-		  disp_info->is_primary) ? SDE_RSC_CMD_STATE :
-		SDE_RSC_VID_STATE) : SDE_RSC_IDLE_STATE;
+	if (sde_encoder_in_clone_mode(drm_enc))
+		rsc_state = enable ? SDE_RSC_CLK_STATE : SDE_RSC_IDLE_STATE;
+	else
+		rsc_state = enable ?
+			(((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
+			  && disp_info->is_primary) ? SDE_RSC_CMD_STATE :
+			 SDE_RSC_VID_STATE) : SDE_RSC_IDLE_STATE;
+
 	prefill_lines = config ? mode_info.prefill_lines +
 		config->inline_rotate_prefill : mode_info.prefill_lines;
 
@@ -1874,7 +1888,7 @@
 {
 	struct drm_encoder *drm_enc = NULL;
 	struct sde_encoder_virt *sde_enc = NULL;
-	struct msm_drm_thread *disp_thread = NULL;
+	struct msm_drm_thread *event_thread = NULL;
 	struct msm_drm_private *priv = NULL;
 
 	if (!handle || !handle->handler || !handle->handler->private) {
@@ -1891,7 +1905,7 @@
 	priv = drm_enc->dev->dev_private;
 	sde_enc = to_sde_encoder_virt(drm_enc);
 	if (!sde_enc->crtc || (sde_enc->crtc->index
-			>= ARRAY_SIZE(priv->disp_thread))) {
+			>= ARRAY_SIZE(priv->event_thread))) {
 		SDE_DEBUG_ENC(sde_enc,
 			"invalid cached CRTC: %d or crtc index: %d\n",
 			sde_enc->crtc == NULL,
@@ -1901,9 +1915,10 @@
 
 	SDE_EVT32_VERBOSE(DRMID(drm_enc));
 
-	disp_thread = &priv->disp_thread[sde_enc->crtc->index];
+	event_thread = &priv->event_thread[sde_enc->crtc->index];
 
-	kthread_queue_work(&disp_thread->worker,
+	/* Queue input event work to event thread */
+	kthread_queue_work(&event_thread->worker,
 				&sde_enc->input_event_work);
 }
 
@@ -2307,10 +2322,17 @@
 			_sde_encoder_resource_control_rsc_update(drm_enc, true);
 			_sde_encoder_resource_control_helper(drm_enc, true);
 
+			/*
+			 * In some cases the commit arrives with a slight
+			 * delay (> 80 ms) after an early wakeup. To avoid
+			 * jank in the next update, keep the clocks on by
+			 * increasing the command mode idle timeout enough
+			 * to cover such a case.
+			 */
 			kthread_mod_delayed_work(&disp_thread->worker,
-						&sde_enc->delayed_off_work,
-						msecs_to_jiffies(
-						IDLE_POWERCOLLAPSE_DURATION));
+					&sde_enc->delayed_off_work,
+					msecs_to_jiffies(
+					IDLE_POWERCOLLAPSE_IN_EARLY_WAKEUP));
 
 			sde_enc->rc_state = SDE_ENC_RC_STATE_ON;
 		}
@@ -2490,6 +2512,109 @@
 	}
 }
 
+static int _sde_encoder_input_connect(struct input_handler *handler,
+	struct input_dev *dev, const struct input_device_id *id)
+{
+	struct input_handle *handle;
+	int rc = 0;
+
+	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	handle->dev = dev;
+	handle->handler = handler;
+	handle->name = handler->name;
+
+	rc = input_register_handle(handle);
+	if (rc) {
+		pr_err("failed to register input handle\n");
+		goto error;
+	}
+
+	rc = input_open_device(handle);
+	if (rc) {
+		pr_err("failed to open input device\n");
+		goto error_unregister;
+	}
+
+	return 0;
+
+error_unregister:
+	input_unregister_handle(handle);
+
+error:
+	kfree(handle);
+
+	return rc;
+}
+
+static void _sde_encoder_input_disconnect(struct input_handle *handle)
+{
+	 input_close_device(handle);
+	 input_unregister_handle(handle);
+	 kfree(handle);
+}
+
+/**
+ * Structure for specifying event parameters on which to receive callbacks.
+ * This structure will trigger a callback in case of a touch event (specified by
+ * EV_ABS) where there is a change in X and Y coordinates.
+ */
+static const struct input_device_id sde_input_ids[] = {
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+		.evbit = { BIT_MASK(EV_ABS) },
+		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
+					BIT_MASK(ABS_MT_POSITION_X) |
+					BIT_MASK(ABS_MT_POSITION_Y) },
+	},
+	{ },
+};
+
+static int _sde_encoder_input_handler_register(
+		struct input_handler *input_handler)
+{
+	int rc = 0;
+
+	rc = input_register_handler(input_handler);
+	if (rc) {
+		pr_err("input_register_handler failed, rc= %d\n", rc);
+		kfree(input_handler);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int _sde_encoder_input_handler(
+		struct sde_encoder_virt *sde_enc)
+{
+	struct input_handler *input_handler = NULL;
+	int rc = 0;
+
+	if (sde_enc->input_handler) {
+		SDE_ERROR_ENC(sde_enc,
+				"input_handle is active. unexpected\n");
+		return -EINVAL;
+	}
+
+	input_handler = kzalloc(sizeof(*sde_enc->input_handler), GFP_KERNEL);
+	if (!input_handler)
+		return -ENOMEM;
+
+	input_handler->event = sde_encoder_input_event_handler;
+	input_handler->connect = _sde_encoder_input_connect;
+	input_handler->disconnect = _sde_encoder_input_disconnect;
+	input_handler->name = "sde";
+	input_handler->id_table = sde_input_ids;
+	input_handler->private = sde_enc;
+
+	sde_enc->input_handler = input_handler;
+
+	return rc;
+}
+
 static void _sde_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
 {
 	struct sde_encoder_virt *sde_enc = NULL;
@@ -2564,12 +2689,14 @@
 	struct msm_compression_info *comp_info = NULL;
 	struct drm_display_mode *cur_mode = NULL;
 	struct msm_mode_info mode_info;
+	struct msm_display_info *disp_info;
 
 	if (!drm_enc) {
 		SDE_ERROR("invalid encoder\n");
 		return;
 	}
 	sde_enc = to_sde_encoder_virt(drm_enc);
+	disp_info = &sde_enc->disp_info;
 
 	if (!sde_kms_power_resource_is_enabled(drm_enc->dev)) {
 		SDE_ERROR("power resource is not enabled\n");
@@ -2607,6 +2734,14 @@
 		return;
 	}
 
+	if (sde_enc->input_handler) {
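+		/* register the touch-input handler; it is unregistered on disable */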
+		ret = _sde_encoder_input_handler_register(
+				sde_enc->input_handler);
+		if (ret)
+			SDE_ERROR(
+			"input handler registration failed, rc = %d\n", ret);
+	}
+
 	ret = sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_KICKOFF);
 	if (ret) {
 		SDE_ERROR_ENC(sde_enc, "sde resource control failed: %d\n",
@@ -2685,6 +2820,11 @@
 	/* wait for idle */
 	sde_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
 
+	kthread_flush_work(&sde_enc->input_event_work);
+
+	if (sde_enc->input_handler)
+		input_unregister_handler(sde_enc->input_handler);
+
 	/*
 	 * For primary command mode encoders, execute the resource control
 	 * pre-stop operations before the physical encoders are disabled, to
@@ -2839,8 +2979,8 @@
 }
 
 void sde_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
-		void (*frame_event_cb)(void *, u32 event),
-		void *frame_event_cb_data)
+			void (*frame_event_cb)(void *, u32 event),
+			struct drm_crtc *crtc)
 {
 	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
 	unsigned long lock_flags;
@@ -2857,7 +2997,7 @@
 
 	spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
 	sde_enc->crtc_frame_event_cb = frame_event_cb;
-	sde_enc->crtc_frame_event_cb_data = frame_event_cb_data;
+	sde_enc->crtc_frame_event_cb_data.crtc = crtc;
 	spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
 }
 
@@ -2868,6 +3008,9 @@
 	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
 	unsigned int i;
 
+	sde_enc->crtc_frame_event_cb_data.connector =
+				sde_enc->cur_master->connector;
+
 	if (event & (SDE_ENCODER_FRAME_EVENT_DONE
 			| SDE_ENCODER_FRAME_EVENT_ERROR
 			| SDE_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
@@ -2896,13 +3039,13 @@
 
 			if (sde_enc->crtc_frame_event_cb)
 				sde_enc->crtc_frame_event_cb(
-					sde_enc->crtc_frame_event_cb_data,
+					&sde_enc->crtc_frame_event_cb_data,
 					event);
 		}
 	} else {
 		if (sde_enc->crtc_frame_event_cb)
 			sde_enc->crtc_frame_event_cb(
-				sde_enc->crtc_frame_event_cb_data, event);
+				&sde_enc->crtc_frame_event_cb_data, event);
 	}
 }
 
@@ -3011,6 +3154,10 @@
 		return;
 	}
 
+	/* avoid ctrl start for encoder in clone mode */
+	if (phys->in_clone_mode)
+		return;
+
 	ctl = phys->hw_ctl;
 	if (phys->split_role == ENC_ROLE_SKIP) {
 		SDE_DEBUG_ENC(to_sde_encoder_virt(phys->parent),
@@ -3606,102 +3753,6 @@
 			SDE_ENC_RC_EVENT_EARLY_WAKEUP);
 }
 
-static int _sde_encoder_input_connect(struct input_handler *handler,
-	struct input_dev *dev, const struct input_device_id *id)
-{
-	struct input_handle *handle;
-	int rc = 0;
-
-	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
-	if (!handle)
-		return -ENOMEM;
-
-	handle->dev = dev;
-	handle->handler = handler;
-	handle->name = handler->name;
-
-	rc = input_register_handle(handle);
-	if (rc) {
-		pr_err("failed to register input handle\n");
-		goto error;
-	}
-
-	rc = input_open_device(handle);
-	if (rc) {
-		pr_err("failed to open input device\n");
-		goto error_unregister;
-	}
-
-	return 0;
-
-error_unregister:
-	input_unregister_handle(handle);
-
-error:
-	kfree(handle);
-
-	return rc;
-}
-
-static void _sde_encoder_input_disconnect(struct input_handle *handle)
-{
-	 input_close_device(handle);
-	 input_unregister_handle(handle);
-	 kfree(handle);
-}
-
-/**
- * Structure for specifying event parameters on which to receive callbacks.
- * This structure will trigger a callback in case of a touch event (specified by
- * EV_ABS) where there is a change in X and Y coordinates,
- */
-static const struct input_device_id sde_input_ids[] = {
-	{
-		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
-		.evbit = { BIT_MASK(EV_ABS) },
-		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
-					BIT_MASK(ABS_MT_POSITION_X) |
-					BIT_MASK(ABS_MT_POSITION_Y) },
-	},
-	{ },
-};
-
-static int _sde_encoder_input_handler(
-		struct sde_encoder_virt *sde_enc)
-{
-	struct input_handler *input_handler = NULL;
-	int rc = 0;
-
-	if (sde_enc->input_handler) {
-		SDE_ERROR_ENC(sde_enc,
-				"input_handle is active. unexpected\n");
-		return -EINVAL;
-	}
-
-	input_handler = kzalloc(sizeof(*sde_enc->input_handler), GFP_KERNEL);
-	if (!input_handler)
-		return -ENOMEM;
-
-	input_handler->event = sde_encoder_input_event_handler;
-	input_handler->connect = _sde_encoder_input_connect;
-	input_handler->disconnect = _sde_encoder_input_disconnect;
-	input_handler->name = "sde";
-	input_handler->id_table = sde_input_ids;
-	input_handler->private = sde_enc;
-
-	rc = input_register_handler(input_handler);
-	if (rc) {
-		SDE_ERROR_ENC(sde_enc,
-			"input_register_handler failed, rc= %d\n", rc);
-		kfree(input_handler);
-		return rc;
-	}
-
-	sde_enc->input_handler = input_handler;
-
-	return rc;
-}
-
 static void sde_encoder_vsync_event_work_handler(struct kthread_work *work)
 {
 	struct sde_encoder_virt *sde_enc = container_of(work,
@@ -3959,6 +4010,7 @@
 	}
 
 	if (sde_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DSI &&
+			sde_enc->disp_info.is_primary &&
 			!_sde_encoder_wakeup_time(drm_enc, &wakeup_time)) {
 		SDE_EVT32_VERBOSE(ktime_to_ms(wakeup_time));
 		mod_timer(&sde_enc->vsync_event_timer,
@@ -4851,7 +4903,7 @@
 
 int sde_encoder_display_failure_notification(struct drm_encoder *enc)
 {
-	struct msm_drm_thread *disp_thread = NULL;
+	struct msm_drm_thread *event_thread = NULL;
 	struct msm_drm_private *priv = NULL;
 	struct sde_encoder_virt *sde_enc = NULL;
 
@@ -4863,7 +4915,7 @@
 	priv = enc->dev->dev_private;
 	sde_enc = to_sde_encoder_virt(enc);
 	if (!sde_enc->crtc || (sde_enc->crtc->index
-			>= ARRAY_SIZE(priv->disp_thread))) {
+			>= ARRAY_SIZE(priv->event_thread))) {
 		SDE_DEBUG_ENC(sde_enc,
 			"invalid cached CRTC: %d or crtc index: %d\n",
 			sde_enc->crtc == NULL,
@@ -4873,15 +4925,12 @@
 
 	SDE_EVT32_VERBOSE(DRMID(enc));
 
-	disp_thread = &priv->disp_thread[sde_enc->crtc->index];
-	if (current->tgid == disp_thread->thread->tgid) {
-		sde_encoder_resource_control(&sde_enc->base,
-					     SDE_ENC_RC_EVENT_KICKOFF);
-	} else {
-		kthread_queue_work(&disp_thread->worker,
-				   &sde_enc->esd_trigger_work);
-		kthread_flush_work(&sde_enc->esd_trigger_work);
-	}
+	event_thread = &priv->event_thread[sde_enc->crtc->index];
+
+	kthread_queue_work(&event_thread->worker,
+			   &sde_enc->esd_trigger_work);
+	kthread_flush_work(&sde_enc->esd_trigger_work);
+
 	/**
 	 * panel may stop generating te signal (vsync) during esd failure. rsc
 	 * hardware may hang without vsync. Avoid rsc hang by generating the
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index 8225dcd..42b9e58 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -31,6 +31,7 @@
 #define SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE	BIT(4)
 
 #define IDLE_POWERCOLLAPSE_DURATION	(66 - 16/2)
+#define IDLE_POWERCOLLAPSE_IN_EARLY_WAKEUP (200 - 16/2)
 
 /**
  * Encoder functions and data types
@@ -97,10 +98,10 @@
  *	will be called after the request is complete, or other events.
  * @encoder:	encoder pointer
  * @cb:		callback pointer, provide NULL to deregister
- * @data:	user data provided to callback
+ * @crtc:	pointer to drm_crtc object interested in frame events
  */
 void sde_encoder_register_frame_event_callback(struct drm_encoder *encoder,
-		void (*cb)(void *, u32), void *data);
+		void (*cb)(void *, u32), struct drm_crtc *crtc);
 
 /**
  * sde_encoder_get_rsc_client - gets the rsc client state for primary
@@ -248,4 +249,12 @@
  */
 int sde_encoder_display_failure_notification(struct drm_encoder *enc);
 
+/**
+ * sde_encoder_in_clone_mode - checks if underlying phys encoder is in clone
+ *	mode or independent display mode; see WB in concurrent writeback mode.
+ * @enc:        Pointer to drm encoder structure
+ * @Return:     true if the encoder is in clone (CWB) mode, false otherwise
+ */
+int sde_encoder_in_clone_mode(struct drm_encoder *enc);
+
 #endif /* __SDE_ENCODER_H__ */
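For context, a minimal sketch (not part of this patch) of how a CRTC-side caller might use the new sde_encoder_in_clone_mode() helper; the iteration and the example_ function name are illustrative only:

	/* Illustrative only: skip the clone-mode (CWB) writeback encoder,
	 * which is driven by the primary control path and must not be
	 * kicked off on its own.
	 */
	static void example_skip_cwb_encoders(struct drm_crtc *crtc)
	{
		struct drm_encoder *enc;

		drm_for_each_encoder(enc, crtc->dev) {
			if (enc->crtc != crtc)
				continue;

			if (sde_encoder_in_clone_mode(enc))
				continue;

			/* ... normal per-encoder kickoff handling ... */
		}
	}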
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index ea53af9..4e9430e 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -194,6 +194,9 @@
 * @INTR_IDX_PINGPONG: Pingpong done interrupt for cmd mode panel
 * @INTR_IDX_UNDERRUN: Underrun interrupt for video and cmd mode panel
 * @INTR_IDX_RDPTR:    Readpointer done interrupt for cmd mode panel
+ * @INTR_IDX_WB_DONE:  Writeback done interrupt for WB
+ * @INTR_IDX_PP2_OVFL: Pingpong overflow interrupt on PP2 for Concurrent WB
+ * @INTR_IDX_PP3_OVFL: Pingpong overflow interrupt on PP3 for Concurrent WB
  * @INTR_IDX_AUTOREFRESH_DONE:  Autorefresh done for cmd mode panel meaning
  *                              autorefresh has triggered a double buffer flip
  */
@@ -204,6 +207,9 @@
 	INTR_IDX_CTL_START,
 	INTR_IDX_RDPTR,
 	INTR_IDX_AUTOREFRESH_DONE,
+	INTR_IDX_WB_DONE,
+	INTR_IDX_PP2_OVFL,
+	INTR_IDX_PP3_OVFL,
 	INTR_IDX_MAX,
 };
 
@@ -263,6 +269,7 @@
  * @irq:			IRQ tracking structures
  * @cont_splash_single_flush	Variable to check if single flush is enabled.
  * @cont_splash_settings	Variable to store continuous splash settings.
+ * @in_clone_mode		Indicates if encoder is in clone mode (CWB)
  * @vfp_cached:			cached vertical front porch to be used for
  *				programming ROT and MDP fetch start
  */
@@ -295,6 +302,7 @@
 	struct sde_encoder_irq irq[INTR_IDX_MAX];
 	u32 cont_splash_single_flush;
 	bool cont_splash_settings;
+	bool in_clone_mode;
 	int vfp_cached;
 };
 
@@ -368,7 +376,6 @@
  *	writeback specific operations
  * @base:		Baseclass physical encoder structure
  * @hw_wb:		Hardware interface to the wb registers
- * @irq_idx:		IRQ interface lookup index
  * @wbdone_timeout:	Timeout value for writeback done in msec
  * @bypass_irqreg:	Bypass irq register/unregister if non-zero
  * @wbdone_complete:	for wbdone irq synchronization
@@ -392,8 +399,6 @@
 struct sde_encoder_phys_wb {
 	struct sde_encoder_phys base;
 	struct sde_hw_wb *hw_wb;
-	int irq_idx;
-	struct sde_irq_callback irq_cb;
 	u32 wbdone_timeout;
 	u32 bypass_irqreg;
 	struct completion wbdone_complete;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index a3f2c73..82dd64a 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -32,6 +32,7 @@
 
 #define TO_S15D16(_x_)	((_x_) << 7)
 
+#define MULTIPLE_CONN_DETECTED(x) (x > 1)
 /**
  * sde_rgb2yuv_601l - rgb to yuv color space conversion matrix
  *
@@ -398,6 +399,31 @@
 	}
 }
 
+static void _sde_encoder_phys_wb_setup_cwb(struct sde_encoder_phys *phys_enc,
+					bool enable)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+	struct sde_hw_intf_cfg *intf_cfg = &wb_enc->intf_cfg;
+	struct sde_hw_ctl *hw_ctl = phys_enc->hw_ctl;
+	struct sde_crtc *crtc = to_sde_crtc(wb_enc->crtc);
+
+	if (!phys_enc->in_clone_mode) {
+		SDE_DEBUG("not in CWB mode. early return\n");
+		return;
+	}
+
+	memset(intf_cfg, 0, sizeof(struct sde_hw_intf_cfg));
+	intf_cfg->intf = SDE_NONE;
+	intf_cfg->wb = hw_wb->idx;
+
+	hw_ctl = crtc->mixers[0].hw_ctl;
+	if (hw_ctl && hw_ctl->ops.update_wb_cfg) {
+		hw_ctl->ops.update_wb_cfg(hw_ctl, intf_cfg, enable);
+		SDE_DEBUG("in CWB mode adding WB for CTL_%d\n",
+				hw_ctl->idx - CTL_0);
+	}
+}
 /**
  * sde_encoder_phys_wb_setup_cdp - setup chroma down prefetch block
  * @phys_enc:	Pointer to physical encoder
@@ -408,8 +434,12 @@
 	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
 	struct sde_hw_intf_cfg *intf_cfg = &wb_enc->intf_cfg;
 
-	memset(intf_cfg, 0, sizeof(struct sde_hw_intf_cfg));
+	if (phys_enc->in_clone_mode) {
+		SDE_DEBUG("in CWB mode. early return\n");
+		return;
+	}
 
+	memset(intf_cfg, 0, sizeof(struct sde_hw_intf_cfg));
 	intf_cfg->intf = SDE_NONE;
 	intf_cfg->wb = hw_wb->idx;
 	intf_cfg->mode_3d = sde_encoder_helper_get_3d_blend_mode(phys_enc);
@@ -417,6 +447,98 @@
 	if (phys_enc->hw_ctl && phys_enc->hw_ctl->ops.setup_intf_cfg)
 		phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl,
 				intf_cfg);
+
+}
+
+static void _sde_enc_phys_wb_detect_cwb(struct sde_encoder_phys *phys_enc,
+		struct drm_crtc_state *crtc_state)
+{
+	struct drm_connector *conn;
+	struct drm_connector_state *conn_state;
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	const struct sde_wb_cfg *wb_cfg = wb_enc->hw_wb->caps;
+	int conn_count = 0;
+
+	phys_enc->in_clone_mode = false;
+
+	/* Check if WB has CWB support */
+	if (!(wb_cfg->features & BIT(SDE_WB_HAS_CWB)))
+		return;
+
+	/* Count the number of connectors on the given crtc */
+	drm_for_each_connector(conn, crtc_state->crtc->dev) {
+		conn_state =
+			drm_atomic_get_connector_state(crtc_state->state, conn);
+		if ((conn->state && conn->state->crtc == crtc_state->crtc) ||
+				(conn_state &&
+				 conn_state->crtc == crtc_state->crtc))
+			conn_count++;
+	}
+
+
+	/* Enable clone mode if crtc has multiple connectors and one is WB */
+	if (MULTIPLE_CONN_DETECTED(conn_count))
+		phys_enc->in_clone_mode = true;
+
+	SDE_DEBUG("detect CWB - status:%d\n", phys_enc->in_clone_mode);
+}
+
+static int _sde_enc_phys_wb_validate_cwb(struct sde_encoder_phys *phys_enc,
+			struct drm_crtc_state *crtc_state,
+			 struct drm_connector_state *conn_state)
+{
+	struct sde_crtc_state *cstate = to_sde_crtc_state(crtc_state);
+	struct sde_rect wb_roi = {0,};
+	int data_pt;
+	int ds_outw = 0;
+	int ds_outh = 0;
+	bool ds_in_use = false;
+	int i = 0;
+	int ret = 0;
+
+	if (!phys_enc->in_clone_mode) {
+		SDE_DEBUG("not in CWB mode. early return\n");
+		goto exit;
+	}
+
+	ret = sde_wb_connector_state_get_output_roi(conn_state, &wb_roi);
+	if (ret) {
+		SDE_ERROR("failed to get roi %d\n", ret);
+		goto exit;
+	}
+
+	data_pt = sde_crtc_get_property(cstate, CRTC_PROP_CAPTURE_OUTPUT);
+
+	/* compute cumulative ds output dimensions if in use */
+	for (i = 0; i < cstate->num_ds; i++)
+		if (cstate->ds_cfg[i].scl3_cfg.enable) {
+			ds_in_use = true;
+			ds_outw += cstate->ds_cfg[i].scl3_cfg.dst_width;
+			ds_outh += cstate->ds_cfg[i].scl3_cfg.dst_height;
+		}
+
+	/* if ds in use check wb roi against ds output dimensions */
+	if ((data_pt == CAPTURE_DSPP_OUT) &&  ds_in_use &&
+			((wb_roi.w != ds_outw) || (wb_roi.h != ds_outh))) {
+		SDE_ERROR("invalid wb roi with dest scalar [%dx%d vs %dx%d]\n",
+				wb_roi.w, wb_roi.h, ds_outw, ds_outh);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	/* validate conn roi against pu rect */
+	if (!sde_kms_rect_is_null(&cstate->crtc_roi)) {
+		if (wb_roi.w != cstate->crtc_roi.w ||
+				wb_roi.h != cstate->crtc_roi.h) {
+			SDE_ERROR("invalid wb roi with pu [%dx%d vs %dx%d]\n",
+					wb_roi.w, wb_roi.h, cstate->crtc_roi.w,
+					 cstate->crtc_roi.h);
+			ret = -EINVAL;
+			goto exit;
+		}
+	}
+exit:
+	return ret;
 }
 
 /**
@@ -453,6 +575,8 @@
 		return -EINVAL;
 	}
 
+	_sde_enc_phys_wb_detect_cwb(phys_enc, crtc_state);
+
 	memset(&wb_roi, 0, sizeof(struct sde_rect));
 
 	rc = sde_wb_connector_state_get_output_roi(conn_state, &wb_roi);
@@ -542,7 +666,62 @@
 		}
 	}
 
-	return 0;
+	rc = _sde_enc_phys_wb_validate_cwb(phys_enc, crtc_state, conn_state);
+	if (rc) {
+		SDE_ERROR("failed in cwb validation %d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static void _sde_encoder_phys_wb_update_cwb_flush(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc;
+	struct sde_hw_wb *hw_wb;
+	struct sde_hw_ctl *hw_ctl;
+	struct sde_hw_cdm *hw_cdm;
+	struct sde_crtc *crtc;
+	struct sde_crtc_state *crtc_state;
+	u32 flush_mask = 0;
+	int capture_point = 0;
+
+	if (!phys_enc->in_clone_mode) {
+		SDE_DEBUG("not in CWB mode. early return\n");
+		return;
+	}
+
+	wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	crtc = to_sde_crtc(wb_enc->crtc);
+	crtc_state = to_sde_crtc_state(wb_enc->crtc->state);
+
+	hw_wb = wb_enc->hw_wb;
+	hw_cdm = phys_enc->hw_cdm;
+
+	/* In CWB mode, program actual source master sde_hw_ctl from crtc */
+	hw_ctl = crtc->mixers[0].hw_ctl;
+	if (!hw_ctl) {
+		SDE_DEBUG("[wb:%d] no ctl assigned for CWB\n",
+				hw_wb->idx - WB_0);
+		return;
+	}
+
+	capture_point  = sde_crtc_get_property(crtc_state,
+			CRTC_PROP_CAPTURE_OUTPUT);
+
+	phys_enc->hw_mdptop->ops.set_cwb_ppb_cntl(phys_enc->hw_mdptop,
+			crtc->num_mixers == CRTC_DUAL_MIXERS,
+			capture_point == CAPTURE_DSPP_OUT);
+
+	if (hw_ctl->ops.get_bitmask_wb)
+		hw_ctl->ops.get_bitmask_wb(hw_ctl, &flush_mask, hw_wb->idx);
+
+	if (hw_ctl->ops.get_bitmask_cdm && hw_cdm)
+		hw_ctl->ops.get_bitmask_cdm(hw_ctl, &flush_mask, hw_cdm->idx);
+
+	if (hw_ctl->ops.update_pending_flush)
+		hw_ctl->ops.update_pending_flush(hw_ctl, flush_mask);
 }
 
 /**
@@ -551,7 +730,7 @@
  */
 static void _sde_encoder_phys_wb_update_flush(struct sde_encoder_phys *phys_enc)
 {
-	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_encoder_phys_wb *wb_enc;
 	struct sde_hw_wb *hw_wb;
 	struct sde_hw_ctl *hw_ctl;
 	struct sde_hw_cdm *hw_cdm;
@@ -560,12 +739,18 @@
 	if (!phys_enc)
 		return;
 
+	wb_enc = to_sde_encoder_phys_wb(phys_enc);
 	hw_wb = wb_enc->hw_wb;
-	hw_ctl = phys_enc->hw_ctl;
 	hw_cdm = phys_enc->hw_cdm;
+	hw_ctl = phys_enc->hw_ctl;
 
 	SDE_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
 
+	if (phys_enc->in_clone_mode) {
+		SDE_DEBUG("in CWB mode. early return\n");
+		return;
+	}
+
 	if (!hw_ctl) {
 		SDE_DEBUG("[wb:%d] no ctl assigned\n", hw_wb->idx - WB_0);
 		return;
@@ -659,54 +844,28 @@
 	sde_encoder_phys_wb_setup_fb(phys_enc, fb, wb_roi);
 
 	sde_encoder_phys_wb_setup_cdp(phys_enc);
+
+	_sde_encoder_phys_wb_setup_cwb(phys_enc, true);
 }
 
-/**
- * sde_encoder_phys_wb_unregister_irq - unregister writeback interrupt handler
- * @phys_enc:	Pointer to physical encoder
- */
-static int sde_encoder_phys_wb_unregister_irq(
-		struct sde_encoder_phys *phys_enc)
-{
-	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
-	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
-
-	if (wb_enc->bypass_irqreg)
-		return 0;
-
-	sde_core_irq_disable(phys_enc->sde_kms, &wb_enc->irq_idx, 1);
-	sde_core_irq_unregister_callback(phys_enc->sde_kms, wb_enc->irq_idx,
-			&wb_enc->irq_cb);
-
-	SDE_DEBUG("un-register IRQ for wb %d, irq_idx=%d\n",
-			hw_wb->idx - WB_0,
-			wb_enc->irq_idx);
-
-	return 0;
-}
-
-/**
- * sde_encoder_phys_wb_done_irq - writeback interrupt handler
- * @arg:	Pointer to writeback encoder
- * @irq_idx:	interrupt index
- */
-static void sde_encoder_phys_wb_done_irq(void *arg, int irq_idx)
+static void _sde_encoder_phys_wb_frame_done_helper(void *arg, bool frame_error)
 {
 	struct sde_encoder_phys_wb *wb_enc = arg;
 	struct sde_encoder_phys *phys_enc = &wb_enc->base;
 	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
-	u32 event = 0;
+	u32 event = frame_error ? SDE_ENCODER_FRAME_EVENT_ERROR : 0;
 
-	SDE_DEBUG("[wb:%d,%u]\n", hw_wb->idx - WB_0,
-			wb_enc->frame_count);
+	event |= SDE_ENCODER_FRAME_EVENT_DONE |
+		SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE;
+
+	SDE_DEBUG("[wb:%d,%u]\n", hw_wb->idx - WB_0, wb_enc->frame_count);
 
 	/* don't notify upper layer for internal commit */
 	if (phys_enc->enable_state == SDE_ENC_DISABLING)
 		goto complete;
 
-	event = SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE
-			| SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE
-			| SDE_ENCODER_FRAME_EVENT_DONE;
+	if (!phys_enc->in_clone_mode)
+		event |= SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE;
 
 	atomic_add_unless(&phys_enc->pending_retire_fence_cnt, -1, 0);
 	if (phys_enc->parent_ops.handle_frame_done)
@@ -724,58 +883,60 @@
 }
 
 /**
- * sde_encoder_phys_wb_register_irq - register writeback interrupt handler
- * @phys_enc:	Pointer to physical encoder
+ * sde_encoder_phys_cwb_ovflow - Pingpong overflow interrupt handler for CWB
+ * @arg:	Pointer to writeback encoder
+ * @irq_idx:	interrupt index
  */
-static int sde_encoder_phys_wb_register_irq(struct sde_encoder_phys *phys_enc)
+static void sde_encoder_phys_cwb_ovflow(void *arg, int irq_idx)
 {
-	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
-	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
-	struct sde_irq_callback *irq_cb = &wb_enc->irq_cb;
-	enum sde_intr_type intr_type;
-	int ret = 0;
+	_sde_encoder_phys_wb_frame_done_helper(arg, true);
+}
+
+/**
+ * sde_encoder_phys_wb_done_irq - writeback interrupt handler
+ * @arg:	Pointer to writeback encoder
+ * @irq_idx:	interrupt index
+ */
+static void sde_encoder_phys_wb_done_irq(void *arg, int irq_idx)
+{
+	_sde_encoder_phys_wb_frame_done_helper(arg, false);
+}
+
+/**
+ * sde_encoder_phys_wb_irq_ctrl - irq control of WB
+ * @phys:	Pointer to physical encoder
+ * @enable:	indicates enable or disable interrupts
+ */
+static void sde_encoder_phys_wb_irq_ctrl(
+		struct sde_encoder_phys *phys, bool enable)
+{
+
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys);
+	int index = 0;
+
+	if (!wb_enc)
+		return;
 
 	if (wb_enc->bypass_irqreg)
-		return 0;
+		return;
 
-	intr_type = sde_encoder_phys_wb_get_intr_type(hw_wb);
-	wb_enc->irq_idx = sde_core_irq_idx_lookup(phys_enc->sde_kms,
-			intr_type, hw_wb->idx);
-	if (wb_enc->irq_idx < 0) {
-		SDE_ERROR(
-			"failed to lookup IRQ index for WB_DONE with wb=%d\n",
-			hw_wb->idx - WB_0);
-		return -EINVAL;
+	if (enable) {
+		sde_encoder_helper_register_irq(phys, INTR_IDX_WB_DONE);
+		if (phys->in_clone_mode) {
+			for (index = 0; index < CRTC_DUAL_MIXERS; index++)
+				sde_encoder_helper_register_irq(phys,
+						index ? INTR_IDX_PP3_OVFL
+						: INTR_IDX_PP2_OVFL);
+		}
+	} else {
+		sde_encoder_helper_unregister_irq(phys, INTR_IDX_WB_DONE);
+		if (phys->in_clone_mode) {
+			for (index = 0; index < CRTC_DUAL_MIXERS; index++)
+				sde_encoder_helper_unregister_irq(phys,
+						index ? INTR_IDX_PP3_OVFL
+						: INTR_IDX_PP2_OVFL);
+		}
 	}
-
-	irq_cb->func = sde_encoder_phys_wb_done_irq;
-	irq_cb->arg = wb_enc;
-	ret = sde_core_irq_register_callback(phys_enc->sde_kms,
-			wb_enc->irq_idx, irq_cb);
-	if (ret) {
-		SDE_ERROR("failed to register IRQ callback WB_DONE\n");
-		return ret;
-	}
-
-	ret = sde_core_irq_enable(phys_enc->sde_kms, &wb_enc->irq_idx, 1);
-	if (ret) {
-		SDE_ERROR(
-			"failed to enable IRQ for WB_DONE, wb %d, irq_idx=%d\n",
-				hw_wb->idx - WB_0,
-				wb_enc->irq_idx);
-		wb_enc->irq_idx = -EINVAL;
-
-		/* Unregister callback on IRQ enable failure */
-		sde_core_irq_unregister_callback(phys_enc->sde_kms,
-				wb_enc->irq_idx, irq_cb);
-		return ret;
-	}
-
-	SDE_DEBUG("registered IRQ for wb %d, irq_idx=%d\n",
-			hw_wb->idx - WB_0,
-			wb_enc->irq_idx);
-
-	return ret;
 }
 
 /**
@@ -846,6 +1007,7 @@
 	u32 irq_status, event = 0;
 	u64 wb_time = 0;
 	int rc = 0;
+	int irq_idx = phys_enc->irq[INTR_IDX_WB_DONE].irq_idx;
 	u32 timeout = max_t(u32, wb_enc->wbdone_timeout, KICKOFF_TIMEOUT_MS);
 
 	/* Return EWOULDBLOCK since we know the wait isn't necessary */
@@ -860,7 +1022,7 @@
 	/* signal completion if commit with no framebuffer */
 	if (!wb_enc->wb_fb) {
 		SDE_DEBUG("no output framebuffer\n");
-		sde_encoder_phys_wb_done_irq(wb_enc, wb_enc->irq_idx);
+		_sde_encoder_phys_wb_frame_done_helper(wb_enc, false);
 	}
 
 	ret = wait_for_completion_timeout(&wb_enc->wbdone_complete,
@@ -870,11 +1032,11 @@
 		SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc),
 				wb_enc->frame_count);
 		irq_status = sde_core_irq_read(phys_enc->sde_kms,
-				wb_enc->irq_idx, true);
+				irq_idx, true);
 		if (irq_status) {
 			SDE_DEBUG("wb:%d done but irq not triggered\n",
 					WBID(wb_enc));
-			sde_encoder_phys_wb_done_irq(wb_enc, wb_enc->irq_idx);
+			_sde_encoder_phys_wb_frame_done_helper(wb_enc, false);
 		} else {
 			SDE_ERROR("wb:%d kickoff timed out\n",
 					WBID(wb_enc));
@@ -891,8 +1053,6 @@
 		}
 	}
 
-	sde_encoder_phys_wb_unregister_irq(phys_enc);
-
 	if (!rc)
 		wb_enc->end_time = ktime_get();
 
@@ -937,19 +1097,12 @@
 		struct sde_encoder_kickoff_params *params)
 {
 	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
-	int ret;
 
 	SDE_DEBUG("[wb:%d,%u]\n", wb_enc->hw_wb->idx - WB_0,
 			wb_enc->kickoff_count);
 
 	reinit_completion(&wb_enc->wbdone_complete);
 
-	ret = sde_encoder_phys_wb_register_irq(phys_enc);
-	if (ret) {
-		SDE_ERROR("failed to register irq %d\n", ret);
-		return ret;
-	}
-
 	wb_enc->kickoff_count++;
 
 	/* set OT limit & enable traffic shaper */
@@ -957,6 +1110,8 @@
 
 	_sde_encoder_phys_wb_update_flush(phys_enc);
 
+	_sde_encoder_phys_wb_update_cwb_flush(phys_enc);
+
 	/* vote for iommu/clk/bus */
 	wb_enc->start_time = ktime_get();
 
@@ -977,6 +1132,15 @@
 		return;
 	}
 
+	/*
+	 * Bail out if in CWB mode. In CWB, the primary control path, which
+	 * actually drives the display, triggers the flush.
+	 */
+	if (phys_enc->in_clone_mode) {
+		SDE_DEBUG("in CWB mode. early return\n");
+		return;
+	}
+
 	SDE_DEBUG("[wb:%d]\n", wb_enc->hw_wb->idx - WB_0);
 
 	/* clear pending flush if commit with no framebuffer */
@@ -1183,6 +1347,13 @@
 		goto exit;
 	}
 
+	/* avoid reset frame for CWB */
+	if (phys_enc->in_clone_mode) {
+		_sde_encoder_phys_wb_setup_cwb(phys_enc, false);
+		phys_enc->in_clone_mode = false;
+		goto exit;
+	}
+
 	/* reset h/w before final flush */
 	if (phys_enc->hw_ctl->ops.clear_pending_flush)
 		phys_enc->hw_ctl->ops.clear_pending_flush(phys_enc->hw_ctl);
@@ -1320,6 +1491,7 @@
 	ops->trigger_flush = sde_encoder_phys_wb_trigger_flush;
 	ops->trigger_start = sde_encoder_helper_trigger_start;
 	ops->hw_reset = sde_encoder_helper_hw_reset;
+	ops->irq_control = sde_encoder_phys_wb_irq_ctrl;
 }
 
 /**
@@ -1332,6 +1504,7 @@
 	struct sde_encoder_phys *phys_enc;
 	struct sde_encoder_phys_wb *wb_enc;
 	struct sde_hw_mdp *hw_mdp;
+	struct sde_encoder_irq *irq;
 	int ret = 0;
 
 	SDE_DEBUG("\n");
@@ -1348,7 +1521,6 @@
 		ret = -ENOMEM;
 		goto fail_alloc;
 	}
-	wb_enc->irq_idx = -EINVAL;
 	wb_enc->wbdone_timeout = KICKOFF_TIMEOUT_MS;
 	init_completion(&wb_enc->wbdone_complete);
 
@@ -1412,7 +1584,36 @@
 	phys_enc->enc_spinlock = p->enc_spinlock;
 	phys_enc->vblank_ctl_lock = p->vblank_ctl_lock;
 	atomic_set(&phys_enc->pending_retire_fence_cnt, 0);
-	INIT_LIST_HEAD(&wb_enc->irq_cb.list);
+
+	irq = &phys_enc->irq[INTR_IDX_WB_DONE];
+	INIT_LIST_HEAD(&irq->cb.list);
+	irq->name = "wb_done";
+	irq->hw_idx =  wb_enc->hw_wb->idx;
+	irq->irq_idx = -1;
+	irq->intr_type = sde_encoder_phys_wb_get_intr_type(wb_enc->hw_wb);
+	irq->intr_idx = INTR_IDX_WB_DONE;
+	irq->cb.arg = wb_enc;
+	irq->cb.func = sde_encoder_phys_wb_done_irq;
+
+	irq = &phys_enc->irq[INTR_IDX_PP2_OVFL];
+	INIT_LIST_HEAD(&irq->cb.list);
+	irq->name = "pp2_overflow";
+	irq->hw_idx = CWB_2;
+	irq->irq_idx = -1;
+	irq->intr_type = SDE_IRQ_TYPE_CWB_OVERFLOW;
+	irq->intr_idx = INTR_IDX_PP2_OVFL;
+	irq->cb.arg = wb_enc;
+	irq->cb.func = sde_encoder_phys_cwb_ovflow;
+
+	irq = &phys_enc->irq[INTR_IDX_PP3_OVFL];
+	INIT_LIST_HEAD(&irq->cb.list);
+	irq->name = "pp3_overflow";
+	irq->hw_idx = CWB_3;
+	irq->irq_idx = -1;
+	irq->intr_type = SDE_IRQ_TYPE_CWB_OVERFLOW;
+	irq->intr_idx = INTR_IDX_PP3_OVFL;
+	irq->cb.arg = wb_enc;
+	irq->cb.func = sde_encoder_phys_cwb_ovflow;
 
 	/* create internal buffer for disable logic */
 	if (_sde_encoder_phys_wb_init_internal_fb(wb_enc,
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.c b/drivers/gpu/drm/msm/sde/sde_fence.c
index 686c640..8ffbb98 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.c
+++ b/drivers/gpu/drm/msm/sde/sde_fence.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -278,7 +278,8 @@
 	}
 }
 
-static void _sde_fence_trigger(struct sde_fence_context *ctx, ktime_t ts)
+static void _sde_fence_trigger(struct sde_fence_context *ctx,
+		ktime_t ts, bool error)
 {
 	unsigned long flags;
 	struct sde_fence *fc, *next;
@@ -300,6 +301,7 @@
 
 	list_for_each_entry_safe(fc, next, &local_list_head, fence_list) {
 		spin_lock_irqsave(&ctx->lock, flags);
+		fc->base.error = error ? -EBUSY : 0;
 		fc->base.timestamp = ts;
 		is_signaled = fence_is_signaled_locked(&fc->base);
 		spin_unlock_irqrestore(&ctx->lock, flags);
@@ -351,7 +353,7 @@
 
 	if (fd >= 0) {
 		rc = 0;
-		_sde_fence_trigger(ctx, ktime_get());
+		_sde_fence_trigger(ctx, ktime_get(), false);
 	} else {
 		rc = fd;
 	}
@@ -360,7 +362,7 @@
 }
 
 void sde_fence_signal(struct sde_fence_context *ctx, ktime_t ts,
-							bool reset_timeline)
+		enum sde_fence_event fence_event)
 {
 	unsigned long flags;
 
@@ -370,7 +372,7 @@
 	}
 
 	spin_lock_irqsave(&ctx->lock, flags);
-	if (reset_timeline) {
+	if (fence_event == SDE_FENCE_RESET_TIMELINE) {
 		if ((int)(ctx->done_count - ctx->commit_count) < 0) {
 			SDE_ERROR(
 				"timeline reset attempt! done count:%d commit:%d\n",
@@ -378,7 +380,7 @@
 			ctx->done_count = ctx->commit_count;
 			SDE_EVT32(ctx->drm_id, ctx->done_count,
 				ctx->commit_count, ktime_to_us(ts),
-				reset_timeline, SDE_EVTLOG_FATAL);
+				fence_event, SDE_EVTLOG_FATAL);
 		} else {
 			spin_unlock_irqrestore(&ctx->lock, flags);
 			return;
@@ -391,7 +393,7 @@
 		SDE_ERROR("extra signal attempt! done count:%d commit:%d\n",
 					ctx->done_count, ctx->commit_count);
 		SDE_EVT32(ctx->drm_id, ctx->done_count, ctx->commit_count,
-			ktime_to_us(ts), reset_timeline, SDE_EVTLOG_FATAL);
+			ktime_to_us(ts), fence_event, SDE_EVTLOG_FATAL);
 		spin_unlock_irqrestore(&ctx->lock, flags);
 		return;
 	}
@@ -400,7 +402,7 @@
 	SDE_EVT32(ctx->drm_id, ctx->done_count, ctx->commit_count,
 			ktime_to_us(ts));
 
-	_sde_fence_trigger(ctx, ts);
+	_sde_fence_trigger(ctx, ts, (fence_event == SDE_FENCE_SIGNAL_ERROR));
 }
 
 void sde_fence_timeline_status(struct sde_fence_context *ctx,
@@ -429,3 +431,55 @@
 		obj_name, drm_obj->id, drm_obj->type, ctx->done_count,
 		ctx->commit_count);
 }
+
+void sde_fence_list_dump(struct fence *fence, struct seq_file **s)
+{
+	char timeline_str[TIMELINE_VAL_LENGTH] = {0};
+
+	if (fence->ops->timeline_value_str)
+		fence->ops->timeline_value_str(fence,
+		timeline_str, TIMELINE_VAL_LENGTH);
+
+	seq_printf(*s, "fence name:%s timeline name:%s seqno:0x%x timeline:%s signaled:0x%x\n",
+		fence->ops->get_driver_name(fence),
+		fence->ops->get_timeline_name(fence),
+		fence->seqno, timeline_str,
+		fence->ops->signaled ?
+		fence->ops->signaled(fence) : 0xffffffff);
+}
+
+void sde_debugfs_timeline_dump(struct sde_fence_context *ctx,
+		struct drm_mode_object *drm_obj, struct seq_file **s)
+{
+	char *obj_name;
+	struct sde_fence *fc, *next;
+	struct fence *fence;
+
+	if (!ctx || !drm_obj) {
+		SDE_ERROR("invalid input params\n");
+		return;
+	}
+
+	switch (drm_obj->type) {
+	case DRM_MODE_OBJECT_CRTC:
+		obj_name = "crtc";
+		break;
+	case DRM_MODE_OBJECT_CONNECTOR:
+		obj_name = "connector";
+		break;
+	default:
+		obj_name = "unknown";
+		break;
+	}
+
+	seq_printf(*s, "drm obj:%s id:%d type:0x%x done_count:%d commit_count:%d\n",
+		obj_name, drm_obj->id, drm_obj->type, ctx->done_count,
+		ctx->commit_count);
+
+	spin_lock(&ctx->list_lock);
+	list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
+		fence = &fc->base;
+		sde_fence_list_dump(fence, s);
+	}
+	spin_unlock(&ctx->list_lock);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.h b/drivers/gpu/drm/msm/sde/sde_fence.h
index 29d2ec7..7d7fd02 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.h
+++ b/drivers/gpu/drm/msm/sde/sde_fence.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -47,6 +47,18 @@
 	char name[SDE_FENCE_NAME_SIZE];
 };
 
+/**
+ * enum sde_fence_event - sde fence event as a hint for the fence signal operation
+ * @SDE_FENCE_SIGNAL: Signal the fence cleanly with current timeline
+ * @SDE_FENCE_RESET_TIMELINE: Reset timeline of the fence context
+ * @SDE_FENCE_SIGNAL_ERROR: Signal the fence but indicate error through fence status
+ */
+enum sde_fence_event {
+	SDE_FENCE_SIGNAL,
+	SDE_FENCE_RESET_TIMELINE,
+	SDE_FENCE_SIGNAL_ERROR
+};
+
 #if IS_ENABLED(CONFIG_SYNC_FILE)
 /**
  * sde_sync_get - Query sync fence object from a file handle
@@ -128,10 +140,10 @@
  * sde_fence_signal - advance fence timeline to signal outstanding fences
  * @fence: Pointer fence container
  * @ts: fence timestamp
- * @reset_timeline: reset the fence timeline to done count equal to commit count
+ * @fence_event: fence event to indicate nature of fence signal.
  */
 void sde_fence_signal(struct sde_fence_context *fence, ktime_t ts,
-		bool reset_timeline);
+		enum sde_fence_event fence_event);
 
 /**
  * sde_fence_timeline_status - prints fence timeline status
@@ -141,6 +153,22 @@
 void sde_fence_timeline_status(struct sde_fence_context *ctx,
 					struct drm_mode_object *drm_obj);
 
+/**
+ * sde_debugfs_timeline_dump - utility to dump fence list info in debugfs node
+ * @ctx: Pointer fence container
+ * @drm_obj: Pointer to drm object associated with fence timeline
+ * @s: pointer to seq_file used for writing to the debugfs node
+ */
+void sde_debugfs_timeline_dump(struct sde_fence_context *ctx,
+		struct drm_mode_object *drm_obj, struct seq_file **s);
+
+/**
+ * sde_fence_list_dump - dumps status of a single fence in debugfs node
+ * @fence: Pointer fence container
+ * @s: pointer to seq_file used for writing to the debugfs node
+ */
+void sde_fence_list_dump(struct fence *fence, struct seq_file **s);
+
 #else
 static inline void *sde_sync_get(uint64_t fd)
 {
@@ -200,6 +228,18 @@
 {
 	/* do nothing */
 }
+
+static inline void sde_debugfs_timeline_dump(struct sde_fence_context *ctx,
+		struct drm_mode_object *drm_obj, struct seq_file **s)
+{
+	/* do nothing */
+}
+
+static inline void sde_fence_list_dump(struct fence *fence, struct seq_file **s)
+{
+	/* do nothing */
+}
+
 #endif /* IS_ENABLED(CONFIG_SW_SYNC) */
 
 #endif /* _SDE_FENCE_H_ */
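A brief usage sketch of the reworked sde_fence_signal() interface above; the wrapper function is hypothetical, but the enum values and the -EBUSY error mapping follow this patch:

	/* Hypothetical caller: pick the signal type based on frame status. */
	static void example_signal_release_fence(struct sde_fence_context *ctx,
			bool frame_error, bool timeline_rollback)
	{
		if (timeline_rollback)
			/* bring done_count back in line with commit_count */
			sde_fence_signal(ctx, ktime_get(), SDE_FENCE_RESET_TIMELINE);
		else if (frame_error)
			/* fence signals, but its error field is set to -EBUSY */
			sde_fence_signal(ctx, ktime_get(), SDE_FENCE_SIGNAL_ERROR);
		else
			sde_fence_signal(ctx, ktime_get(), SDE_FENCE_SIGNAL);
	}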
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index eacafe6..baec526 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -249,6 +249,7 @@
 	DITHER_OFF,
 	DITHER_LEN,
 	DITHER_VER,
+	TE_SOURCE,
 	PP_PROP_MAX,
 };
 
@@ -589,6 +590,7 @@
 	{DITHER_OFF, "qcom,sde-dither-off", false, PROP_TYPE_U32_ARRAY},
 	{DITHER_LEN, "qcom,sde-dither-size", false, PROP_TYPE_U32},
 	{DITHER_VER, "qcom,sde-dither-version", false, PROP_TYPE_U32},
+	{TE_SOURCE, "qcom,sde-te-source", false, PROP_TYPE_U32_ARRAY},
 };
 
 static struct sde_prop_type dsc_prop[] = {
@@ -1694,6 +1696,9 @@
 
 		set_bit(SDE_WB_XY_ROI_OFFSET, &wb->features);
 
+		if (sde_cfg->has_cwb_support)
+			set_bit(SDE_WB_HAS_CWB, &wb->features);
+
 		for (j = 0; j < sde_cfg->mdp_count; j++) {
 			sde_cfg->mdp[j].clk_ctrls[wb->clk_ctrl].reg_off =
 				PROP_BITVALUE_ACCESS(prop_value,
@@ -2625,6 +2630,10 @@
 		snprintf(pp->name, SDE_HW_BLK_NAME_LEN, "pingpong_%u",
 				pp->id - PINGPONG_0);
 		pp->len = PROP_VALUE_ACCESS(prop_value, PP_LEN, 0);
+		pp->te_source = PROP_VALUE_ACCESS(prop_value, TE_SOURCE, i);
+		if (!prop_exists[TE_SOURCE] ||
+			pp->te_source > SDE_VSYNC_SOURCE_WD_TIMER_0)
+			pp->te_source = SDE_VSYNC0_SOURCE_GPIO;
 
 		sblk->te.base = PROP_VALUE_ACCESS(prop_value, TE_OFF, i);
 		sblk->te.id = SDE_PINGPONG_TE;
@@ -3248,6 +3257,7 @@
 	} else if (IS_SDM845_TARGET(hw_rev)) {
 		/* update sdm845 target here */
 		sde_cfg->has_wb_ubwc = true;
+		sde_cfg->has_cwb_support = true;
 		sde_cfg->perf.min_prefill_lines = 24;
 		sde_cfg->vbif_qos_nlvl = 8;
 		sde_cfg->ts_prefill_rev = 2;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index 963b48f..52bdc78 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -261,6 +261,7 @@
  * @SDE_WB_QOS,             Writeback supports QoS control, danger/safe/creq
  * @SDE_WB_QOS_8LVL,        Writeback supports 8-level QoS control
  * @SDE_WB_CDP              Writeback supports client driven prefetch
+ * @SDE_WB_HAS_CWB          Writeback block supports concurrent writeback
  * @SDE_WB_MAX              maximum value
  */
 enum {
@@ -279,6 +280,7 @@
 	SDE_WB_QOS,
 	SDE_WB_QOS_8LVL,
 	SDE_WB_CDP,
+	SDE_WB_HAS_CWB,
 	SDE_WB_MAX
 };
 
@@ -651,6 +653,7 @@
  */
 struct sde_pingpong_cfg  {
 	SDE_HW_BLK_INFO;
+	u32 te_source;
 	const struct sde_pingpong_sub_blks *sblk;
 };
 
@@ -922,6 +925,7 @@
  * @has_src_split      source split feature status
  * @has_cdp            Client driven prefetch feature status
  * @has_wb_ubwc        UBWC feature supported on WB
+ * @has_cwb_support    indicates if device supports primary capture through CWB
  * @ubwc_version       UBWC feature version (0x0 for not supported)
  * @has_sbuf           indicate if stream buffer is available
  * @sbuf_headroom      stream buffer headroom in lines
@@ -959,6 +963,7 @@
 	bool has_cdp;
 	bool has_dim_layer;
 	bool has_wb_ubwc;
+	bool has_cwb_support;
 	u32 ubwc_version;
 	bool has_sbuf;
 	u32 sbuf_headroom;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
index 9e64d78..c7989cd 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
@@ -680,7 +680,7 @@
 	void  __iomem *base;
 
 	if (!hw_cfg  || (hw_cfg->len != sizeof(*pcc)  && hw_cfg->payload)) {
-		DRM_ERROR("invalid params hw %p payload %p payloadsize %d \"\
+		DRM_ERROR("invalid params hw %pK payload %pK payloadsize %d \"\
 			  exp size %zd\n",
 			   hw_cfg, ((hw_cfg) ? hw_cfg->payload : NULL),
 			   ((hw_cfg) ? hw_cfg->len : 0), sizeof(*pcc));
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
index 303d96e..2146611 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -576,6 +576,23 @@
 	SDE_REG_WRITE(c, CTL_TOP, intf_cfg);
 }
 
+static void sde_hw_ctl_update_wb_cfg(struct sde_hw_ctl *ctx,
+		struct sde_hw_intf_cfg *cfg, bool enable) {
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 intf_cfg = 0;
+
+	if (!cfg->wb)
+		return;
+
+	intf_cfg = SDE_REG_READ(c, CTL_TOP);
+	if (enable)
+		intf_cfg |= (cfg->wb & 0x3) + 2;
+	else
+		intf_cfg &= ~((cfg->wb & 0x3) + 2);
+
+	SDE_REG_WRITE(c, CTL_TOP, intf_cfg);
+}
+
 static inline u32 sde_hw_ctl_read_ctl_top(struct sde_hw_ctl *ctx)
 {
 	struct sde_hw_blk_reg_map *c;
@@ -638,6 +655,7 @@
 	ops->read_ctl_top = sde_hw_ctl_read_ctl_top;
 	ops->read_ctl_layers = sde_hw_ctl_read_ctl_layers;
 	ops->setup_intf_cfg = sde_hw_ctl_intf_cfg;
+	ops->update_wb_cfg = sde_hw_ctl_update_wb_cfg;
 	ops->reset = sde_hw_ctl_reset_control;
 	ops->get_reset = sde_hw_ctl_get_reset_status;
 	ops->hard_reset = sde_hw_ctl_hard_reset;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
index 9eb31f1..75631dd8 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -150,6 +150,15 @@
 	void (*setup_intf_cfg)(struct sde_hw_ctl *ctx,
 		struct sde_hw_intf_cfg *cfg);
 
+	/**
+	 * Update the interface selection with input WB config
+	 * @ctx       : ctl path ctx pointer
+	 * @cfg       : pointer to input wb config
+	 * @enable    : set if true, clear otherwise
+	 */
+	void (*update_wb_cfg)(struct sde_hw_ctl *ctx,
+		struct sde_hw_intf_cfg *cfg, bool enable);
+
 	int (*reset)(struct sde_hw_ctl *c);
 
 	/**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.c b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
index 4e677c2..97a3ea4 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_lm.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -179,7 +179,7 @@
 	u32 reset = BIT(16), val;
 
 	reset = ~reset;
-	for (i = SDE_STAGE_0; i < sblk->maxblendstages; i++) {
+	for (i = SDE_STAGE_0; i <= sblk->maxblendstages; i++) {
 		stage_off = _stage_offset(ctx, i);
 		if (WARN_ON(stage_off < 0))
 			return;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.c b/drivers/gpu/drm/msm/sde/sde_hw_top.c
index 8b89ebb..cf646bd 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.c
@@ -376,6 +376,18 @@
 	SDE_REG_WRITE(c, HDMI_DP_CORE_SELECT, 0x1);
 }
 
+static void sde_hw_program_cwb_ppb_ctrl(struct sde_hw_mdp *mdp,
+		bool dual, bool dspp_out)
+{
+	u32 value = dspp_out ? 0x4 : 0x0;
+
+	SDE_REG_WRITE(&mdp->hw, PPB2_CNTL, value);
+	if (dual) {
+		value |= 0x1;
+		SDE_REG_WRITE(&mdp->hw, PPB3_CNTL, value);
+	}
+}
+
 static void _setup_mdp_ops(struct sde_hw_mdp_ops *ops,
 		unsigned long cap)
 {
@@ -385,6 +397,7 @@
 	ops->setup_clk_force_ctrl = sde_hw_setup_clk_force_ctrl;
 	ops->get_danger_status = sde_hw_get_danger_status;
 	ops->setup_vsync_source = sde_hw_setup_vsync_source;
+	ops->set_cwb_ppb_cntl = sde_hw_program_cwb_ppb_ctrl;
 	ops->get_safe_status = sde_hw_get_safe_status;
 	ops->get_split_flush_status = sde_hw_get_split_flush;
 	ops->setup_dce = sde_hw_setup_dce;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.h b/drivers/gpu/drm/msm/sde/sde_hw_top.h
index 950a62c..210ea53 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.h
@@ -194,6 +194,15 @@
 	 * @mdp: mdp top context driver
 	 */
 	void (*intf_audio_select)(struct sde_hw_mdp *mdp);
+
+	/**
+	 * set_cwb_ppb_cntl - select the data point for CWB
+	 * @mdp: mdp top context driver
+	 * @dual: indicates if dual pipe line needs to be programmed
+	 * @dspp_out: true if dspp output is required; LM is the default tap point
+	 */
+	void (*set_cwb_ppb_cntl)(struct sde_hw_mdp *mdp,
+			bool dual, bool dspp_out);
 };
 
 struct sde_hw_mdp {
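For reference, a condensed, hypothetical sketch of how the new set_cwb_ppb_cntl op is exercised from the writeback encoder in this patch (see _sde_encoder_phys_wb_update_cwb_flush() above); the helper name is illustrative:

	/* Pick the CWB tap point (LM vs. DSPP output) and program one or
	 * both PPB muxes depending on single- vs. dual-mixer topology.
	 */
	static void example_program_cwb_tap(struct sde_hw_mdp *mdp,
			u32 num_mixers, int capture_point)
	{
		bool dual = (num_mixers == CRTC_DUAL_MIXERS);
		bool dspp_out = (capture_point == CAPTURE_DSPP_OUT);

		if (mdp->ops.set_cwb_ppb_cntl)
			mdp->ops.set_cwb_ppb_cntl(mdp, dual, dspp_out);
	}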
diff --git a/drivers/gpu/drm/msm/sde/sde_hwio.h b/drivers/gpu/drm/msm/sde/sde_hwio.h
index cc020d9..a592223 100644
--- a/drivers/gpu/drm/msm/sde/sde_hwio.h
+++ b/drivers/gpu/drm/msm/sde/sde_hwio.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -40,6 +40,8 @@
 #define PPB0_CONFIG                     0x334
 #define PPB1_CNTL                       0x338
 #define PPB1_CONFIG                     0x33C
+#define PPB2_CNTL                       0x370
+#define PPB3_CNTL                       0x374
 #define HW_EVENTS_CTL                   0x37C
 #define CLK_CTRL3                       0x3A8
 #define CLK_STATUS3                     0x3AC
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index b0a52a7..c2fffef 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -2151,8 +2151,8 @@
 		aspace->mmu->funcs->attach(mmu, (const char **)iommu_ports,
 			ARRAY_SIZE(iommu_ports));
 
-		msm_gem_aspace_domain_attach_detach_update(aspace, false);
 		aspace->domain_attached = true;
+		msm_gem_aspace_domain_attach_detach_update(aspace, false);
 	}
 
 	return 0;
@@ -2664,21 +2664,22 @@
 
 	mutex_lock(&dev->mode_config.mutex);
 	connector_list = &dev->mode_config.connector_list;
-	list_for_each_entry(conn_iter, connector_list, head) {
-		/**
-		 * SDE_KMS doesn't attach more than one encoder to
-		 * a DSI connector. So it is safe to check only with the
-		 * first encoder entry. Revisit this logic if we ever have
-		 * to support continuous splash for external displays in MST
-		 * configuration.
-		 */
-		if (conn_iter &&
-			(conn_iter->encoder_ids[0] == encoder->base.id)) {
-			connector = conn_iter;
-			break;
+	if (connector_list) {
+		list_for_each_entry(conn_iter, connector_list, head) {
+			/**
+			 * SDE_KMS doesn't attach more than one encoder to
+			 * a DSI connector. So it is safe to check only with
+			 * the first encoder entry. Revisit this logic if we
+			 * ever have to support continuous splash for
+			 * external displays in MST configuration.
+			 */
+			if (conn_iter &&
+			  (conn_iter->encoder_ids[0] == encoder->base.id)) {
+				connector = conn_iter;
+				break;
+			}
 		}
 	}
-
 	if (!connector) {
 		SDE_ERROR("connector not initialized\n");
 		mutex_unlock(&dev->mode_config.mutex);
@@ -3195,7 +3196,7 @@
 		sde_kms->mmio = NULL;
 		goto error;
 	}
-	DRM_INFO("mapped mdp address space @%p\n", sde_kms->mmio);
+	DRM_INFO("mapped mdp address space @%pK\n", sde_kms->mmio);
 	sde_kms->mmio_len = msm_iomap_size(dev->platformdev, "mdp_phys");
 
 	rc = sde_dbg_reg_register_base(SDE_DBG_NAME, sde_kms->mmio,
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 2e46599..3621fb2 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -1088,6 +1088,12 @@
 		&& (src_w == dst_w))
 		return;
 
+	SDE_DEBUG_PLANE(psde,
+		"setting bilinear: src:%dx%d dst:%dx%d chroma:%dx%d fmt:%x\n",
+			src_w, src_h, dst_w, dst_h,
+			chroma_subsmpl_v, chroma_subsmpl_h,
+			fmt->base.pixel_format);
+
 	scale_cfg->dst_width = dst_w;
 	scale_cfg->dst_height = dst_h;
 	scale_cfg->y_rgb_filter_cfg = SDE_SCALE_BIL;
@@ -3449,6 +3455,25 @@
 				src_w, src_h);
 			return -EINVAL;
 		}
+
+		/*
+		 * SSPP fetch, unpack output and QSEED3 input lines need
+		 * to match for Y plane
+		 */
+		if (i == 0 &&
+			(sde_plane_get_property(pstate, PLANE_PROP_SRC_CONFIG) &
+			BIT(SDE_DRM_DEINTERLACE)) &&
+			((pstate->scaler3_cfg.src_height[i] != (src_h/2)) ||
+			(pstate->pixel_ext.roi_h[i] != (src_h/2)))) {
+			SDE_ERROR_PLANE(psde,
+				"de-interlace fail roi[%d] %d/%d, src %dx%d, src %dx%d\n",
+				i, pstate->pixel_ext.roi_w[i],
+				pstate->pixel_ext.roi_h[i],
+				pstate->scaler3_cfg.src_width[i],
+				pstate->scaler3_cfg.src_height[i],
+				src_w, src_h);
+			return -EINVAL;
+		}
 	}
 
 	pstate->scaler_check_state = SDE_PLANE_SCLCHECK_SCALER_V2;
@@ -3555,17 +3580,16 @@
 		ret = -EINVAL;
 
 	/* decimation validation */
-	} else if (deci_w || deci_h) {
-		if ((deci_w > psde->pipe_sblk->maxhdeciexp) ||
-			(deci_h > psde->pipe_sblk->maxvdeciexp)) {
-			SDE_ERROR_PLANE(psde,
-					"too much decimation requested\n");
-			ret = -EINVAL;
-		} else if (fmt->fetch_mode != SDE_FETCH_LINEAR) {
-			SDE_ERROR_PLANE(psde,
-					"decimation requires linear fetch\n");
-			ret = -EINVAL;
-		}
+	} else if ((deci_w || deci_h)
+			&& ((deci_w > psde->pipe_sblk->maxhdeciexp)
+				|| (deci_h > psde->pipe_sblk->maxvdeciexp))) {
+		SDE_ERROR_PLANE(psde, "too much decimation requested\n");
+		ret = -EINVAL;
+
+	} else if ((deci_w || deci_h)
+			&& (fmt->fetch_mode != SDE_FETCH_LINEAR)) {
+		SDE_ERROR_PLANE(psde, "decimation requires linear fetch\n");
+		ret = -EINVAL;
 
 	} else if (!(psde->features & SDE_SSPP_SCALER) &&
 		((src.w != dst.w) || (src.h != dst.h))) {
diff --git a/drivers/gpu/drm/msm/sde/sde_wb.c b/drivers/gpu/drm/msm/sde/sde_wb.c
index 71c8b63..61588bd9 100644
--- a/drivers/gpu/drm/msm/sde/sde_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_wb.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -816,6 +816,7 @@
 	.driver = {
 		.name = "sde_wb",
 		.of_match_table = dt_match,
+		.suppress_bind_attrs = true,
 	},
 };
 
diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c
index 5829095..758cc53 100644
--- a/drivers/gpu/drm/msm/sde_dbg.c
+++ b/drivers/gpu/drm/msm/sde_dbg.c
@@ -169,6 +169,11 @@
 	struct vbif_debug_bus_entry *entries;
 };
 
+struct sde_dbg_dsi_debug_bus {
+	u32 *entries;
+	u32 size;
+};
+
 /**
  * struct sde_dbg_base - global sde debug base structure
  * @evtlog: event log instance
@@ -202,6 +207,7 @@
 
 	struct sde_dbg_sde_debug_bus dbgbus_sde;
 	struct sde_dbg_vbif_debug_bus dbgbus_vbif_rt;
+	struct sde_dbg_dsi_debug_bus dbgbus_dsi;
 	bool dump_all;
 	bool dsi_dbg_bus;
 	u32 debugfs_ctrl;
@@ -2038,6 +2044,42 @@
 	{0x21c, 0x214, 0, 14, 0, 0xc}, /* xin blocks - clock side */
 };
 
+static u32 dsi_dbg_bus_sdm845[] = {
+	0x0001, 0x1001, 0x0001, 0x0011,
+	0x1021, 0x0021, 0x0031, 0x0041,
+	0x0051, 0x0061, 0x3061, 0x0061,
+	0x2061, 0x2061, 0x1061, 0x1061,
+	0x1061, 0x0071, 0x0071, 0x0071,
+	0x0081, 0x0081, 0x00A1, 0x00A1,
+	0x10A1, 0x20A1, 0x30A1, 0x10A1,
+	0x10A1, 0x30A1, 0x20A1, 0x00B1,
+	0x00C1, 0x00C1, 0x10C1, 0x20C1,
+	0x30C1, 0x00D1, 0x00D1, 0x20D1,
+	0x30D1, 0x00E1, 0x00E1, 0x00E1,
+	0x00F1, 0x00F1, 0x0101, 0x0101,
+	0x1101, 0x2101, 0x3101, 0x0111,
+	0x0141, 0x1141, 0x0141, 0x1141,
+	0x1141, 0x0151, 0x0151, 0x1151,
+	0x2151, 0x3151, 0x0161, 0x0161,
+	0x1161, 0x0171, 0x0171, 0x0181,
+	0x0181, 0x0191, 0x0191, 0x01A1,
+	0x01A1, 0x01B1, 0x01B1, 0x11B1,
+	0x21B1, 0x01C1, 0x01C1, 0x11C1,
+	0x21C1, 0x31C1, 0x01D1, 0x01D1,
+	0x01D1, 0x01D1, 0x11D1, 0x21D1,
+	0x21D1, 0x01E1, 0x01E1, 0x01F1,
+	0x01F1, 0x0201, 0x0201, 0x0211,
+	0x0221, 0x0231, 0x0241, 0x0251,
+	0x0281, 0x0291, 0x0281, 0x0291,
+	0x02A1, 0x02B1, 0x02C1, 0x0321,
+	0x0321, 0x1321, 0x2321, 0x3321,
+	0x0331, 0x0331, 0x1331, 0x0341,
+	0x0341, 0x1341, 0x2341, 0x3341,
+	0x0351, 0x0361, 0x0361, 0x1361,
+	0x2361, 0x0371, 0x0381, 0x0391,
+	0x03C1, 0x03D1, 0x03E1, 0x03F1,
+};
+
 /**
  * _sde_dbg_enable_power - use callback to turn power on for hw register access
  * @enable: whether to turn power on or off
@@ -2603,7 +2645,8 @@
 		_sde_dbg_dump_vbif_dbg_bus(&sde_dbg_base.dbgbus_vbif_rt);
 
 	if (sde_dbg_base.dsi_dbg_bus || dump_all)
-		dsi_ctrl_debug_dump();
+		dsi_ctrl_debug_dump(sde_dbg_base.dbgbus_dsi.entries,
+				    sde_dbg_base.dbgbus_dsi.size);
 
 	if (do_panic && sde_dbg_base.panic_on_err)
 		panic(name);
@@ -3355,6 +3398,8 @@
 		dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
 		dbg->dbgbus_vbif_rt.cmn.entries_size =
 				ARRAY_SIZE(vbif_dbg_bus_msm8998);
+		dbg->dbgbus_dsi.entries = NULL;
+		dbg->dbgbus_dsi.size = 0;
 	} else if (IS_SDM845_TARGET(hwversion) || IS_SDM670_TARGET(hwversion)) {
 		dbg->dbgbus_sde.entries = dbg_bus_sde_sdm845;
 		dbg->dbgbus_sde.cmn.entries_size =
@@ -3365,6 +3410,8 @@
 		dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
 		dbg->dbgbus_vbif_rt.cmn.entries_size =
 				ARRAY_SIZE(vbif_dbg_bus_msm8998);
+		dbg->dbgbus_dsi.entries = dsi_dbg_bus_sdm845;
+		dbg->dbgbus_dsi.size = ARRAY_SIZE(dsi_dbg_bus_sdm845);
 	} else {
 		pr_err("unsupported chipset id %X\n", hwversion);
 	}
diff --git a/drivers/gpu/drm/msm/sde_dbg.h b/drivers/gpu/drm/msm/sde_dbg.h
index 9efb893..2921a38 100644
--- a/drivers/gpu/drm/msm/sde_dbg.h
+++ b/drivers/gpu/drm/msm/sde_dbg.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -344,8 +344,10 @@
 
 /**
  * dsi_ctrl_debug_dump - dump dsi debug dump status
+ * @entries:	array of debug bus control values
+ * @size:	size of the debug bus control array
  */
-void dsi_ctrl_debug_dump(void);
+void dsi_ctrl_debug_dump(u32 *entries, u32 size);
 
 #else
 static inline struct sde_dbg_evtlog *sde_evtlog_init(void)
@@ -437,7 +439,7 @@
 {
 }
 
-static inline void dsi_ctrl_debug_dump(void)
+static inline void dsi_ctrl_debug_dump(u32 *entries, u32 size)
 {
 }
 
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index 8179b10..721e278 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -579,8 +579,12 @@
 			msecs_to_jiffies(PRIMARY_VBLANK_WORST_CASE_MS*2));
 		if (!rc) {
 			pr_err("Timeout waiting for vsync\n");
-			SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait),
+			rc = -ETIMEDOUT;
+			SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait), rc,
 				SDE_EVTLOG_ERROR);
+		} else {
+			SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait), rc);
+			rc = 0;
 		}
 	}
 end:
@@ -635,8 +639,12 @@
 			msecs_to_jiffies(PRIMARY_VBLANK_WORST_CASE_MS*2));
 		if (!rc) {
 			pr_err("Timeout waiting for vsync\n");
-			SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait),
+			rc = -ETIMEDOUT;
+			SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait), rc,
 				SDE_EVTLOG_ERROR);
+		} else {
+			SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait), rc);
+			rc = 0;
 		}
 	}
 
@@ -1446,6 +1454,7 @@
 	.driver     = {
 		.name   = "sde_rsc",
 		.of_match_table = dt_match,
+		.suppress_bind_attrs = true,
 	},
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
index e2faccf..d66e0e7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
@@ -46,8 +46,8 @@
 	0x00000000,
 	0x00000000,
 	0x584d454d,
-	0x00000756,
-	0x00000748,
+	0x00000754,
+	0x00000746,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -68,8 +68,8 @@
 	0x00000000,
 	0x00000000,
 	0x46524550,
-	0x0000075a,
 	0x00000758,
+	0x00000756,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -90,8 +90,8 @@
 	0x00000000,
 	0x00000000,
 	0x5f433249,
-	0x00000b8a,
-	0x00000a2d,
+	0x00000b88,
+	0x00000a2b,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -112,8 +112,8 @@
 	0x00000000,
 	0x00000000,
 	0x54534554,
-	0x00000bb3,
-	0x00000b8c,
+	0x00000bb1,
+	0x00000b8a,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -134,8 +134,8 @@
 	0x00000000,
 	0x00000000,
 	0x454c4449,
-	0x00000bbf,
 	0x00000bbd,
+	0x00000bbb,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -236,19 +236,19 @@
 	0x000005d3,
 	0x00000003,
 	0x00000002,
-	0x0000069d,
+	0x0000069b,
 	0x00040004,
 	0x00000000,
-	0x000006b9,
+	0x000006b7,
 	0x00010005,
 	0x00000000,
-	0x000006d6,
+	0x000006d4,
 	0x00010006,
 	0x00000000,
 	0x0000065b,
 	0x00000007,
 	0x00000000,
-	0x000006e1,
+	0x000006df,
 /* 0x03c4: memx_func_tail */
 /* 0x03c4: memx_ts_start */
 	0x00000000,
@@ -1372,432 +1372,432 @@
 /* 0x065b: memx_func_wait_vblank */
 	0x9800f840,
 	0x66b00016,
-	0x130bf400,
+	0x120bf400,
 	0xf40166b0,
 	0x0ef4060b,
 /* 0x066d: memx_func_wait_vblank_head1 */
-	0x2077f12e,
-	0x070ef400,
-/* 0x0674: memx_func_wait_vblank_head0 */
-	0x000877f1,
-/* 0x0678: memx_func_wait_vblank_0 */
-	0x07c467f1,
-	0xcf0664b6,
-	0x67fd0066,
-	0xf31bf404,
-/* 0x0688: memx_func_wait_vblank_1 */
-	0x07c467f1,
-	0xcf0664b6,
-	0x67fd0066,
-	0xf30bf404,
-/* 0x0698: memx_func_wait_vblank_fini */
-	0xf80410b6,
-/* 0x069d: memx_func_wr32 */
-	0x00169800,
-	0xb6011598,
-	0x60f90810,
-	0xd0fc50f9,
-	0x21f4e0fc,
-	0x0242b640,
-	0xf8e91bf4,
-/* 0x06b9: memx_func_wait */
-	0x2c87f000,
-	0xcf0684b6,
-	0x1e980088,
-	0x011d9800,
-	0x98021c98,
-	0x10b6031b,
-	0xa321f410,
-/* 0x06d6: memx_func_delay */
-	0x1e9800f8,
-	0x0410b600,
-	0xf87e21f4,
-/* 0x06e1: memx_func_train */
-/* 0x06e3: memx_exec */
-	0xf900f800,
-	0xb9d0f9e0,
-	0xb2b902c1,
-/* 0x06ed: memx_exec_next */
-	0x00139802,
-	0xe70410b6,
-	0xe701f034,
-	0xb601e033,
-	0x30f00132,
-	0xde35980c,
-	0x12b855f9,
-	0xe41ef406,
-	0x98f10b98,
-	0xcbbbf20c,
-	0xc4b7f102,
-	0x06b4b607,
-	0xfc00bbcf,
-	0xf5e0fcd0,
-	0xf8033621,
-/* 0x0729: memx_info */
-	0x01c67000,
-/* 0x072f: memx_info_data */
-	0xf10e0bf4,
-	0xf103ccc7,
-	0xf40800b7,
-/* 0x073a: memx_info_train */
-	0xc7f10b0e,
-	0xb7f10bcc,
-/* 0x0742: memx_info_send */
-	0x21f50100,
-	0x00f80336,
-/* 0x0748: memx_recv */
-	0xf401d6b0,
-	0xd6b0980b,
-	0xd80bf400,
-/* 0x0756: memx_init */
+	0x2077f02c,
+/* 0x0673: memx_func_wait_vblank_head0 */
+	0xf0060ef4,
+/* 0x0676: memx_func_wait_vblank_0 */
+	0x67f10877,
+	0x64b607c4,
+	0x0066cf06,
+	0xf40467fd,
+/* 0x0686: memx_func_wait_vblank_1 */
+	0x67f1f31b,
+	0x64b607c4,
+	0x0066cf06,
+	0xf40467fd,
+/* 0x0696: memx_func_wait_vblank_fini */
+	0x10b6f30b,
+/* 0x069b: memx_func_wr32 */
+	0x9800f804,
+	0x15980016,
+	0x0810b601,
+	0x50f960f9,
+	0xe0fcd0fc,
+	0xb64021f4,
+	0x1bf40242,
+/* 0x06b7: memx_func_wait */
+	0xf000f8e9,
+	0x84b62c87,
+	0x0088cf06,
+	0x98001e98,
+	0x1c98011d,
+	0x031b9802,
+	0xf41010b6,
+	0x00f8a321,
+/* 0x06d4: memx_func_delay */
+	0xb6001e98,
+	0x21f40410,
+/* 0x06df: memx_func_train */
+	0xf800f87e,
+/* 0x06e1: memx_exec */
+	0xf9e0f900,
+	0x02c1b9d0,
+/* 0x06eb: memx_exec_next */
+	0x9802b2b9,
+	0x10b60013,
+	0xf034e704,
+	0xe033e701,
+	0x0132b601,
+	0x980c30f0,
+	0x55f9de35,
+	0xf40612b8,
+	0x0b98e41e,
+	0xf20c98f1,
+	0xf102cbbb,
+	0xb607c4b7,
+	0xbbcf06b4,
+	0xfcd0fc00,
+	0x3621f5e0,
+/* 0x0727: memx_info */
+	0x7000f803,
+	0x0bf401c6,
+/* 0x072d: memx_info_data */
+	0xccc7f10e,
+	0x00b7f103,
+	0x0b0ef408,
+/* 0x0738: memx_info_train */
+	0x0bccc7f1,
+	0x0100b7f1,
+/* 0x0740: memx_info_send */
+	0x033621f5,
+/* 0x0746: memx_recv */
+	0xd6b000f8,
+	0x980bf401,
+	0xf400d6b0,
+	0x00f8d80b,
+/* 0x0754: memx_init */
+/* 0x0756: perf_recv */
 	0x00f800f8,
-/* 0x0758: perf_recv */
-/* 0x075a: perf_init */
-	0x00f800f8,
-/* 0x075c: i2c_drive_scl */
-	0xf40036b0,
-	0x07f1110b,
-	0x04b607e0,
-	0x0001d006,
-	0x00f804bd,
-/* 0x0770: i2c_drive_scl_lo */
-	0x07e407f1,
-	0xd00604b6,
-	0x04bd0001,
-/* 0x077e: i2c_drive_sda */
+/* 0x0758: perf_init */
+/* 0x075a: i2c_drive_scl */
 	0x36b000f8,
 	0x110bf400,
 	0x07e007f1,
 	0xd00604b6,
-	0x04bd0002,
-/* 0x0792: i2c_drive_sda_lo */
+	0x04bd0001,
+/* 0x076e: i2c_drive_scl_lo */
 	0x07f100f8,
 	0x04b607e4,
+	0x0001d006,
+	0x00f804bd,
+/* 0x077c: i2c_drive_sda */
+	0xf40036b0,
+	0x07f1110b,
+	0x04b607e0,
 	0x0002d006,
 	0x00f804bd,
-/* 0x07a0: i2c_sense_scl */
-	0xf10132f4,
-	0xb607c437,
-	0x33cf0634,
-	0x0431fd00,
-	0xf4060bf4,
-/* 0x07b6: i2c_sense_scl_done */
-	0x00f80131,
-/* 0x07b8: i2c_sense_sda */
-	0xf10132f4,
-	0xb607c437,
-	0x33cf0634,
-	0x0432fd00,
-	0xf4060bf4,
-/* 0x07ce: i2c_sense_sda_done */
-	0x00f80131,
-/* 0x07d0: i2c_raise_scl */
-	0x47f140f9,
-	0x37f00898,
-	0x5c21f501,
-/* 0x07dd: i2c_raise_scl_wait */
+/* 0x0790: i2c_drive_sda_lo */
+	0x07e407f1,
+	0xd00604b6,
+	0x04bd0002,
+/* 0x079e: i2c_sense_scl */
+	0x32f400f8,
+	0xc437f101,
+	0x0634b607,
+	0xfd0033cf,
+	0x0bf40431,
+	0x0131f406,
+/* 0x07b4: i2c_sense_scl_done */
+/* 0x07b6: i2c_sense_sda */
+	0x32f400f8,
+	0xc437f101,
+	0x0634b607,
+	0xfd0033cf,
+	0x0bf40432,
+	0x0131f406,
+/* 0x07cc: i2c_sense_sda_done */
+/* 0x07ce: i2c_raise_scl */
+	0x40f900f8,
+	0x089847f1,
+	0xf50137f0,
+/* 0x07db: i2c_raise_scl_wait */
+	0xf1075a21,
+	0xf403e8e7,
+	0x21f57e21,
+	0x01f4079e,
+	0x0142b609,
+/* 0x07ef: i2c_raise_scl_done */
+	0xfcef1bf4,
+/* 0x07f3: i2c_start */
+	0xf500f840,
+	0xf4079e21,
+	0x21f50d11,
+	0x11f407b6,
+	0x300ef406,
+/* 0x0804: i2c_start_rep */
+	0xf50037f0,
+	0xf0075a21,
+	0x21f50137,
+	0x76bb077c,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0xf550fc04,
+	0xb607ce21,
+	0x11f40464,
+/* 0x0831: i2c_start_send */
+	0x0037f01f,
+	0x077c21f5,
+	0x1388e7f1,
+	0xf07e21f4,
+	0x21f50037,
+	0xe7f1075a,
+	0x21f41388,
+/* 0x084d: i2c_start_out */
+/* 0x084f: i2c_stop */
+	0xf000f87e,
+	0x21f50037,
+	0x37f0075a,
+	0x7c21f500,
 	0xe8e7f107,
 	0x7e21f403,
-	0x07a021f5,
-	0xb60901f4,
-	0x1bf40142,
-/* 0x07f1: i2c_raise_scl_done */
-	0xf840fcef,
-/* 0x07f5: i2c_start */
-	0xa021f500,
-	0x0d11f407,
-	0x07b821f5,
-	0xf40611f4,
-/* 0x0806: i2c_start_rep */
-	0x37f0300e,
-	0x5c21f500,
-	0x0137f007,
-	0x077e21f5,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0xd021f550,
-	0x0464b607,
-/* 0x0833: i2c_start_send */
-	0xf01f11f4,
-	0x21f50037,
-	0xe7f1077e,
-	0x21f41388,
-	0x0037f07e,
-	0x075c21f5,
-	0x1388e7f1,
-/* 0x084f: i2c_start_out */
-	0xf87e21f4,
-/* 0x0851: i2c_stop */
-	0x0037f000,
-	0x075c21f5,
-	0xf50037f0,
-	0xf1077e21,
-	0xf403e8e7,
-	0x37f07e21,
-	0x5c21f501,
-	0x88e7f107,
-	0x7e21f413,
 	0xf50137f0,
-	0xf1077e21,
+	0xf1075a21,
 	0xf41388e7,
-	0x00f87e21,
-/* 0x0884: i2c_bitw */
-	0x077e21f5,
-	0x03e8e7f1,
-	0xbb7e21f4,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x07d021f5,
-	0xf40464b6,
-	0xe7f11811,
-	0x21f41388,
-	0x0037f07e,
-	0x075c21f5,
-	0x1388e7f1,
-/* 0x08c3: i2c_bitw_out */
-	0xf87e21f4,
-/* 0x08c5: i2c_bitr */
-	0x0137f000,
-	0x077e21f5,
-	0x03e8e7f1,
-	0xbb7e21f4,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x07d021f5,
-	0xf40464b6,
-	0x21f51b11,
-	0x37f007b8,
-	0x5c21f500,
+	0x37f07e21,
+	0x7c21f501,
 	0x88e7f107,
 	0x7e21f413,
-	0xf4013cf0,
-/* 0x090a: i2c_bitr_done */
-	0x00f80131,
-/* 0x090c: i2c_get_byte */
-	0xf00057f0,
-/* 0x0912: i2c_get_byte_next */
-	0x54b60847,
-	0x0076bb01,
-	0xf90465b6,
-	0x04659450,
-	0xbd0256bb,
-	0x0475fd50,
-	0x21f550fc,
-	0x64b608c5,
-	0x2b11f404,
-	0xb60553fd,
-	0x1bf40142,
-	0x0137f0d8,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0x8421f550,
-	0x0464b608,
-/* 0x095c: i2c_get_byte_done */
-/* 0x095e: i2c_put_byte */
-	0x47f000f8,
-/* 0x0961: i2c_put_byte_next */
-	0x0142b608,
-	0xbb3854ff,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x088421f5,
-	0xf40464b6,
-	0x46b03411,
-	0xd81bf400,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0xc521f550,
-	0x0464b608,
-	0xbb0f11f4,
-	0x36b00076,
-	0x061bf401,
-/* 0x09b7: i2c_put_byte_done */
-	0xf80132f4,
-/* 0x09b9: i2c_addr */
-	0x0076bb00,
-	0xf90465b6,
-	0x04659450,
-	0xbd0256bb,
-	0x0475fd50,
-	0x21f550fc,
-	0x64b607f5,
-	0x2911f404,
-	0x012ec3e7,
-	0xfd0134b6,
-	0x76bb0553,
-	0x0465b600,
-	0x659450f9,
-	0x0256bb04,
-	0x75fd50bd,
-	0xf550fc04,
-	0xb6095e21,
-/* 0x09fe: i2c_addr_done */
-	0x00f80464,
-/* 0x0a00: i2c_acquire_addr */
-	0xb6f8cec7,
-	0xe0b702e4,
-	0xee980d1c,
-/* 0x0a0f: i2c_acquire */
-	0xf500f800,
-	0xf40a0021,
-	0xd9f00421,
-	0x4021f403,
-/* 0x0a1e: i2c_release */
+/* 0x0882: i2c_bitw */
 	0x21f500f8,
-	0x21f40a00,
-	0x03daf004,
-	0xf84021f4,
-/* 0x0a2d: i2c_recv */
-	0x0132f400,
-	0xb6f8c1c7,
-	0x16b00214,
-	0x3a1ff528,
-	0xf413a001,
-	0x0032980c,
-	0x0ccc13a0,
-	0xf4003198,
-	0xd0f90231,
-	0xd0f9e0f9,
-	0x000067f1,
-	0x100063f1,
-	0xbb016792,
+	0xe7f1077c,
+	0x21f403e8,
+	0x0076bb7e,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x21f550fc,
+	0x64b607ce,
+	0x1811f404,
+	0x1388e7f1,
+	0xf07e21f4,
+	0x21f50037,
+	0xe7f1075a,
+	0x21f41388,
+/* 0x08c1: i2c_bitw_out */
+/* 0x08c3: i2c_bitr */
+	0xf000f87e,
+	0x21f50137,
+	0xe7f1077c,
+	0x21f403e8,
+	0x0076bb7e,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x21f550fc,
+	0x64b607ce,
+	0x1b11f404,
+	0x07b621f5,
+	0xf50037f0,
+	0xf1075a21,
+	0xf41388e7,
+	0x3cf07e21,
+	0x0131f401,
+/* 0x0908: i2c_bitr_done */
+/* 0x090a: i2c_get_byte */
+	0x57f000f8,
+	0x0847f000,
+/* 0x0910: i2c_get_byte_next */
+	0xbb0154b6,
 	0x65b60076,
 	0x9450f904,
 	0x56bb0465,
 	0xfd50bd02,
 	0x50fc0475,
-	0x0a0f21f5,
-	0xfc0464b6,
-	0x00d6b0d0,
-	0x00b31bf5,
-	0xbb0057f0,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x09b921f5,
-	0xf50464b6,
-	0xc700d011,
-	0x76bbe0c5,
+	0x08c321f5,
+	0xf40464b6,
+	0x53fd2b11,
+	0x0142b605,
+	0xf0d81bf4,
+	0x76bb0137,
 	0x0465b600,
 	0x659450f9,
 	0x0256bb04,
 	0x75fd50bd,
 	0xf550fc04,
-	0xb6095e21,
-	0x11f50464,
-	0x57f000ad,
+	0xb6088221,
+/* 0x095a: i2c_get_byte_done */
+	0x00f80464,
+/* 0x095c: i2c_put_byte */
+/* 0x095f: i2c_put_byte_next */
+	0xb60847f0,
+	0x54ff0142,
+	0x0076bb38,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x21f550fc,
+	0x64b60882,
+	0x3411f404,
+	0xf40046b0,
+	0x76bbd81b,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0xf550fc04,
+	0xb608c321,
+	0x11f40464,
+	0x0076bb0f,
+	0xf40136b0,
+	0x32f4061b,
+/* 0x09b5: i2c_put_byte_done */
+/* 0x09b7: i2c_addr */
+	0xbb00f801,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x07f321f5,
+	0xf40464b6,
+	0xc3e72911,
+	0x34b6012e,
+	0x0553fd01,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x5c21f550,
+	0x0464b609,
+/* 0x09fc: i2c_addr_done */
+/* 0x09fe: i2c_acquire_addr */
+	0xcec700f8,
+	0x02e4b6f8,
+	0x0d1ce0b7,
+	0xf800ee98,
+/* 0x0a0d: i2c_acquire */
+	0xfe21f500,
+	0x0421f409,
+	0xf403d9f0,
+	0x00f84021,
+/* 0x0a1c: i2c_release */
+	0x09fe21f5,
+	0xf00421f4,
+	0x21f403da,
+/* 0x0a2b: i2c_recv */
+	0xf400f840,
+	0xc1c70132,
+	0x0214b6f8,
+	0xf52816b0,
+	0xa0013a1f,
+	0x980cf413,
+	0x13a00032,
+	0x31980ccc,
+	0x0231f400,
+	0xe0f9d0f9,
+	0x67f1d0f9,
+	0x63f10000,
+	0x67921000,
 	0x0076bb01,
 	0xf90465b6,
 	0x04659450,
 	0xbd0256bb,
 	0x0475fd50,
 	0x21f550fc,
-	0x64b609b9,
-	0x8a11f504,
+	0x64b60a0d,
+	0xb0d0fc04,
+	0x1bf500d6,
+	0x57f000b3,
 	0x0076bb00,
 	0xf90465b6,
 	0x04659450,
 	0xbd0256bb,
 	0x0475fd50,
 	0x21f550fc,
-	0x64b6090c,
-	0x6a11f404,
-	0xbbe05bcb,
+	0x64b609b7,
+	0xd011f504,
+	0xe0c5c700,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x5c21f550,
+	0x0464b609,
+	0x00ad11f5,
+	0xbb0157f0,
 	0x65b60076,
 	0x9450f904,
 	0x56bb0465,
 	0xfd50bd02,
 	0x50fc0475,
-	0x085121f5,
-	0xb90464b6,
-	0x74bd025b,
-/* 0x0b33: i2c_recv_not_rd08 */
-	0xb0430ef4,
-	0x1bf401d6,
-	0x0057f03d,
-	0x09b921f5,
-	0xc73311f4,
-	0x21f5e0c5,
-	0x11f4095e,
-	0x0057f029,
-	0x09b921f5,
-	0xc71f11f4,
-	0x21f5e0b5,
-	0x11f4095e,
-	0x5121f515,
-	0xc774bd08,
-	0x1bf408c5,
-	0x0232f409,
-/* 0x0b73: i2c_recv_not_wr08 */
-/* 0x0b73: i2c_recv_done */
-	0xc7030ef4,
-	0x21f5f8ce,
-	0xe0fc0a1e,
-	0x12f4d0fc,
-	0x027cb90a,
-	0x033621f5,
-/* 0x0b88: i2c_recv_exit */
-/* 0x0b8a: i2c_init */
-	0x00f800f8,
-/* 0x0b8c: test_recv */
-	0x05d817f1,
-	0xcf0614b6,
-	0x10b60011,
-	0xd807f101,
-	0x0604b605,
-	0xbd0001d0,
-	0x00e7f104,
-	0x4fe3f1d9,
-	0x5621f513,
-/* 0x0bb3: test_init */
-	0xf100f802,
-	0xf50800e7,
-	0xf8025621,
-/* 0x0bbd: idle_recv */
-/* 0x0bbf: idle */
-	0xf400f800,
-	0x17f10031,
-	0x14b605d4,
+	0x09b721f5,
+	0xf50464b6,
+	0xbb008a11,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x090a21f5,
+	0xf40464b6,
+	0x5bcb6a11,
+	0x0076bbe0,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x21f550fc,
+	0x64b6084f,
+	0x025bb904,
+	0x0ef474bd,
+/* 0x0b31: i2c_recv_not_rd08 */
+	0x01d6b043,
+	0xf03d1bf4,
+	0x21f50057,
+	0x11f409b7,
+	0xe0c5c733,
+	0x095c21f5,
+	0xf02911f4,
+	0x21f50057,
+	0x11f409b7,
+	0xe0b5c71f,
+	0x095c21f5,
+	0xf51511f4,
+	0xbd084f21,
+	0x08c5c774,
+	0xf4091bf4,
+	0x0ef40232,
+/* 0x0b71: i2c_recv_not_wr08 */
+/* 0x0b71: i2c_recv_done */
+	0xf8cec703,
+	0x0a1c21f5,
+	0xd0fce0fc,
+	0xb90a12f4,
+	0x21f5027c,
+/* 0x0b86: i2c_recv_exit */
+	0x00f80336,
+/* 0x0b88: i2c_init */
+/* 0x0b8a: test_recv */
+	0x17f100f8,
+	0x14b605d8,
 	0x0011cf06,
 	0xf10110b6,
-	0xb605d407,
+	0xb605d807,
 	0x01d00604,
-/* 0x0bdb: idle_loop */
-	0xf004bd00,
-	0x32f45817,
-/* 0x0be1: idle_proc */
-/* 0x0be1: idle_proc_exec */
-	0xb910f902,
-	0x21f5021e,
-	0x10fc033f,
-	0xf40911f4,
-	0x0ef40231,
-/* 0x0bf5: idle_proc_next */
-	0x5810b6ef,
-	0xf4061fb8,
-	0x02f4e61b,
-	0x0028f4dd,
-	0x00bb0ef4,
+	0xf104bd00,
+	0xf1d900e7,
+	0xf5134fe3,
+	0xf8025621,
+/* 0x0bb1: test_init */
+	0x00e7f100,
+	0x5621f508,
+/* 0x0bbb: idle_recv */
+	0xf800f802,
+/* 0x0bbd: idle */
+	0x0031f400,
+	0x05d417f1,
+	0xcf0614b6,
+	0x10b60011,
+	0xd407f101,
+	0x0604b605,
+	0xbd0001d0,
+/* 0x0bd9: idle_loop */
+	0x5817f004,
+/* 0x0bdf: idle_proc */
+/* 0x0bdf: idle_proc_exec */
+	0xf90232f4,
+	0x021eb910,
+	0x033f21f5,
+	0x11f410fc,
+	0x0231f409,
+/* 0x0bf3: idle_proc_next */
+	0xb6ef0ef4,
+	0x1fb85810,
+	0xe61bf406,
+	0xf4dd02f4,
+	0x0ef40028,
+	0x000000bb,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
index 3c731ff..9582224 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
@@ -46,8 +46,8 @@
 	0x00000000,
 	0x00000000,
 	0x584d454d,
-	0x000005f3,
-	0x000005e5,
+	0x000005ee,
+	0x000005e0,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -68,8 +68,8 @@
 	0x00000000,
 	0x00000000,
 	0x46524550,
-	0x000005f7,
-	0x000005f5,
+	0x000005f2,
+	0x000005f0,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -90,8 +90,8 @@
 	0x00000000,
 	0x00000000,
 	0x5f433249,
-	0x000009f8,
-	0x000008a2,
+	0x000009f3,
+	0x0000089d,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -112,8 +112,8 @@
 	0x00000000,
 	0x00000000,
 	0x54534554,
-	0x00000a16,
-	0x000009fa,
+	0x00000a11,
+	0x000009f5,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -134,8 +134,8 @@
 	0x00000000,
 	0x00000000,
 	0x454c4449,
-	0x00000a21,
-	0x00000a1f,
+	0x00000a1c,
+	0x00000a1a,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -233,22 +233,22 @@
 /* 0x037c: memx_func_next */
 	0x00000002,
 	0x00000000,
-	0x000004cf,
+	0x000004cc,
 	0x00000003,
 	0x00000002,
-	0x00000546,
+	0x00000541,
 	0x00040004,
 	0x00000000,
-	0x00000563,
+	0x0000055e,
 	0x00010005,
 	0x00000000,
-	0x0000057d,
+	0x00000578,
 	0x00010006,
 	0x00000000,
-	0x00000541,
+	0x0000053c,
 	0x00000007,
 	0x00000000,
-	0x00000589,
+	0x00000584,
 /* 0x03c4: memx_func_tail */
 /* 0x03c4: memx_ts_start */
 	0x00000000,
@@ -1238,454 +1238,454 @@
 	0x0001f604,
 	0x00f804bd,
 /* 0x045c: memx_func_enter */
-	0x162067f1,
-	0xf55d77f1,
-	0x047e6eb2,
-	0xd8b20000,
-	0xf90487fd,
-	0xfc80f960,
-	0x7ee0fcd0,
-	0x0700002d,
-	0x7e6eb2fe,
+	0x47162046,
+	0x6eb2f55d,
+	0x0000047e,
+	0x87fdd8b2,
+	0xf960f904,
+	0xfcd0fc80,
+	0x002d7ee0,
+	0xb2fe0700,
+	0x00047e6e,
+	0xfdd8b200,
+	0x60f90487,
+	0xd0fc80f9,
+	0x2d7ee0fc,
+	0xf0460000,
+	0x7e6eb226,
 	0xb2000004,
 	0x0487fdd8,
 	0x80f960f9,
 	0xe0fcd0fc,
 	0x00002d7e,
-	0x26f067f1,
-	0x047e6eb2,
-	0xd8b20000,
-	0xf90487fd,
-	0xfc80f960,
-	0x7ee0fcd0,
-	0x0600002d,
-	0x07e04004,
-	0xbd0006f6,
-/* 0x04b9: memx_func_enter_wait */
-	0x07c04604,
-	0xf00066cf,
-	0x0bf40464,
-	0xcf2c06f7,
-	0x06b50066,
-/* 0x04cf: memx_func_leave */
-	0x0600f8f1,
-	0x0066cf2c,
-	0x06f206b5,
-	0x07e44004,
-	0xbd0006f6,
-/* 0x04e1: memx_func_leave_wait */
-	0x07c04604,
-	0xf00066cf,
-	0x1bf40464,
-	0xf067f1f7,
+	0xe0400406,
+	0x0006f607,
+/* 0x04b6: memx_func_enter_wait */
+	0xc04604bd,
+	0x0066cf07,
+	0xf40464f0,
+	0x2c06f70b,
+	0xb50066cf,
+	0x00f8f106,
+/* 0x04cc: memx_func_leave */
+	0x66cf2c06,
+	0xf206b500,
+	0xe4400406,
+	0x0006f607,
+/* 0x04de: memx_func_leave_wait */
+	0xc04604bd,
+	0x0066cf07,
+	0xf40464f0,
+	0xf046f71b,
 	0xb2010726,
 	0x00047e6e,
 	0xfdd8b200,
 	0x60f90587,
 	0xd0fc80f9,
 	0x2d7ee0fc,
-	0x67f10000,
-	0x6eb21620,
-	0x0000047e,
-	0x87fdd8b2,
-	0xf960f905,
-	0xfcd0fc80,
-	0x002d7ee0,
-	0x0aa24700,
-	0x047e6eb2,
-	0xd8b20000,
-	0xf90587fd,
-	0xfc80f960,
-	0x7ee0fcd0,
-	0xf800002d,
-/* 0x0541: memx_func_wait_vblank */
-	0x0410b600,
-/* 0x0546: memx_func_wr32 */
-	0x169800f8,
-	0x01159800,
-	0xf90810b6,
-	0xfc50f960,
-	0x7ee0fcd0,
-	0xb600002d,
-	0x1bf40242,
-/* 0x0563: memx_func_wait */
-	0x0800f8e8,
-	0x0088cf2c,
-	0x98001e98,
-	0x1c98011d,
-	0x031b9802,
-	0x7e1010b6,
-	0xf8000074,
-/* 0x057d: memx_func_delay */
-	0x001e9800,
-	0x7e0410b6,
-	0xf8000058,
-/* 0x0589: memx_func_train */
-/* 0x058b: memx_exec */
-	0xf900f800,
-	0xb2d0f9e0,
-/* 0x0593: memx_exec_next */
-	0x98b2b2c1,
-	0x10b60013,
-	0xf034e704,
-	0xe033e701,
-	0x0132b601,
-	0x980c30f0,
-	0x55f9de35,
-	0x1ef412a6,
-	0xf10b98e5,
-	0xbbf20c98,
-	0xc44b02cb,
-	0x00bbcf07,
+	0x20460000,
+	0x7e6eb216,
+	0xb2000004,
+	0x0587fdd8,
+	0x80f960f9,
 	0xe0fcd0fc,
-	0x00029f7e,
-/* 0x05ca: memx_info */
-	0xc67000f8,
-	0x0c0bf401,
-/* 0x05d0: memx_info_data */
-	0x4b03cc4c,
-	0x0ef40800,
-/* 0x05d9: memx_info_train */
-	0x0bcc4c09,
-/* 0x05df: memx_info_send */
-	0x7e01004b,
-	0xf800029f,
-/* 0x05e5: memx_recv */
-	0x01d6b000,
-	0xb0a30bf4,
-	0x0bf400d6,
-/* 0x05f3: memx_init */
-	0xf800f8dc,
-/* 0x05f5: perf_recv */
-/* 0x05f7: perf_init */
-	0xf800f800,
-/* 0x05f9: i2c_drive_scl */
-	0x0036b000,
-	0x400d0bf4,
-	0x01f607e0,
-	0xf804bd00,
-/* 0x0609: i2c_drive_scl_lo */
-	0x07e44000,
-	0xbd0001f6,
-/* 0x0613: i2c_drive_sda */
-	0xb000f804,
-	0x0bf40036,
-	0x07e0400d,
-	0xbd0002f6,
-/* 0x0623: i2c_drive_sda_lo */
-	0x4000f804,
-	0x02f607e4,
-	0xf804bd00,
-/* 0x062d: i2c_sense_scl */
-	0x0132f400,
-	0xcf07c443,
-	0x31fd0033,
-	0x060bf404,
-/* 0x063f: i2c_sense_scl_done */
-	0xf80131f4,
-/* 0x0641: i2c_sense_sda */
-	0x0132f400,
-	0xcf07c443,
-	0x32fd0033,
-	0x060bf404,
-/* 0x0653: i2c_sense_sda_done */
-	0xf80131f4,
-/* 0x0655: i2c_raise_scl */
-	0x4440f900,
-	0x01030898,
-	0x0005f97e,
-/* 0x0660: i2c_raise_scl_wait */
-	0x7e03e84e,
-	0x7e000058,
-	0xf400062d,
-	0x42b60901,
-	0xef1bf401,
-/* 0x0674: i2c_raise_scl_done */
-	0x00f840fc,
-/* 0x0678: i2c_start */
-	0x00062d7e,
-	0x7e0d11f4,
-	0xf4000641,
-	0x0ef40611,
-/* 0x0689: i2c_start_rep */
-	0x7e00032e,
-	0x030005f9,
-	0x06137e01,
-	0x0076bb00,
-	0xf90465b6,
-	0x04659450,
-	0xbd0256bb,
-	0x0475fd50,
-	0x557e50fc,
-	0x64b60006,
-	0x1d11f404,
-/* 0x06b4: i2c_start_send */
-	0x137e0003,
-	0x884e0006,
-	0x00587e13,
-	0x7e000300,
-	0x4e0005f9,
-	0x587e1388,
-/* 0x06ce: i2c_start_out */
+	0x00002d7e,
+	0xb20aa247,
+	0x00047e6e,
+	0xfdd8b200,
+	0x60f90587,
+	0xd0fc80f9,
+	0x2d7ee0fc,
 	0x00f80000,
-/* 0x06d0: i2c_stop */
-	0xf97e0003,
-	0x00030005,
-	0x0006137e,
-	0x7e03e84e,
-	0x03000058,
-	0x05f97e01,
+/* 0x053c: memx_func_wait_vblank */
+	0xf80410b6,
+/* 0x0541: memx_func_wr32 */
+	0x00169800,
+	0xb6011598,
+	0x60f90810,
+	0xd0fc50f9,
+	0x2d7ee0fc,
+	0x42b60000,
+	0xe81bf402,
+/* 0x055e: memx_func_wait */
+	0x2c0800f8,
+	0x980088cf,
+	0x1d98001e,
+	0x021c9801,
+	0xb6031b98,
+	0x747e1010,
+	0x00f80000,
+/* 0x0578: memx_func_delay */
+	0xb6001e98,
+	0x587e0410,
+	0x00f80000,
+/* 0x0584: memx_func_train */
+/* 0x0586: memx_exec */
+	0xe0f900f8,
+	0xc1b2d0f9,
+/* 0x058e: memx_exec_next */
+	0x1398b2b2,
+	0x0410b600,
+	0x01f034e7,
+	0x01e033e7,
+	0xf00132b6,
+	0x35980c30,
+	0xa655f9de,
+	0xe51ef412,
+	0x98f10b98,
+	0xcbbbf20c,
+	0x07c44b02,
+	0xfc00bbcf,
+	0x7ee0fcd0,
+	0xf800029f,
+/* 0x05c5: memx_info */
+	0x01c67000,
+/* 0x05cb: memx_info_data */
+	0x4c0c0bf4,
+	0x004b03cc,
+	0x090ef408,
+/* 0x05d4: memx_info_train */
+	0x4b0bcc4c,
+/* 0x05da: memx_info_send */
+	0x9f7e0100,
+	0x00f80002,
+/* 0x05e0: memx_recv */
+	0xf401d6b0,
+	0xd6b0a30b,
+	0xdc0bf400,
+/* 0x05ee: memx_init */
+	0x00f800f8,
+/* 0x05f0: perf_recv */
+/* 0x05f2: perf_init */
+	0x00f800f8,
+/* 0x05f4: i2c_drive_scl */
+	0xf40036b0,
+	0xe0400d0b,
+	0x0001f607,
+	0x00f804bd,
+/* 0x0604: i2c_drive_scl_lo */
+	0xf607e440,
+	0x04bd0001,
+/* 0x060e: i2c_drive_sda */
+	0x36b000f8,
+	0x0d0bf400,
+	0xf607e040,
+	0x04bd0002,
+/* 0x061e: i2c_drive_sda_lo */
+	0xe44000f8,
+	0x0002f607,
+	0x00f804bd,
+/* 0x0628: i2c_sense_scl */
+	0x430132f4,
+	0x33cf07c4,
+	0x0431fd00,
+	0xf4060bf4,
+/* 0x063a: i2c_sense_scl_done */
+	0x00f80131,
+/* 0x063c: i2c_sense_sda */
+	0x430132f4,
+	0x33cf07c4,
+	0x0432fd00,
+	0xf4060bf4,
+/* 0x064e: i2c_sense_sda_done */
+	0x00f80131,
+/* 0x0650: i2c_raise_scl */
+	0x984440f9,
+	0x7e010308,
+/* 0x065b: i2c_raise_scl_wait */
+	0x4e0005f4,
+	0x587e03e8,
+	0x287e0000,
+	0x01f40006,
+	0x0142b609,
+/* 0x066f: i2c_raise_scl_done */
+	0xfcef1bf4,
+/* 0x0673: i2c_start */
+	0x7e00f840,
+	0xf4000628,
+	0x3c7e0d11,
+	0x11f40006,
+	0x2e0ef406,
+/* 0x0684: i2c_start_rep */
+	0xf47e0003,
+	0x01030005,
+	0x00060e7e,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x06507e50,
+	0x0464b600,
+/* 0x06af: i2c_start_send */
+	0x031d11f4,
+	0x060e7e00,
 	0x13884e00,
 	0x0000587e,
-	0x137e0103,
-	0x884e0006,
-	0x00587e13,
-/* 0x06ff: i2c_bitw */
-	0x7e00f800,
-	0x4e000613,
-	0x587e03e8,
-	0x76bb0000,
-	0x0465b600,
-	0x659450f9,
-	0x0256bb04,
-	0x75fd50bd,
-	0x7e50fc04,
-	0xb6000655,
-	0x11f40464,
-	0x13884e17,
-	0x0000587e,
-	0xf97e0003,
+	0xf47e0003,
 	0x884e0005,
 	0x00587e13,
-/* 0x073d: i2c_bitw_out */
-/* 0x073f: i2c_bitr */
+/* 0x06c9: i2c_start_out */
+/* 0x06cb: i2c_stop */
 	0x0300f800,
-	0x06137e01,
-	0x03e84e00,
-	0x0000587e,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0x06557e50,
-	0x0464b600,
-	0x7e1a11f4,
-	0x03000641,
-	0x05f97e00,
+	0x05f47e00,
+	0x7e000300,
+	0x4e00060e,
+	0x587e03e8,
+	0x01030000,
+	0x0005f47e,
+	0x7e13884e,
+	0x03000058,
+	0x060e7e01,
 	0x13884e00,
 	0x0000587e,
-	0xf4013cf0,
-/* 0x0782: i2c_bitr_done */
-	0x00f80131,
-/* 0x0784: i2c_get_byte */
-	0x08040005,
-/* 0x0788: i2c_get_byte_next */
-	0xbb0154b6,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x00073f7e,
-	0xf40464b6,
-	0x53fd2a11,
-	0x0142b605,
-	0x03d81bf4,
-	0x0076bb01,
-	0xf90465b6,
-	0x04659450,
-	0xbd0256bb,
-	0x0475fd50,
-	0xff7e50fc,
-	0x64b60006,
-/* 0x07d1: i2c_get_byte_done */
-/* 0x07d3: i2c_put_byte */
-	0x0400f804,
-/* 0x07d5: i2c_put_byte_next */
-	0x0142b608,
-	0xbb3854ff,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x0006ff7e,
-	0xf40464b6,
-	0x46b03411,
-	0xd81bf400,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0x073f7e50,
-	0x0464b600,
-	0xbb0f11f4,
-	0x36b00076,
-	0x061bf401,
-/* 0x082b: i2c_put_byte_done */
-	0xf80132f4,
-/* 0x082d: i2c_addr */
+/* 0x06fa: i2c_bitw */
+	0x0e7e00f8,
+	0xe84e0006,
+	0x00587e03,
 	0x0076bb00,
 	0xf90465b6,
 	0x04659450,
 	0xbd0256bb,
 	0x0475fd50,
-	0x787e50fc,
+	0x507e50fc,
 	0x64b60006,
-	0x2911f404,
-	0x012ec3e7,
-	0xfd0134b6,
-	0x76bb0553,
-	0x0465b600,
-	0x659450f9,
-	0x0256bb04,
-	0x75fd50bd,
-	0x7e50fc04,
-	0xb60007d3,
-/* 0x0872: i2c_addr_done */
-	0x00f80464,
-/* 0x0874: i2c_acquire_addr */
-	0xb6f8cec7,
-	0xe0b705e4,
-	0x00f8d014,
-/* 0x0880: i2c_acquire */
-	0x0008747e,
-	0x0000047e,
-	0x7e03d9f0,
-	0xf800002d,
-/* 0x0891: i2c_release */
-	0x08747e00,
-	0x00047e00,
-	0x03daf000,
-	0x00002d7e,
-/* 0x08a2: i2c_recv */
-	0x32f400f8,
-	0xf8c1c701,
-	0xb00214b6,
-	0x1ff52816,
-	0x13b80134,
-	0x98000cf4,
-	0x13b80032,
-	0x98000ccc,
-	0x31f40031,
-	0xf9d0f902,
-	0xd6d0f9e0,
-	0x10000000,
-	0xbb016792,
+	0x1711f404,
+	0x7e13884e,
+	0x03000058,
+	0x05f47e00,
+	0x13884e00,
+	0x0000587e,
+/* 0x0738: i2c_bitw_out */
+/* 0x073a: i2c_bitr */
+	0x010300f8,
+	0x00060e7e,
+	0x7e03e84e,
+	0xbb000058,
 	0x65b60076,
 	0x9450f904,
 	0x56bb0465,
 	0xfd50bd02,
 	0x50fc0475,
-	0x0008807e,
-	0xfc0464b6,
-	0x00d6b0d0,
-	0x00b01bf5,
-	0x76bb0005,
-	0x0465b600,
-	0x659450f9,
-	0x0256bb04,
-	0x75fd50bd,
-	0x7e50fc04,
-	0xb600082d,
-	0x11f50464,
-	0xc5c700cc,
-	0x0076bbe0,
-	0xf90465b6,
-	0x04659450,
-	0xbd0256bb,
-	0x0475fd50,
-	0xd37e50fc,
-	0x64b60007,
-	0xa911f504,
-	0xbb010500,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x00082d7e,
-	0xf50464b6,
-	0xbb008711,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x0007847e,
+	0x0006507e,
 	0xf40464b6,
-	0x5bcb6711,
-	0x0076bbe0,
+	0x3c7e1a11,
+	0x00030006,
+	0x0005f47e,
+	0x7e13884e,
+	0xf0000058,
+	0x31f4013c,
+/* 0x077d: i2c_bitr_done */
+/* 0x077f: i2c_get_byte */
+	0x0500f801,
+/* 0x0783: i2c_get_byte_next */
+	0xb6080400,
+	0x76bb0154,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0x7e50fc04,
+	0xb600073a,
+	0x11f40464,
+	0x0553fd2a,
+	0xf40142b6,
+	0x0103d81b,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x06fa7e50,
+	0x0464b600,
+/* 0x07cc: i2c_get_byte_done */
+/* 0x07ce: i2c_put_byte */
+	0x080400f8,
+/* 0x07d0: i2c_put_byte_next */
+	0xff0142b6,
+	0x76bb3854,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0x7e50fc04,
+	0xb60006fa,
+	0x11f40464,
+	0x0046b034,
+	0xbbd81bf4,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x00073a7e,
+	0xf40464b6,
+	0x76bb0f11,
+	0x0136b000,
+	0xf4061bf4,
+/* 0x0826: i2c_put_byte_done */
+	0x00f80132,
+/* 0x0828: i2c_addr */
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x06737e50,
+	0x0464b600,
+	0xe72911f4,
+	0xb6012ec3,
+	0x53fd0134,
+	0x0076bb05,
 	0xf90465b6,
 	0x04659450,
 	0xbd0256bb,
 	0x0475fd50,
-	0xd07e50fc,
-	0x64b60006,
-	0xbd5bb204,
-	0x410ef474,
-/* 0x09a4: i2c_recv_not_rd08 */
-	0xf401d6b0,
-	0x00053b1b,
-	0x00082d7e,
-	0xc73211f4,
-	0xd37ee0c5,
-	0x11f40007,
-	0x7e000528,
-	0xf400082d,
-	0xb5c71f11,
-	0x07d37ee0,
-	0x1511f400,
-	0x0006d07e,
-	0xc5c774bd,
-	0x091bf408,
-	0xf40232f4,
-/* 0x09e2: i2c_recv_not_wr08 */
-/* 0x09e2: i2c_recv_done */
-	0xcec7030e,
-	0x08917ef8,
-	0xfce0fc00,
-	0x0912f4d0,
-	0x9f7e7cb2,
-/* 0x09f6: i2c_recv_exit */
-	0x00f80002,
-/* 0x09f8: i2c_init */
-/* 0x09fa: test_recv */
-	0x584100f8,
-	0x0011cf04,
-	0x400110b6,
-	0x01f60458,
-	0xde04bd00,
-	0x134fd900,
-	0x0001de7e,
-/* 0x0a16: test_init */
-	0x004e00f8,
-	0x01de7e08,
-/* 0x0a1f: idle_recv */
+	0xce7e50fc,
+	0x64b60007,
+/* 0x086d: i2c_addr_done */
+/* 0x086f: i2c_acquire_addr */
+	0xc700f804,
+	0xe4b6f8ce,
+	0x14e0b705,
+/* 0x087b: i2c_acquire */
+	0x7e00f8d0,
+	0x7e00086f,
+	0xf0000004,
+	0x2d7e03d9,
+	0x00f80000,
+/* 0x088c: i2c_release */
+	0x00086f7e,
+	0x0000047e,
+	0x7e03daf0,
+	0xf800002d,
+/* 0x089d: i2c_recv */
+	0x0132f400,
+	0xb6f8c1c7,
+	0x16b00214,
+	0x341ff528,
+	0xf413b801,
+	0x3298000c,
+	0xcc13b800,
+	0x3198000c,
+	0x0231f400,
+	0xe0f9d0f9,
+	0x00d6d0f9,
+	0x92100000,
+	0x76bb0167,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0x7e50fc04,
+	0xb600087b,
+	0xd0fc0464,
+	0xf500d6b0,
+	0x0500b01b,
+	0x0076bb00,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x287e50fc,
+	0x64b60008,
+	0xcc11f504,
+	0xe0c5c700,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x07ce7e50,
+	0x0464b600,
+	0x00a911f5,
+	0x76bb0105,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0x7e50fc04,
+	0xb6000828,
+	0x11f50464,
+	0x76bb0087,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0x7e50fc04,
+	0xb600077f,
+	0x11f40464,
+	0xe05bcb67,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x06cb7e50,
+	0x0464b600,
+	0x74bd5bb2,
+/* 0x099f: i2c_recv_not_rd08 */
+	0xb0410ef4,
+	0x1bf401d6,
+	0x7e00053b,
+	0xf4000828,
+	0xc5c73211,
+	0x07ce7ee0,
+	0x2811f400,
+	0x287e0005,
+	0x11f40008,
+	0xe0b5c71f,
+	0x0007ce7e,
+	0x7e1511f4,
+	0xbd0006cb,
+	0x08c5c774,
+	0xf4091bf4,
+	0x0ef40232,
+/* 0x09dd: i2c_recv_not_wr08 */
+/* 0x09dd: i2c_recv_done */
+	0xf8cec703,
+	0x00088c7e,
+	0xd0fce0fc,
+	0xb20912f4,
+	0x029f7e7c,
+/* 0x09f1: i2c_recv_exit */
+/* 0x09f3: i2c_init */
 	0xf800f800,
-/* 0x0a21: idle */
-	0x0031f400,
-	0xcf045441,
-	0x10b60011,
-	0x04544001,
-	0xbd0001f6,
-/* 0x0a35: idle_loop */
-	0xf4580104,
-/* 0x0a3a: idle_proc */
-/* 0x0a3a: idle_proc_exec */
-	0x10f90232,
-	0xa87e1eb2,
-	0x10fc0002,
-	0xf40911f4,
-	0x0ef40231,
-/* 0x0a4d: idle_proc_next */
-	0x5810b6f0,
-	0x1bf41fa6,
-	0xe002f4e8,
-	0xf40028f4,
-	0x0000c60e,
+/* 0x09f5: test_recv */
+	0x04584100,
+	0xb60011cf,
+	0x58400110,
+	0x0001f604,
+	0x00de04bd,
+	0x7e134fd9,
+	0xf80001de,
+/* 0x0a11: test_init */
+	0x08004e00,
+	0x0001de7e,
+/* 0x0a1a: idle_recv */
+	0x00f800f8,
+/* 0x0a1c: idle */
+	0x410031f4,
+	0x11cf0454,
+	0x0110b600,
+	0xf6045440,
+	0x04bd0001,
+/* 0x0a30: idle_loop */
+	0x32f45801,
+/* 0x0a35: idle_proc */
+/* 0x0a35: idle_proc_exec */
+	0xb210f902,
+	0x02a87e1e,
+	0xf410fc00,
+	0x31f40911,
+	0xf00ef402,
+/* 0x0a48: idle_proc_next */
+	0xa65810b6,
+	0xe81bf41f,
+	0xf4e002f4,
+	0x0ef40028,
+	0x000000c6,
+	0x00000000,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
index e833418..e29b785 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
@@ -46,8 +46,8 @@
 	0x00000000,
 	0x00000000,
 	0x584d454d,
-	0x0000083a,
-	0x0000082c,
+	0x00000833,
+	0x00000825,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -68,8 +68,8 @@
 	0x00000000,
 	0x00000000,
 	0x46524550,
-	0x0000083e,
-	0x0000083c,
+	0x00000837,
+	0x00000835,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -90,8 +90,8 @@
 	0x00000000,
 	0x00000000,
 	0x5f433249,
-	0x00000c6e,
-	0x00000b11,
+	0x00000c67,
+	0x00000b0a,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -112,8 +112,8 @@
 	0x00000000,
 	0x00000000,
 	0x54534554,
-	0x00000c97,
-	0x00000c70,
+	0x00000c90,
+	0x00000c69,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -134,8 +134,8 @@
 	0x00000000,
 	0x00000000,
 	0x454c4449,
-	0x00000ca3,
-	0x00000ca1,
+	0x00000c9c,
+	0x00000c9a,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -233,22 +233,22 @@
 /* 0x037c: memx_func_next */
 	0x00000002,
 	0x00000000,
-	0x000005a0,
+	0x0000059f,
 	0x00000003,
 	0x00000002,
-	0x00000632,
+	0x0000062f,
 	0x00040004,
 	0x00000000,
-	0x0000064e,
+	0x0000064b,
 	0x00010005,
 	0x00000000,
-	0x0000066b,
+	0x00000668,
 	0x00010006,
 	0x00000000,
-	0x000005f0,
+	0x000005ef,
 	0x00000007,
 	0x00000000,
-	0x00000676,
+	0x00000673,
 /* 0x03c4: memx_func_tail */
 /* 0x03c4: memx_ts_start */
 	0x00000000,
@@ -1304,560 +1304,560 @@
 	0x67f102d7,
 	0x63f1fffc,
 	0x76fdffff,
-	0x0267f104,
-	0x0576fd00,
-	0x70f980f9,
-	0xe0fcd0fc,
-	0xf04021f4,
+	0x0267f004,
+	0xf90576fd,
+	0xfc70f980,
+	0xf4e0fcd0,
+	0x67f04021,
+	0xe007f104,
+	0x0604b607,
+	0xbd0006d0,
+/* 0x0581: memx_func_enter_wait */
+	0xc067f104,
+	0x0664b607,
+	0xf00066cf,
+	0x0bf40464,
+	0x2c67f0f3,
+	0xcf0664b6,
+	0x06800066,
+/* 0x059f: memx_func_leave */
+	0xf000f8f1,
+	0x64b62c67,
+	0x0066cf06,
+	0xf0f20680,
 	0x07f10467,
-	0x04b607e0,
+	0x04b607e4,
 	0x0006d006,
-/* 0x0582: memx_func_enter_wait */
+/* 0x05ba: memx_func_leave_wait */
 	0x67f104bd,
 	0x64b607c0,
 	0x0066cf06,
 	0xf40464f0,
-	0x67f0f30b,
-	0x0664b62c,
-	0x800066cf,
-	0x00f8f106,
-/* 0x05a0: memx_func_leave */
-	0xb62c67f0,
-	0x66cf0664,
-	0xf2068000,
-	0xf10467f0,
-	0xb607e407,
-	0x06d00604,
-/* 0x05bb: memx_func_leave_wait */
-	0xf104bd00,
-	0xb607c067,
-	0x66cf0664,
-	0x0464f000,
-	0xf1f31bf4,
-	0xb9161087,
-	0x21f4028e,
-	0x02d7b904,
-	0xffcc67f1,
-	0xffff63f1,
-	0xf90476fd,
-	0xfc70f980,
-	0xf4e0fcd0,
-	0x00f84021,
-/* 0x05f0: memx_func_wait_vblank */
-	0xb0001698,
-	0x0bf40066,
-	0x0166b013,
-	0xf4060bf4,
-/* 0x0602: memx_func_wait_vblank_head1 */
-	0x77f12e0e,
-	0x0ef40020,
-/* 0x0609: memx_func_wait_vblank_head0 */
-	0x0877f107,
-/* 0x060d: memx_func_wait_vblank_0 */
-	0xc467f100,
-	0x0664b607,
-	0xfd0066cf,
-	0x1bf40467,
-/* 0x061d: memx_func_wait_vblank_1 */
-	0xc467f1f3,
-	0x0664b607,
-	0xfd0066cf,
-	0x0bf40467,
-/* 0x062d: memx_func_wait_vblank_fini */
-	0x0410b6f3,
-/* 0x0632: memx_func_wr32 */
-	0x169800f8,
-	0x01159800,
-	0xf90810b6,
-	0xfc50f960,
-	0xf4e0fcd0,
-	0x42b64021,
-	0xe91bf402,
-/* 0x064e: memx_func_wait */
-	0x87f000f8,
-	0x0684b62c,
-	0x980088cf,
-	0x1d98001e,
-	0x021c9801,
-	0xb6031b98,
-	0x21f41010,
-/* 0x066b: memx_func_delay */
-	0x9800f8a3,
-	0x10b6001e,
-	0x7e21f404,
-/* 0x0676: memx_func_train */
-	0x57f100f8,
-	0x77f10003,
-	0x97f10000,
-	0x93f00000,
-	0x029eb970,
-	0xb90421f4,
-	0xe7f102d8,
-	0x21f42710,
-/* 0x0695: memx_func_train_loop_outer */
-	0x0158e07e,
-	0x0083f101,
-	0xe097f102,
-	0x1193f011,
-	0x80f990f9,
+	0x87f1f31b,
+	0x8eb91610,
+	0x0421f402,
+	0xf102d7b9,
+	0xf1ffcc67,
+	0xfdffff63,
+	0x80f90476,
+	0xd0fc70f9,
+	0x21f4e0fc,
+/* 0x05ef: memx_func_wait_vblank */
+	0x9800f840,
+	0x66b00016,
+	0x120bf400,
+	0xf40166b0,
+	0x0ef4060b,
+/* 0x0601: memx_func_wait_vblank_head1 */
+	0x2077f02c,
+/* 0x0607: memx_func_wait_vblank_head0 */
+	0xf0060ef4,
+/* 0x060a: memx_func_wait_vblank_0 */
+	0x67f10877,
+	0x64b607c4,
+	0x0066cf06,
+	0xf40467fd,
+/* 0x061a: memx_func_wait_vblank_1 */
+	0x67f1f31b,
+	0x64b607c4,
+	0x0066cf06,
+	0xf40467fd,
+/* 0x062a: memx_func_wait_vblank_fini */
+	0x10b6f30b,
+/* 0x062f: memx_func_wr32 */
+	0x9800f804,
+	0x15980016,
+	0x0810b601,
+	0x50f960f9,
 	0xe0fcd0fc,
-	0xf94021f4,
-	0x0067f150,
-/* 0x06b5: memx_func_train_loop_inner */
-	0x1187f100,
-	0x9068ff11,
-	0xfd109894,
-	0x97f10589,
-	0x93f00720,
-	0xf990f910,
-	0xfcd0fc80,
-	0x4021f4e0,
-	0x008097f1,
-	0xb91093f0,
-	0x21f4029e,
-	0x02d8b904,
-	0xf92088c5,
+	0xb64021f4,
+	0x1bf40242,
+/* 0x064b: memx_func_wait */
+	0xf000f8e9,
+	0x84b62c87,
+	0x0088cf06,
+	0x98001e98,
+	0x1c98011d,
+	0x031b9802,
+	0xf41010b6,
+	0x00f8a321,
+/* 0x0668: memx_func_delay */
+	0xb6001e98,
+	0x21f40410,
+/* 0x0673: memx_func_train */
+	0xf000f87e,
+	0x77f00357,
+	0x0097f100,
+	0x7093f000,
+	0xf4029eb9,
+	0xd8b90421,
+	0x10e7f102,
+	0x7e21f427,
+/* 0x0690: memx_func_train_loop_outer */
+	0x010158e0,
+	0x020083f1,
+	0x11e097f1,
+	0xf91193f0,
+	0xfc80f990,
+	0xf4e0fcd0,
+	0x50f94021,
+/* 0x06af: memx_func_train_loop_inner */
+	0xf10067f0,
+	0xff111187,
+	0x98949068,
+	0x0589fd10,
+	0x072097f1,
+	0xf91093f0,
 	0xfc80f990,
 	0xf4e0fcd0,
 	0x97f14021,
-	0x93f0053c,
-	0x0287f110,
-	0x0083f130,
-	0xf990f980,
+	0x93f00080,
+	0x029eb910,
+	0xb90421f4,
+	0x88c502d8,
+	0xf990f920,
 	0xfcd0fc80,
 	0x4021f4e0,
-	0x0560e7f1,
-	0xf110e3f0,
-	0xf10000d7,
-	0x908000d3,
-	0xb7f100dc,
-	0xb3f08480,
-	0xa321f41e,
-	0x000057f1,
-	0xffff97f1,
-	0x830093f1,
-/* 0x0734: memx_func_train_loop_4x */
-	0x0080a7f1,
-	0xb910a3f0,
-	0x21f402ae,
-	0x02d8b904,
-	0xffdfb7f1,
-	0xffffb3f1,
-	0xf9048bfd,
-	0xfc80f9a0,
+	0x053c97f1,
+	0xf11093f0,
+	0xf1300287,
+	0xf9800083,
+	0xfc80f990,
 	0xf4e0fcd0,
-	0xa7f14021,
-	0xa3f0053c,
-	0x0287f110,
-	0x0083f130,
-	0xf9a0f980,
-	0xfcd0fc80,
-	0x4021f4e0,
-	0x0560e7f1,
-	0xf110e3f0,
-	0xf10000d7,
-	0xb98000d3,
-	0xb7f102dc,
-	0xb3f02710,
-	0xa321f400,
-	0xf402eeb9,
-	0xddb90421,
-	0x949dff02,
+	0xe7f14021,
+	0xe3f00560,
+	0x00d7f110,
+	0x00d3f100,
+	0x00dc9080,
+	0x8480b7f1,
+	0xf41eb3f0,
+	0x57f0a321,
+	0xff97f100,
+	0x0093f1ff,
+/* 0x072d: memx_func_train_loop_4x */
+	0x80a7f183,
+	0x10a3f000,
+	0xf402aeb9,
+	0xd8b90421,
+	0xdfb7f102,
+	0xffb3f1ff,
+	0x048bfdff,
+	0x80f9a0f9,
+	0xe0fcd0fc,
+	0xf14021f4,
+	0xf0053ca7,
+	0x87f110a3,
+	0x83f13002,
+	0xa0f98000,
+	0xd0fc80f9,
+	0x21f4e0fc,
+	0x60e7f140,
+	0x10e3f005,
+	0x0000d7f1,
+	0x8000d3f1,
+	0xf102dcb9,
+	0xf02710b7,
+	0x21f400b3,
+	0x02eeb9a3,
+	0xb90421f4,
+	0x9dff02dd,
+	0x0150b694,
+	0xf4045670,
+	0x7aa0921e,
+	0xa9800bcc,
+	0x0160b600,
+	0x700470b6,
+	0x1ef51066,
+	0x50fcff01,
 	0x700150b6,
-	0x1ef40456,
-	0xcc7aa092,
-	0x00a9800b,
-	0xb60160b6,
-	0x66700470,
-	0x001ef510,
-	0xb650fcff,
-	0x56700150,
-	0xd41ef507,
-/* 0x07c7: memx_exec */
-	0xf900f8fe,
-	0xb9d0f9e0,
-	0xb2b902c1,
-/* 0x07d1: memx_exec_next */
-	0x00139802,
-	0xe70410b6,
-	0xe701f034,
-	0xb601e033,
-	0x30f00132,
-	0xde35980c,
-	0x12b855f9,
-	0xe41ef406,
-	0x98f10b98,
-	0xcbbbf20c,
-	0xc4b7f102,
-	0x06b4b607,
-	0xfc00bbcf,
-	0xf5e0fcd0,
+	0x1ef50756,
+	0x00f8fed6,
+/* 0x07c0: memx_exec */
+	0xd0f9e0f9,
+	0xb902c1b9,
+/* 0x07ca: memx_exec_next */
+	0x139802b2,
+	0x0410b600,
+	0x01f034e7,
+	0x01e033e7,
+	0xf00132b6,
+	0x35980c30,
+	0xb855f9de,
+	0x1ef40612,
+	0xf10b98e4,
+	0xbbf20c98,
+	0xb7f102cb,
+	0xb4b607c4,
+	0x00bbcf06,
+	0xe0fcd0fc,
+	0x033621f5,
+/* 0x0806: memx_info */
+	0xc67000f8,
+	0x0e0bf401,
+/* 0x080c: memx_info_data */
+	0x03ccc7f1,
+	0x0800b7f1,
+/* 0x0817: memx_info_train */
+	0xf10b0ef4,
+	0xf10bccc7,
+/* 0x081f: memx_info_send */
+	0xf50100b7,
 	0xf8033621,
-/* 0x080d: memx_info */
-	0x01c67000,
-/* 0x0813: memx_info_data */
-	0xf10e0bf4,
-	0xf103ccc7,
-	0xf40800b7,
-/* 0x081e: memx_info_train */
-	0xc7f10b0e,
-	0xb7f10bcc,
-/* 0x0826: memx_info_send */
-	0x21f50100,
-	0x00f80336,
-/* 0x082c: memx_recv */
-	0xf401d6b0,
-	0xd6b0980b,
-	0xd80bf400,
-/* 0x083a: memx_init */
-	0x00f800f8,
-/* 0x083c: perf_recv */
-/* 0x083e: perf_init */
-	0x00f800f8,
-/* 0x0840: i2c_drive_scl */
-	0xf40036b0,
-	0x07f1110b,
-	0x04b607e0,
-	0x0001d006,
-	0x00f804bd,
-/* 0x0854: i2c_drive_scl_lo */
-	0x07e407f1,
-	0xd00604b6,
-	0x04bd0001,
-/* 0x0862: i2c_drive_sda */
-	0x36b000f8,
-	0x110bf400,
-	0x07e007f1,
-	0xd00604b6,
-	0x04bd0002,
-/* 0x0876: i2c_drive_sda_lo */
-	0x07f100f8,
-	0x04b607e4,
-	0x0002d006,
-	0x00f804bd,
-/* 0x0884: i2c_sense_scl */
-	0xf10132f4,
-	0xb607c437,
-	0x33cf0634,
-	0x0431fd00,
-	0xf4060bf4,
-/* 0x089a: i2c_sense_scl_done */
-	0x00f80131,
-/* 0x089c: i2c_sense_sda */
-	0xf10132f4,
-	0xb607c437,
-	0x33cf0634,
-	0x0432fd00,
-	0xf4060bf4,
-/* 0x08b2: i2c_sense_sda_done */
-	0x00f80131,
-/* 0x08b4: i2c_raise_scl */
-	0x47f140f9,
-	0x37f00898,
-	0x4021f501,
-/* 0x08c1: i2c_raise_scl_wait */
+/* 0x0825: memx_recv */
+	0x01d6b000,
+	0xb0980bf4,
+	0x0bf400d6,
+/* 0x0833: memx_init */
+	0xf800f8d8,
+/* 0x0835: perf_recv */
+/* 0x0837: perf_init */
+	0xf800f800,
+/* 0x0839: i2c_drive_scl */
+	0x0036b000,
+	0xf1110bf4,
+	0xb607e007,
+	0x01d00604,
+	0xf804bd00,
+/* 0x084d: i2c_drive_scl_lo */
+	0xe407f100,
+	0x0604b607,
+	0xbd0001d0,
+/* 0x085b: i2c_drive_sda */
+	0xb000f804,
+	0x0bf40036,
+	0xe007f111,
+	0x0604b607,
+	0xbd0002d0,
+/* 0x086f: i2c_drive_sda_lo */
+	0xf100f804,
+	0xb607e407,
+	0x02d00604,
+	0xf804bd00,
+/* 0x087d: i2c_sense_scl */
+	0x0132f400,
+	0x07c437f1,
+	0xcf0634b6,
+	0x31fd0033,
+	0x060bf404,
+/* 0x0893: i2c_sense_scl_done */
+	0xf80131f4,
+/* 0x0895: i2c_sense_sda */
+	0x0132f400,
+	0x07c437f1,
+	0xcf0634b6,
+	0x32fd0033,
+	0x060bf404,
+/* 0x08ab: i2c_sense_sda_done */
+	0xf80131f4,
+/* 0x08ad: i2c_raise_scl */
+	0xf140f900,
+	0xf0089847,
+	0x21f50137,
+/* 0x08ba: i2c_raise_scl_wait */
+	0xe7f10839,
+	0x21f403e8,
+	0x7d21f57e,
+	0x0901f408,
+	0xf40142b6,
+/* 0x08ce: i2c_raise_scl_done */
+	0x40fcef1b,
+/* 0x08d2: i2c_start */
+	0x21f500f8,
+	0x11f4087d,
+	0x9521f50d,
+	0x0611f408,
+/* 0x08e3: i2c_start_rep */
+	0xf0300ef4,
+	0x21f50037,
+	0x37f00839,
+	0x5b21f501,
+	0x0076bb08,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x21f550fc,
+	0x64b608ad,
+	0x1f11f404,
+/* 0x0910: i2c_start_send */
+	0xf50037f0,
+	0xf1085b21,
+	0xf41388e7,
+	0x37f07e21,
+	0x3921f500,
+	0x88e7f108,
+	0x7e21f413,
+/* 0x092c: i2c_start_out */
+/* 0x092e: i2c_stop */
+	0x37f000f8,
+	0x3921f500,
+	0x0037f008,
+	0x085b21f5,
+	0x03e8e7f1,
+	0xf07e21f4,
+	0x21f50137,
+	0xe7f10839,
+	0x21f41388,
+	0x0137f07e,
+	0x085b21f5,
+	0x1388e7f1,
+	0xf87e21f4,
+/* 0x0961: i2c_bitw */
+	0x5b21f500,
 	0xe8e7f108,
 	0x7e21f403,
-	0x088421f5,
-	0xb60901f4,
-	0x1bf40142,
-/* 0x08d5: i2c_raise_scl_done */
-	0xf840fcef,
-/* 0x08d9: i2c_start */
-	0x8421f500,
-	0x0d11f408,
-	0x089c21f5,
-	0xf40611f4,
-/* 0x08ea: i2c_start_rep */
-	0x37f0300e,
-	0x4021f500,
-	0x0137f008,
-	0x086221f5,
 	0xb60076bb,
 	0x50f90465,
 	0xbb046594,
 	0x50bd0256,
 	0xfc0475fd,
-	0xb421f550,
+	0xad21f550,
 	0x0464b608,
-/* 0x0917: i2c_start_send */
-	0xf01f11f4,
-	0x21f50037,
-	0xe7f10862,
-	0x21f41388,
-	0x0037f07e,
-	0x084021f5,
-	0x1388e7f1,
-/* 0x0933: i2c_start_out */
-	0xf87e21f4,
-/* 0x0935: i2c_stop */
-	0x0037f000,
-	0x084021f5,
-	0xf50037f0,
-	0xf1086221,
-	0xf403e8e7,
-	0x37f07e21,
-	0x4021f501,
-	0x88e7f108,
-	0x7e21f413,
-	0xf50137f0,
-	0xf1086221,
+	0xf11811f4,
 	0xf41388e7,
-	0x00f87e21,
-/* 0x0968: i2c_bitw */
-	0x086221f5,
-	0x03e8e7f1,
-	0xbb7e21f4,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x08b421f5,
-	0xf40464b6,
-	0xe7f11811,
-	0x21f41388,
-	0x0037f07e,
-	0x084021f5,
-	0x1388e7f1,
-/* 0x09a7: i2c_bitw_out */
-	0xf87e21f4,
-/* 0x09a9: i2c_bitr */
-	0x0137f000,
-	0x086221f5,
-	0x03e8e7f1,
-	0xbb7e21f4,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x08b421f5,
-	0xf40464b6,
-	0x21f51b11,
-	0x37f0089c,
-	0x4021f500,
+	0x37f07e21,
+	0x3921f500,
 	0x88e7f108,
 	0x7e21f413,
-	0xf4013cf0,
-/* 0x09ee: i2c_bitr_done */
-	0x00f80131,
-/* 0x09f0: i2c_get_byte */
-	0xf00057f0,
-/* 0x09f6: i2c_get_byte_next */
-	0x54b60847,
+/* 0x09a0: i2c_bitw_out */
+/* 0x09a2: i2c_bitr */
+	0x37f000f8,
+	0x5b21f501,
+	0xe8e7f108,
+	0x7e21f403,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0xad21f550,
+	0x0464b608,
+	0xf51b11f4,
+	0xf0089521,
+	0x21f50037,
+	0xe7f10839,
+	0x21f41388,
+	0x013cf07e,
+/* 0x09e7: i2c_bitr_done */
+	0xf80131f4,
+/* 0x09e9: i2c_get_byte */
+	0x0057f000,
+/* 0x09ef: i2c_get_byte_next */
+	0xb60847f0,
+	0x76bb0154,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0xf550fc04,
+	0xb609a221,
+	0x11f40464,
+	0x0553fd2b,
+	0xf40142b6,
+	0x37f0d81b,
 	0x0076bb01,
 	0xf90465b6,
 	0x04659450,
 	0xbd0256bb,
 	0x0475fd50,
 	0x21f550fc,
-	0x64b609a9,
-	0x2b11f404,
-	0xb60553fd,
-	0x1bf40142,
-	0x0137f0d8,
+	0x64b60961,
+/* 0x0a39: i2c_get_byte_done */
+/* 0x0a3b: i2c_put_byte */
+	0xf000f804,
+/* 0x0a3e: i2c_put_byte_next */
+	0x42b60847,
+	0x3854ff01,
 	0xb60076bb,
 	0x50f90465,
 	0xbb046594,
 	0x50bd0256,
 	0xfc0475fd,
-	0x6821f550,
+	0x6121f550,
 	0x0464b609,
-/* 0x0a40: i2c_get_byte_done */
-/* 0x0a42: i2c_put_byte */
-	0x47f000f8,
-/* 0x0a45: i2c_put_byte_next */
-	0x0142b608,
-	0xbb3854ff,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x096821f5,
-	0xf40464b6,
-	0x46b03411,
-	0xd81bf400,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0xa921f550,
-	0x0464b609,
-	0xbb0f11f4,
-	0x36b00076,
-	0x061bf401,
-/* 0x0a9b: i2c_put_byte_done */
-	0xf80132f4,
-/* 0x0a9d: i2c_addr */
-	0x0076bb00,
+	0xb03411f4,
+	0x1bf40046,
+	0x0076bbd8,
 	0xf90465b6,
 	0x04659450,
 	0xbd0256bb,
 	0x0475fd50,
 	0x21f550fc,
-	0x64b608d9,
-	0x2911f404,
-	0x012ec3e7,
-	0xfd0134b6,
-	0x76bb0553,
+	0x64b609a2,
+	0x0f11f404,
+	0xb00076bb,
+	0x1bf40136,
+	0x0132f406,
+/* 0x0a94: i2c_put_byte_done */
+/* 0x0a96: i2c_addr */
+	0x76bb00f8,
 	0x0465b600,
 	0x659450f9,
 	0x0256bb04,
 	0x75fd50bd,
 	0xf550fc04,
-	0xb60a4221,
-/* 0x0ae2: i2c_addr_done */
-	0x00f80464,
-/* 0x0ae4: i2c_acquire_addr */
-	0xb6f8cec7,
-	0xe0b702e4,
-	0xee980d1c,
-/* 0x0af3: i2c_acquire */
-	0xf500f800,
-	0xf40ae421,
-	0xd9f00421,
+	0xb608d221,
+	0x11f40464,
+	0x2ec3e729,
+	0x0134b601,
+	0xbb0553fd,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x0a3b21f5,
+/* 0x0adb: i2c_addr_done */
+	0xf80464b6,
+/* 0x0add: i2c_acquire_addr */
+	0xf8cec700,
+	0xb702e4b6,
+	0x980d1ce0,
+	0x00f800ee,
+/* 0x0aec: i2c_acquire */
+	0x0add21f5,
+	0xf00421f4,
+	0x21f403d9,
+/* 0x0afb: i2c_release */
+	0xf500f840,
+	0xf40add21,
+	0xdaf00421,
 	0x4021f403,
-/* 0x0b02: i2c_release */
-	0x21f500f8,
-	0x21f40ae4,
-	0x03daf004,
-	0xf84021f4,
-/* 0x0b11: i2c_recv */
-	0x0132f400,
-	0xb6f8c1c7,
-	0x16b00214,
-	0x3a1ff528,
-	0xf413a001,
-	0x0032980c,
-	0x0ccc13a0,
-	0xf4003198,
-	0xd0f90231,
-	0xd0f9e0f9,
-	0x000067f1,
-	0x100063f1,
-	0xbb016792,
+/* 0x0b0a: i2c_recv */
+	0x32f400f8,
+	0xf8c1c701,
+	0xb00214b6,
+	0x1ff52816,
+	0x13a0013a,
+	0x32980cf4,
+	0xcc13a000,
+	0x0031980c,
+	0xf90231f4,
+	0xf9e0f9d0,
+	0x0067f1d0,
+	0x0063f100,
+	0x01679210,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0xec21f550,
+	0x0464b60a,
+	0xd6b0d0fc,
+	0xb31bf500,
+	0x0057f000,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x9621f550,
+	0x0464b60a,
+	0x00d011f5,
+	0xbbe0c5c7,
 	0x65b60076,
 	0x9450f904,
 	0x56bb0465,
 	0xfd50bd02,
 	0x50fc0475,
-	0x0af321f5,
-	0xfc0464b6,
-	0x00d6b0d0,
-	0x00b31bf5,
-	0xbb0057f0,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x0a9d21f5,
+	0x0a3b21f5,
 	0xf50464b6,
-	0xc700d011,
-	0x76bbe0c5,
+	0xf000ad11,
+	0x76bb0157,
 	0x0465b600,
 	0x659450f9,
 	0x0256bb04,
 	0x75fd50bd,
 	0xf550fc04,
-	0xb60a4221,
+	0xb60a9621,
 	0x11f50464,
-	0x57f000ad,
-	0x0076bb01,
-	0xf90465b6,
-	0x04659450,
-	0xbd0256bb,
-	0x0475fd50,
-	0x21f550fc,
-	0x64b60a9d,
-	0x8a11f504,
-	0x0076bb00,
-	0xf90465b6,
-	0x04659450,
-	0xbd0256bb,
-	0x0475fd50,
-	0x21f550fc,
-	0x64b609f0,
-	0x6a11f404,
-	0xbbe05bcb,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x093521f5,
-	0xb90464b6,
-	0x74bd025b,
-/* 0x0c17: i2c_recv_not_rd08 */
-	0xb0430ef4,
-	0x1bf401d6,
-	0x0057f03d,
-	0x0a9d21f5,
-	0xc73311f4,
-	0x21f5e0c5,
-	0x11f40a42,
-	0x0057f029,
-	0x0a9d21f5,
-	0xc71f11f4,
-	0x21f5e0b5,
-	0x11f40a42,
-	0x3521f515,
-	0xc774bd09,
-	0x1bf408c5,
-	0x0232f409,
-/* 0x0c57: i2c_recv_not_wr08 */
-/* 0x0c57: i2c_recv_done */
-	0xc7030ef4,
-	0x21f5f8ce,
-	0xe0fc0b02,
-	0x12f4d0fc,
-	0x027cb90a,
-	0x033621f5,
-/* 0x0c6c: i2c_recv_exit */
-/* 0x0c6e: i2c_init */
+	0x76bb008a,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0xf550fc04,
+	0xb609e921,
+	0x11f40464,
+	0xe05bcb6a,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x2e21f550,
+	0x0464b609,
+	0xbd025bb9,
+	0x430ef474,
+/* 0x0c10: i2c_recv_not_rd08 */
+	0xf401d6b0,
+	0x57f03d1b,
+	0x9621f500,
+	0x3311f40a,
+	0xf5e0c5c7,
+	0xf40a3b21,
+	0x57f02911,
+	0x9621f500,
+	0x1f11f40a,
+	0xf5e0b5c7,
+	0xf40a3b21,
+	0x21f51511,
+	0x74bd092e,
+	0xf408c5c7,
+	0x32f4091b,
+	0x030ef402,
+/* 0x0c50: i2c_recv_not_wr08 */
+/* 0x0c50: i2c_recv_done */
+	0xf5f8cec7,
+	0xfc0afb21,
+	0xf4d0fce0,
+	0x7cb90a12,
+	0x3621f502,
+/* 0x0c65: i2c_recv_exit */
+/* 0x0c67: i2c_init */
+	0xf800f803,
+/* 0x0c69: test_recv */
+	0xd817f100,
+	0x0614b605,
+	0xb60011cf,
+	0x07f10110,
+	0x04b605d8,
+	0x0001d006,
+	0xe7f104bd,
+	0xe3f1d900,
+	0x21f5134f,
+	0x00f80256,
+/* 0x0c90: test_init */
+	0x0800e7f1,
+	0x025621f5,
+/* 0x0c9a: idle_recv */
 	0x00f800f8,
-/* 0x0c70: test_recv */
-	0x05d817f1,
-	0xcf0614b6,
-	0x10b60011,
-	0xd807f101,
-	0x0604b605,
-	0xbd0001d0,
-	0x00e7f104,
-	0x4fe3f1d9,
-	0x5621f513,
-/* 0x0c97: test_init */
-	0xf100f802,
-	0xf50800e7,
-	0xf8025621,
-/* 0x0ca1: idle_recv */
-/* 0x0ca3: idle */
-	0xf400f800,
-	0x17f10031,
-	0x14b605d4,
-	0x0011cf06,
-	0xf10110b6,
-	0xb605d407,
-	0x01d00604,
-/* 0x0cbf: idle_loop */
-	0xf004bd00,
-	0x32f45817,
-/* 0x0cc5: idle_proc */
-/* 0x0cc5: idle_proc_exec */
-	0xb910f902,
-	0x21f5021e,
-	0x10fc033f,
-	0xf40911f4,
-	0x0ef40231,
-/* 0x0cd9: idle_proc_next */
-	0x5810b6ef,
-	0xf4061fb8,
-	0x02f4e61b,
-	0x0028f4dd,
-	0x00bb0ef4,
+/* 0x0c9c: idle */
+	0xf10031f4,
+	0xb605d417,
+	0x11cf0614,
+	0x0110b600,
+	0x05d407f1,
+	0xd00604b6,
+	0x04bd0001,
+/* 0x0cb8: idle_loop */
+	0xf45817f0,
+/* 0x0cbe: idle_proc */
+/* 0x0cbe: idle_proc_exec */
+	0x10f90232,
+	0xf5021eb9,
+	0xfc033f21,
+	0x0911f410,
+	0xf40231f4,
+/* 0x0cd2: idle_proc_next */
+	0x10b6ef0e,
+	0x061fb858,
+	0xf4e61bf4,
+	0x28f4dd02,
+	0xbb0ef400,
+	0x00000000,
+	0x00000000,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc
index ec03f9a..1663bf9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc
@@ -82,15 +82,15 @@
 // $r0  - zero
 memx_func_enter:
 #if NVKM_PPWR_CHIPSET == GT215
-	movw $r8 0x1610
+	mov $r8 0x1610
 	nv_rd32($r7, $r8)
 	imm32($r6, 0xfffffffc)
 	and $r7 $r6
-	movw $r6 0x2
+	mov $r6 0x2
 	or $r7 $r6
 	nv_wr32($r8, $r7)
 #else
-	movw $r6 0x001620
+	mov $r6 0x001620
 	imm32($r7, ~0x00000aa2);
 	nv_rd32($r8, $r6)
 	and $r8 $r7
@@ -101,7 +101,7 @@
 	and $r8 $r7
 	nv_wr32($r6, $r8)
 
-	movw $r6 0x0026f0
+	mov $r6 0x0026f0
 	nv_rd32($r8, $r6)
 	and $r8 $r7
 	nv_wr32($r6, $r8)
@@ -136,19 +136,19 @@
 		bra nz #memx_func_leave_wait
 
 #if NVKM_PPWR_CHIPSET == GT215
-	movw $r8 0x1610
+	mov $r8 0x1610
 	nv_rd32($r7, $r8)
 	imm32($r6, 0xffffffcc)
 	and $r7 $r6
 	nv_wr32($r8, $r7)
 #else
-	movw $r6 0x0026f0
+	mov $r6 0x0026f0
 	imm32($r7, 0x00000001)
 	nv_rd32($r8, $r6)
 	or $r8 $r7
 	nv_wr32($r6, $r8)
 
-	movw $r6 0x001620
+	mov $r6 0x001620
 	nv_rd32($r8, $r6)
 	or $r8 $r7
 	nv_wr32($r6, $r8)
@@ -177,11 +177,11 @@
 	bra #memx_func_wait_vblank_fini
 
 	memx_func_wait_vblank_head1:
-	movw $r7 0x20
+	mov $r7 0x20
 	bra #memx_func_wait_vblank_0
 
 	memx_func_wait_vblank_head0:
-	movw $r7 0x8
+	mov $r7 0x8
 
 	memx_func_wait_vblank_0:
 		nv_iord($r6, NV_PPWR_INPUT)
@@ -273,13 +273,13 @@
 // $r5 - outer loop counter
 // $r6 - inner loop counter
 // $r7 - entry counter (#memx_train_head + $r7)
-	movw $r5 0x3
-	movw $r7 0x0
+	mov $r5 0x3
+	mov $r7 0x0
 
 // Read random memory to wake up... things
 	imm32($r9, 0x700000)
 	nv_rd32($r8,$r9)
-	movw $r14 0x2710
+	mov $r14 0x2710
 	call(nsec)
 
 	memx_func_train_loop_outer:
@@ -289,9 +289,9 @@
 		nv_wr32($r9, $r8)
 		push $r5
 
-		movw $r6 0x0
+		mov $r6 0x0
 		memx_func_train_loop_inner:
-			movw $r8 0x1111
+			mov $r8 0x1111
 			mulu $r9 $r6 $r8
 			shl b32 $r8 $r9 0x10
 			or $r8 $r9
@@ -315,7 +315,7 @@
 
 			// $r5 - inner inner loop counter
 			// $r9 - result
-			movw $r5 0
+			mov $r5 0
 			imm32($r9, 0x8300ffff)
 			memx_func_train_loop_4x:
 				imm32($r10, 0x100080)
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 505dee0db..ca91651 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -195,7 +195,7 @@
 	size_t size = PAGE_SIZE * n;
 	loff_t off = mmap_offset(obj) +
 			(entry->obj_pgoff << PAGE_SHIFT);
-	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
+	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
 
 	if (m > 1) {
 		int i;
@@ -442,7 +442,7 @@
 	 * into account in some of the math, so figure out virtual stride
 	 * in pages
 	 */
-	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
+	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
 
 	/* We don't use vmf->pgoff since that has the fake offset: */
 	pgoff = ((unsigned long)vmf->virtual_address -
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 6f65846..5b2a9f9 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -1250,7 +1250,7 @@
 		.width = 154,
 		.height = 83,
 	},
-	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
 };
 
 static const struct drm_display_mode ortustech_com43h4m85ulc_mode  = {
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 41b72ce..83e1345 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -238,9 +238,10 @@
 	 * may be slow
 	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
 	 */
-
+#ifndef CONFIG_COMPILE_TEST
 #warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
 	 thanks to write-combining
+#endif
 
 	if (bo->flags & RADEON_GEM_GTT_WC)
 		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 574ab00..b82ef5e 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -5969,9 +5969,9 @@
 {
 	u32 lane_width;
 	u32 new_lane_width =
-		(radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+		((radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
 	u32 current_lane_width =
-		(radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+		((radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
 
 	if (new_lane_width != current_lane_width) {
 		radeon_set_pcie_lanes(rdev, new_lane_width);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index b70f942..cab4d60 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -64,7 +64,6 @@
 	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
 	 */
 	vma->vm_flags &= ~VM_PFNMAP;
-	vma->vm_pgoff = 0;
 
 	ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
 			     obj->size, rk_obj->dma_attrs);
@@ -96,6 +95,12 @@
 	if (ret)
 		return ret;
 
+	/*
+	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
+	 * whole buffer from the start.
+	 */
+	vma->vm_pgoff = 0;
+
 	obj = vma->vm_private_data;
 
 	return rockchip_drm_gem_object_mmap(obj, vma);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 6e3c4ac..32d87c6 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -1386,6 +1386,9 @@
 	usleep_range(10, 20);
 	reset_control_deassert(ahb_rst);
 
+	VOP_INTR_SET_TYPE(vop, clear, INTR_MASK, 1);
+	VOP_INTR_SET_TYPE(vop, enable, INTR_MASK, 0);
+
 	memcpy(vop->regsbak, vop->regs, vop->len);
 
 	for (i = 0; i < vop_data->table_size; i++)
@@ -1541,17 +1544,9 @@
 
 	mutex_init(&vop->vsync_mutex);
 
-	ret = devm_request_irq(dev, vop->irq, vop_isr,
-			       IRQF_SHARED, dev_name(dev), vop);
-	if (ret)
-		return ret;
-
-	/* IRQ is initially disabled; it gets enabled in power_on */
-	disable_irq(vop->irq);
-
 	ret = vop_create_crtc(vop);
 	if (ret)
-		goto err_enable_irq;
+		return ret;
 
 	pm_runtime_enable(&pdev->dev);
 
@@ -1561,13 +1556,19 @@
 		goto err_disable_pm_runtime;
 	}
 
+	ret = devm_request_irq(dev, vop->irq, vop_isr,
+			       IRQF_SHARED, dev_name(dev), vop);
+	if (ret)
+		goto err_disable_pm_runtime;
+
+	/* IRQ is initially disabled; it gets enabled in power_on */
+	disable_irq(vop->irq);
+
 	return 0;
 
 err_disable_pm_runtime:
 	pm_runtime_disable(&pdev->dev);
 	vop_destroy_crtc(vop);
-err_enable_irq:
-	enable_irq(vop->irq); /* To balance out the disable_irq above */
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/sun4i/sun4i_dotclock.c b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
index d401156..4460ca4 100644
--- a/drivers/gpu/drm/sun4i/sun4i_dotclock.c
+++ b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
@@ -129,10 +129,13 @@
 static int sun4i_dclk_set_phase(struct clk_hw *hw, int degrees)
 {
 	struct sun4i_dclk *dclk = hw_to_dclk(hw);
+	u32 val = degrees / 120;
+
+	val <<= 28;
 
 	regmap_update_bits(dclk->regmap, SUN4I_TCON0_IO_POL_REG,
 			   GENMASK(29, 28),
-			   degrees / 120);
+			   val);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 9e77fc0..aad2f4a 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -212,6 +212,11 @@
 	.unbind	= sun4i_drv_unbind,
 };
 
+static bool sun4i_drv_node_is_connector(struct device_node *node)
+{
+	return of_device_is_compatible(node, "hdmi-connector");
+}
+
 static bool sun4i_drv_node_is_frontend(struct device_node *node)
 {
 	return of_device_is_compatible(node, "allwinner,sun5i-a13-display-frontend") ||
@@ -252,6 +257,13 @@
 	    !of_device_is_available(node))
 		return 0;
 
+	/*
+	 * The connectors will be the last nodes in our pipeline, so we
+	 * can just bail out.
+	 */
+	if (sun4i_drv_node_is_connector(node))
+		return 0;
+
 	if (!sun4i_drv_node_is_frontend(node)) {
 		/* Add current component */
 		DRM_DEBUG_DRIVER("Adding component %s\n",
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index ec9023b..d53e805 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -80,6 +80,7 @@
 	struct vc4_dev *vc4 = to_vc4_dev(obj->dev);
 
 	if (bo->validated_shader) {
+		kfree(bo->validated_shader->uniform_addr_offsets);
 		kfree(bo->validated_shader->texture_samples);
 		kfree(bo->validated_shader);
 		bo->validated_shader = NULL;
@@ -328,6 +329,7 @@
 	}
 
 	if (bo->validated_shader) {
+		kfree(bo->validated_shader->uniform_addr_offsets);
 		kfree(bo->validated_shader->texture_samples);
 		kfree(bo->validated_shader);
 		bo->validated_shader = NULL;
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index ab30169..b608cd4 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -110,8 +110,8 @@
 					    &handle);
 
 		if (ret) {
-			state->bo_count = i - 1;
-			goto err;
+			state->bo_count = i;
+			goto err_delete_handle;
 		}
 		bo_state[i].handle = handle;
 		bo_state[i].paddr = vc4_bo->base.paddr;
@@ -123,13 +123,16 @@
 			 state->bo_count * sizeof(*bo_state)))
 		ret = -EFAULT;
 
-	kfree(bo_state);
+err_delete_handle:
+	if (ret) {
+		for (i = 0; i < state->bo_count; i++)
+			drm_gem_handle_delete(file_priv, bo_state[i].handle);
+	}
 
 err_free:
-
 	vc4_free_hang_state(dev, kernel_state);
+	kfree(bo_state);
 
-err:
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 881bf48..7505655 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -533,7 +533,7 @@
 	 * the scl fields here.
 	 */
 	if (num_planes == 1) {
-		scl0 = vc4_get_scl_field(state, 1);
+		scl0 = vc4_get_scl_field(state, 0);
 		scl1 = scl0;
 	} else {
 		scl0 = vc4_get_scl_field(state, 1);
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index 917321c..19a5bde8 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -874,6 +874,7 @@
 fail:
 	kfree(validation_state.branch_targets);
 	if (validated_shader) {
+		kfree(validated_shader->uniform_addr_offsets);
 		kfree(validated_shader->texture_samples);
 		kfree(validated_shader);
 	}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 818478b..5463939 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -194,6 +194,9 @@
 	case VIRTGPU_PARAM_3D_FEATURES:
 		value = vgdev->has_virgl_3d == true ? 1 : 0;
 		break;
+	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
+		value = 1;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -469,7 +472,7 @@
 {
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct drm_virtgpu_get_caps *args = data;
-	int size;
+	unsigned size, host_caps_size;
 	int i;
 	int found_valid = -1;
 	int ret;
@@ -478,6 +481,10 @@
 	if (vgdev->num_capsets == 0)
 		return -ENOSYS;
 
+	/* don't allow userspace to pass 0 */
+	if (args->size == 0)
+		return -EINVAL;
+
 	spin_lock(&vgdev->display_info_lock);
 	for (i = 0; i < vgdev->num_capsets; i++) {
 		if (vgdev->capsets[i].id == args->cap_set_id) {
@@ -493,11 +500,9 @@
 		return -EINVAL;
 	}
 
-	size = vgdev->capsets[found_valid].max_size;
-	if (args->size > size) {
-		spin_unlock(&vgdev->display_info_lock);
-		return -EINVAL;
-	}
+	host_caps_size = vgdev->capsets[found_valid].max_size;
+	/* only copy to user the minimum of the host caps size and the guest caps size */
+	size = min(args->size, host_caps_size);
 
 	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
 		if (cache_ent->id == args->cap_set_id &&
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 5a0f8a7..52436b3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -324,7 +324,7 @@
 	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
 	if (ret == -ENOSPC) {
 		spin_unlock(&vgdev->ctrlq.qlock);
-		wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
+		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
 		spin_lock(&vgdev->ctrlq.qlock);
 		goto retry;
 	} else {
@@ -399,7 +399,7 @@
 	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
 	if (ret == -ENOSPC) {
 		spin_unlock(&vgdev->cursorq.qlock);
-		wait_event(vgdev->cursorq.ack_queue, vq->num_free);
+		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
 		spin_lock(&vgdev->cursorq.qlock);
 		goto retry;
 	} else {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 87086af..33ca24a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -2014,6 +2014,7 @@
 		vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
 					     out_fence, NULL);
 
+	vmw_dmabuf_unreference(&ctx->buf);
 	vmw_resource_unreserve(res, false, NULL, 0);
 	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
index 557a033..8545488 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
@@ -135,17 +135,24 @@
 
 #else
 
-/* In the 32-bit version of this macro, we use "m" because there is no
- * more register left for bp
+/*
+ * In the 32-bit version of this macro, we store bp in a memory location
+ * because we've run out of registers.
+ * Now we can't reference that memory location while we've modified
+ * %esp or %ebp, so we first push it on the stack, just before we push
+ * %ebp, and then when we need it we read it from the stack where we
+ * just pushed it.
  */
 #define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di,	\
 			port_num, magic, bp,		\
 			eax, ebx, ecx, edx, si, di)	\
 ({							\
-	asm volatile ("push %%ebp;"			\
-		"mov %12, %%ebp;"			\
+	asm volatile ("push %12;"			\
+		"push %%ebp;"				\
+		"mov 0x04(%%esp), %%ebp;"		\
 		"rep outsb;"				\
-		"pop %%ebp;" :				\
+		"pop %%ebp;"				\
+		"add $0x04, %%esp;" :			\
 		"=a"(eax),				\
 		"=b"(ebx),				\
 		"=c"(ecx),				\
@@ -167,10 +174,12 @@
 		       port_num, magic, bp,		\
 		       eax, ebx, ecx, edx, si, di)	\
 ({							\
-	asm volatile ("push %%ebp;"			\
-		"mov %12, %%ebp;"			\
+	asm volatile ("push %12;"			\
+		"push %%ebp;"				\
+		"mov 0x04(%%esp), %%ebp;"		\
 		"rep insb;"				\
-		"pop %%ebp" :				\
+		"pop %%ebp;"				\
+		"add $0x04, %%esp;" :			\
 		"=a"(eax),				\
 		"=b"(ebx),				\
 		"=c"(ecx),				\
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 44ecb0b..fbf298d 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -1808,7 +1808,7 @@
 		return ret;
 	}
 	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_PERFCTR_RBBM_0_HI, &val);
-	if (val != *perfctr_pwr_hi) {
+	if (val > *perfctr_pwr_hi) {
 		*perfctr_pwr_hi = val;
 		ret = true;
 	}
@@ -1823,14 +1823,17 @@
 	unsigned int ret = 0;
 	bool overflow = true;
 	static unsigned int perfctr_pwr_hi;
+	unsigned int prev_perfctr_pwr_hi = 0;
 
 	/* Read the value */
 	kgsl_regread(device, reg, &val);
 
 	if (adreno_is_a5xx(adreno_dev) && reg == adreno_getreg
-		(adreno_dev, ADRENO_REG_RBBM_PERFCTR_RBBM_0_LO))
+		(adreno_dev, ADRENO_REG_RBBM_PERFCTR_RBBM_0_LO)) {
+		prev_perfctr_pwr_hi = perfctr_pwr_hi;
 		overflow = is_power_counter_overflow(adreno_dev, reg,
 				*counter, &perfctr_pwr_hi);
+	}
 
 	/* Return 0 for the first read */
 	if (*counter != 0) {
@@ -1843,9 +1846,12 @@
 			 * Since KGSL got abnormal value from the counter,
 			 * We will drop the value from being accumulated.
 			 */
-			pr_warn_once("KGSL: Abnormal value :0x%x (0x%x) from perf counter : 0x%x\n",
-					val, *counter, reg);
-			return 0;
+			KGSL_DRV_ERR_RATELIMIT(device,
+				"Abnormal value:0x%llx (0x%llx) from perf counter : 0x%x\n",
+				val | ((uint64_t)perfctr_pwr_hi << 32),
+				*counter |
+					((uint64_t)prev_perfctr_pwr_hi << 32),
+				reg);
 		}
 	}
 
@@ -1867,8 +1873,11 @@
 		ret = gpudev->oob_set(adreno_dev, OOB_PERFCNTR_SET_MASK,
 				OOB_PERFCNTR_CHECK_MASK,
 				OOB_PERFCNTR_CLEAR_MASK);
-		if (ret)
+		if (ret) {
+			adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
+			adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev));
 			kgsl_active_count_put(KGSL_DEVICE(adreno_dev));
+		}
 	}
 
 	return ret;
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index e5c8222..4f98912 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1332,7 +1332,7 @@
 	iommu_regs = kgsl_mmu_get_prot_regs(&device->mmu);
 	if (iommu_regs)
 		adreno_set_protected_registers(adreno_dev, &index,
-				iommu_regs->base, iommu_regs->range);
+				iommu_regs->base, ilog2(iommu_regs->range));
 }
 
 static void a3xx_start(struct adreno_device *adreno_dev)
diff --git a/drivers/gpu/msm/adreno_a4xx.c b/drivers/gpu/msm/adreno_a4xx.c
index 771d035..432e98d 100644
--- a/drivers/gpu/msm/adreno_a4xx.c
+++ b/drivers/gpu/msm/adreno_a4xx.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -524,7 +524,7 @@
 	iommu_regs = kgsl_mmu_get_prot_regs(&device->mmu);
 	if (iommu_regs)
 		adreno_set_protected_registers(adreno_dev, &index,
-				iommu_regs->base, iommu_regs->range);
+				iommu_regs->base, ilog2(iommu_regs->range));
 }
 
 static struct adreno_snapshot_sizes a4xx_snap_sizes = {
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 876b7c9..68a7c2a 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -386,7 +386,7 @@
 	iommu_regs = kgsl_mmu_get_prot_regs(&device->mmu);
 	if (iommu_regs)
 		adreno_set_protected_registers(adreno_dev, &index,
-				iommu_regs->base, iommu_regs->range);
+				iommu_regs->base, ilog2(iommu_regs->range));
 }
 
 /*
@@ -2281,6 +2281,7 @@
 	switch (ADRENO_GPUREV(adreno_dev)) {
 	case ADRENO_REV_A510:
 		return 0x00000001; /* Ucode workaround for token end syncs */
+	case ADRENO_REV_A504:
 	case ADRENO_REV_A505:
 	case ADRENO_REV_A506:
 	case ADRENO_REV_A530:
diff --git a/drivers/gpu/msm/adreno_a5xx_snapshot.c b/drivers/gpu/msm/adreno_a5xx_snapshot.c
index d1a6005..4bde8c6 100644
--- a/drivers/gpu/msm/adreno_a5xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a5xx_snapshot.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -412,6 +412,15 @@
 	0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0,
 	/* VPC CTX 1 */
 	0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2,
+};
+
+/*
+ * GPMU registers to dump for A5XX on snapshot.
+ * Registers in pairs - first value is the start offset, second
+ * is the stop offset (inclusive)
+ */
+
+static const unsigned int a5xx_gpmu_registers[] = {
 	/* GPMU */
 	0xA800, 0xA8FF, 0xAC60, 0xAC60,
 };
@@ -664,24 +673,23 @@
 	return kgsl_snapshot_dump_registers(device, buf, remain, &pre_cdregs);
 }
 
+struct registers {
+	const unsigned int *regs;
+	size_t size;
+};
+
 static size_t a5xx_legacy_snapshot_registers(struct kgsl_device *device,
-		u8 *buf, size_t remain)
+		u8 *buf, size_t remain, const unsigned int *regs, size_t size)
 {
-	struct kgsl_snapshot_registers regs = {
-		.regs = a5xx_registers,
-		.count = ARRAY_SIZE(a5xx_registers) / 2,
+	struct kgsl_snapshot_registers snapshot_regs = {
+		.regs = regs,
+		.count = size / 2,
 	};
 
-	return kgsl_snapshot_dump_registers(device, buf, remain, &regs);
+	return kgsl_snapshot_dump_registers(device, buf, remain,
+			&snapshot_regs);
 }
 
-static struct cdregs {
-	const unsigned int *regs;
-	unsigned int size;
-} _a5xx_cd_registers[] = {
-	{ a5xx_registers, ARRAY_SIZE(a5xx_registers) },
-};
-
 #define REG_PAIR_COUNT(_a, _i) \
 	(((_a)[(2 * (_i)) + 1] - (_a)[2 * (_i)]) + 1)
 
@@ -691,11 +699,13 @@
 	struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
 	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
 	unsigned int *src = (unsigned int *) registers.hostptr;
-	unsigned int i, j, k;
+	struct registers *regs = (struct registers *)priv;
+	unsigned int j, k;
 	unsigned int count = 0;
 
 	if (crash_dump_valid == false)
-		return a5xx_legacy_snapshot_registers(device, buf, remain);
+		return a5xx_legacy_snapshot_registers(device, buf, remain,
+				regs->regs, regs->size);
 
 	if (remain < sizeof(*header)) {
 		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
@@ -704,24 +714,20 @@
 
 	remain -= sizeof(*header);
 
-	for (i = 0; i < ARRAY_SIZE(_a5xx_cd_registers); i++) {
-		struct cdregs *regs = &_a5xx_cd_registers[i];
+	for (j = 0; j < regs->size / 2; j++) {
+		unsigned int start = regs->regs[2 * j];
+		unsigned int end = regs->regs[(2 * j) + 1];
 
-		for (j = 0; j < regs->size / 2; j++) {
-			unsigned int start = regs->regs[2 * j];
-			unsigned int end = regs->regs[(2 * j) + 1];
+		if (remain < ((end - start) + 1) * 8) {
+			SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
+			goto out;
+		}
 
-			if (remain < ((end - start) + 1) * 8) {
-				SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
-				goto out;
-			}
+		remain -= ((end - start) + 1) * 8;
 
-			remain -= ((end - start) + 1) * 8;
-
-			for (k = start; k <= end; k++, count++) {
-				*data++ = k;
-				*data++ = *src++;
-			}
+		for (k = start; k <= end; k++, count++) {
+			*data++ = k;
+			*data++ = *src++;
 		}
 	}
 
@@ -861,6 +867,7 @@
 	struct adreno_snapshot_data *snap_data = gpudev->snapshot_data;
 	unsigned int reg, i;
 	struct adreno_ringbuffer *rb;
+	struct registers regs;
 
 	/* Disable Clock gating temporarily for the debug bus to work */
 	a5xx_hwcg_set(adreno_dev, false);
@@ -877,8 +884,20 @@
 	/* Try to run the crash dumper */
 	_a5xx_do_crashdump(device);
 
-	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
-		snapshot, a5xx_snapshot_registers, NULL);
+	regs.regs = a5xx_registers;
+	regs.size = ARRAY_SIZE(a5xx_registers);
+
+	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS, snapshot,
+			a5xx_snapshot_registers, &regs);
+
+	if (ADRENO_FEATURE(adreno_dev, ADRENO_GPMU)) {
+		regs.regs = a5xx_gpmu_registers;
+		regs.size = ARRAY_SIZE(a5xx_gpmu_registers);
+
+		kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
+				snapshot, a5xx_snapshot_registers, &regs);
+	}
+
 
 	/* Dump SP TP HLSQ registers */
 	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS, snapshot,
@@ -1035,17 +1054,23 @@
 	 * To save the registers, we need 16 bytes per register pair for the
 	 * script and a dword for each register int the data
 	 */
-	for (i = 0; i < ARRAY_SIZE(_a5xx_cd_registers); i++) {
-		struct cdregs *regs = &_a5xx_cd_registers[i];
 
+	/* Each pair needs 16 bytes (2 qwords) */
+	script_size += (ARRAY_SIZE(a5xx_registers) / 2) * 16;
+
+	/* Each register needs a dword in the data */
+	for (j = 0; j < ARRAY_SIZE(a5xx_registers) / 2; j++)
+		data_size += REG_PAIR_COUNT(a5xx_registers, j) *
+			sizeof(unsigned int);
+
+	if (ADRENO_FEATURE(adreno_dev, ADRENO_GPMU)) {
 		/* Each pair needs 16 bytes (2 qwords) */
-		script_size += (regs->size / 2) * 16;
+		script_size += (ARRAY_SIZE(a5xx_gpmu_registers) / 2) * 16;
 
 		/* Each register needs a dword in the data */
-		for (j = 0; j < regs->size / 2; j++)
-			data_size += REG_PAIR_COUNT(regs->regs, j) *
+		for (j = 0; j < ARRAY_SIZE(a5xx_gpmu_registers) / 2; j++)
+			data_size += REG_PAIR_COUNT(a5xx_gpmu_registers, j) *
 				sizeof(unsigned int);
-
 	}
 
 	/*
@@ -1083,13 +1108,21 @@
 	ptr = (uint64_t *) capturescript.hostptr;
 
 	/* For the registers, program a read command for each pair */
-	for (i = 0; i < ARRAY_SIZE(_a5xx_cd_registers); i++) {
-		struct cdregs *regs = &_a5xx_cd_registers[i];
 
-		for (j = 0; j < regs->size / 2; j++) {
-			unsigned int r = REG_PAIR_COUNT(regs->regs, j);
+	for (j = 0; j < ARRAY_SIZE(a5xx_registers) / 2; j++) {
+		unsigned int r = REG_PAIR_COUNT(a5xx_registers, j);
+		*ptr++ = registers.gpuaddr + offset;
+		*ptr++ = (((uint64_t) a5xx_registers[2 * j]) << 44)
+			| r;
+		offset += r * sizeof(unsigned int);
+	}
+
+	if (ADRENO_FEATURE(adreno_dev, ADRENO_GPMU)) {
+		for (j = 0; j < ARRAY_SIZE(a5xx_gpmu_registers) / 2; j++) {
+			unsigned int r = REG_PAIR_COUNT(a5xx_gpmu_registers, j);
 			*ptr++ = registers.gpuaddr + offset;
-			*ptr++ = (((uint64_t) regs->regs[2 * j]) << 44) | r;
+			*ptr++ = (((uint64_t) a5xx_gpmu_registers[2 * j]) << 44)
+				| r;
 			offset += r * sizeof(unsigned int);
 		}
 	}
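
To make the sizing in the hunks above concrete: each entry pair in a register list is an inclusive start/stop offset, each pair costs 16 bytes (2 qwords) of crash-dump script, and each register covered by a pair costs a dword of data. A small standalone illustration using the a5xx_gpmu_registers values from this patch (a hypothetical host-side program, not driver code):

#include <stdio.h>

#define REG_PAIR_COUNT(_a, _i) \
	(((_a)[(2 * (_i)) + 1] - (_a)[2 * (_i)]) + 1)

static const unsigned int gpmu_regs[] = {
	0xA800, 0xA8FF,	/* 256 registers */
	0xAC60, 0xAC60,	/* 1 register */
};

int main(void)
{
	size_t pairs = sizeof(gpmu_regs) / sizeof(gpmu_regs[0]) / 2;
	size_t script_size = pairs * 16;	/* 2 qwords per pair */
	size_t data_size = 0;
	size_t j;

	for (j = 0; j < pairs; j++)
		data_size += REG_PAIR_COUNT(gpmu_regs, j) * sizeof(unsigned int);

	/* prints: script=32 bytes, data=1028 bytes */
	printf("script=%zu bytes, data=%zu bytes\n", script_size, data_size);
	return 0;
}
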
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 37330cb..517b813 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -446,7 +446,7 @@
 
 	if (mmu_prot) {
 		mmu_base = mmu_prot->base;
-		mmu_range = 1 << mmu_prot->range;
+		mmu_range = mmu_prot->range;
 		req_sets += DIV_ROUND_UP(mmu_range, 0x2000);
 	}
 
diff --git a/drivers/gpu/msm/adreno_a6xx_preempt.c b/drivers/gpu/msm/adreno_a6xx_preempt.c
index 6c8e664..132fb02 100644
--- a/drivers/gpu/msm/adreno_a6xx_preempt.c
+++ b/drivers/gpu/msm/adreno_a6xx_preempt.c
@@ -49,8 +49,13 @@
 				OOB_PREEMPTION_SET_MASK,
 				OOB_PREEMPTION_CHECK_MASK,
 				OOB_PREEMPTION_CLEAR_MASK);
-			if (status)
+			if (status) {
+				adreno_set_gpu_fault(adreno_dev,
+					ADRENO_GMU_FAULT);
+				adreno_dispatcher_schedule(
+					KGSL_DEVICE(adreno_dev));
 				return;
+			}
 		}
 	}
 
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index b8c282d..a634d98 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -208,8 +208,8 @@
 	if (!kgsl_state_is_awake(KGSL_DEVICE(adreno_dev)))
 		goto ret;
 
-	if (adreno_rb_empty(adreno_dev->cur_rb))
-		goto ret;
+	if (!adreno_rb_empty(adreno_dev->cur_rb))
+		return false;
 
 	/* only check rbbm status to determine if GPU is idle */
 	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_STATUS, &reg_rbbm_status);
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index a2d6071..3b55fc6 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -2388,7 +2388,6 @@
 	struct kgsl_gpuobj_import *param = data;
 	struct kgsl_mem_entry *entry;
 	int ret, fd = -1;
-	struct kgsl_mmu *mmu = &dev_priv->device->mmu;
 
 	entry = kgsl_mem_entry_create();
 	if (entry == NULL)
@@ -2402,18 +2401,10 @@
 			| KGSL_MEMFLAGS_FORCE_32BIT
 			| KGSL_MEMFLAGS_IOCOHERENT;
 
-	/* Disable IO coherence if it is not supported on the chip */
-	if (!MMU_FEATURE(mmu, KGSL_MMU_IO_COHERENT))
-		param->flags &= ~((uint64_t)KGSL_MEMFLAGS_IOCOHERENT);
-
 	if (kgsl_is_compat_task())
 		param->flags |= KGSL_MEMFLAGS_FORCE_32BIT;
 
-	entry->memdesc.flags = param->flags;
-
-	if (MMU_FEATURE(mmu, KGSL_MMU_NEED_GUARD_PAGE))
-		entry->memdesc.priv |= KGSL_MEMDESC_GUARD_PAGE;
-
+	kgsl_memdesc_init(dev_priv->device, &entry->memdesc, param->flags);
 	if (param->type == KGSL_USER_MEM_TYPE_ADDR)
 		ret = _gpuobj_map_useraddr(dev_priv->device, private->pagetable,
 			entry, param);
@@ -2652,6 +2643,7 @@
 	struct kgsl_process_private *private = dev_priv->process_priv;
 	struct kgsl_mmu *mmu = &dev_priv->device->mmu;
 	unsigned int memtype;
+	uint64_t flags;
 
 	/*
 	 * If content protection is not enabled and secure buffer
@@ -2688,30 +2680,17 @@
 	 * Note: CACHEMODE is ignored for this call. Caching should be
 	 * determined by type of allocation being mapped.
 	 */
-	param->flags &= KGSL_MEMFLAGS_GPUREADONLY
-			| KGSL_MEMTYPE_MASK
-			| KGSL_MEMALIGN_MASK
-			| KGSL_MEMFLAGS_USE_CPU_MAP
-			| KGSL_MEMFLAGS_SECURE
-			| KGSL_MEMFLAGS_IOCOHERENT;
-
-	/* Disable IO coherence if it is not supported on the chip */
-	if (!MMU_FEATURE(mmu, KGSL_MMU_IO_COHERENT))
-		param->flags &= ~((uint64_t)KGSL_MEMFLAGS_IOCOHERENT);
-
-	entry->memdesc.flags = (uint64_t) param->flags;
+	flags = param->flags & (KGSL_MEMFLAGS_GPUREADONLY
+				| KGSL_MEMTYPE_MASK
+				| KGSL_MEMALIGN_MASK
+				| KGSL_MEMFLAGS_USE_CPU_MAP
+				| KGSL_MEMFLAGS_SECURE
+				| KGSL_MEMFLAGS_IOCOHERENT);
 
 	if (kgsl_is_compat_task())
-		entry->memdesc.flags |= KGSL_MEMFLAGS_FORCE_32BIT;
+		flags |= KGSL_MEMFLAGS_FORCE_32BIT;
 
-	if (!kgsl_mmu_use_cpu_map(mmu))
-		entry->memdesc.flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
-
-	if (MMU_FEATURE(mmu, KGSL_MMU_NEED_GUARD_PAGE))
-		entry->memdesc.priv |= KGSL_MEMDESC_GUARD_PAGE;
-
-	if (param->flags & KGSL_MEMFLAGS_SECURE)
-		entry->memdesc.priv |= KGSL_MEMDESC_SECURE;
+	kgsl_memdesc_init(dev_priv->device, &entry->memdesc, flags);
 
 	switch (memtype) {
 	case KGSL_MEM_ENTRY_USER:
@@ -3107,10 +3086,6 @@
 		| KGSL_MEMFLAGS_FORCE_32BIT
 		| KGSL_MEMFLAGS_IOCOHERENT;
 
-	/* Turn off SVM if the system doesn't support it */
-	if (!kgsl_mmu_use_cpu_map(mmu))
-		flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
-
 	/* Return not supported error if secure memory isn't enabled */
 	if (!kgsl_mmu_is_secured(mmu) &&
 			(flags & KGSL_MEMFLAGS_SECURE)) {
@@ -3119,10 +3094,6 @@
 		return ERR_PTR(-EOPNOTSUPP);
 	}
 
-	/* Secure memory disables advanced addressing modes */
-	if (flags & KGSL_MEMFLAGS_SECURE)
-		flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
-
 	/* Cap the alignment bits to the highest number we can handle */
 	align = MEMFLAGS(flags, KGSL_MEMALIGN_MASK, KGSL_MEMALIGN_SHIFT);
 	if (align >= ilog2(KGSL_MAX_ALIGN)) {
@@ -3141,20 +3112,10 @@
 
 	flags = kgsl_filter_cachemode(flags);
 
-	/* Disable IO coherence if it is not supported on the chip */
-	if (!MMU_FEATURE(mmu, KGSL_MMU_IO_COHERENT))
-		flags &= ~((uint64_t)KGSL_MEMFLAGS_IOCOHERENT);
-
 	entry = kgsl_mem_entry_create();
 	if (entry == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	if (MMU_FEATURE(mmu, KGSL_MMU_NEED_GUARD_PAGE))
-		entry->memdesc.priv |= KGSL_MEMDESC_GUARD_PAGE;
-
-	if (flags & KGSL_MEMFLAGS_SECURE)
-		entry->memdesc.priv |= KGSL_MEMDESC_SECURE;
-
 	ret = kgsl_allocate_user(dev_priv->device, &entry->memdesc,
 		size, flags);
 	if (ret != 0)
@@ -3338,6 +3299,7 @@
 	struct kgsl_process_private *process = dev_priv->process_priv;
 	struct kgsl_sparse_phys_alloc *param = data;
 	struct kgsl_mem_entry *entry;
+	uint64_t flags;
 	int ret;
 	int id;
 
@@ -3370,11 +3332,12 @@
 	entry->id = id;
 	entry->priv = process;
 
-	entry->memdesc.flags = KGSL_MEMFLAGS_SPARSE_PHYS;
-	kgsl_memdesc_set_align(&entry->memdesc, ilog2(param->pagesize));
+	flags = KGSL_MEMFLAGS_SPARSE_PHYS |
+		((ilog2(param->pagesize) << KGSL_MEMALIGN_SHIFT) &
+			KGSL_MEMALIGN_MASK);
 
 	ret = kgsl_allocate_user(dev_priv->device, &entry->memdesc,
-			param->size, entry->memdesc.flags);
+			param->size, flags);
 	if (ret)
 		goto err_remove_idr;
 
@@ -3463,7 +3426,8 @@
 	if (entry == NULL)
 		return -ENOMEM;
 
-	entry->memdesc.flags = KGSL_MEMFLAGS_SPARSE_VIRT;
+	kgsl_memdesc_init(dev_priv->device, &entry->memdesc,
+			KGSL_MEMFLAGS_SPARSE_VIRT);
 	entry->memdesc.size = param->size;
 	entry->memdesc.cur_bindings = 0;
 	kgsl_memdesc_set_align(&entry->memdesc, ilog2(param->pagesize));
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 325d44a..3539cda 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -262,13 +262,12 @@
 		return;
 	}
 
-	gpu_qdss_desc.flags = 0;
+	kgsl_memdesc_init(device, &gpu_qdss_desc, 0);
 	gpu_qdss_desc.priv = 0;
 	gpu_qdss_desc.physaddr = gpu_qdss_entry[0];
 	gpu_qdss_desc.size = gpu_qdss_entry[1];
 	gpu_qdss_desc.pagetable = NULL;
 	gpu_qdss_desc.ops = NULL;
-	gpu_qdss_desc.dev = device->dev->parent;
 	gpu_qdss_desc.hostptr = NULL;
 
 	result = memdesc_sg_dma(&gpu_qdss_desc, gpu_qdss_desc.physaddr,
@@ -307,13 +306,12 @@
 		return;
 	}
 
-	gpu_qtimer_desc.flags = 0;
+	kgsl_memdesc_init(device, &gpu_qtimer_desc, 0);
 	gpu_qtimer_desc.priv = 0;
 	gpu_qtimer_desc.physaddr = gpu_qtimer_entry[0];
 	gpu_qtimer_desc.size = gpu_qtimer_entry[1];
 	gpu_qtimer_desc.pagetable = NULL;
 	gpu_qtimer_desc.ops = NULL;
-	gpu_qtimer_desc.dev = device->dev->parent;
 	gpu_qtimer_desc.hostptr = NULL;
 
 	result = memdesc_sg_dma(&gpu_qtimer_desc, gpu_qtimer_desc.physaddr,
@@ -1486,6 +1484,7 @@
 {
 	int ret;
 
+	kgsl_memdesc_init(device, &iommu->setstate, 0);
 	ret = kgsl_sharedmem_alloc_contig(device, &iommu->setstate, PAGE_SIZE);
 
 	if (!ret) {
@@ -2630,7 +2629,7 @@
 		return -EINVAL;
 	}
 	iommu->protect.base = reg_val[0] / sizeof(u32);
-	iommu->protect.range = ilog2(reg_val[1] / sizeof(u32));
+	iommu->protect.range = reg_val[1] / sizeof(u32);
 
 	of_property_for_each_string(node, "clock-names", prop, cname) {
 		struct clk *c = devm_clk_get(&pdev->dev, cname);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index 2e84bad..df88b9a 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -110,27 +110,25 @@
 		id++, entry = idr_get_next(&priv->mem_idr, &id)) {
 
 		int egl_surface_count = 0, egl_image_count = 0;
-		struct kgsl_memdesc *m;
+		struct kgsl_memdesc *m = &entry->memdesc;
 
-		if (kgsl_mem_entry_get(entry) == 0)
+		if ((kgsl_memdesc_usermem_type(m) != KGSL_MEM_ENTRY_ION) ||
+			entry->pending_free || (kgsl_mem_entry_get(entry) == 0))
 			continue;
 		spin_unlock(&priv->mem_lock);
 
-		m = &entry->memdesc;
-		if (kgsl_memdesc_usermem_type(m) == KGSL_MEM_ENTRY_ION) {
-			kgsl_get_egl_counts(entry, &egl_surface_count,
-					&egl_image_count);
+		kgsl_get_egl_counts(entry, &egl_surface_count,
+				&egl_image_count);
 
-			if (kgsl_memdesc_get_memtype(m) ==
-						KGSL_MEMTYPE_EGL_SURFACE)
-				imported_mem += m->size;
-			else if (egl_surface_count == 0) {
-				uint64_t size = m->size;
+		if (kgsl_memdesc_get_memtype(m) ==
+				KGSL_MEMTYPE_EGL_SURFACE)
+			imported_mem += m->size;
+		else if (egl_surface_count == 0) {
+			uint64_t size = m->size;
 
-				do_div(size, (egl_image_count ?
-							egl_image_count : 1));
-				imported_mem += size;
-			}
+			do_div(size, (egl_image_count ?
+					egl_image_count : 1));
+			imported_mem += size;
 		}
 
 		kgsl_mem_entry_put(entry);
@@ -415,7 +413,7 @@
 {
 	int ret;
 
-	memdesc->flags = flags;
+	kgsl_memdesc_init(device, memdesc, flags);
 
 	if (kgsl_mmu_get_mmutype(device) == KGSL_MMU_TYPE_NONE)
 		ret = kgsl_sharedmem_alloc_contig(device, memdesc, size);
@@ -771,6 +769,40 @@
 }
 EXPORT_SYMBOL(kgsl_cache_range_op);
 
+void kgsl_memdesc_init(struct kgsl_device *device,
+			struct kgsl_memdesc *memdesc, uint64_t flags)
+{
+	struct kgsl_mmu *mmu = &device->mmu;
+	unsigned int align;
+
+	memset(memdesc, 0, sizeof(*memdesc));
+	/* Turn off SVM if the system doesn't support it */
+	if (!kgsl_mmu_use_cpu_map(mmu))
+		flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
+
+	/* Secure memory disables advanced addressing modes */
+	if (flags & KGSL_MEMFLAGS_SECURE)
+		flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
+
+	/* Disable IO coherence if it is not supported on the chip */
+	if (!MMU_FEATURE(mmu, KGSL_MMU_IO_COHERENT))
+		flags &= ~((uint64_t) KGSL_MEMFLAGS_IOCOHERENT);
+
+	if (MMU_FEATURE(mmu, KGSL_MMU_NEED_GUARD_PAGE))
+		memdesc->priv |= KGSL_MEMDESC_GUARD_PAGE;
+
+	if (flags & KGSL_MEMFLAGS_SECURE)
+		memdesc->priv |= KGSL_MEMDESC_SECURE;
+
+	memdesc->flags = flags;
+	memdesc->dev = device->dev->parent;
+
+	align = max_t(unsigned int,
+		(memdesc->flags & KGSL_MEMALIGN_MASK) >> KGSL_MEMALIGN_SHIFT,
+		ilog2(PAGE_SIZE));
+	kgsl_memdesc_set_align(memdesc, align);
+}
+
 int
 kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
 			uint64_t size)
@@ -971,8 +1003,6 @@
 
 	if (memdesc->pages)
 		kgsl_free(memdesc->pages);
-
-	memset(memdesc, 0, sizeof(*memdesc));
 }
 EXPORT_SYMBOL(kgsl_sharedmem_free);
 
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
index 55bb34f..976752d 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.h
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -57,6 +57,9 @@
 			uint64_t offset, uint64_t size,
 			unsigned int op);
 
+void kgsl_memdesc_init(struct kgsl_device *device,
+			struct kgsl_memdesc *memdesc, uint64_t flags);
+
 void kgsl_process_init_sysfs(struct kgsl_device *device,
 		struct kgsl_process_private *private);
 void kgsl_process_uninit_sysfs(struct kgsl_process_private *private);
@@ -282,8 +285,8 @@
 {
 	int ret;
 
-	memdesc->flags = flags;
-	memdesc->priv = priv;
+	kgsl_memdesc_init(device, memdesc, flags);
+	memdesc->priv |= priv;
 
 	if (((memdesc->priv & KGSL_MEMDESC_CONTIG) != 0) ||
 		(kgsl_mmu_get_mmutype(device) == KGSL_MMU_TYPE_NONE))
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index ac7d2c6..aea6267 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1370,7 +1370,7 @@
 	 * of implement() working on 8 byte chunks
 	 */
 
-	int len = hid_report_len(report) + 7;
+	u32 len = hid_report_len(report) + 7;
 
 	return kmalloc(len, flags);
 }
@@ -1435,7 +1435,7 @@
 {
 	char *buf;
 	int ret;
-	int len;
+	u32 len;
 
 	buf = hid_alloc_report_buf(report, GFP_KERNEL);
 	if (!buf)
@@ -1461,14 +1461,14 @@
 }
 EXPORT_SYMBOL_GPL(__hid_request);
 
-int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
+int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
 		int interrupt)
 {
 	struct hid_report_enum *report_enum = hid->report_enum + type;
 	struct hid_report *report;
 	struct hid_driver *hdrv;
 	unsigned int a;
-	int rsize, csize = size;
+	u32 rsize, csize = size;
 	u8 *cdata = data;
 	int ret = 0;
 
@@ -1526,7 +1526,7 @@
  *
  * This is data entry for lower layers.
  */
-int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int interrupt)
+int hid_input_report(struct hid_device *hid, int type, u8 *data, u32 size, int interrupt)
 {
 	struct hid_report_enum *report_enum;
 	struct hid_driver *hdrv;
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 40233315..5ff6dd8 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1279,7 +1279,8 @@
 					      led_work);
 	struct hid_field *field;
 	struct hid_report *report;
-	int len, ret;
+	int ret;
+	u32 len;
 	__u8 *buf;
 
 	field = hidinput_get_led_field(hid);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 89e9032..fba655d 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -315,7 +315,8 @@
 static void mt_get_feature(struct hid_device *hdev, struct hid_report *report)
 {
 	struct mt_device *td = hid_get_drvdata(hdev);
-	int ret, size = hid_report_len(report);
+	int ret;
+	u32 size = hid_report_len(report);
 	u8 *buf;
 
 	/*
@@ -919,7 +920,7 @@
 	struct hid_report_enum *re;
 	struct mt_class *cls = &td->mtclass;
 	char *buf;
-	int report_len;
+	u32 report_len;
 
 	if (td->inputmode < 0)
 		return;
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index be89bcb..276d12d 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -110,8 +110,8 @@
 	u8 *writeReport;
 	u8 *readReport;
 
-	int input_report_size;
-	int output_report_size;
+	u32 input_report_size;
+	u32 output_report_size;
 
 	unsigned long flags;
 
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
index 43617fb..317c9c2 100644
--- a/drivers/hid/hid-roccat-kovaplus.c
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -37,6 +37,8 @@
 static void kovaplus_profile_activated(struct kovaplus_device *kovaplus,
 		uint new_profile_index)
 {
+	if (new_profile_index >= ARRAY_SIZE(kovaplus->profile_settings))
+		return;
 	kovaplus->actual_profile = new_profile_index;
 	kovaplus->actual_cpi = kovaplus->profile_settings[new_profile_index].cpi_startup_level;
 	kovaplus->actual_x_sensitivity = kovaplus->profile_settings[new_profile_index].sensitivity_x;
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index b0bb99a..1b1dccd 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -1056,7 +1056,6 @@
 	u8 battery_charging;
 	u8 battery_capacity;
 	u8 led_state[MAX_LEDS];
-	u8 resume_led_state[MAX_LEDS];
 	u8 led_delay_on[MAX_LEDS];
 	u8 led_delay_off[MAX_LEDS];
 	u8 led_count;
@@ -1793,6 +1792,7 @@
 		led->name = name;
 		led->brightness = sc->led_state[n];
 		led->max_brightness = max_brightness[n];
+		led->flags = LED_CORE_SUSPENDRESUME;
 		led->brightness_get = sony_led_get_brightness;
 		led->brightness_set = sony_led_set_brightness;
 
@@ -2509,47 +2509,32 @@
 
 static int sony_suspend(struct hid_device *hdev, pm_message_t message)
 {
-	/*
-	 * On suspend save the current LED state,
-	 * stop running force-feedback and blank the LEDS.
-	 */
-	if (SONY_LED_SUPPORT || SONY_FF_SUPPORT) {
+#ifdef CONFIG_SONY_FF
+
+	/* On suspend stop any running force-feedback events */
+	if (SONY_FF_SUPPORT) {
 		struct sony_sc *sc = hid_get_drvdata(hdev);
 
-#ifdef CONFIG_SONY_FF
 		sc->left = sc->right = 0;
-#endif
-
-		memcpy(sc->resume_led_state, sc->led_state,
-			sizeof(sc->resume_led_state));
-		memset(sc->led_state, 0, sizeof(sc->led_state));
-
 		sony_send_output_report(sc);
 	}
 
+#endif
 	return 0;
 }
 
 static int sony_resume(struct hid_device *hdev)
 {
-	/* Restore the state of controller LEDs on resume */
-	if (SONY_LED_SUPPORT) {
-		struct sony_sc *sc = hid_get_drvdata(hdev);
+	struct sony_sc *sc = hid_get_drvdata(hdev);
 
-		memcpy(sc->led_state, sc->resume_led_state,
-			sizeof(sc->led_state));
-
-		/*
-		 * The Sixaxis and navigation controllers on USB need to be
-		 * reinitialized on resume or they won't behave properly.
-		 */
-		if ((sc->quirks & SIXAXIS_CONTROLLER_USB) ||
-			(sc->quirks & NAVIGATION_CONTROLLER_USB)) {
-			sixaxis_set_operational_usb(sc->hdev);
-			sc->defer_initialization = 1;
-		}
-
-		sony_set_leds(sc);
+	/*
+	 * The Sixaxis and navigation controllers on USB need to be
+	 * reinitialized on resume or they won't behave properly.
+	 */
+	if ((sc->quirks & SIXAXIS_CONTROLLER_USB) ||
+		(sc->quirks & NAVIGATION_CONTROLLER_USB)) {
+		sixaxis_set_operational_usb(sc->hdev);
+		sc->defer_initialization = 1;
 	}
 
 	return 0;
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index f0e2757..216f033 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -192,6 +192,11 @@
 	int ret = 0, len;
 	unsigned char report_number;
 
+	if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
+		ret = -ENODEV;
+		goto out;
+	}
+
 	dev = hidraw_table[minor]->hid;
 
 	if (!dev->ll_driver->raw_request) {
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 865e7c2..2548c5d 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -142,10 +142,10 @@
 						   * register of the HID
 						   * descriptor. */
 	unsigned int		bufsize;	/* i2c buffer size */
-	char			*inbuf;		/* Input buffer */
-	char			*rawbuf;	/* Raw Input buffer */
-	char			*cmdbuf;	/* Command buffer */
-	char			*argsbuf;	/* Command arguments buffer */
+	u8			*inbuf;		/* Input buffer */
+	u8			*rawbuf;	/* Raw Input buffer */
+	u8			*cmdbuf;	/* Command buffer */
+	u8			*argsbuf;	/* Command arguments buffer */
 
 	unsigned long		flags;		/* device flags */
 	unsigned long		quirks;		/* Various quirks */
@@ -451,7 +451,8 @@
 
 static void i2c_hid_get_input(struct i2c_hid *ihid)
 {
-	int ret, ret_size;
+	int ret;
+	u32 ret_size;
 	int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
 
 	if (size > ihid->bufsize)
@@ -476,7 +477,7 @@
 		return;
 	}
 
-	if (ret_size > size) {
+	if ((ret_size > size) || (ret_size <= 2)) {
 		dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
 			__func__, size, ret_size);
 		return;
@@ -968,6 +969,15 @@
 	return ret < 0 && ret != -ENXIO ? ret : 0;
 }
 
+static void i2c_hid_acpi_fix_up_power(struct device *dev)
+{
+	acpi_handle handle = ACPI_HANDLE(dev);
+	struct acpi_device *adev;
+
+	if (handle && acpi_bus_get_device(handle, &adev) == 0)
+		acpi_device_fix_up_power(adev);
+}
+
 static const struct acpi_device_id i2c_hid_acpi_match[] = {
 	{"ACPI0C50", 0 },
 	{"PNP0C50", 0 },
@@ -980,6 +990,8 @@
 {
 	return -ENODEV;
 }
+
+static inline void i2c_hid_acpi_fix_up_power(struct device *dev) {}
 #endif
 
 #ifdef CONFIG_OF
@@ -1082,6 +1094,8 @@
 	if (ret < 0)
 		goto err;
 
+	i2c_hid_acpi_fix_up_power(&client->dev);
+
 	pm_runtime_get_noresume(&client->dev);
 	pm_runtime_set_active(&client->dev);
 	pm_runtime_enable(&client->dev);
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 7a4d39c..e8b90b5 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -351,7 +351,7 @@
 	u8 *rep_data;
 	struct hid_report *r;
 	struct hid_report_enum *re;
-	int length;
+	u32 length;
 	int error = -ENOMEM, limit = 0;
 
 	if (wacom_wac->mode_report < 0)
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index d8bc4b9..9360cdc 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -70,7 +70,7 @@
 	/* PCIE */
 	{ .dev_type = HV_PCIE,
 	  HV_PCIE_GUID,
-	  .perf_device = true,
+	  .perf_device = false,
 	},
 
 	/* Synthetic Frame Buffer */
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index b24f1d3..ac63e56 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -94,18 +94,20 @@
 
 struct ina2xx_config {
 	u16 config_default;
-	int calibration_factor;
+	int calibration_value;
 	int registers;
 	int shunt_div;
 	int bus_voltage_shift;
 	int bus_voltage_lsb;	/* uV */
-	int power_lsb;		/* uW */
+	int power_lsb_factor;
 };
 
 struct ina2xx_data {
 	const struct ina2xx_config *config;
 
 	long rshunt;
+	long current_lsb_uA;
+	long power_lsb_uW;
 	struct mutex config_lock;
 	struct regmap *regmap;
 
@@ -115,21 +117,21 @@
 static const struct ina2xx_config ina2xx_config[] = {
 	[ina219] = {
 		.config_default = INA219_CONFIG_DEFAULT,
-		.calibration_factor = 40960000,
+		.calibration_value = 4096,
 		.registers = INA219_REGISTERS,
 		.shunt_div = 100,
 		.bus_voltage_shift = 3,
 		.bus_voltage_lsb = 4000,
-		.power_lsb = 20000,
+		.power_lsb_factor = 20,
 	},
 	[ina226] = {
 		.config_default = INA226_CONFIG_DEFAULT,
-		.calibration_factor = 5120000,
+		.calibration_value = 2048,
 		.registers = INA226_REGISTERS,
 		.shunt_div = 400,
 		.bus_voltage_shift = 0,
 		.bus_voltage_lsb = 1250,
-		.power_lsb = 25000,
+		.power_lsb_factor = 25,
 	},
 };
 
@@ -168,12 +170,16 @@
 	return INA226_SHIFT_AVG(avg_bits);
 }
 
+/*
+ * The calibration register is set to the best value, which eliminates
+ * truncation errors when the hardware calculates the current register.
+ * According to the datasheet (eq. 3) the best values are 2048 for
+ * ina226 and 4096 for ina219. They are hardcoded as calibration_value.
+ */
 static int ina2xx_calibrate(struct ina2xx_data *data)
 {
-	u16 val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
-				    data->rshunt);
-
-	return regmap_write(data->regmap, INA2XX_CALIBRATION, val);
+	return regmap_write(data->regmap, INA2XX_CALIBRATION,
+			    data->config->calibration_value);
 }
 
 /*
@@ -186,10 +192,6 @@
 	if (ret < 0)
 		return ret;
 
-	/*
-	 * Set current LSB to 1mA, shunt is in uOhms
-	 * (equation 13 in datasheet).
-	 */
 	return ina2xx_calibrate(data);
 }
 
@@ -267,15 +269,15 @@
 		val = DIV_ROUND_CLOSEST(val, 1000);
 		break;
 	case INA2XX_POWER:
-		val = regval * data->config->power_lsb;
+		val = regval * data->power_lsb_uW;
 		break;
 	case INA2XX_CURRENT:
-		/* signed register, LSB=1mA (selected), in mA */
-		val = (s16)regval;
+		/* signed register, result in mA */
+		val = regval * data->current_lsb_uA;
+		val = DIV_ROUND_CLOSEST(val, 1000);
 		break;
 	case INA2XX_CALIBRATION:
-		val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
-					regval);
+		val = regval;
 		break;
 	default:
 		/* programmer goofed */
@@ -303,9 +305,32 @@
 			ina2xx_get_value(data, attr->index, regval));
 }
 
-static ssize_t ina2xx_set_shunt(struct device *dev,
-				struct device_attribute *da,
-				const char *buf, size_t count)
+/*
+ * To keep the calibration register value fixed, the product of
+ * current_lsb and shunt_resistor must also be fixed and equal to
+ * shunt_voltage_lsb = 1 / shunt_div, multiplied by 10^9 to preserve
+ * the scale.
+ */
+static int ina2xx_set_shunt(struct ina2xx_data *data, long val)
+{
+	unsigned int dividend = DIV_ROUND_CLOSEST(1000000000,
+						  data->config->shunt_div);
+	if (val <= 0 || val > dividend)
+		return -EINVAL;
+
+	mutex_lock(&data->config_lock);
+	data->rshunt = val;
+	data->current_lsb_uA = DIV_ROUND_CLOSEST(dividend, val);
+	data->power_lsb_uW = data->config->power_lsb_factor *
+			     data->current_lsb_uA;
+	mutex_unlock(&data->config_lock);
+
+	return 0;
+}
+
+static ssize_t ina2xx_store_shunt(struct device *dev,
+				  struct device_attribute *da,
+				  const char *buf, size_t count)
 {
 	unsigned long val;
 	int status;
@@ -315,18 +340,9 @@
 	if (status < 0)
 		return status;
 
-	if (val == 0 ||
-	    /* Values greater than the calibration factor make no sense. */
-	    val > data->config->calibration_factor)
-		return -EINVAL;
-
-	mutex_lock(&data->config_lock);
-	data->rshunt = val;
-	status = ina2xx_calibrate(data);
-	mutex_unlock(&data->config_lock);
+	status = ina2xx_set_shunt(data, val);
 	if (status < 0)
 		return status;
-
 	return count;
 }
 
@@ -386,7 +402,7 @@
 
 /* shunt resistance */
 static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
-			  ina2xx_show_value, ina2xx_set_shunt,
+			  ina2xx_show_value, ina2xx_store_shunt,
 			  INA2XX_CALIBRATION);
 
 /* update interval (ina226 only) */
@@ -431,6 +447,7 @@
 
 	/* set the device type */
 	data->config = &ina2xx_config[id->driver_data];
+	mutex_init(&data->config_lock);
 
 	if (of_property_read_u32(dev->of_node, "shunt-resistor", &val) < 0) {
 		struct ina2xx_platform_data *pdata = dev_get_platdata(dev);
@@ -441,10 +458,7 @@
 			val = INA2XX_RSHUNT_DEFAULT;
 	}
 
-	if (val <= 0 || val > data->config->calibration_factor)
-		return -ENODEV;
-
-	data->rshunt = val;
+	ina2xx_set_shunt(data, val);
 
 	ina2xx_regmap_config.max_register = data->config->registers;
 
@@ -460,8 +474,6 @@
 		return -ENODEV;
 	}
 
-	mutex_init(&data->config_lock);
-
 	data->groups[group++] = &ina2xx_group;
 	if (id->driver_data == ina226)
 		data->groups[group++] = &ina226_group;
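
Worked numbers for the new LSB handling above, assuming an ina226 (shunt_div = 400, power_lsb_factor = 25) with a hypothetical 10 mOhm (10000 uOhm) shunt; the calibration register stays at the fixed 2048 while the current and power LSBs are derived in software:

#include <stdio.h>

/* rounding division for positive values, as DIV_ROUND_CLOSEST does here */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	long shunt_div = 400;		/* ina226 */
	long power_lsb_factor = 25;	/* ina226 */
	long rshunt_uohm = 10000;	/* assumed 10 mOhm shunt */

	long dividend = DIV_ROUND_CLOSEST(1000000000L, shunt_div);	/* 2500000 */
	long current_lsb_uA = DIV_ROUND_CLOSEST(dividend, rshunt_uohm);	/* 250 uA */
	long power_lsb_uW = power_lsb_factor * current_lsb_uA;		/* 6250 uW */

	/* a raw current register value of 1234 then reads back as ~309 mA */
	long current_mA = DIV_ROUND_CLOSEST(1234 * current_lsb_uA, 1000L);

	printf("current_lsb=%ld uA, power_lsb=%ld uW, current=%ld mA\n",
	       current_lsb_uA, power_lsb_uW, current_mA);
	return 0;
}
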
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index ce75dd4..2b31b84 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -1393,7 +1393,7 @@
 		duty_is_dc = data->REG_PWM_MODE[i] &&
 		  (nct6775_read_value(data, data->REG_PWM_MODE[i])
 		   & data->PWM_MODE_MASK[i]);
-		data->pwm_mode[i] = duty_is_dc;
+		data->pwm_mode[i] = !duty_is_dc;
 
 		fanmodecfg = nct6775_read_value(data, data->REG_FAN_MODE[i]);
 		for (j = 0; j < ARRAY_SIZE(data->REG_PWM); j++) {
@@ -2270,7 +2270,7 @@
 	struct nct6775_data *data = nct6775_update_device(dev);
 	struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
 
-	return sprintf(buf, "%d\n", !data->pwm_mode[sattr->index]);
+	return sprintf(buf, "%d\n", data->pwm_mode[sattr->index]);
 }
 
 static ssize_t
@@ -2291,9 +2291,9 @@
 	if (val > 1)
 		return -EINVAL;
 
-	/* Setting DC mode is not supported for all chips/channels */
+	/* Setting DC mode (0) is not supported for all chips/channels */
 	if (data->REG_PWM_MODE[nr] == 0) {
-		if (val)
+		if (!val)
 			return -EINVAL;
 		return count;
 	}
@@ -2302,7 +2302,7 @@
 	data->pwm_mode[nr] = val;
 	reg = nct6775_read_value(data, data->REG_PWM_MODE[nr]);
 	reg &= ~data->PWM_MODE_MASK[nr];
-	if (val)
+	if (!val)
 		reg |= data->PWM_MODE_MASK[nr];
 	nct6775_write_value(data, data->REG_PWM_MODE[nr], reg);
 	mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index d659a02..c3a8f68 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -154,7 +154,7 @@
 	const struct adm1275_data *data = to_adm1275_data(info);
 	int ret = 0;
 
-	if (page)
+	if (page > 0)
 		return -ENXIO;
 
 	switch (reg) {
@@ -240,7 +240,7 @@
 	const struct adm1275_data *data = to_adm1275_data(info);
 	int ret;
 
-	if (page)
+	if (page > 0)
 		return -ENXIO;
 
 	switch (reg) {
diff --git a/drivers/hwmon/pmbus/max8688.c b/drivers/hwmon/pmbus/max8688.c
index dd4883a..e951f9b 100644
--- a/drivers/hwmon/pmbus/max8688.c
+++ b/drivers/hwmon/pmbus/max8688.c
@@ -45,7 +45,7 @@
 {
 	int ret;
 
-	if (page)
+	if (page > 0)
 		return -ENXIO;
 
 	switch (reg) {
diff --git a/drivers/hwtracing/coresight/coresight-csr.c b/drivers/hwtracing/coresight/coresight-csr.c
index 9069530..0252203 100644
--- a/drivers/hwtracing/coresight/coresight-csr.c
+++ b/drivers/hwtracing/coresight/coresight-csr.c
@@ -21,6 +21,7 @@
 #include <linux/of.h>
 #include <linux/coresight.h>
 #include <linux/clk.h>
+#include <linux/mutex.h>
 
 #include "coresight-priv.h"
 
@@ -88,6 +89,8 @@
 };
 
 static LIST_HEAD(csr_list);
+static DEFINE_MUTEX(csr_lock);
+
 #define to_csr_drvdata(c) container_of(c, struct csr_drvdata, csr)
 
 void msm_qdss_csr_enable_bam_to_usb(struct coresight_csr *csr)
@@ -236,12 +239,15 @@
 struct coresight_csr *coresight_csr_get(const char *name)
 {
 	struct coresight_csr *csr;
-
+	mutex_lock(&csr_lock);
 	list_for_each_entry(csr, &csr_list, link) {
-		if (!strcmp(csr->name, name))
+		if (!strcmp(csr->name, name)) {
+			mutex_unlock(&csr_lock);
 			return csr;
+		}
 	}
 
+	mutex_unlock(&csr_lock);
 	return ERR_PTR(-EINVAL);
 }
 EXPORT_SYMBOL(coresight_csr_get);
@@ -391,7 +397,10 @@
 	spin_lock_init(&drvdata->spin_lock);
 	drvdata->csr.name = ((struct coresight_platform_data *)
 					 (pdev->dev.platform_data))->name;
+
+	mutex_lock(&csr_lock);
 	list_add_tail(&drvdata->csr.link, &csr_list);
+	mutex_unlock(&csr_lock);
 
 	dev_info(dev, "CSR initialized: %s\n", drvdata->csr.name);
 	return 0;
@@ -399,12 +408,13 @@
 
 static int csr_remove(struct platform_device *pdev)
 {
-	unsigned long flags;
 	struct csr_drvdata *drvdata = platform_get_drvdata(pdev);
 
-	spin_lock_irqsave(&drvdata->spin_lock, flags);
+	mutex_lock(&csr_lock);
+	list_del(&drvdata->csr.link);
+	mutex_unlock(&csr_lock);
+
 	coresight_unregister(drvdata->csdev);
-	spin_unlock_irqrestore(&drvdata->spin_lock, flags);
 	return 0;
 }
 
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index f57ad2e..04870ce 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2012, 2016, 2018, The Linux Foundation. All rights reserved.
  *
  * Description: CoreSight Program Flow Trace driver
  *
@@ -39,6 +39,7 @@
 
 #include "coresight-etm.h"
 #include "coresight-etm-perf.h"
+#include "coresight-priv.h"
 
 /*
  * Not really modular but using module_param is the easiest way to
@@ -720,6 +721,11 @@
 	 * certain registers might be ignored.
 	 */
 	etm_clr_pwrdwn(drvdata);
+
+	/* check the state of the fuse */
+	if (!coresight_authstatus_enabled(drvdata->base))
+		goto out;
+
 	/*
 	 * Set prog bit. It will be set from reset but this is included to
 	 * ensure it is set
@@ -742,6 +748,7 @@
 
 	etm_set_pwrdwn(drvdata);
 	etm_clr_pwrup(drvdata);
+out:
 	CS_LOCK(drvdata->base);
 }
 
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index a4bf109..95d13a9 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014, 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014, 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -27,6 +27,7 @@
 #include <linux/cpu.h>
 #include <linux/coresight.h>
 #include <linux/coresight-pmu.h>
+#include <linux/of.h>
 #include <linux/pm_wakeup.h>
 #include <linux/amba/bus.h>
 #include <linux/seq_file.h>
@@ -38,6 +39,7 @@
 
 #include "coresight-etm4x.h"
 #include "coresight-etm-perf.h"
+#include "coresight-priv.h"
 
 static int boot_enable;
 module_param_named(boot_enable, boot_enable, int, 0444);
@@ -166,12 +168,14 @@
 	writel_relaxed(config->vmid_mask0, drvdata->base + TRCVMIDCCTLR0);
 	writel_relaxed(config->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);
 
-	/*
-	 * Request to keep the trace unit powered and also
-	 * emulation of powerdown
-	 */
-	writel_relaxed(readl_relaxed(drvdata->base + TRCPDCR) | TRCPDCR_PU,
-		       drvdata->base + TRCPDCR);
+	if (!drvdata->tupwr_disable) {
+		/*
+		 * Request to keep the trace unit powered and also
+		 * emulation of powerdown
+		 */
+		writel_relaxed(readl_relaxed(drvdata->base + TRCPDCR)
+				| TRCPDCR_PU, drvdata->base + TRCPDCR);
+	}
 
 	/* Enable the trace unit */
 	writel_relaxed(1, drvdata->base + TRCPRGCTLR);
@@ -312,10 +316,12 @@
 
 	CS_UNLOCK(drvdata->base);
 
-	/* power can be removed from the trace unit now */
-	control = readl_relaxed(drvdata->base + TRCPDCR);
-	control &= ~TRCPDCR_PU;
-	writel_relaxed(control, drvdata->base + TRCPDCR);
+	if (!drvdata->tupwr_disable) {
+		/* power can be removed from the trace unit now */
+		control = readl_relaxed(drvdata->base + TRCPDCR);
+		control &= ~TRCPDCR_PU;
+		writel_relaxed(control, drvdata->base + TRCPDCR);
+	}
 
 	control = readl_relaxed(drvdata->base + TRCPRGCTLR);
 
@@ -436,6 +442,9 @@
 
 	CS_UNLOCK(drvdata->base);
 
+	if (!coresight_authstatus_enabled(drvdata->base))
+		goto out;
+
 	/* find all capabilities of the tracing unit */
 	etmidr0 = readl_relaxed(drvdata->base + TRCIDR0);
 
@@ -582,6 +591,8 @@
 	drvdata->nrseqstate = BMVAL(etmidr5, 25, 27);
 	/* NUMCNTR, bits[30:28] number of counters available for tracing */
 	drvdata->nr_cntr = BMVAL(etmidr5, 28, 30);
+
+out:
 	CS_LOCK(drvdata->base);
 }
 
@@ -1035,6 +1046,9 @@
 
 	etmdrvdata[drvdata->cpu] = drvdata;
 
+	drvdata->tupwr_disable = of_property_read_bool(drvdata->dev->of_node,
+				"qcom,tupwr-disable");
+
 	dev_info(dev, "CPU%d: %s initialized\n",
 			drvdata->cpu, (char *)id->data);
 
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index 4e51ecdc..5f34bdc 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -345,6 +345,7 @@
  * @nooverflow:	Indicate if overflow prevention is supported.
  * @atbtrig:	If the implementation can support ATB triggers
  * @lpoverride:	If the implementation can support low-power state over.
+ * @tupwr_disable:	Indicate if keeping the trace unit powered is disabled.
  * @config:	structure holding configuration parameters.
  */
 struct etmv4_drvdata {
@@ -391,6 +392,7 @@
 	bool				nooverflow;
 	bool				atbtrig;
 	bool				lpoverride;
+	bool				tupwr_disable;
 	struct etmv4_config		config;
 };
 
diff --git a/drivers/hwtracing/coresight/coresight-ost.c b/drivers/hwtracing/coresight/coresight-ost.c
index a5075ba..340c589 100644
--- a/drivers/hwtracing/coresight/coresight-ost.c
+++ b/drivers/hwtracing/coresight/coresight-ost.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -280,14 +280,13 @@
 
 int stm_set_ost_params(struct stm_drvdata *drvdata, size_t bitmap_size)
 {
-	stmdrvdata = drvdata;
-
 	drvdata->chs.bitmap = devm_kzalloc(drvdata->dev, bitmap_size,
 					   GFP_KERNEL);
 	if (!drvdata->chs.bitmap)
 		return -ENOMEM;
 
 	bitmap_fill(drvdata->entities, OST_ENTITY_MAX);
+	stmdrvdata = drvdata;
 
 	return 0;
 }
diff --git a/drivers/hwtracing/coresight/coresight-remote-etm.c b/drivers/hwtracing/coresight/coresight-remote-etm.c
index 54e897d..f4512e5 100644
--- a/drivers/hwtracing/coresight/coresight-remote-etm.c
+++ b/drivers/hwtracing/coresight/coresight-remote-etm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -362,6 +362,10 @@
 {
 	struct remote_etm_drvdata *drvdata = platform_get_drvdata(pdev);
 
+	qmi_svc_event_notifier_unregister(CORESIGHT_QMI_SVC_ID,
+					  CORESIGHT_QMI_VERSION,
+					  drvdata->inst_id,
+					  &drvdata->nb);
 	coresight_unregister(drvdata->csdev);
 	return 0;
 }
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 159512c..caeda7b 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * Description: CoreSight System Trace Macrocell driver
  *
@@ -839,11 +839,6 @@
 	}
 	bitmap_size = BITS_TO_LONGS(drvdata->numsp) * sizeof(long);
 
-	/* Store the driver data pointer for use in exported functions */
-	ret = stm_set_ost_params(drvdata, bitmap_size);
-	if (ret)
-		return ret;
-
 	guaranteed = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
 	if (!guaranteed)
 		return -ENOMEM;
@@ -872,6 +867,11 @@
 		goto stm_unregister;
 	}
 
+	/* Store the driver data pointer for use in exported functions */
+	ret = stm_set_ost_params(drvdata, bitmap_size);
+	if (ret)
+		goto stm_unregister;
+
 	pm_runtime_put(&adev->dev);
 
 	dev_info(dev, "%s initialized\n", (char *)id->data);
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 9d2ab01..e369ea1 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -845,7 +845,6 @@
 	}
 
 	drvdata->enable = true;
-	drvdata->sticky_enable = true;
 out:
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
 	if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM)
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 87d9162..8aed31a 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -139,7 +139,6 @@
 void tmc_enable_hw(struct tmc_drvdata *drvdata)
 {
 	drvdata->enable = true;
-	drvdata->sticky_enable = true;
 	writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
 	if (drvdata->force_reg_dump)
 		__tmc_reg_dump(drvdata);
@@ -155,7 +154,7 @@
 {
 	int ret = 0;
 
-	if (!drvdata->sticky_enable)
+	if (!drvdata->enable)
 		return -EPERM;
 
 	switch (drvdata->config_type) {
@@ -733,6 +732,13 @@
 		ret = tmc_etr_bam_init(adev, drvdata);
 		if (ret)
 			goto out;
+		/*
+		 * ETR configuration uses a 40-bit AXI master in place of
+		 * the embedded SRAM of ETB/ETF.
+		 */
+		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+		if (ret)
+			goto out;
 	} else {
 		desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
 		desc.ops = &tmc_etf_cs_ops;
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 1cf60f3..21c74ec 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -181,8 +181,10 @@
 	if (atomic_inc_return(&csdev->refcnt[refport]) == 1) {
 		if (link_ops(csdev)->enable) {
 			ret = link_ops(csdev)->enable(csdev, inport, outport);
-			if (ret)
+			if (ret) {
+				atomic_dec(&csdev->refcnt[refport]);
 				return ret;
+			}
 		}
 	}
 
@@ -263,42 +265,66 @@
 	}
 }
 
-void coresight_disable_path(struct list_head *path)
+static void coresight_disable_list_node(struct list_head *path,
+					struct coresight_node *nd)
 {
 	u32 type;
-	struct coresight_node *nd;
 	struct coresight_device *csdev, *parent, *child;
 
+	csdev = nd->csdev;
+	type = csdev->type;
+
+	/*
+	 * ETF devices are tricky... They can be a link or a sink,
+	 * depending on how they are configured.  If an ETF has been
+	 * "activated" it will be configured as a sink, otherwise
+	 * go ahead with the link configuration.
+	 */
+	if (type == CORESIGHT_DEV_TYPE_LINKSINK)
+		type = (csdev == coresight_get_sink(path)) ?
+					CORESIGHT_DEV_TYPE_SINK :
+					CORESIGHT_DEV_TYPE_LINK;
+
+	switch (type) {
+	case CORESIGHT_DEV_TYPE_SINK:
+		coresight_disable_sink(csdev);
+		break;
+	case CORESIGHT_DEV_TYPE_SOURCE:
+		/* sources are disabled from either sysFS or Perf */
+		break;
+	case CORESIGHT_DEV_TYPE_LINK:
+		parent = list_prev_entry(nd, link)->csdev;
+		child = list_next_entry(nd, link)->csdev;
+		coresight_disable_link(csdev, parent, child);
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * If enabling a path fails partway through, only the devices that were
+ * already enabled need to be disabled. This function disables the
+ * devices that were enabled before the failed device.
+ *
+ * @path: the head of the list
+ * @nd: the node of the device that failed to enable
+ */
+static void coresight_disable_previous_devs(struct list_head *path,
+					struct coresight_node *nd)
+{
+
+	list_for_each_entry_continue(nd, path, link) {
+		coresight_disable_list_node(path, nd);
+	}
+}
+
+void coresight_disable_path(struct list_head *path)
+{
+	struct coresight_node *nd;
+
 	list_for_each_entry(nd, path, link) {
-		csdev = nd->csdev;
-		type = csdev->type;
-
-		/*
-		 * ETF devices are tricky... They can be a link or a sink,
-		 * depending on how they are configured.  If an ETF has been
-		 * "activated" it will be configured as a sink, otherwise
-		 * go ahead with the link configuration.
-		 */
-		if (type == CORESIGHT_DEV_TYPE_LINKSINK)
-			type = (csdev == coresight_get_sink(path)) ?
-						CORESIGHT_DEV_TYPE_SINK :
-						CORESIGHT_DEV_TYPE_LINK;
-
-		switch (type) {
-		case CORESIGHT_DEV_TYPE_SINK:
-			coresight_disable_sink(csdev);
-			break;
-		case CORESIGHT_DEV_TYPE_SOURCE:
-			/* sources are disabled from either sysFS or Perf */
-			break;
-		case CORESIGHT_DEV_TYPE_LINK:
-			parent = list_prev_entry(nd, link)->csdev;
-			child = list_next_entry(nd, link)->csdev;
-			coresight_disable_link(csdev, parent, child);
-			break;
-		default:
-			break;
-		}
+		coresight_disable_list_node(path, nd);
 	}
 }
 
@@ -349,7 +375,7 @@
 out:
 	return ret;
 err:
-	coresight_disable_path(path);
+	coresight_disable_previous_devs(path, nd);
 	goto out;
 }
 
@@ -589,6 +615,9 @@
 	int ret = 0;
 	struct coresight_device *sink;
 	struct list_head *path;
+	enum coresight_dev_subtype_source subtype;
+
+	subtype = csdev->subtype.source_subtype;
 
 	mutex_lock(&coresight_mutex);
 
@@ -596,8 +625,16 @@
 	if (ret)
 		goto out;
 
-	if (csdev->enable)
+	if (csdev->enable) {
+		/*
+		 * There could be multiple applications driving the software
+		 * source, so bump the refcount for each such user when the
+		 * source is already enabled.
+		 */
+		if (subtype == CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE)
+			atomic_inc(csdev->refcnt);
 		goto out;
+	}
 
 	/*
 	 * Search for a valid sink for this session but don't reset the
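
The error-path change above (coresight_disable_previous_devs) unwinds only the devices that were enabled before the failure, instead of walking the whole path. A tiny standalone sketch of the same partial-rollback idea, using an array in place of the kernel list_head walk and hypothetical enable/disable helpers:

#include <stddef.h>
#include <stdio.h>

struct dev {
	const char *name;
	int enabled;
	int fail;	/* force a failure for demonstration */
};

static int enable_one(struct dev *d)
{
	if (d->fail)
		return -1;
	d->enabled = 1;
	return 0;
}

static void disable_one(struct dev *d)
{
	d->enabled = 0;
}

/* Enable devices in order; on failure, undo only what was already enabled. */
static int enable_all(struct dev *devs, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (enable_one(&devs[i]) != 0) {
			while (i-- > 0)
				disable_one(&devs[i]);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct dev path[3] = {
		{ "source", 0, 0 }, { "link", 0, 0 }, { "sink", 0, 1 },
	};

	/* the sink fails, so only source and link get rolled back */
	printf("enable_all: %d\n", enable_all(path, 3));
	return 0;
}
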
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index 2dd60ee..b8e2992 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -174,8 +174,9 @@
 {
 	struct stp_master *master;
 	size_t size;
+	unsigned long align = sizeof(unsigned long);
 
-	size = ALIGN(stm->data->sw_nchannels, 8) / 8;
+	size = ALIGN(stm->data->sw_nchannels, align) / align;
 	size += sizeof(struct stp_master);
 	master = kzalloc(size, GFP_ATOMIC);
 	if (!master)
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 340e037..884c1ec 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -507,7 +507,10 @@
 	i2c_dw_disable_int(dev);
 
 	/* Enable the adapter */
-	__i2c_dw_enable_and_wait(dev, true);
+	__i2c_dw_enable(dev, true);
+
+	/* Dummy read to avoid the register getting stuck on Bay Trail */
+	dw_readl(dev, DW_IC_ENABLE_STATUS);
 
 	/* Clear and enable interrupts */
 	dw_readl(dev, DW_IC_CLR_INTR);
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index e6fe21a..b32bf7e 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -243,6 +243,7 @@
 	struct i2c_adapter adapter;
 	unsigned long smba;
 	unsigned char original_hstcfg;
+	unsigned char original_slvcmd;
 	struct pci_dev *pci_dev;
 	unsigned int features;
 
@@ -962,13 +963,24 @@
 	if (!priv->host_notify)
 		return -ENOMEM;
 
-	outb_p(SMBSLVCMD_HST_NTFY_INTREN, SMBSLVCMD(priv));
+	if (!(SMBSLVCMD_HST_NTFY_INTREN & priv->original_slvcmd))
+		outb_p(SMBSLVCMD_HST_NTFY_INTREN | priv->original_slvcmd,
+		       SMBSLVCMD(priv));
+
 	/* clear Host Notify bit to allow a new notification */
 	outb_p(SMBSLVSTS_HST_NTFY_STS, SMBSLVSTS(priv));
 
 	return 0;
 }
 
+static void i801_disable_host_notify(struct i801_priv *priv)
+{
+	if (!(priv->features & FEATURE_HOST_NOTIFY))
+		return;
+
+	outb_p(priv->original_slvcmd, SMBSLVCMD(priv));
+}
+
 static const struct i2c_algorithm smbus_algorithm = {
 	.smbus_xfer	= i801_access,
 	.functionality	= i801_func,
@@ -1589,6 +1601,10 @@
 		outb_p(inb_p(SMBAUXCTL(priv)) &
 		       ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
 
+	/* Remember original Host Notify setting */
+	if (priv->features & FEATURE_HOST_NOTIFY)
+		priv->original_slvcmd = inb_p(SMBSLVCMD(priv));
+
 	/* Default timeout in interrupt mode: 200 ms */
 	priv->adapter.timeout = HZ / 5;
 
@@ -1666,6 +1682,7 @@
 	pm_runtime_forbid(&dev->dev);
 	pm_runtime_get_noresume(&dev->dev);
 
+	i801_disable_host_notify(priv);
 	i801_del_mux(priv);
 	i2c_del_adapter(&priv->adapter);
 	i801_acpi_remove(priv);
@@ -1679,6 +1696,15 @@
 	 */
 }
 
+static void i801_shutdown(struct pci_dev *dev)
+{
+	struct i801_priv *priv = pci_get_drvdata(dev);
+
+	/* Restore config registers to avoid hard hang on some systems */
+	i801_disable_host_notify(priv);
+	pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
+}
+
 #ifdef CONFIG_PM
 static int i801_suspend(struct device *dev)
 {
@@ -1711,6 +1737,7 @@
 	.id_table	= i801_ids,
 	.probe		= i801_probe,
 	.remove		= i801_remove,
+	.shutdown	= i801_shutdown,
 	.driver		= {
 		.pm	= &i801_pm_ops,
 	},
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index b4dec08..5c9dea7 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -848,12 +848,16 @@
 	 */
 	if (of_device_is_compatible(np, "marvell,mv78230-i2c")) {
 		drv_data->offload_enabled = true;
-		drv_data->errata_delay = true;
+		/* The delay is only needed in standard mode (100kHz) */
+		if (bus_freq <= 100000)
+			drv_data->errata_delay = true;
 	}
 
 	if (of_device_is_compatible(np, "marvell,mv78230-a0-i2c")) {
 		drv_data->offload_enabled = false;
-		drv_data->errata_delay = true;
+		/* The delay is only needed in standard mode (100kHz) */
+		if (bus_freq <= 100000)
+			drv_data->errata_delay = true;
 	}
 
 	if (of_device_is_compatible(np, "allwinner,sun6i-a31-i2c"))
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index bc4af98..c712866 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -799,6 +799,7 @@
 		return ret;
 	}
 
+	gi2c->i2c_rsc.ctrl_dev = gi2c->dev;
 	gi2c->i2c_rsc.se_clk = devm_clk_get(&pdev->dev, "se-clk");
 	if (IS_ERR(gi2c->i2c_rsc.se_clk)) {
 		ret = PTR_ERR(gi2c->i2c_rsc.se_clk);
diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c
index c6a90b4..91b10af 100644
--- a/drivers/i2c/muxes/i2c-mux-reg.c
+++ b/drivers/i2c/muxes/i2c-mux-reg.c
@@ -196,20 +196,25 @@
 		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 		mux->data.reg_size = resource_size(res);
 		mux->data.reg = devm_ioremap_resource(&pdev->dev, res);
-		if (IS_ERR(mux->data.reg))
-			return PTR_ERR(mux->data.reg);
+		if (IS_ERR(mux->data.reg)) {
+			ret = PTR_ERR(mux->data.reg);
+			goto err_put_parent;
+		}
 	}
 
 	if (mux->data.reg_size != 4 && mux->data.reg_size != 2 &&
 	    mux->data.reg_size != 1) {
 		dev_err(&pdev->dev, "Invalid register size\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto err_put_parent;
 	}
 
 	muxc = i2c_mux_alloc(parent, &pdev->dev, mux->data.n_values, 0, 0,
 			     i2c_mux_reg_select, NULL);
-	if (!muxc)
-		return -ENOMEM;
+	if (!muxc) {
+		ret = -ENOMEM;
+		goto err_put_parent;
+	}
 	muxc->priv = mux;
 
 	platform_set_drvdata(pdev, muxc);
@@ -235,6 +240,8 @@
 
 add_adapter_failed:
 	i2c_mux_del_adapters(muxc);
+err_put_parent:
+	i2c_put_adapter(parent);
 
 	return ret;
 }
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index bf9a2ad..883fe2c 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1593,6 +1593,8 @@
 	struct cdrom_info *info;
 	int rc = -ENXIO;
 
+	check_disk_change(bdev);
+
 	mutex_lock(&ide_cd_mutex);
 	info = ide_cd_get(bdev->bd_disk);
 	if (!info)
diff --git a/drivers/iio/adc/hi8435.c b/drivers/iio/adc/hi8435.c
index 678e8c7..fec696e 100644
--- a/drivers/iio/adc/hi8435.c
+++ b/drivers/iio/adc/hi8435.c
@@ -121,10 +121,21 @@
 				     enum iio_event_direction dir, int state)
 {
 	struct hi8435_priv *priv = iio_priv(idev);
+	int ret;
+	u32 tmp;
 
-	priv->event_scan_mask &= ~BIT(chan->channel);
-	if (state)
+	if (state) {
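+		/*
+		 * Latch the current input level first so that the initial
+		 * comparison after enabling the event does not report a
+		 * spurious transition (assumed intent of event_prev_val).
+		 */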
+		ret = hi8435_readl(priv, HI8435_SO31_0_REG, &tmp);
+		if (ret < 0)
+			return ret;
+		if (tmp & BIT(chan->channel))
+			priv->event_prev_val |= BIT(chan->channel);
+		else
+			priv->event_prev_val &= ~BIT(chan->channel);
+
 		priv->event_scan_mask |= BIT(chan->channel);
+	} else
+		priv->event_scan_mask &= ~BIT(chan->channel);
 
 	return 0;
 }
@@ -442,13 +453,15 @@
 	priv->spi = spi;
 
 	reset_gpio = devm_gpiod_get(&spi->dev, NULL, GPIOD_OUT_LOW);
-	if (IS_ERR(reset_gpio)) {
-		/* chip s/w reset if h/w reset failed */
+	if (!IS_ERR(reset_gpio)) {
+		/* need >=100ns low pulse to reset chip */
+		gpiod_set_raw_value_cansleep(reset_gpio, 0);
+		udelay(1);
+		gpiod_set_raw_value_cansleep(reset_gpio, 1);
+	} else {
+		/* s/w reset chip if h/w reset is not available */
 		hi8435_writeb(priv, HI8435_CTRL_REG, HI8435_CTRL_SRST);
 		hi8435_writeb(priv, HI8435_CTRL_REG, 0);
-	} else {
-		udelay(5);
-		gpiod_set_value(reset_gpio, 1);
 	}
 
 	spi_set_drvdata(spi, idev);
diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
index 1f1ad41..0a8b763 100644
--- a/drivers/iio/imu/Kconfig
+++ b/drivers/iio/imu/Kconfig
@@ -39,6 +39,7 @@
 	  be called kmx61.
 
 source "drivers/iio/imu/inv_mpu6050/Kconfig"
+source "drivers/iio/imu/inv_icm20602/Kconfig"
 
 endmenu
 
diff --git a/drivers/iio/imu/Makefile b/drivers/iio/imu/Makefile
index c71bcd3..fab6a5e 100644
--- a/drivers/iio/imu/Makefile
+++ b/drivers/iio/imu/Makefile
@@ -15,5 +15,6 @@
 
 obj-y += bmi160/
 obj-y += inv_mpu6050/
+obj-y += inv_icm20602/
 
 obj-$(CONFIG_KMX61) += kmx61.o
diff --git a/drivers/iio/imu/inv_icm20602/Kconfig b/drivers/iio/imu/inv_icm20602/Kconfig
new file mode 100644
index 0000000..fa88689
--- /dev/null
+++ b/drivers/iio/imu/inv_icm20602/Kconfig
@@ -0,0 +1,14 @@
+#
+# inv-icm20602 drivers for Invensense MPU devices and combos
+#
+config INV_ICM20602_IIO
+	tristate "Invensense ICM20602 devices"
+	depends on I2C && SYSFS
+	select IIO_BUFFER
+	select IIO_BUFFER_CB
+	select IIO_TRIGGERED_BUFFER
+	help
+	  This driver supports the Invensense ICM20602,
+	  a combined gyroscope and accelerometer device.
+	  This driver can be built as a module. The module will be called
+	  inv-icm20602.
diff --git a/drivers/iio/imu/inv_icm20602/Makefile b/drivers/iio/imu/inv_icm20602/Makefile
new file mode 100644
index 0000000..d60c10a
--- /dev/null
+++ b/drivers/iio/imu/inv_icm20602/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for Invensense icm20602 device.
+#
+
+obj-$(CONFIG_INV_ICM20602_IIO) += inv-icm20602.o
+inv-icm20602-objs := inv_icm20602_core.o inv_icm20602_ring.o inv_icm20602_trigger.o inv_icm20602_bsp.o
diff --git a/drivers/iio/imu/inv_icm20602/inv_icm20602_bsp.c b/drivers/iio/imu/inv_icm20602/inv_icm20602_bsp.c
new file mode 100644
index 0000000..03e9670
--- /dev/null
+++ b/drivers/iio/imu/inv_icm20602/inv_icm20602_bsp.c
@@ -0,0 +1,961 @@
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/poll.h>
+#include <linux/i2c.h>
+#include <linux/spi/spi.h>
+#include "inv_icm20602_bsp.h"
+#include "inv_icm20602_iio.h"
+
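+/*
+ * Assign the consecutive hardware addresses [head, tail] to the contiguous
+ * run of register structs starting at reg_map, clearing each cached value.
+ */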
+#define icm20602_init_reg_addr(head, tail, reg_map) { \
+			enum inv_icm20602_reg_addr i;\
+			for (i = head; i <= tail; i++) {\
+				reg_map->address = i;\
+				reg_map->reg_u.reg = 0x0;\
+				reg_map++;\
+			} \
+}
+
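+/* Write a cached register struct's value to its hardware address. */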
+#define icm20602_write_reg_simple(st, register) \
+		icm20602_write_reg(st, \
+		register.address, \
+		register.reg_u.reg)
+
+static struct inv_icm20602_reg_map reg_set_20602;
+
+int icm20602_init_reg_map(void)
+{
+	struct struct_XG_OFFS_TC_H *reg_map = &(reg_set_20602.XG_OFFS_TC_H);
+
+	icm20602_init_reg_addr(ADDR_XG_OFFS_TC_H,
+			ADDR_XG_OFFS_TC_L, reg_map);
+
+	icm20602_init_reg_addr(ADDR_YG_OFFS_TC_H,
+			ADDR_YG_OFFS_TC_L, reg_map);
+
+	icm20602_init_reg_addr(ADDR_ZG_OFFS_TC_H,
+			ADDR_ZG_OFFS_TC_L, reg_map);
+
+	icm20602_init_reg_addr(ADDR_SELF_TEST_X_ACCEL,
+			ADDR_SELF_TEST_Z_ACCEL, reg_map);
+
+	icm20602_init_reg_addr(ADDR_XG_OFFS_USRH,
+			ADDR_LP_MODE_CFG, reg_map);
+
+	icm20602_init_reg_addr(ADDR_ACCEL_WOM_X_THR,
+			ADDR_FIFO_EN, reg_map);
+
+	icm20602_init_reg_addr(ADDR_FSYNC_INT,
+			ADDR_GYRO_ZOUT_L, reg_map);
+
+	icm20602_init_reg_addr(ADDR_SELF_TEST_X_GYRO,
+			ADDR_SELF_TEST_Z_GYRO, reg_map);
+
+	icm20602_init_reg_addr(ADDR_FIFO_WM_TH1,
+			ADDR_FIFO_WM_TH2, reg_map);
+
+	icm20602_init_reg_addr(ADDR_SIGNAL_PATH_RESET,
+			ADDR_PWR_MGMT_2, reg_map);
+
+	icm20602_init_reg_addr(ADDR_I2C_IF,
+			ADDR_I2C_IF, reg_map);
+
+	icm20602_init_reg_addr(ADDR_FIFO_COUNTH,
+			ADDR_XA_OFFSET_L, reg_map);
+
+	icm20602_init_reg_addr(ADDR_YA_OFFSET_H,
+			ADDR_YA_OFFSET_L, reg_map);
+
+	icm20602_init_reg_addr(ADDR_ZA_OFFSET_H,
+			ADDR_ZA_OFFSET_L, reg_map);
+
+	return MPU_SUCCESS;
+}
+
+#define W_FLG	0
+#define R_FLG	1
+int icm20602_bulk_read(struct inv_icm20602_state *st,
+			int reg, u8 *buf, int size)
+{
+	int result = MPU_SUCCESS;
+	char tx_buf[2] = {0x0, 0x0};
+	u8 *tmp_buf = buf;
+	struct i2c_msg msg[2];
+
+	if (!st || !buf)
+		return -MPU_FAIL;
+
+	if (st->interface == ICM20602_SPI) {
+		tx_buf[0] = ICM20602_READ_REG(reg);
+		result = spi_write_then_read(st->spi, &tx_buf[0],
+		1, tmp_buf, size);
+		if (result) {
+			pr_err("mpu read reg %u failed, rc %d\n",
+			reg, result);
+			result = -MPU_READ_FAIL;
+		}
+	} else {
+		result = size;
+#ifdef ICM20602_I2C_SMBUS
+		result += i2c_smbus_read_i2c_block_data(st->client,
+		reg, size, tmp_buf);
+#else
+		tx_buf[0] = reg;
+		msg[0].addr = st->client->addr;
+		msg[0].flags = W_FLG;
+		msg[0].len = 1;
+		msg[0].buf = tx_buf;
+
+		msg[1].addr = st->client->addr;
+		msg[1].flags = I2C_M_RD;
+		msg[1].len = size;
+		msg[1].buf = tmp_buf;
+		i2c_transfer(st->client->adapter, msg, ARRAY_SIZE(msg));
+#endif
+	}
+	return result;
+}
+
+static int icm20602_write_reg(struct inv_icm20602_state *st,
+			 uint8_t reg, uint8_t val)
+{
+	int result = MPU_SUCCESS;
+	char txbuf[2] = {0x0, 0x0};
+	struct i2c_msg msg[1];
+
+	if (st->interface == ICM20602_SPI) {
+		txbuf[0] = ICM20602_WRITE_REG(reg);
+		txbuf[1] = val;
+		result = spi_write_then_read(st->spi, &txbuf[0], 2, NULL, 0);
+		if (result) {
+			pr_err("mpu write reg %u failed, rc %d\n",
+						reg, val);
+			result = -MPU_READ_FAIL;
+		}
+	} else if (st->interface == ICM20602_I2C) {
+#ifdef ICM20602_I2C_SMBUS
+		result = i2c_smbus_write_i2c_block_data(st->client,
+						reg, 1, &val);
+#else
+		txbuf[0] = reg;
+		txbuf[1] = val;
+		msg[0].addr = st->client->addr;
+		msg[0].flags = I2C_M_IGNORE_NAK;
+		msg[0].len = 2;
+		msg[0].buf = txbuf;
+
+		i2c_transfer(st->client->adapter, msg, ARRAY_SIZE(msg));
+#endif
+	}
+
+	return result;
+}
+
+static int icm20602_read_reg(struct inv_icm20602_state *st,
+				uint8_t reg, uint8_t *val)
+{
+	int result = MPU_SUCCESS;
+	char txbuf[1] = {0x0};
+	char rxbuf[1] = {0x0};
+	struct i2c_msg msg[2];
+
+	if (st->interface == ICM20602_SPI) {
+		txbuf[0] = ICM20602_READ_REG(reg);
+		result = spi_write_then_read(st->spi,
+						&txbuf[0], 1, rxbuf, 1);
+		if (result) {
+			pr_err("mpu read reg %u failed, rc %d\n",
+						reg, result);
+			result = -MPU_READ_FAIL;
+		}
+	} else if (st->interface == ICM20602_I2C) {
+#ifdef ICM20602_I2C_SMBUS
+		result = i2c_smbus_read_i2c_block_data(st->client,
+						reg, 1, rxbuf);
+		if (result != 1) {
+			pr_err("mpu read reg %u failed, rc %d\n",
+						reg, result);
+			result = -MPU_READ_FAIL;
+		}
+#else
+		txbuf[0] = reg;
+		msg[0].addr = st->client->addr;
+		msg[0].flags = W_FLG;
+		msg[0].len = 1;
+		msg[0].buf = txbuf;
+
+		msg[1].addr = st->client->addr;
+		msg[1].flags = I2C_M_RD;
+		msg[1].len = 1;
+		msg[1].buf = rxbuf;
+
+		i2c_transfer(st->client->adapter, msg, ARRAY_SIZE(msg));
+#endif
+	}
+	*val = rxbuf[0];
+
+	return result;
+}
+
+#define combine_8_to_16(upper, lower) (((upper) << 8) | (lower))
+
+int icm20602_read_raw(struct inv_icm20602_state *st,
+		struct struct_icm20602_real_data *real_data, uint32_t type)
+{
+	struct struct_icm20602_raw_data raw_data;
+
+	if ((type & ACCEL) != 0) {
+		icm20602_read_reg(st,
+			reg_set_20602.ACCEL_XOUT_H.address,
+			&raw_data.ACCEL_XOUT_H);
+		icm20602_read_reg(st,
+			reg_set_20602.ACCEL_XOUT_L.address,
+			&raw_data.ACCEL_XOUT_L);
+		real_data->ACCEL_XOUT =
+			combine_8_to_16(raw_data.ACCEL_XOUT_H,
+			raw_data.ACCEL_XOUT_L);
+
+		icm20602_read_reg(st,
+			reg_set_20602.ACCEL_YOUT_H.address,
+			&raw_data.ACCEL_YOUT_H);
+		icm20602_read_reg(st,
+			reg_set_20602.ACCEL_YOUT_L.address,
+			&raw_data.ACCEL_YOUT_L);
+		real_data->ACCEL_YOUT =
+			combine_8_to_16(raw_data.ACCEL_YOUT_H,
+			raw_data.ACCEL_YOUT_L);
+
+		icm20602_read_reg(st,
+			reg_set_20602.ACCEL_ZOUT_H.address,
+			&raw_data.ACCEL_ZOUT_H);
+		icm20602_read_reg(st,
+			reg_set_20602.ACCEL_ZOUT_L.address,
+			&raw_data.ACCEL_ZOUT_L);
+		real_data->ACCEL_ZOUT =
+			combine_8_to_16(raw_data.ACCEL_ZOUT_H,
+			raw_data.ACCEL_ZOUT_L);
+	}
+
+	if ((type & GYRO) != 0) {
+		icm20602_read_reg(st,
+			reg_set_20602.GYRO_XOUT_H.address,
+			&raw_data.GYRO_XOUT_H);
+		icm20602_read_reg(st,
+			reg_set_20602.GYRO_XOUT_L.address,
+			&raw_data.GYRO_XOUT_L);
+		real_data->GYRO_XOUT =
+			combine_8_to_16(raw_data.GYRO_XOUT_H,
+			raw_data.GYRO_XOUT_L);
+
+		icm20602_read_reg(st,
+			reg_set_20602.GYRO_YOUT_H.address,
+			&raw_data.GYRO_YOUT_H);
+		icm20602_read_reg(st,
+			reg_set_20602.GYRO_YOUT_L.address,
+			&raw_data.GYRO_YOUT_L);
+		real_data->GYRO_YOUT =
+			combine_8_to_16(raw_data.GYRO_YOUT_H,
+			raw_data.GYRO_YOUT_L);
+
+		icm20602_read_reg(st,
+			reg_set_20602.GYRO_ZOUT_H.address,
+			&raw_data.GYRO_ZOUT_H);
+		icm20602_read_reg(st,
+			reg_set_20602.GYRO_ZOUT_L.address,
+			&raw_data.GYRO_ZOUT_L);
+		real_data->GYRO_ZOUT =
+			combine_8_to_16(raw_data.GYRO_ZOUT_H,
+			raw_data.GYRO_ZOUT_L);
+	}
+
+	return MPU_SUCCESS;
+}
+
+int icm20602_read_fifo(struct inv_icm20602_state *st,
+					void *buf, const int size)
+{
+	return icm20602_bulk_read(st,
+		reg_set_20602.FIFO_R_W.address, buf, size);
+}
+
+int icm20602_start_fifo(struct inv_icm20602_state *st)
+{
+	struct icm20602_user_config *config = NULL;
+
+	config = st->config;
+
+	/* enable fifo */
+	if (config->fifo_enabled) {
+		reg_set_20602.USER_CTRL.reg_u.REG.FIFO_EN = 0x1;
+		if (icm20602_write_reg_simple(st,
+						reg_set_20602.USER_CTRL)) {
+			pr_err("icm20602 start fifo failed\n");
+			return -MPU_FAIL;
+		}
+
+		/* enable only the FIFO overflow interrupt */
+		reg_set_20602.INT_ENABLE.reg_u.REG.FIFO_OFLOW_EN = 0x1;
+		reg_set_20602.INT_ENABLE.reg_u.REG.DATA_RDY_INT_EN = 0x0;
+		if (icm20602_write_reg_simple(st,
+						reg_set_20602.INT_ENABLE)) {
+			pr_err("icm20602 set FIFO_OFLOW_EN failed\n");
+			return -MPU_FAIL;
+		}
+	}
+
+	return MPU_SUCCESS;
+}
+
+int icm20602_stop_fifo(struct inv_icm20602_state *st)
+{
+	struct icm20602_user_config *config = NULL;
+
+	config = st->config;
+	/* disable fifo */
+	if (config->fifo_enabled) {
+		reg_set_20602.USER_CTRL.reg_u.REG.FIFO_EN = 0x0;
+		reg_set_20602.USER_CTRL.reg_u.REG.FIFO_RST = 0x1;
+		if (icm20602_write_reg_simple(st,
+				reg_set_20602.USER_CTRL)) {
+			reg_set_20602.USER_CTRL.reg_u.REG.FIFO_RST = 0x0;
+			pr_err("icm20602 stop fifo failed\n");
+			return -MPU_FAIL;
+		}
+		reg_set_20602.USER_CTRL.reg_u.REG.FIFO_RST = 0x0;
+	}
+
+	return MPU_SUCCESS;
+}
+
+int icm20602_int_status(struct inv_icm20602_state *st,
+	u8 *int_status)
+{
+	return icm20602_read_reg(st,
+		reg_set_20602.INT_STATUS.address, int_status);
+}
+
+int icm20602_int_wm_status(struct inv_icm20602_state *st,
+	u8 *int_status)
+{
+	return icm20602_read_reg(st,
+		reg_set_20602.FIFO_WM_INT_STATUS.address, int_status);
+}
+
+int icm20602_fifo_count(struct inv_icm20602_state *st,
+	u16 *fifo_count)
+{
+	u8 count_h, count_l;
+
+	*fifo_count = 0;
+	icm20602_read_reg(st, reg_set_20602.FIFO_COUNTH.address, &count_h);
+	icm20602_read_reg(st, reg_set_20602.FIFO_COUNTL.address, &count_l);
+	*fifo_count |= (count_h << 8);
+	*fifo_count |= count_l;
+	return MPU_SUCCESS;
+}
+
+static int icm20602_config_waterlevel(struct inv_icm20602_state *st)
+{
+	struct icm20602_user_config *config = NULL;
+	uint8_t val = 0;
+
+	config = st->config;
+	if (config->fifo_enabled != true)
+		return MPU_SUCCESS;
+	/* Configure the FIFO watermark (bytes) from the requested frame rate */
+	config->fifo_waterlevel = (config->user_fps_in_ms /
+				(1000 / config->gyro_accel_sample_rate))
+				*ICM20602_PACKAGE_SIZE;
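+	/*
+	 * With the driver defaults (user_fps_in_ms = 20, 200Hz sample rate)
+	 * this gives 20 / (1000 / 200) = 4 packets per frame, i.e. a
+	 * watermark of 4 * ICM20602_PACKAGE_SIZE bytes.
+	 */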
+
+	if (config->fifo_waterlevel > 1023 ||
+		config->fifo_waterlevel/50 >
+		(1023-config->fifo_waterlevel)/ICM20602_PACKAGE_SIZE) {
+		pr_err("set fifo_waterlevel failed %d\n",
+					config->fifo_waterlevel);
+		return -MPU_FAIL;
+	}
+	reg_set_20602.FIFO_WM_TH1.reg_u.reg =
+				(config->fifo_waterlevel & 0xff00) >> 8;
+	reg_set_20602.FIFO_WM_TH2.reg_u.reg =
+				(config->fifo_waterlevel & 0x00ff);
+
+	icm20602_write_reg_simple(st, reg_set_20602.FIFO_WM_TH1);
+	icm20602_write_reg_simple(st, reg_set_20602.FIFO_WM_TH2);
+	icm20602_read_reg(st, reg_set_20602.FIFO_WM_TH1.address, &val);
+	icm20602_read_reg(st, reg_set_20602.FIFO_WM_TH2.address, &val);
+
+	return MPU_SUCCESS;
+}
+
+static int icm20602_read_ST_code(struct inv_icm20602_state *st)
+{
+	struct icm20602_user_config *config = NULL;
+	int result = 0;
+
+	config = st->config;
+	result |= icm20602_read_reg(st, reg_set_20602.SELF_TEST_X_ACCEL.address,
+		&(config->acc_self_test.X));
+	result |= icm20602_read_reg(st, reg_set_20602.SELF_TEST_Y_ACCEL.address,
+		&(config->acc_self_test.Y));
+	result |= icm20602_read_reg(st, reg_set_20602.SELF_TEST_Z_ACCEL.address,
+		&(config->acc_self_test.Z));
+
+	result |= icm20602_read_reg(st, reg_set_20602.SELF_TEST_X_GYRO.address,
+		&(config->gyro_self_test.X));
+	result |= icm20602_read_reg(st, reg_set_20602.SELF_TEST_Y_GYRO.address,
+		&(config->gyro_self_test.Y));
+	result |= icm20602_read_reg(st, reg_set_20602.SELF_TEST_Z_GYRO.address,
+		&(config->gyro_self_test.Z));
+
+	return result;
+}
+
+static int icm20602_set_self_test(struct inv_icm20602_state *st)
+{
+	int result = 0;
+
+	reg_set_20602.SMPLRT_DIV.reg_u.REG.SMPLRT_DIV = 0;
+	result |= icm20602_write_reg_simple(st, reg_set_20602.SMPLRT_DIV);
+
+	reg_set_20602.CONFIG.reg_u.REG.DLFP_CFG = INV_ICM20602_GYRO_LFP_92HZ;
+	result |= icm20602_write_reg_simple(st, reg_set_20602.CONFIG);
+
+	reg_set_20602.GYRO_CONFIG.reg_u.REG.FCHOICE_B = 0x0;
+	reg_set_20602.GYRO_CONFIG.reg_u.REG.FS_SEL = ICM20602_GYRO_FSR_250DPS;
+	result |= icm20602_write_reg_simple(st, reg_set_20602.GYRO_CONFIG);
+
+	reg_set_20602.ACCEL_CONFIG2.reg_u.REG.A_DLPF_CFG = ICM20602_ACCLFP_99;
+	reg_set_20602.ACCEL_CONFIG2.reg_u.REG.ACCEL_FCHOICE_B = 0X0;
+	result |= icm20602_write_reg_simple(st, reg_set_20602.ACCEL_CONFIG2);
+
+	reg_set_20602.ACCEL_CONFIG.reg_u.REG.ACCEL_FS_SEL = ICM20602_ACC_FSR_2G;
+	result |= icm20602_write_reg_simple(st, reg_set_20602.ACCEL_CONFIG);
+
+	result |= icm20602_read_ST_code(st);
+
+	return result;
+}
+
+static int icm20602_do_test_acc(struct inv_icm20602_state *st,
+	struct X_Y_Z *acc, struct X_Y_Z *acc_st)
+{
+	struct struct_icm20602_real_data *real_data =
+		kmalloc(sizeof(*real_data), GFP_ATOMIC);
+	int i;
+
+	for (i = 0; i < SELFTEST_COUNT; i++) {
+		icm20602_read_raw(st, real_data, ACCEL);
+		acc->X += real_data->ACCEL_XOUT;
+		acc->Y += real_data->ACCEL_YOUT;
+		acc->Z += real_data->ACCEL_ZOUT;
+		usleep_range(1000, 1001);
+	}
+	acc->X /= SELFTEST_COUNT;
+	acc->X *= ST_PRECISION;
+
+	acc->Y /= SELFTEST_COUNT;
+	acc->Y *= ST_PRECISION;
+
+	acc->Z /= SELFTEST_COUNT;
+	acc->Z *= ST_PRECISION;
+
+	reg_set_20602.ACCEL_CONFIG.reg_u.REG.XG_ST = 0x1;
+	reg_set_20602.ACCEL_CONFIG.reg_u.REG.YG_ST = 0x1;
+	reg_set_20602.ACCEL_CONFIG.reg_u.REG.ZG_ST = 0x1;
+	icm20602_write_reg_simple(st, reg_set_20602.ACCEL_CONFIG);
+
+	for (i = 0; i < SELFTEST_COUNT; i++) {
+		icm20602_read_raw(st, real_data, ACCEL);
+		acc_st->X += real_data->ACCEL_XOUT;
+		acc_st->Y += real_data->ACCEL_YOUT;
+		acc_st->Z += real_data->ACCEL_ZOUT;
+		usleep_range(1000, 1001);
+	}
+	acc_st->X /= SELFTEST_COUNT;
+	acc_st->X *= ST_PRECISION;
+
+	acc_st->Y /= SELFTEST_COUNT;
+	acc_st->Y *= ST_PRECISION;
+
+	acc_st->Z /= SELFTEST_COUNT;
+	acc_st->Z *= ST_PRECISION;
+
+	return MPU_SUCCESS;
+}
+
+static int icm20602_do_test_gyro(struct inv_icm20602_state *st,
+	struct X_Y_Z *gyro, struct X_Y_Z *gyro_st)
+{
+	struct struct_icm20602_real_data *real_data =
+		kmalloc(sizeof(*real_data), GFP_ATOMIC);
+	int i;
+
+	for (i = 0; i < SELFTEST_COUNT; i++) {
+		icm20602_read_raw(st, real_data, GYRO);
+		gyro->X += real_data->GYRO_XOUT;
+		gyro->Y += real_data->GYRO_YOUT;
+		gyro->Z += real_data->GYRO_ZOUT;
+		usleep_range(1000, 1001);
+	}
+	gyro->X /= SELFTEST_COUNT;
+	gyro->X *= ST_PRECISION;
+
+	gyro->Y /= SELFTEST_COUNT;
+	gyro->Y *= ST_PRECISION;
+
+	gyro->Z /= SELFTEST_COUNT;
+	gyro->Z *= ST_PRECISION;
+
+	reg_set_20602.GYRO_CONFIG.reg_u.REG.XG_ST = 0x1;
+	reg_set_20602.GYRO_CONFIG.reg_u.REG.YG_ST = 0x1;
+	reg_set_20602.GYRO_CONFIG.reg_u.REG.ZG_ST = 0x1;
+	icm20602_write_reg_simple(st, reg_set_20602.GYRO_CONFIG);
+
+	for (i = 0; i < SELFTEST_COUNT; i++) {
+		icm20602_read_raw(st, real_data, GYRO);
+		gyro_st->X += real_data->GYRO_XOUT;
+		gyro_st->Y += real_data->GYRO_YOUT;
+		gyro_st->Z += real_data->GYRO_ZOUT;
+		usleep_range(1000, 1001);
+	}
+	gyro_st->X /= SELFTEST_COUNT;
+	gyro_st->X *= ST_PRECISION;
+
+	gyro_st->Y /= SELFTEST_COUNT;
+	gyro_st->Y *= ST_PRECISION;
+
+	gyro_st->Z /= SELFTEST_COUNT;
+	gyro_st->Z *= ST_PRECISION;
+
+	return MPU_SUCCESS;
+}
+
+static bool icm20602_check_acc_selftest(struct inv_icm20602_state *st,
+	struct X_Y_Z *acc, struct X_Y_Z *acc_st)
+{
+	struct X_Y_Z acc_ST_code, st_otp, st_shift_cust;
+	bool otp_value_zero = false, test_result = true;
+
+	acc_ST_code.X = st->config->acc_self_test.X;
+	acc_ST_code.Y = st->config->acc_self_test.Y;
+	acc_ST_code.Z = st->config->acc_self_test.Z;
+
+	st_otp.X = (acc_ST_code.X != 0) ? mpu_st_tb[acc_ST_code.X - 1] : 0;
+	st_otp.Y = (acc_ST_code.Y != 0) ? mpu_st_tb[acc_ST_code.Y - 1] : 0;
+	st_otp.Z = (acc_ST_code.Z != 0) ? mpu_st_tb[acc_ST_code.Z - 1] : 0;
+
+	if ((st_otp.X & st_otp.Y & st_otp.Z) == 0)
+		otp_value_zero = true;
+
+	st_shift_cust.X = acc_st->X - acc->X;
+	st_shift_cust.Y = acc_st->Y - acc->Y;
+	st_shift_cust.Z = acc_st->Z - acc->Z;
+	if (!otp_value_zero) {
+		if (
+		st_shift_cust.X <
+		(st_otp.X * ST_PRECISION * ACC_ST_SHIFT_MIN / 100) ||
+		st_shift_cust.Y <
+		(st_otp.Y * ST_PRECISION * ACC_ST_SHIFT_MIN / 100) ||
+		st_shift_cust.Z <
+		(st_otp.Z * ST_PRECISION * ACC_ST_SHIFT_MIN / 100) ||
+
+		st_shift_cust.X >
+		(st_otp.X * ST_PRECISION * ACC_ST_SHIFT_MAX / 100) ||
+		st_shift_cust.Y >
+		(st_otp.Y * ST_PRECISION * ACC_ST_SHIFT_MAX / 100) ||
+		st_shift_cust.Z >
+		(st_otp.Z * ST_PRECISION * ACC_ST_SHIFT_MAX / 100)
+		) {
+			test_result = false;
+		}
+	} else {
+		if (
+		abs(st_shift_cust.X) <
+		(ACC_ST_AL_MIN * 16384 / 1000 * ST_PRECISION) ||
+		abs(st_shift_cust.Y) <
+		(ACC_ST_AL_MIN * 16384 / 1000 * ST_PRECISION) ||
+		abs(st_shift_cust.Z) <
+		(ACC_ST_AL_MIN * 16384 / 1000 * ST_PRECISION)  ||
+
+		abs(st_shift_cust.X) >
+		(ACC_ST_AL_MAX * 16384 / 1000 * ST_PRECISION) ||
+		abs(st_shift_cust.Y) >
+		(ACC_ST_AL_MAX * 16384 / 1000 * ST_PRECISION) ||
+		abs(st_shift_cust.Z) >
+		(ACC_ST_AL_MAX * 16384 / 1000 * ST_PRECISION)
+		) {
+			test_result = false;
+		}
+	}
+
+	return test_result;
+}
+
+static int icm20602_check_gyro_selftest(struct inv_icm20602_state *st,
+	struct X_Y_Z *gyro, struct X_Y_Z *gyro_st)
+{
+	struct X_Y_Z gyro_ST_code, st_otp, st_shift_cust;
+	bool otp_value_zero = false, test_result = true;
+
+	gyro_ST_code.X = st->config->gyro_self_test.X;
+	gyro_ST_code.Y = st->config->gyro_self_test.Y;
+	gyro_ST_code.Z = st->config->gyro_self_test.Z;
+
+	st_otp.X = (gyro_ST_code.X != 0) ? mpu_st_tb[gyro_ST_code.X - 1] : 0;
+	st_otp.Y = (gyro_ST_code.Y != 0) ? mpu_st_tb[gyro_ST_code.Y - 1] : 0;
+	st_otp.Z = (gyro_ST_code.Z != 0) ? mpu_st_tb[gyro_ST_code.Z - 1] : 0;
+
+	if ((st_otp.X & st_otp.Y & st_otp.Z) == 0)
+		otp_value_zero = true;
+
+	st_shift_cust.X = gyro_st->X - gyro->X;
+	st_shift_cust.Y = gyro_st->Y - gyro->Y;
+	st_shift_cust.Z = gyro_st->Z - gyro->Z;
+	if (!otp_value_zero) {
+		if (
+		st_shift_cust.X <
+		(st_otp.X * ST_PRECISION * GYRO_ST_SHIFT / 100) ||
+		st_shift_cust.Y <
+		(st_otp.Y * ST_PRECISION * GYRO_ST_SHIFT / 100) ||
+		st_shift_cust.Z <
+		(st_otp.Z * ST_PRECISION * GYRO_ST_SHIFT / 100)
+		) {
+			test_result = false;
+		}
+	} else {
+		if (
+		abs(st_shift_cust.X) <
+		(GYRO_ST_AL * 32768 / 250 * ST_PRECISION) ||
+		abs(st_shift_cust.Y) <
+		(GYRO_ST_AL * 32768 / 250 * ST_PRECISION) ||
+		abs(st_shift_cust.Z) <
+		(GYRO_ST_AL * 32768 / 250 * ST_PRECISION)
+		) {
+			test_result = false;
+		}
+	}
+
+	if (test_result == true) {
+		/* Self Test Pass/Fail Criteria C */
+		if (
+		abs(st_shift_cust.X) >
+		GYRO_OFFSET_MAX * 32768 / 250 * ST_PRECISION ||
+		abs(st_shift_cust.Y) >
+		GYRO_OFFSET_MAX * 32768 / 250 * ST_PRECISION ||
+		abs(st_shift_cust.Z) >
+		GYRO_OFFSET_MAX * 32768 / 250 * ST_PRECISION
+		) {
+			test_result = false;
+		}
+	}
+
+	return test_result;
+}
+
+bool icm20602_self_test(struct inv_icm20602_state *st)
+{
+	struct X_Y_Z acc = {0}, acc_st = {0};
+	struct X_Y_Z gyro = {0}, gyro_st = {0};
+	bool test_result = true;
+
+	icm20602_set_self_test(st);
+	icm20602_do_test_acc(st, &acc, &acc_st);
+	icm20602_do_test_gyro(st, &gyro, &gyro_st);
+	test_result = icm20602_check_acc_selftest(st, &acc, &acc_st);
+	test_result &= icm20602_check_gyro_selftest(st, &gyro, &gyro_st);
+
+	return test_result;
+}
+
+static int icm20602_config_fifo(struct inv_icm20602_state *st)
+{
+	struct icm20602_user_config *config = NULL;
+
+	config = st->config;
+	if (config->fifo_enabled != true)
+		return MPU_SUCCESS;
+
+	/*
+	 * Set CONFIG.USER_SET_BIT = 0; the datasheet requires it but gives no reason
+	 */
+	reg_set_20602.CONFIG.reg_u.REG.USER_SET_BIT = 0x0;
+	/*
+	 * Set CONFIG.FIFO_MODE = 1,
+	 * i.e. when FIFO is full, additional writes will
+	 * not be written to FIFO
+	 */
+	reg_set_20602.CONFIG.reg_u.REG.FIFO_MODE = 0x1;
+	if (icm20602_write_reg_simple(st, reg_set_20602.CONFIG))
+		return -MPU_FAIL;
+
+	/* reset fifo */
+	reg_set_20602.USER_CTRL.reg_u.REG.FIFO_RST = 0x1;
+	if (icm20602_write_reg_simple(st, reg_set_20602.USER_CTRL)) {
+		reg_set_20602.USER_CTRL.reg_u.REG.FIFO_RST = 0x0;
+		return -MPU_FAIL;
+	}
+	reg_set_20602.USER_CTRL.reg_u.REG.FIFO_RST = 0x0;
+
+	/* Enable FIFO on specified sensors */
+	reg_set_20602.FIFO_EN.reg_u.REG.GYRO_FIFO_EN = 0x1;
+	reg_set_20602.FIFO_EN.reg_u.REG.ACCEL_FIFO_EN = 0x1;
+	if (icm20602_write_reg_simple(st, reg_set_20602.FIFO_EN))
+		return -MPU_FAIL;
+
+	if (icm20602_config_waterlevel(st))
+		return -MPU_FAIL;
+
+	if (icm20602_start_fifo(st))
+		return -MPU_FAIL;
+
+	return MPU_SUCCESS;
+}
+
+static int icm20602_initialize_gyro(struct inv_icm20602_state *st)
+{
+	struct icm20602_user_config *config = NULL;
+	int result = MPU_SUCCESS;
+
+	if (st == NULL)
+		return -MPU_FAIL;
+
+	/*
+	 * The ICM20602 supports gyro sampling rates up to 32KHz
+	 * when fchoice_b != 0x00.
+	 * In our driver we support up to 8KHz,
+	 * so fchoice_b is always set to 0x00.
+	 */
+	config = st->config;
+	/*
+	 * SMPLRT_DIV in ICM20602_REG_SMPLRT_DIV is only used for 1kHz internal
+	 * sampling, i.e. fchoice_b in ICM20602_REG_GYRO_CONFIG is 00
+	 * and 0 < dlpf_cfg in ICM20602_REG_CONFIG < 7
+	 * SAMPLE_RATE=Internal_Sample_Rate / (1 + SMPLRT_DIV)
+	 */
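+	/* e.g. a requested 200Hz rate gives SMPLRT_DIV = 1000 / 200 - 1 = 4 */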
+	if (config->gyro_accel_sample_rate <= ICM20602_SAMPLE_RATE_1000HZ)
+		reg_set_20602.SMPLRT_DIV.reg_u.reg =
+		ICM20602_INTERNAL_SAMPLE_RATE_HZ /
+		config->gyro_accel_sample_rate - 1;
+
+	result = icm20602_write_reg_simple(st, reg_set_20602.SMPLRT_DIV);
+
+	/* Set gyro&temperature(combine) LPF */
+	reg_set_20602.CONFIG.reg_u.REG.DLFP_CFG = config->gyro_lpf;
+	result |= icm20602_write_reg_simple(st, reg_set_20602.CONFIG);
+
+	/* Set gyro full scale range */
+	reg_set_20602.GYRO_CONFIG.reg_u.REG.FCHOICE_B = 0x0;
+	reg_set_20602.GYRO_CONFIG.reg_u.REG.FS_SEL = config->gyro_fsr;
+	result |= icm20602_write_reg_simple(st, reg_set_20602.GYRO_CONFIG);
+
+	/* Set Accel full scale range */
+	reg_set_20602.ACCEL_CONFIG.reg_u.REG.ACCEL_FS_SEL = config->acc_fsr;
+	result |= icm20602_write_reg_simple(st, reg_set_20602.ACCEL_CONFIG);
+
+	/*
+	 * Set accel LPF
+	 * Support accel sample rate up to 1KHz
+	 * thus set accel_fchoice_b to 0x00
+	 * The actual accel sample rate is 1KHz/(1+SMPLRT_DIV)
+	 */
+	reg_set_20602.ACCEL_CONFIG2.reg_u.REG.ACCEL_FCHOICE_B = 0x0;
+	reg_set_20602.ACCEL_CONFIG2.reg_u.REG.A_DLPF_CFG = config->acc_lpf;
+	result |= icm20602_write_reg_simple(st,
+				reg_set_20602.ACCEL_CONFIG2);
+
+	if (result) {
+		pr_err("icm20602 init gyro and accel failed\n");
+		return -MPU_FAIL;
+	}
+
+	return result;
+}
+
+int icm20602_set_power_itg(struct inv_icm20602_state *st, bool power_on)
+{
+	int result = MPU_SUCCESS;
+
+	if (power_on) {
+		reg_set_20602.PWR_MGMT_1.reg_u.reg = 0;
+		result = icm20602_write_reg_simple(st,
+					reg_set_20602.PWR_MGMT_1);
+	} else {
+		reg_set_20602.PWR_MGMT_1.reg_u.REG.SLEEP = 0x1;
+		result = icm20602_write_reg_simple(st,
+					reg_set_20602.PWR_MGMT_1);
+	}
+	if (result) {
+		pr_err("set power failed power %d  err %d\n",
+					power_on, result);
+		return result;
+	}
+
+	if (power_on)
+		msleep(30);
+
+	return result;
+}
+
+int icm20602_init_device(struct inv_icm20602_state *st)
+{
+	int result = MPU_SUCCESS;
+	struct icm20602_user_config *config = NULL;
+	int package_count;
+	int i;
+
+	if (st == NULL || st->config == NULL) {
+		pr_err("icm20602 validate config failed\n");
+		return -MPU_FAIL;
+	}
+	config = st->config;
+
+	/* turn on gyro and accel */
+	reg_set_20602.PWR_MGMT_2.reg_u.reg = 0x0;
+	result |= icm20602_write_reg_simple(st, reg_set_20602.PWR_MGMT_2);
+	msleep(30);
+
+	/* disable INT */
+	reg_set_20602.INT_ENABLE.reg_u.reg = 0x0;
+	result |= icm20602_write_reg_simple(st, reg_set_20602.INT_ENABLE);
+
+	/* disable FIFO */
+	reg_set_20602.FIFO_EN.reg_u.reg = 0x0;
+	result |= icm20602_write_reg_simple(st, reg_set_20602.FIFO_EN);
+
+	/* reset FIFO */
+	reg_set_20602.USER_CTRL.reg_u.REG.FIFO_RST = 0x1;
+	result |= icm20602_write_reg_simple(st, reg_set_20602.USER_CTRL);
+	reg_set_20602.USER_CTRL.reg_u.REG.FIFO_RST = 0x0;
+	msleep(30);
+
+	/* init gyro and accel */
+	if (icm20602_initialize_gyro(st)) {
+		pr_err("icm20602 init device failed\n");
+		return -MPU_FAIL;
+	}
+
+	/* if FIFO enable, config FIFO */
+	if (config->fifo_enabled) {
+		if (icm20602_config_fifo(st)) {
+			pr_err("icm20602 init config fifo failed\n");
+			return -MPU_FAIL;
+		}
+	} else {
+		/* enable interrupt */
+		reg_set_20602.INT_ENABLE.reg_u.REG.DATA_RDY_INT_EN = 0x0;
+		if (icm20602_write_reg_simple(st,
+				reg_set_20602.INT_ENABLE)) {
+			pr_err("icm20602 set raw rdy failed\n");
+			return -MPU_FAIL;
+		}
+	}
+
+	/* buffer malloc */
+	package_count = config->fifo_waterlevel / ICM20602_PACKAGE_SIZE;
+
+	st->buf = kzalloc(config->fifo_waterlevel * 2, GFP_ATOMIC);
+	if (!st->buf)
+		return -ENOMEM;
+
+	st->data_push = kcalloc(package_count,
+		sizeof(struct struct_icm20602_data), GFP_ATOMIC);
+	if (!st->data_push)
+		return -ENOMEM;
+
+	for (i = 0; i < package_count; i++) {
+		st->data_push[i].raw_data =
+			kzalloc(ICM20602_PACKAGE_SIZE, GFP_ATOMIC);
+	}
+
+	return result;
+}
+
+int icm20602_reset_fifo(struct inv_icm20602_state *st)
+{
+	reg_set_20602.USER_CTRL.reg_u.REG.FIFO_RST = 0x1;
+	if (icm20602_write_reg_simple(st, reg_set_20602.USER_CTRL)) {
+		reg_set_20602.USER_CTRL.reg_u.REG.FIFO_RST = 0x0;
+		return -MPU_FAIL;
+	}
+	reg_set_20602.USER_CTRL.reg_u.REG.FIFO_RST = 0x0;
+	return MPU_SUCCESS;
+}
+
+
+void icm20602_rw_test(struct inv_icm20602_state *st)
+{
+	uint8_t val = 0;
+
+	reg_set_20602.PWR_MGMT_2.reg_u.REG.STBY_ZG = 0x1;
+	icm20602_write_reg_simple(st, reg_set_20602.PWR_MGMT_2);
+	reg_set_20602.CONFIG.reg_u.REG.FIFO_MODE = 0x1;
+	icm20602_write_reg_simple(st, reg_set_20602.CONFIG);
+
+	icm20602_read_reg(st, reg_set_20602.PWR_MGMT_2.address, &val);
+	icm20602_read_reg(st, reg_set_20602.CONFIG.address, &val);
+
+}
+
+int icm20602_detect(struct inv_icm20602_state *st)
+{
+	int result = MPU_SUCCESS;
+	uint8_t retry = 0, val = 0;
+
+	pr_debug("icm20602_detect\n");
+	/* reset to make sure previous state are not there */
+	reg_set_20602.PWR_MGMT_1.reg_u.REG.DEVICE_RESET = 0x1;
+	result = icm20602_write_reg_simple(st, reg_set_20602.PWR_MGMT_1);
+	if (result) {
+		pr_err("mpu write reg 0x%x value 0x%x failed\n",
+		reg_set_20602.PWR_MGMT_1.reg_u.reg,
+		reg_set_20602.PWR_MGMT_1.reg_u.REG.DEVICE_RESET);
+		return result;
+	}
+	reg_set_20602.PWR_MGMT_1.reg_u.REG.DEVICE_RESET = 0x0;
+
+	/* the power up delay */
+	msleep(30);
+
+	/* out of sleep */
+	result = icm20602_set_power_itg(st, true);
+	if (result)
+		return result;
+	/* get who am i register */
+	while (retry < 10) {
+		/* get version (expecting 0x12 for the icm20602) */
+		icm20602_read_reg(st, reg_set_20602.WHO_AM_I.address, &val);
+		if (val == ICM20602_WHO_AM_I)
+			break;
+		retry++;
+	}
+
+	if (val != ICM20602_WHO_AM_I) {
+		pr_err("detect mpu failed,whoami reg 0x%x\n", val);
+		result = -MPU_FAIL;
+	} else {
+		pr_debug("detect mpu ok,whoami reg 0x%x\n", val);
+	}
+	icm20602_rw_test(st);
+
+	return result;
+}
diff --git a/drivers/iio/imu/inv_icm20602/inv_icm20602_bsp.h b/drivers/iio/imu/inv_icm20602/inv_icm20602_bsp.h
new file mode 100644
index 0000000..9c4dbd0
--- /dev/null
+++ b/drivers/iio/imu/inv_icm20602/inv_icm20602_bsp.h
@@ -0,0 +1,978 @@
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* register high bit=1 for read */
+#define ICM20602_READ_REG(reg) (reg | 0x80)
+
+/* register high bit=0 for write */
+#define ICM20602_WRITE_REG(reg) (reg & (~0x80))
+#define ICM20602_WHO_AM_I 0x12
+#define ICM20602_INTERNAL_SAMPLE_RATE_HZ 1000
+
+#define SELFTEST_COUNT 200
+#define ST_PRECISION 1000
+#define ACC_ST_SHIFT_MAX	150
+#define ACC_ST_SHIFT_MIN	50
+#define ACC_ST_AL_MIN		225
+#define ACC_ST_AL_MAX		675
+#define GYRO_ST_SHIFT	50
+#define GYRO_ST_AL		60
+#define GYRO_OFFSET_MAX	20
+
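+/*
+ * Self-test OTP lookup table, indexed by (self-test code - 1); each entry
+ * is roughly 1% larger than the previous one, starting from 2620.
+ */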
+static const uint16_t mpu_st_tb[256] = {
+	2620, 2646, 2672, 2699, 2726, 2753, 2781, 2808,
+	2837, 2865, 2894, 2923, 2952, 2981, 3011, 3041,
+	3072, 3102, 3133, 3165, 3196, 3228, 3261, 3293,
+	3326, 3359, 3393, 3427, 3461, 3496, 3531, 3566,
+	3602, 3638, 3674, 3711, 3748, 3786, 3823, 3862,
+	3900, 3939, 3979, 4019, 4059, 4099, 4140, 4182,
+	4224, 4266, 4308, 4352, 4395, 4439, 4483, 4528,
+	4574, 4619, 4665, 4712, 4759, 4807, 4855, 4903,
+	4953, 5002, 5052, 5103, 5154, 5205, 5257, 5310,
+	5363, 5417, 5471, 5525, 5581, 5636, 5693, 5750,
+	5807, 5865, 5924, 5983, 6043, 6104, 6165, 6226,
+	6289, 6351, 6415, 6479, 6544, 6609, 6675, 6742,
+	6810, 6878, 6946, 7016, 7086, 7157, 7229, 7301,
+	7374, 7448, 7522, 7597, 7673, 7750, 7828, 7906,
+	7985, 8065, 8145, 8227, 8309, 8392, 8476, 8561,
+	8647, 8733, 8820, 8909, 8998, 9088, 9178, 9270,
+	9363, 9457, 9551, 9647, 9743, 9841, 9939, 10038,
+	10139, 10240, 10343, 10446, 10550, 10656, 10763, 10870,
+	10979, 11089, 11200, 11312, 11425, 11539, 11654, 11771,
+	11889, 12008, 12128, 12249, 12371, 12495, 12620, 12746,
+	12874, 13002, 13132, 13264, 13396, 13530, 13666, 13802,
+	13940, 14080, 14221, 14363, 14506, 14652, 14798, 14946,
+	15096, 15247, 15399, 15553, 15709, 15866, 16024, 16184,
+	16346, 16510, 16675, 16842, 17010, 17180, 17352, 17526,
+	17701, 17878, 18057, 18237, 18420, 18604, 18790, 18978,
+	19167, 19359, 19553, 19748, 19946, 20145, 20347, 20550,
+	20756, 20963, 21173, 21385, 21598, 21814, 22033, 22253,
+	22475, 22700, 22927, 23156, 23388, 23622, 23858, 24097,
+	24338, 24581, 24827, 25075, 25326, 25579, 25835, 26093,
+	26354, 26618, 26884, 27153, 27424, 27699, 27976, 28255,
+	28538, 28823, 29112, 29403, 29697, 29994, 30294, 30597,
+	30903, 31212, 31524, 31839, 32157, 32479, 32804
+};
+
+enum inv_icm20602_reg_addr {
+	ADDR_XG_OFFS_TC_H = 0x04,
+	ADDR_XG_OFFS_TC_L,
+	ADDR_YG_OFFS_TC_H = 0x07,
+	ADDR_YG_OFFS_TC_L,
+	ADDR_ZG_OFFS_TC_H = 0x0A,
+	ADDR_ZG_OFFS_TC_L,
+
+	ADDR_SELF_TEST_X_ACCEL = 0x0D,
+	ADDR_SELF_TEST_Y_ACCEL,
+	ADDR_SELF_TEST_Z_ACCEL,
+
+	ADDR_XG_OFFS_USRH = 0x13,
+	ADDR_XG_OFFS_USRL,
+	ADDR_YG_OFFS_USRH,
+	ADDR_YG_OFFS_USRL,
+	ADDR_ZG_OFFS_USRH,
+	ADDR_ZG_OFFS_USRL,
+
+	ADDR_SMPLRT_DIV,
+	ADDR_CONFIG,
+
+	ADDR_GYRO_CONFIG,
+
+	ADDR_ACCEL_CONFIG,
+	ADDR_ACCEL_CONFIG2,
+
+	ADDR_LP_MODE_CFG,
+
+	ADDR_ACCEL_WOM_X_THR = 0x20,
+	ADDR_ACCEL_WOM_Y_THR,
+	ADDR_ACCEL_WOM_Z_THR,
+	ADDR_FIFO_EN,
+
+	ADDR_FSYNC_INT = 0x36,
+	ADDR_INT_PIN_CFG,
+	ADDR_INT_ENABLE,
+	ADDR_FIFO_WM_INT_STATUS,
+	ADDR_INT_STATUS,
+
+	ADDR_ACCEL_XOUT_H,
+	ADDR_ACCEL_XOUT_L,
+	ADDR_ACCEL_YOUT_H,
+	ADDR_ACCEL_YOUT_L,
+	ADDR_ACCEL_ZOUT_H,
+	ADDR_ACCEL_ZOUT_L,
+
+	ADDR_TEMP_OUT_H,
+	ADDR_TEMP_OUT_L,
+
+	ADDR_GYRO_XOUT_H,
+	ADDR_GYRO_XOUT_L,
+	ADDR_GYRO_YOUT_H,
+	ADDR_GYRO_YOUT_L,
+	ADDR_GYRO_ZOUT_H,
+	ADDR_GYRO_ZOUT_L,
+
+	ADDR_SELF_TEST_X_GYRO = 0x50,
+	ADDR_SELF_TEST_Y_GYRO,
+	ADDR_SELF_TEST_Z_GYRO,
+
+	ADDR_FIFO_WM_TH1 = 0x60,
+	ADDR_FIFO_WM_TH2,
+
+	ADDR_SIGNAL_PATH_RESET = 0x68,
+	ADDR_ACCEL_INTEL_CTRL,
+	ADDR_USER_CTRL,
+
+	ADDR_PWR_MGMT_1,
+	ADDR_PWR_MGMT_2,
+
+	ADDR_I2C_IF = 0x70,
+
+	ADDR_FIFO_COUNTH = 0x72,
+	ADDR_FIFO_COUNTL,
+	ADDR_FIFO_R_W,
+
+	ADDR_WHO_AM_I,
+
+	ADDR_XA_OFFSET_H,
+	ADDR_XA_OFFSET_L,
+	ADDR_YA_OFFSET_H = 0x7A,
+	ADDR_YA_OFFSET_L,
+	ADDR_ZA_OFFSET_H = 0x7D,
+	ADDR_ZA_OFFSET_L
+};
+
+struct struct_XG_OFFS_TC_H {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 REG_XG_OFFS_TC_H :2;
+			u8 REG_XG_OFFS_LP	:6;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_XG_OFFS_TC_L {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 REG_XG_OFFS_TC_L	:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_YG_OFFS_TC_H {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 REG_YG_OFFS_TC_H	:2;
+			u8 REG_YG_OFFS_LP	:6;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_YG_OFFS_TC_L {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 REG_YG_OFFS_TC_L :8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_ZG_OFFS_TC_H {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 REG_ZG_OFFS_TC_H :2;
+			u8 REG_ZG_OFFS_LP	:6;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_ZG_OFFS_TC_L {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 REG_ZG_OFFS_TC_L :8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_SELF_TEST_X_ACCEL {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 XA_ST_DATA	:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_SELF_TEST_Y_ACCEL {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 YA_ST_DATA	:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_SELF_TEST_Z_ACCEL {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 ZA_ST_DATA	:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_XG_OFFS_USRH {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 X_OFFS_USR	:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_XG_OFFS_USRL {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 X_OFFS_USR	:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_YG_OFFS_USRH {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 Y_OFFS_USR	:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_YG_OFFS_USRL {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 Y_OFFS_USR	:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_ZG_OFFS_USRH {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 Z_OFFS_USR	:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_ZG_OFFS_USRL {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 Z_OFFS_USR	:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_SMPLRT_DIV {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 SMPLRT_DIV	:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_CONFIG {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 DLFP_CFG		:3;
+			u8 EXT_SYNC_SET	:3;
+			u8 FIFO_MODE	:1;
+			u8 USER_SET_BIT	:1;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_GYRO_CONFIG {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 FCHOICE_B	:2;
+			u8 RESERVE0		:1;
+			u8 FS_SEL		:2;
+			u8 ZG_ST		:1;
+			u8 YG_ST		:1;
+			u8 XG_ST		:1;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_ACCEL_CONFIG {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 RESERVE0			:3;
+			u8 ACCEL_FS_SEL		:2;
+			u8 ZG_ST			:1;
+			u8 YG_ST			:1;
+			u8 XG_ST			:1;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_ACCEL_CONFIG2 {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 A_DLPF_CFG		:3;
+			u8 ACCEL_FCHOICE_B	:1;
+			u8 DEC2_CFG			:2;
+			u8 RESERVE0			:2;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_LP_MODE_CFG {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 RESERVE0			:4;
+			u8 G_AVGCFG			:3;
+			u8 GYRO_CYCLE		:1;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_ACCEL_WOM_X_THR {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 WOM_X_TH			:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+
+struct struct_ACCEL_WOM_Y_THR {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 WOM_Y_TH			:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_ACCEL_WOM_Z_THR {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 WOM_Z_TH			:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_FIFO_EN {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 RESERVE1			:3;
+			u8 ACCEL_FIFO_EN	:1;
+			u8 GYRO_FIFO_EN		:1;
+			u8 RESERVE0			:3;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_FSYNC_INT {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 RESERVE0			:7;
+			u8 FSYNC_INT		:1;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_INT_PIN_CFG {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 RESERVE0			:2;
+			u8 FSYNC_INT_MODE_EN:1;
+			u8 FSYNC_INT_LEVEL	:1;
+			u8 INT_RD_CLEAR		:1;
+			u8 LATCH_INT_EN		:1;
+			u8 INT_OPEN			:1;
+			u8 INT_LEVEL		:1;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_INT_ENABLE {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 DATA_RDY_INT_EN	:1;
+			u8 RESERVE0			:1;
+			u8 GDRIVE_INT_EN	:1;
+			u8 FSYNC_INT_EN		:1;
+			u8 FIFO_OFLOW_EN	:1;
+			u8 WOM_Z_INT_EN		:1;
+			u8 WOM_Y_INT_EN		:1;
+			u8 WOM_X_INT_EN		:1;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_FIFO_WM_INT_STATUS {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 RESERVE1			:6;
+			u8 FIFO_WM_INT		:1;
+			u8 RESERVE0			:1;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_INT_STATUS {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 DATA_RDY_INT		:1;
+			u8 RESERVE1			:1;
+			u8 GDRIVE_INT		:1;
+			u8 RESERVE0			:1;
+			u8 FIFO_OFLOW_INT	:1;
+			u8 WOM_Z_INT		:1;
+			u8 WOM_Y_INT		:1;
+			u8 WOM_X_INT		:1;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_ACCEL_XOUT_H {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 ACCEL_XOUT		:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_ACCEL_XOUT_L {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 ACCEL_XOUT		:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_ACCEL_YOUT_H {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 ACCEL_YOUT		:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_ACCEL_YOUT_L {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 ACCEL_YOUT		:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_ACCEL_ZOUT_H {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 ACCEL_ZOUT		:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_ACCEL_ZOUT_L {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 ACCEL_ZOUT		:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_TEMP_OUT_H {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 TEMP_OUT			:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_TEMP_OUT_L {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 TEMP_OUT			:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_GYRO_XOUT_H {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 GYRO_XOUT		:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_GYRO_XOUT_L {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 GYRO_XOUT		:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_GYRO_YOUT_H {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 GYRO_YOUT		:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_GYRO_YOUT_L {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 GYRO_YOUT		:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_GYRO_ZOUT_H {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 GYRO_ZOUT		:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_GYRO_ZOUT_L {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 GYRO_ZOUT		:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_SELF_TEST_X_GYRO {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 XG_ST_DATA		:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_SELF_TEST_Y_GYRO {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 YG_ST_DATA		:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_SELF_TEST_Z_GYRO {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 ZG_ST_DATA		:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_FIFO_WM_TH1 {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 RESERVE0			:6;
+			u8 FIFO_WM_TH		:2;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_FIFO_WM_TH2 {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 FIFO_WM_TH		:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_SIGNAL_PATH_RESET {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 TEMP_RST			:1;
+			u8 ACCEL_RST		:1;
+			u8 FIFO_WM_TH		:6;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_ACCEL_INTEL_CTRL {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 WOM_TH_MODE		:1;
+			u8 OUTPUT_LIMIT		:1;
+			u8 RESERVE0			:4;
+			u8 ACCEL_INTEL_MODE	:1;
+			u8 ACCEL_INTEL_EN	:1;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_USER_CTRL {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 SIG_COND_RST		:1;
+			u8 RESERVE2			:1;
+			u8 FIFO_RST			:1;
+			u8 RESERVE1			:3;
+			u8 FIFO_EN			:1;
+			u8 RESERVE0			:1;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_PWR_MGMT_1 {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 CLKSEL			:3;
+			u8 TEMP_DIS			:1;
+			u8 GYRO_STANDBY		:1;
+			u8 CYCLE			:1;
+			u8 SLEEP			:1;
+			u8 DEVICE_RESET		:1;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_PWR_MGMT_2 {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 STBY_ZG			:1;
+			u8 STBY_YG			:1;
+			u8 STBY_XG			:1;
+			u8 STBY_ZA			:1;
+			u8 STBY_YA			:1;
+			u8 STBY_XA			:1;
+			u8 RESERVE0			:2;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_I2C_IF {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 RESERVE1			:6;
+			u8 I2C_IF_DIS		:1;
+			u8 RESERVE0			:1;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_FIFO_COUNTH {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 FIFO_COUNT		:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_FIFO_COUNTL {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 FIFO_COUNT		:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_FIFO_R_W {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 FIFO_DATA		:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_WHO_AM_I {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 WHOAMI			:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_XA_OFFSET_H {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 XA_OFFS			:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_XA_OFFSET_L {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 XA_OFFS			:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_YA_OFFSET_H {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 YA_OFFS			:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_YA_OFFSET_L {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 YA_OFFS			:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_ZA_OFFSET_H {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 ZA_OFFS			:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+struct struct_ZA_OFFSET_L {
+	enum inv_icm20602_reg_addr address;
+	union {
+		struct {
+			u8 ZA_OFFS			:8;
+		} REG;
+		u8 reg;
+	} reg_u;
+};
+
+/*
+ *  struct inv_icm20602_reg_map - cached copy of the ICM20602 register set.
+ *
+ *  Each member mirrors one hardware register, holding its address
+ *  (see enum inv_icm20602_reg_addr) and the value cached by the driver
+ *  when the register is written.
+ */
+struct inv_icm20602_reg_map {
+	struct struct_XG_OFFS_TC_H XG_OFFS_TC_H;
+	struct struct_XG_OFFS_TC_L XG_OFFS_TC_L;
+	struct struct_YG_OFFS_TC_H YG_OFFS_TC_H;
+	struct struct_YG_OFFS_TC_L YG_OFFS_TC_L;
+	struct struct_ZG_OFFS_TC_H ZG_OFFS_TC_H;
+	struct struct_ZG_OFFS_TC_L ZG_OFFS_TC_L;
+
+	struct struct_SELF_TEST_X_ACCEL SELF_TEST_X_ACCEL;
+	struct struct_SELF_TEST_Y_ACCEL SELF_TEST_Y_ACCEL;
+	struct struct_SELF_TEST_Z_ACCEL SELF_TEST_Z_ACCEL;
+
+	struct struct_XG_OFFS_USRH XG_OFFS_USRH;
+	struct struct_XG_OFFS_USRL XG_OFFS_USRL;
+	struct struct_YG_OFFS_USRH YG_OFFS_USRH;
+	struct struct_YG_OFFS_USRL YG_OFFS_USRL;
+	struct struct_ZG_OFFS_USRH ZG_OFFS_USRH;
+	struct struct_ZG_OFFS_USRL ZG_OFFS_USRL;
+
+	struct struct_SMPLRT_DIV SMPLRT_DIV;
+	struct struct_CONFIG CONFIG;
+
+	struct struct_GYRO_CONFIG GYRO_CONFIG;
+
+	struct struct_ACCEL_CONFIG ACCEL_CONFIG;
+	struct struct_ACCEL_CONFIG2 ACCEL_CONFIG2;
+
+	struct struct_LP_MODE_CFG LP_MODE_CFG;
+
+	struct struct_ACCEL_WOM_X_THR ACCEL_WOM_X_THR;
+	struct struct_ACCEL_WOM_Y_THR ACCEL_WOM_Y_THR;
+	struct struct_ACCEL_WOM_Z_THR ACCEL_WOM_Z_THR;
+
+	struct struct_FIFO_EN FIFO_EN;
+	struct struct_FSYNC_INT FSYNC_INT;
+	struct struct_INT_PIN_CFG INT_PIN_CFG;
+	struct struct_INT_ENABLE INT_ENABLE;
+	struct struct_FIFO_WM_INT_STATUS FIFO_WM_INT_STATUS;
+	struct struct_INT_STATUS INT_STATUS;
+
+	struct struct_ACCEL_XOUT_H ACCEL_XOUT_H;
+	struct struct_ACCEL_XOUT_L ACCEL_XOUT_L;
+	struct struct_ACCEL_YOUT_H ACCEL_YOUT_H;
+	struct struct_ACCEL_YOUT_L ACCEL_YOUT_L;
+	struct struct_ACCEL_ZOUT_H ACCEL_ZOUT_H;
+	struct struct_ACCEL_ZOUT_L ACCEL_ZOUT_L;
+
+	struct struct_TEMP_OUT_H TEMP_OUT_H;
+	struct struct_TEMP_OUT_L TEMP_OUT_L;
+
+	struct struct_GYRO_XOUT_H GYRO_XOUT_H;
+	struct struct_GYRO_XOUT_L GYRO_XOUT_L;
+	struct struct_GYRO_YOUT_H GYRO_YOUT_H;
+	struct struct_GYRO_YOUT_L GYRO_YOUT_L;
+	struct struct_GYRO_ZOUT_H GYRO_ZOUT_H;
+	struct struct_GYRO_ZOUT_L GYRO_ZOUT_L;
+
+	struct struct_SELF_TEST_X_GYRO SELF_TEST_X_GYRO;
+	struct struct_SELF_TEST_Y_GYRO SELF_TEST_Y_GYRO;
+	struct struct_SELF_TEST_Z_GYRO SELF_TEST_Z_GYRO;
+	struct struct_FIFO_WM_TH1 FIFO_WM_TH1;
+	struct struct_FIFO_WM_TH2 FIFO_WM_TH2;
+	struct struct_SIGNAL_PATH_RESET SIGNAL_PATH_RESET;
+	struct struct_ACCEL_INTEL_CTRL ACCEL_INTEL_CTRL;
+	struct struct_USER_CTRL USER_CTRL;
+
+	struct struct_PWR_MGMT_1 PWR_MGMT_1;
+	struct struct_PWR_MGMT_2 PWR_MGMT_2;
+
+	struct struct_I2C_IF I2C_IF;
+
+	struct struct_FIFO_COUNTH FIFO_COUNTH;
+	struct struct_FIFO_COUNTL FIFO_COUNTL;
+	struct struct_FIFO_R_W FIFO_R_W;
+
+	struct struct_WHO_AM_I WHO_AM_I;
+
+	struct struct_XA_OFFSET_H XA_OFFSET_H;
+	struct struct_XA_OFFSET_L XA_OFFSET_L;
+	struct struct_YA_OFFSET_H YA_OFFSET_H;
+	struct struct_YA_OFFSET_L YA_OFFSET_L;
+	struct struct_ZA_OFFSET_H ZA_OFFSET_H;
+	struct struct_ZA_OFFSET_L ZA_OFFSET_L;
+};
+
diff --git a/drivers/iio/imu/inv_icm20602/inv_icm20602_core.c b/drivers/iio/imu/inv_icm20602/inv_icm20602_core.c
new file mode 100644
index 0000000..7dda14e
--- /dev/null
+++ b/drivers/iio/imu/inv_icm20602/inv_icm20602_core.c
@@ -0,0 +1,615 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * Copyright (C) 2012 Invensense, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/spinlock.h>
+#include <linux/spi/spi.h>
+#include <linux/i2c.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include "inv_icm20602_iio.h"
+#include <linux/regulator/consumer.h>
+
+/* Attribute of icm20602 device init show */
+static ssize_t inv_icm20602_init_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	return 0;
+}
+
+static void inv_icm20602_def_config(struct inv_icm20602_state *st)
+{
+	struct icm20602_user_config *config = st->config;
+
+	config->user_fps_in_ms = 20;
+	config->gyro_lpf = INV_ICM20602_GYRO_LFP_92HZ;
+	config->gyro_fsr = ICM20602_GYRO_FSR_1000DPS;
+	config->acc_lpf = ICM20602_ACCLFP_99;
+	config->acc_fsr = ICM20602_ACC_FSR_4G;
+	config->gyro_accel_sample_rate = ICM20602_SAMPLE_RATE_200HZ;
+	config->fifo_enabled = true;
+
+}
+
+static void inv_icm20602_load_config(struct inv_icm20602_state *st)
+{
+	struct icm20602_user_config *config = st->config;
+
+	if (config->user_fps_in_ms == 0)
+		inv_icm20602_def_config(st);
+	config->fifo_enabled = true;
+
+}
+
+static ssize_t inv_icm20602_init_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int result = MPU_SUCCESS;
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct inv_icm20602_state *st = iio_priv(indio_dev);
+
+	inv_icm20602_load_config(st);
+	result |= icm20602_detect(st);
+	result |= icm20602_init_device(st);
+	result |= icm20602_start_fifo(st);
+	if (result)
+		return result;
+
+	return count;
+}
+
+static IIO_DEVICE_ATTR(
+						inv_icm20602_init,
+						0644,
+						inv_icm20602_init_show,
+						inv_icm20602_init_store,
+						0);
+
+/* Attribute of gyro lpf base on the enum inv_icm20602_gyro_temp_lpf_e */
+static ssize_t inv_gyro_lpf_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct inv_icm20602_state *st = iio_priv(indio_dev);
+
+	return snprintf(buf, 4, "%d\n", st->config->gyro_lpf);
+}
+
+static ssize_t inv_gyro_lpf_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct inv_icm20602_state *st = iio_priv(indio_dev);
+	struct icm20602_user_config *config = st->config;
+	int gyro_lpf;
+
+	if (kstrtoint(buf, 10, &gyro_lpf))
+		return -EINVAL;
+	if (gyro_lpf > INV_ICM20602_GYRO_LFP_NUM)
+		return -EINVAL;
+	config->gyro_lpf = gyro_lpf;
+
+	return count;
+}
+static IIO_DEVICE_ATTR(
+						inv_icm20602_gyro_lpf,
+						0644,
+						inv_gyro_lpf_show,
+						inv_gyro_lpf_store,
+						0);
+
+/* Attribute of gyro fsr base on enum inv_icm20602_gyro_temp_lpf_e */
+static ssize_t inv_gyro_fsr_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct inv_icm20602_state *st = iio_priv(indio_dev);
+
+	return snprintf(buf, 4, "%d\n", st->config->gyro_fsr);
+}
+
+static ssize_t inv_gyro_fsr_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct inv_icm20602_state *st = iio_priv(indio_dev);
+	struct icm20602_user_config *config = st->config;
+	int gyro_fsr;
+
+	if (kstrtoint(buf, 10, &gyro_fsr))
+		return -EINVAL;
+	if (gyro_fsr > ICM20602_GYRO_FSR_NUM)
+		return -EINVAL;
+
+	config->gyro_fsr = gyro_fsr;
+	return count;
+}
+
+static IIO_DEVICE_ATTR(
+						inv_icm20602_gyro_fsr,
+						0644,
+						inv_gyro_fsr_show,
+						inv_gyro_fsr_store,
+						0);
+
+/* Attribute of self_test */
+static ssize_t inv_self_test_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return 0;
+}
+
+static ssize_t inv_self_test_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct inv_icm20602_state *st = iio_priv(indio_dev);
+
+	icm20602_self_test(st);
+	return count;
+}
+static IIO_DEVICE_ATTR(
+						inv_icm20602_self_test,
+						0644,
+						inv_self_test_show,
+						inv_self_test_store,
+						0);
+
+/* Attribute of accel fsr based on enum inv_icm20602_acc_fsr_e */
+static ssize_t inv_gyro_acc_fsr_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct inv_icm20602_state *st = iio_priv(indio_dev);
+
+	return snprintf(buf, 4, "%d\n", st->config->acc_fsr);
+}
+
+static ssize_t inv_gyro_acc_fsr_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct inv_icm20602_state *st = iio_priv(indio_dev);
+	struct icm20602_user_config *config = st->config;
+	int acc_fsr;
+
+	if (kstrtoint(buf, 10, &acc_fsr))
+		return -EINVAL;
+	if (acc_fsr < 0 || acc_fsr >= ICM20602_ACC_FSR_NUM)
+		return -EINVAL;
+
+	config->acc_fsr = acc_fsr;
+	return count;
+}
+static IIO_DEVICE_ATTR(
+						inv_icm20602_acc_fsr,
+						0644,
+						inv_gyro_acc_fsr_show,
+						inv_gyro_acc_fsr_store,
+						0);
+
+/* Attribute of accel lpf based on enum inv_icm20602_acc_lpf_e */
+static ssize_t inv_gyro_acc_lpf_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct inv_icm20602_state *st = iio_priv(indio_dev);
+
+	return snprintf(buf, 4, "%d\n", st->config->acc_lpf);
+}
+
+static ssize_t inv_gyro_acc_lpf_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct inv_icm20602_state *st = iio_priv(indio_dev);
+	struct icm20602_user_config *config = st->config;
+	int acc_lpf;
+
+	if (kstrtoint(buf, 10, &acc_lpf))
+		return -EINVAL;
+	if (acc_lpf < ICM20602_ACCLFP_218 || acc_lpf > ICM20602_ACCLPF_NUM)
+		return -EINVAL;
+
+	config->acc_lpf = acc_lpf;
+	return count;
+}
+static IIO_DEVICE_ATTR(
+						inv_icm20602_acc_lpf,
+						0644,
+						inv_gyro_acc_lpf_show,
+						inv_gyro_acc_lpf_store,
+						0);
+
+/* Attribute of user_fps_in_ms */
+static ssize_t inv_user_fps_in_ms_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct inv_icm20602_state *st = iio_priv(indio_dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", st->config->user_fps_in_ms);
+}
+
+static ssize_t inv_user_fps_in_ms_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct inv_icm20602_state *st = iio_priv(indio_dev);
+	struct icm20602_user_config *config = st->config;
+	int user_fps_in_ms;
+
+	if (kstrtoint(buf, 10, &user_fps_in_ms))
+		return -EINVAL;
+	if (user_fps_in_ms < 10)
+		return -EINVAL;
+
+	config->user_fps_in_ms = user_fps_in_ms;
+	return count;
+}
+static IIO_DEVICE_ATTR(
+						inv_user_fps_in_ms,
+						0644,
+						inv_user_fps_in_ms_show,
+						inv_user_fps_in_ms_store,
+						0);
+
+/* Attribute of gyro_accel_sample_rate */
+static ssize_t inv_sampling_frequency_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct inv_icm20602_state *st = iio_priv(indio_dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", st->config->gyro_accel_sample_rate);
+}
+
+static ssize_t inv_sampling_frequency_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct inv_icm20602_state *st = iio_priv(indio_dev);
+	struct icm20602_user_config *config = st->config;
+	int gyro_accel_sample_rate;
+
+	if (kstrtoint(buf, 10, &gyro_accel_sample_rate))
+		return -EINVAL;
+	if (gyro_accel_sample_rate < 10)
+		return -EINVAL;
+
+	config->gyro_accel_sample_rate = gyro_accel_sample_rate;
+	return count;
+}
+static IIO_DEV_ATTR_SAMP_FREQ(
+						0644,
+						inv_sampling_frequency_show,
+						inv_sampling_frequency_store);
+
+static struct attribute *inv_icm20602_attributes[] = {
+	&iio_dev_attr_inv_icm20602_init.dev_attr.attr,
+
+	&iio_dev_attr_inv_icm20602_gyro_fsr.dev_attr.attr,
+	&iio_dev_attr_inv_icm20602_gyro_lpf.dev_attr.attr,
+
+	&iio_dev_attr_inv_icm20602_self_test.dev_attr.attr,
+
+	&iio_dev_attr_inv_icm20602_acc_fsr.dev_attr.attr,
+	&iio_dev_attr_inv_icm20602_acc_lpf.dev_attr.attr,
+
+	&iio_dev_attr_sampling_frequency.dev_attr.attr,
+
+	&iio_dev_attr_inv_user_fps_in_ms.dev_attr.attr,
+	NULL,
+};
+
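+/*
+ * Gyro and accel channels are 16-bit signed, big-endian raw samples with a
+ * per-type shared scale.
+ */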
+#define INV_ICM20602_CHAN(_type, _channel2, _index)                    \
+{                                                             \
+	.type = _type,                                        \
+	.modified = 1,                                        \
+	.channel2 = _channel2,                                \
+	.info_mask_shared_by_type =  BIT(IIO_CHAN_INFO_SCALE), \
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),         \
+	.scan_index = _index,                                 \
+	.scan_type = {                                        \
+			.sign = 's',                          \
+			.realbits = 16,                       \
+			.storagebits = 16,                    \
+			.shift = 0,                          \
+			.endianness = IIO_BE,                 \
+	},                                       \
+}
+
+static const struct iio_chan_spec inv_icm20602_channels[] = {
+	IIO_CHAN_SOFT_TIMESTAMP(INV_ICM20602_SCAN_TIMESTAMP),
+
+	{
+		.type = IIO_TEMP,
+		.info_mask_separate =  BIT(IIO_CHAN_INFO_RAW)
+				| BIT(IIO_CHAN_INFO_OFFSET)
+				| BIT(IIO_CHAN_INFO_SCALE),
+		.scan_index = INV_ICM20602_SCAN_TEMP,
+		.channel2 = IIO_MOD_X,
+		.scan_type = {
+				.sign = 's',
+				.realbits = 16,
+				.storagebits = 16,
+				.shift = 0,
+				.endianness = IIO_BE,
+			     },
+	},
+	INV_ICM20602_CHAN(IIO_ANGL_VEL, IIO_MOD_X, INV_ICM20602_SCAN_GYRO_X),
+	INV_ICM20602_CHAN(IIO_ANGL_VEL, IIO_MOD_Y, INV_ICM20602_SCAN_GYRO_Y),
+	INV_ICM20602_CHAN(IIO_ANGL_VEL, IIO_MOD_Z, INV_ICM20602_SCAN_GYRO_Z),
+
+	INV_ICM20602_CHAN(IIO_ACCEL, IIO_MOD_X, INV_ICM20602_SCAN_ACCL_X),
+	INV_ICM20602_CHAN(IIO_ACCEL, IIO_MOD_Y, INV_ICM20602_SCAN_ACCL_Y),
+	INV_ICM20602_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_ICM20602_SCAN_ACCL_Z),
+};
+
+static const struct attribute_group inv_icm20602_attribute_group = {
+	.attrs = inv_icm20602_attributes,
+};
+
+static const struct iio_info icm20602_info = {
+	.driver_module = THIS_MODULE,
+	.read_raw = NULL,
+	.write_raw = NULL,
+	.attrs = &inv_icm20602_attribute_group,
+	.validate_trigger = inv_icm20602_validate_trigger,
+};
+
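+/* switch the 3.3 V LDO supply of the sensor on or off */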
+static int icm20602_ldo_work(struct inv_icm20602_state *st, bool enable)
+{
+	int ret = 0;
+
+	if (enable) {
+		ret = regulator_set_voltage(st->reg_ldo,
+			ICM20602_LDO_VTG_MIN_UV, ICM20602_LDO_VTG_MAX_UV);
+		if (ret)
+			pr_err("Failed to request LDO voltage.\n");
+
+		ret = regulator_enable(st->reg_ldo);
+		if (ret)
+			pr_err("Failed to enable LDO %d\n", ret);
+	} else {
+		ret = regulator_disable(st->reg_ldo);
+		if (ret)
+			pr_err("Failed to disable LDO %d\n", ret);
+		regulator_set_load(st->reg_ldo, 0);
+	}
+
+	return MPU_SUCCESS;
+}
+
+static int icm20602_init_regulators(struct inv_icm20602_state *st)
+{
+	struct regulator *reg;
+
+	reg = regulator_get(&st->client->dev, "vdd-ldo");
+	if (IS_ERR_OR_NULL(reg)) {
+		pr_err("Unable to get regulator for LDO\n");
+		return -MPU_FAIL;
+	}
+
+	st->reg_ldo = reg;
+
+	return MPU_SUCCESS;
+}
+
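+/* parse the interrupt GPIO from devicetree and map it to the client IRQ */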
+static int of_populate_icm20602_dt(struct inv_icm20602_state *st)
+{
+	int result = MPU_SUCCESS;
+
+	/* use client device irq */
+	st->gpio = of_get_named_gpio(st->client->dev.of_node,
+			"invensense,icm20602-gpio", 0);
+	result = gpio_is_valid(st->gpio);
+	if (!result) {
+		pr_err("gpio_is_valid %d failed\n", st->gpio);
+		return -MPU_FAIL;
+	}
+
+	result = gpio_request(st->gpio, "icm20602-irq");
+	if (result) {
+		pr_err("gpio_request failed\n");
+		return -MPU_FAIL;
+	}
+
+	result = gpio_direction_input(st->gpio);
+	if (result) {
+		pr_err("gpio_direction_input failed\n");
+		gpio_free(st->gpio);
+		return -MPU_FAIL;
+	}
+
+	st->client->irq = gpio_to_irq(st->gpio);
+	if (st->client->irq < 0) {
+		pr_err("gpio_to_irq failed\n");
+		gpio_free(st->gpio);
+		return -MPU_FAIL;
+	}
+
+	return MPU_SUCCESS;
+}
+
+/*
+ *  inv_icm20602_probe() - probe function.
+ *  @client:          i2c client.
+ *  @id:              i2c device id.
+ *
+ *  Returns 0 on success, a negative error code otherwise.
+ *  The I2C address of the ICM-20602 is 0x68 or 0x69
+ *  depending upon the value driven on AD0 pin.
+ */
+static int inv_icm20602_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	struct inv_icm20602_state *st;
+	struct iio_dev *indio_dev;
+	int result = MPU_SUCCESS;
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
+	if (indio_dev == NULL)
+		return -ENOMEM;
+	st = iio_priv(indio_dev);
+	st->client = client;
+	st->interface = ICM20602_I2C;
+
+	pr_debug("i2c address is %x\n", client->addr);
+	result = of_populate_icm20602_dt(st);
+	if (result)
+		return result;
+
+	st->config = kzalloc(sizeof(struct icm20602_user_config), GFP_KERNEL);
+	if (st->config == NULL) {
+		result = -ENOMEM;
+		goto out_free_gpio;
+	}
+
+	icm20602_init_reg_map();
+
+	i2c_set_clientdata(client, indio_dev);
+
+	dev_set_drvdata(&client->dev, indio_dev);
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->name = ICM20602_DEV_NAME;
+	indio_dev->channels = inv_icm20602_channels;
+	indio_dev->num_channels = ARRAY_SIZE(inv_icm20602_channels);
+
+	indio_dev->info = &icm20602_info;
+	indio_dev->modes = INDIO_BUFFER_TRIGGERED;
+	result = iio_triggered_buffer_setup(indio_dev,
+		inv_icm20602_irq_handler, inv_icm20602_read_fifo_fn, NULL);
+	if (result) {
+		dev_err(&st->client->dev, " configure buffer fail %d\n",
+				result);
+		goto out_free_gpio;
+	}
+	icm20602_init_regulators(st);
+	icm20602_ldo_work(st, true);
+
+	result = inv_icm20602_probe_trigger(indio_dev);
+	if (result) {
+		dev_err(&st->client->dev, "trigger probe fail %d\n", result);
+		goto out_unreg_ring;
+	}
+
+	INIT_KFIFO(st->timestamps);
+	spin_lock_init(&st->time_stamp_lock);
+	result = iio_device_register(indio_dev);
+	if (result) {
+		dev_err(&st->client->dev, "IIO register fail %d\n", result);
+		goto out_remove_trigger;
+	}
+
+	return 0;
+
+out_remove_trigger:
+	inv_icm20602_remove_trigger(st);
+out_unreg_ring:
+	iio_triggered_buffer_cleanup(indio_dev);
+out_free_gpio:
+	kfree(st->config);
+	gpio_free(st->gpio);
+
+	return result;
+}
+
+static int inv_icm20602_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct inv_icm20602_state *st = iio_priv(indio_dev);
+
+	gpio_free(st->gpio);
+	iio_device_unregister(indio_dev);
+	inv_icm20602_remove_trigger(st);
+	iio_triggered_buffer_cleanup(indio_dev);
+	kfree(st->config);
+
+	return 0;
+}
+
+static int inv_icm20602_suspend(struct device *dev)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct inv_icm20602_state *st = iio_priv(indio_dev);
+
+	icm20602_stop_fifo(st);
+	icm20602_ldo_work(st, false);
+	return 0;
+}
+
+static int inv_icm20602_resume(struct device *dev)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct inv_icm20602_state *st = iio_priv(indio_dev);
+
+	icm20602_ldo_work(st, true);
+	icm20602_detect(st);
+	icm20602_init_device(st);
+	icm20602_start_fifo(st);
+
+	return 0;
+}
+
+static const struct dev_pm_ops icm20602_pm_ops = {
+	.resume		=	inv_icm20602_resume,
+	.suspend	=	inv_icm20602_suspend,
+};
+
+static const struct of_device_id icm20602_match_table[] = {
+	{.compatible = "invensense,icm20602"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, icm20602_match_table);
+
+static const struct i2c_device_id inv_icm20602_id[] = {
+	{"icm20602", 0},
+	{}
+};
+
+static struct i2c_driver icm20602_i2c_driver = {
+	.probe		=	inv_icm20602_probe,
+	.remove		=	inv_icm20602_remove,
+	.id_table	=	inv_icm20602_id,
+	.driver = {
+		.owner	=	THIS_MODULE,
+		.name	=	"inv-icm20602",
+		.of_match_table = icm20602_match_table,
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+		.pm = &icm20602_pm_ops,
+	},
+};
+
+static int __init inv_icm20602_init(void)
+{
+	return i2c_add_driver(&icm20602_i2c_driver);
+}
+module_init(inv_icm20602_init);
+
+static void __exit inv_icm20602_exit(void)
+{
+	i2c_del_driver(&icm20602_i2c_driver);
+}
+module_exit(inv_icm20602_exit);
+
+MODULE_DESCRIPTION("icm20602 IMU driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/imu/inv_icm20602/inv_icm20602_iio.h b/drivers/iio/imu/inv_icm20602/inv_icm20602_iio.h
new file mode 100644
index 0000000..b369ae4
--- /dev/null
+++ b/drivers/iio/imu/inv_icm20602/inv_icm20602_iio.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * Copyright (C) 2012 Invensense, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _INV_ICM20602_IIO_H_
+#define _INV_ICM20602_IIO_H_
+
+#include <linux/i2c.h>
+#include <linux/kfifo.h>
+#include <linux/spinlock.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/platform_data/invensense_mpu6050.h>
+
+/*
+ * If INV20602_DEVICE_IRQ_TRIGGER is set to 1, the sensor IRQ is used to
+ * trigger; otherwise the SMD IRQ is used.
+ */
+#define INV20602_DEVICE_IRQ_TRIGGER    0
+
+#if INV20602_DEVICE_IRQ_TRIGGER
+#define INV20602_SMD_IRQ_TRIGGER    0
+#else
+#define INV20602_SMD_IRQ_TRIGGER    1
+#endif
+
+#define ICM20602_LDO_VTG_MIN_UV 3300000
+#define ICM20602_LDO_VTG_MAX_UV 3300000
+
+#define INV_ICM20602_TIME_STAMP_TOR           5
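+/* one FIFO packet: 3 x 16-bit accel + 16-bit temp + 3 x 16-bit gyro = 14 bytes */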
+#define ICM20602_PACKAGE_SIZE 14
+
+/* device enum */
+enum inv_devices {
+	INV_ICM20602,
+	INV_NUM_PARTS,
+};
+
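+/* driver-local status codes (MPU_SUCCESS == 0) */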
+enum _mpu_err {
+	MPU_SUCCESS = 0,
+	MPU_FAIL = 1,
+	MPU_READ_FAIL = 2,
+	MPU_WRITE_FAIL = 3,
+};
+
+/* Gyro Full Scale Range Enum */
+enum inv_icm20602_gyro_fsr_e {
+	ICM20602_GYRO_FSR_250DPS = 0,
+	ICM20602_GYRO_FSR_500DPS,
+	ICM20602_GYRO_FSR_1000DPS,
+	ICM20602_GYRO_FSR_2000DPS,
+	ICM20602_GYRO_FSR_NUM,
+};
+
+/* Accelerometer Full Scale Range Enum */
+enum inv_icm20602_acc_fsr_e {
+	ICM20602_ACC_FSR_2G = 0,
+	ICM20602_ACC_FSR_4G,
+	ICM20602_ACC_FSR_8G,
+	ICM20602_ACC_FSR_16G,
+	ICM20602_ACC_FSR_NUM,
+};
+
+/* scan element definition */
+enum inv_icm20602_scan {
+	INV_ICM20602_SCAN_ACCL_X,
+	INV_ICM20602_SCAN_ACCL_Y,
+	INV_ICM20602_SCAN_ACCL_Z,
+	INV_ICM20602_SCAN_GYRO_X,
+	INV_ICM20602_SCAN_GYRO_Y,
+	INV_ICM20602_SCAN_GYRO_Z,
+	INV_ICM20602_SCAN_TEMP,
+	INV_ICM20602_SCAN_TIMESTAMP,
+};
+
+/* this is for the CONFIG register, bits [2:0] DLPF_CFG */
+enum inv_icm20602_gyro_temp_lpf_e {
+	INV_ICM20602_GLFP_250HZ = 0,  /* 8KHz */
+	INV_ICM20602_GYRO_LFP_176HZ,      /* 1KHz */
+	INV_ICM20602_GYRO_LFP_92HZ,
+	INV_ICM20602_GYRO_LFP_41HZ,
+	INV_ICM20602_GYRO_LFP_20HZ,
+	INV_ICM20602_GYRO_LFP_10HZ,
+	INV_ICM20602_GYRO_LFP_5HZ,
+	INV_ICM20602_GYRO_LFP_NUM,
+};
+
+enum inv_icm20602_gyro_sample_rate_e {
+	ICM20602_SAMPLE_RATE_100HZ = 100,
+	ICM20602_SAMPLE_RATE_200HZ = 200,
+	ICM20602_SAMPLE_RATE_500HZ = 500,
+	ICM20602_SAMPLE_RATE_1000HZ = 1000,
+};
+
+/* this is for ACCEL CONFIGURATION 2 reg */
+enum inv_icm20602_acc_lpf_e {
+	ICM20602_ACCLFP_218 = 1,
+	ICM20602_ACCLFP_99,
+	ICM20602_ACCLFP_44,
+	ICM20602_ACCLFP_21,
+	ICM20602_ACCLFP_10,
+	ICM20602_ACCLFP_5,
+	ICM20602_ACCLFP_420_NOLPF,
+	ICM20602_ACCLPF_NUM = 7,
+};
+
+/* IIO attribute address */
+enum INV_ICM20602_IIO_ATTR_ADDR {
+	ATTR_ICM20602_GYRO_MATRIX,
+	ATTR_ICM20602_ACCL_MATRIX,
+};
+
+/* this is for GYRO CONFIGURATION reg */
+enum inv_icm20602_fs_e {
+	INV_ICM20602_FS_250DPS = 0,
+	INV_ICM20602_FS_500DPS,
+	INV_ICM20602_FS_1000DPS,
+	INV_ICM20602_FS_2000DPS,
+	NUM_ICM20602_FS,
+};
+
+enum inv_icm20602_clock_sel_e {
+	INV_ICM20602_CLK_INTERNAL = 0,
+	INV_ICM20602_CLK_PLL,
+	INV_NUM_CLK,
+};
+
+enum inv_icm20602_spi_freq {
+	MPU_SPI_FREQUENCY_1MHZ = 960000UL,
+	MPU_SPI_FREQUENCY_5MHZ = 4800000UL,
+	MPU_SPI_FREQUENCY_8MHZ = 8000000UL,
+	MPU_SPI_FREQUENCY_10MHZ = 10000000UL,
+	MPU_SPI_FREQUENCY_15MHZ = 15000000UL,
+	MPU_SPI_FREQUENCY_20MHZ = 20000000UL,
+};
+
+#define MPU_SPI_BUF_LEN   512
+#define	ICM20602_DEV_NAME	"icm20602_iio"
+
+struct inv_icm20602_platform_data {
+	__s8 orientation[9];
+};
+
+struct X_Y_Z {
+	u8 X;
+	u8 Y;
+	u8 Z;
+};
+
+enum RAW_TYPE {
+	ACCEL = 1,
+	GYRO  = 2,
+	TEMP  = 4,
+};
+
+struct icm20602_user_config {
+	enum inv_icm20602_gyro_temp_lpf_e gyro_lpf;
+	enum inv_icm20602_gyro_fsr_e gyro_fsr;
+	struct X_Y_Z gyro_self_test;
+
+	enum inv_icm20602_acc_lpf_e	acc_lpf;
+	enum inv_icm20602_acc_fsr_e	acc_fsr;
+	struct X_Y_Z acc_self_test;
+
+	uint32_t gyro_accel_sample_rate;
+	uint32_t user_fps_in_ms;
+
+	bool fifo_enabled;
+	uint16_t fifo_waterlevel;
+	struct X_Y_Z wake_on_motion;
+};
+
+enum inv_icm20602_interface {
+	ICM20602_I2C = 0,
+	ICM20602_SPI
+};
+
+/*
+ *  struct inv_icm20602_state - Driver state variables.
+ *  @TIMESTAMP_FIFO_SIZE: size of the timestamp kfifo.
+ *  @interface:		bus used to reach the device (I2C or SPI).
+ *  @trig:		IIO trigger.
+ *  @reg:		map of important registers.
+ *  @config:		cached user configuration.
+ *  @time_stamp_lock:	spin lock protecting the timestamp kfifo.
+ *  @spi:		SPI device.
+ *  @client:		I2C client.
+ *  @chip_type:		chip type.
+ *  @gpio:		interrupt GPIO number.
+ *  @reg_ldo:		LDO regulator supplying the sensor.
+ *  @timestamps:	kfifo queue storing timestamps.
+ */
+struct inv_icm20602_state {
+#define TIMESTAMP_FIFO_SIZE 32
+	enum inv_icm20602_interface interface;
+	struct iio_trigger  *trig;
+	const struct inv_icm20602_reg_map *reg;
+	struct icm20602_user_config *config;
+	spinlock_t time_stamp_lock;
+	struct spi_device *spi;
+	struct i2c_client *client;
+	u8 fifo_packet_size;
+	int fifo_cnt_threshold;
+	char *buf;
+	struct struct_icm20602_data *data_push;
+	enum inv_devices chip_type;
+	int gpio;
+	struct regulator *reg_ldo;
+	DECLARE_KFIFO(timestamps, long long, TIMESTAMP_FIFO_SIZE);
+};
+
+struct struct_icm20602_raw_data {
+	u8 ACCEL_XOUT_H;
+	u8 ACCEL_XOUT_L;
+
+	u8 ACCEL_YOUT_H;
+	u8 ACCEL_YOUT_L;
+
+	u8 ACCEL_ZOUT_H;
+	u8 ACCEL_ZOUT_L;
+
+	u8 TEMP_OUT_H;
+	u8 TEMP_OUT_L;
+
+	u8 GYRO_XOUT_H;
+	u8 GYRO_XOUT_L;
+
+	u8 GYRO_YOUT_H;
+	u8 GYRO_YOUT_L;
+
+	u8 GYRO_ZOUT_H;
+	u8 GYRO_ZOUT_L;
+};
+
+struct struct_icm20602_real_data {
+	u16 ACCEL_XOUT;
+	u16 ACCEL_YOUT;
+	u16 ACCEL_ZOUT;
+
+	u16 GYRO_XOUT;
+	u16 GYRO_YOUT;
+	u16 GYRO_ZOUT;
+
+	u16 TEMP_OUT;
+};
+
+struct struct_icm20602_data {
+	s64 timestamps;
+	u8 *raw_data;
+};
+
+extern struct iio_trigger *inv_trig;
+irqreturn_t inv_icm20602_irq_handler(int irq, void *p);
+irqreturn_t inv_icm20602_read_fifo_fn(int irq, void *p);
+
+int inv_icm20602_probe_trigger(struct iio_dev *indio_dev);
+void inv_icm20602_remove_trigger(struct inv_icm20602_state *st);
+int inv_icm20602_validate_trigger(struct iio_dev *indio_dev,
+				struct iio_trigger *trig);
+int icm20602_int_status(struct inv_icm20602_state *st, u8 *int_status);
+int icm20602_int_wm_status(struct inv_icm20602_state *st, u8 *int_status);
+int icm20602_reset_fifo(struct inv_icm20602_state *st);
+int icm20602_fifo_count(struct inv_icm20602_state *st, u16 *fifo_count);
+
+int icm20602_read_raw(struct inv_icm20602_state *st,
+		struct struct_icm20602_real_data *real_data, uint32_t type);
+int icm20602_init_reg_map(void);
+int icm20602_init_device(struct inv_icm20602_state *st);
+int icm20602_detect(struct inv_icm20602_state *st);
+int icm20602_read_fifo(struct inv_icm20602_state *st,
+	void *buf, const int size);
+int icm20602_start_fifo(struct inv_icm20602_state *st);
+int icm20602_stop_fifo(struct inv_icm20602_state *st);
+bool icm20602_self_test(struct inv_icm20602_state *st);
+#endif
diff --git a/drivers/iio/imu/inv_icm20602/inv_icm20602_ring.c b/drivers/iio/imu/inv_icm20602/inv_icm20602_ring.c
new file mode 100644
index 0000000..081bec9
--- /dev/null
+++ b/drivers/iio/imu/inv_icm20602/inv_icm20602_ring.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * Copyright (C) 2012 Invensense, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/poll.h>
+#include <linux/spi/spi.h>
+#include "inv_icm20602_iio.h"
+#include <linux/i2c.h>
+
+#define combine_8_to_16(upper, lower)  (((upper) << 8) | (lower))
+
+static void inv_clear_kfifo(struct inv_icm20602_state *st)
+{
+	unsigned long flags;
+
+	/* take the spin lock so the IRQ handler cannot add entries while the kfifo is reset */
+	spin_lock_irqsave(&st->time_stamp_lock, flags);
+	kfifo_reset(&st->timestamps);
+	spin_unlock_irqrestore(&st->time_stamp_lock, flags);
+}
+
+/*
+ * inv_icm20602_irq_handler() - Cache a timestamp at each data ready interrupt.
+ */
+irqreturn_t inv_icm20602_irq_handler(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct inv_icm20602_state *st = iio_priv(indio_dev);
+	s64 timestamp;
+
+	timestamp = iio_get_time_ns(indio_dev);
+
+	kfifo_in_spinlocked(&st->timestamps, &timestamp, 1,
+				&st->time_stamp_lock);
+
+	return IRQ_WAKE_THREAD;
+}
+
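+/* interrupt status bits checked against the value read by icm20602_int_status() */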
+#define BIT_FIFO_OFLOW_INT			0x10
+#define BIT_FIFO_WM_INT				0x40
+static int inv_icm20602_read_data(struct iio_dev *indio_dev)
+{
+	int result = MPU_SUCCESS;
+	struct inv_icm20602_state *st = iio_priv(indio_dev);
+	struct icm20602_user_config *config = st->config;
+	int package_count;
+	char *buf = st->buf;
+	s64 timestamp;
+	u8 int_status;
+	u16 fifo_count;
+	int i;
+
+	package_count = config->fifo_waterlevel / ICM20602_PACKAGE_SIZE;
+	mutex_lock(&indio_dev->mlock);
+	icm20602_int_status(st, &int_status);
+	if (int_status & BIT_FIFO_OFLOW_INT) {
+		icm20602_fifo_count(st, &fifo_count);
+		pr_debug("fifo_count = %d\n", fifo_count);
+		inv_clear_kfifo(st);
+		icm20602_reset_fifo(st);
+		goto end_session;
+	}
+	if (config->fifo_enabled) {
+		result = kfifo_out(&st->timestamps,
+				&timestamp, 1);
+		/* when there is no timestamp, put it as 0 */
+		if (result == 0)
+			timestamp = 0;
+		for (i = 0; i < package_count; i++) {
+			result = icm20602_read_fifo(st,
+					buf, ICM20602_PACKAGE_SIZE);
+			memcpy(st->data_push[i].raw_data,
+					buf, ICM20602_PACKAGE_SIZE);
+			iio_push_to_buffers_with_timestamp(indio_dev,
+					st->data_push[i].raw_data, timestamp);
+			buf += ICM20602_PACKAGE_SIZE;
+		}
+		memset(st->buf, 0, config->fifo_waterlevel);
+	}
+end_session:
+	mutex_unlock(&indio_dev->mlock);
+	iio_trigger_notify_done(indio_dev->trig);
+	return MPU_SUCCESS;
+}
+
+/*
+ * inv_icm20602_read_fifo_fn() - Transfer data from the hardware FIFO to the IIO buffer.
+ */
+irqreturn_t inv_icm20602_read_fifo_fn(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+
+	inv_icm20602_read_data(indio_dev);
+	return IRQ_HANDLED;
+}
diff --git a/drivers/iio/imu/inv_icm20602/inv_icm20602_trigger.c b/drivers/iio/imu/inv_icm20602/inv_icm20602_trigger.c
new file mode 100644
index 0000000..f05a631
--- /dev/null
+++ b/drivers/iio/imu/inv_icm20602/inv_icm20602_trigger.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * Copyright (C) 2012 Invensense, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/spi/spi.h>
+#include <linux/sched/rt.h>
+#include <linux/delay.h>
+#include "inv_icm20602_iio.h"
+#include <linux/i2c.h>
+
+static const struct iio_trigger_ops inv_icm20602_trigger_ops = {
+	.owner = THIS_MODULE,
+};
+
+int inv_icm20602_validate_trigger(struct iio_dev *indio_dev,
+					struct iio_trigger *trig)
+{
+	struct inv_icm20602_state *st = iio_priv(indio_dev);
+
+	if (st->trig != trig)
+		return -EINVAL;
+
+	return 0;
+}
+
+int inv_icm20602_probe_trigger(struct iio_dev *indio_dev)
+{
+	int ret;
+	struct inv_icm20602_state *st = iio_priv(indio_dev);
+
+	st->trig = iio_trigger_alloc("%s-dev%d",
+					indio_dev->name,
+					indio_dev->id);
+	if (st->trig == NULL) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+
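+	/* the data-ready GPIO interrupt drives the IIO trigger */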
+	ret = request_irq(st->client->irq, &iio_trigger_generic_data_rdy_poll,
+				IRQF_TRIGGER_RISING,
+				"inv_icm20602",
+				st->trig);
+	if (ret) {
+		pr_err("request_irq failed\n");
+		goto error_free_trig;
+	}
+	st->trig->dev.parent = &st->client->dev;
+	st->trig->ops = &inv_icm20602_trigger_ops;
+	iio_trigger_set_drvdata(st->trig, indio_dev);
+	ret = iio_trigger_register(st->trig);
+	if (ret) {
+		pr_err("iio_trigger_register failed\n");
+		goto error_free_irq;
+	}
+	indio_dev->trig = st->trig;
+
+	return 0;
+
+error_free_irq:
+	free_irq(st->client->irq, st->trig);
+error_free_trig:
+	iio_trigger_free(st->trig);
+error_ret:
+	return ret;
+}
+
+void inv_icm20602_remove_trigger(struct inv_icm20602_state *st)
+{
+	iio_trigger_unregister(st->trig);
+	free_irq(st->client->irq, st->trig);
+	iio_trigger_free(st->trig);
+}
diff --git a/drivers/iio/light/rpr0521.c b/drivers/iio/light/rpr0521.c
index 7de0f39..d23cf77 100644
--- a/drivers/iio/light/rpr0521.c
+++ b/drivers/iio/light/rpr0521.c
@@ -510,13 +510,26 @@
 
 	ret = pm_runtime_set_active(&client->dev);
 	if (ret < 0)
-		return ret;
+		goto err_poweroff;
 
 	pm_runtime_enable(&client->dev);
 	pm_runtime_set_autosuspend_delay(&client->dev, RPR0521_SLEEP_DELAY_MS);
 	pm_runtime_use_autosuspend(&client->dev);
 
-	return iio_device_register(indio_dev);
+	ret = iio_device_register(indio_dev);
+	if (ret)
+		goto err_pm_disable;
+
+	return 0;
+
+err_pm_disable:
+	pm_runtime_disable(&client->dev);
+	pm_runtime_set_suspended(&client->dev);
+	pm_runtime_put_noidle(&client->dev);
+err_poweroff:
+	rpr0521_poweroff(data);
+
+	return ret;
 }
 
 static int rpr0521_remove(struct i2c_client *client)
diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c
index 6325e7d..f3cb4dc 100644
--- a/drivers/iio/magnetometer/st_magn_spi.c
+++ b/drivers/iio/magnetometer/st_magn_spi.c
@@ -48,8 +48,6 @@
 }
 
 static const struct spi_device_id st_magn_id_table[] = {
-	{ LSM303DLHC_MAGN_DEV_NAME },
-	{ LSM303DLM_MAGN_DEV_NAME },
 	{ LIS3MDL_MAGN_DEV_NAME },
 	{ LSM303AGR_MAGN_DEV_NAME },
 	{},
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
index 19d2eb4..2a4a62e 100644
--- a/drivers/iio/pressure/zpa2326.c
+++ b/drivers/iio/pressure/zpa2326.c
@@ -871,12 +871,13 @@
 {
 	int          ret;
 	unsigned int val;
+	long     timeout;
 
 	zpa2326_dbg(indio_dev, "waiting for one shot completion interrupt");
 
-	ret = wait_for_completion_interruptible_timeout(
+	timeout = wait_for_completion_interruptible_timeout(
 		&private->data_ready, ZPA2326_CONVERSION_JIFFIES);
-	if (ret > 0)
+	if (timeout > 0)
 		/*
 		 * Interrupt handler completed before timeout: return operation
 		 * status.
@@ -886,13 +887,16 @@
 	/* Clear all interrupts just to be sure. */
 	regmap_read(private->regmap, ZPA2326_INT_SOURCE_REG, &val);
 
-	if (!ret)
+	if (!timeout) {
 		/* Timed out. */
+		zpa2326_warn(indio_dev, "no one shot interrupt occurred (%ld)",
+			     timeout);
 		ret = -ETIME;
-
-	if (ret != -ERESTARTSYS)
-		zpa2326_warn(indio_dev, "no one shot interrupt occurred (%d)",
-			     ret);
+	} else if (timeout < 0) {
+		zpa2326_warn(indio_dev,
+			     "wait for one shot interrupt cancelled");
+		ret = -ERESTARTSYS;
+	}
 
 	return ret;
 }
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index fb4ce03..978b8d9 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -209,6 +209,22 @@
 }
 EXPORT_SYMBOL(rdma_addr_size);
 
+int rdma_addr_size_in6(struct sockaddr_in6 *addr)
+{
+	int ret = rdma_addr_size((struct sockaddr *) addr);
+
+	return ret <= sizeof(*addr) ? ret : 0;
+}
+EXPORT_SYMBOL(rdma_addr_size_in6);
+
+int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr)
+{
+	int ret = rdma_addr_size((struct sockaddr *) addr);
+
+	return ret <= sizeof(*addr) ? ret : 0;
+}
+EXPORT_SYMBOL(rdma_addr_size_kss);
+
 static struct rdma_addr_client self;
 
 void rdma_addr_register_client(struct rdma_addr_client *client)
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 760ef60..15f4bdf 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -999,8 +999,7 @@
 		return -ENOMEM;
 
 	ib_comp_wq = alloc_workqueue("ib-comp-wq",
-			WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
-			WQ_UNBOUND_MAX_ACTIVE);
+			WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
 	if (!ib_comp_wq) {
 		ret = -ENOMEM;
 		goto err;
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 322cb67..28d1845 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -724,21 +724,19 @@
 {
 	int ret;
 	u16 gid_index;
-	u8 p;
 
-	if (rdma_protocol_roce(device, port_num)) {
-		ret = ib_find_cached_gid_by_port(device, &rec->port_gid,
-						 gid_type, port_num,
-						 ndev,
-						 &gid_index);
-	} else if (rdma_protocol_ib(device, port_num)) {
-		ret = ib_find_cached_gid(device, &rec->port_gid,
-					 IB_GID_TYPE_IB, NULL, &p,
+	/* GID table is not based on the netdevice for IB link layer,
+	 * so ignore ndev during search.
+	 */
+	if (rdma_protocol_ib(device, port_num))
+		ndev = NULL;
+	else if (!rdma_protocol_roce(device, port_num))
+		return -EINVAL;
+
+	ret = ib_find_cached_gid_by_port(device, &rec->port_gid,
+					 gid_type, port_num,
+					 ndev,
 					 &gid_index);
-	} else {
-		ret = -EINVAL;
-	}
-
 	if (ret)
 		return ret;
 
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 81b742c..4baf3b8 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1137,10 +1137,9 @@
 
 		resolved_dev = dev_get_by_index(dev_addr.net,
 						dev_addr.bound_dev_if);
-		if (resolved_dev->flags & IFF_LOOPBACK) {
-			dev_put(resolved_dev);
-			resolved_dev = idev;
-			dev_hold(resolved_dev);
+		if (!resolved_dev) {
+			dev_put(idev);
+			return -ENODEV;
 		}
 		ndev = ib_get_ndev_from_path(rec);
 		rcu_read_lock();
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 017a09c..a036d70 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -132,7 +132,7 @@
 	ctx = idr_find(&ctx_idr, id);
 	if (!ctx)
 		ctx = ERR_PTR(-ENOENT);
-	else if (ctx->file != file)
+	else if (ctx->file != file || !ctx->cm_id)
 		ctx = ERR_PTR(-EINVAL);
 	return ctx;
 }
@@ -454,6 +454,7 @@
 	struct rdma_ucm_create_id cmd;
 	struct rdma_ucm_create_id_resp resp;
 	struct ucma_context *ctx;
+	struct rdma_cm_id *cm_id;
 	enum ib_qp_type qp_type;
 	int ret;
 
@@ -474,10 +475,10 @@
 		return -ENOMEM;
 
 	ctx->uid = cmd.uid;
-	ctx->cm_id = rdma_create_id(current->nsproxy->net_ns,
-				    ucma_event_handler, ctx, cmd.ps, qp_type);
-	if (IS_ERR(ctx->cm_id)) {
-		ret = PTR_ERR(ctx->cm_id);
+	cm_id = rdma_create_id(current->nsproxy->net_ns,
+			       ucma_event_handler, ctx, cmd.ps, qp_type);
+	if (IS_ERR(cm_id)) {
+		ret = PTR_ERR(cm_id);
 		goto err1;
 	}
 
@@ -487,14 +488,19 @@
 		ret = -EFAULT;
 		goto err2;
 	}
+
+	ctx->cm_id = cm_id;
 	return 0;
 
 err2:
-	rdma_destroy_id(ctx->cm_id);
+	rdma_destroy_id(cm_id);
 err1:
 	mutex_lock(&mut);
 	idr_remove(&ctx_idr, ctx->id);
 	mutex_unlock(&mut);
+	mutex_lock(&file->mut);
+	list_del(&ctx->list);
+	mutex_unlock(&file->mut);
 	kfree(ctx);
 	return ret;
 }
@@ -624,6 +630,9 @@
 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 		return -EFAULT;
 
+	if (!rdma_addr_size_in6(&cmd.addr))
+		return -EINVAL;
+
 	ctx = ucma_get_ctx(file, cmd.id);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
@@ -637,22 +646,21 @@
 			 int in_len, int out_len)
 {
 	struct rdma_ucm_bind cmd;
-	struct sockaddr *addr;
 	struct ucma_context *ctx;
 	int ret;
 
 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 		return -EFAULT;
 
-	addr = (struct sockaddr *) &cmd.addr;
-	if (cmd.reserved || !cmd.addr_size || (cmd.addr_size != rdma_addr_size(addr)))
+	if (cmd.reserved || !cmd.addr_size ||
+	    cmd.addr_size != rdma_addr_size_kss(&cmd.addr))
 		return -EINVAL;
 
 	ctx = ucma_get_ctx(file, cmd.id);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	ret = rdma_bind_addr(ctx->cm_id, addr);
+	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -668,13 +676,16 @@
 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 		return -EFAULT;
 
+	if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) ||
+	    !rdma_addr_size_in6(&cmd.dst_addr))
+		return -EINVAL;
+
 	ctx = ucma_get_ctx(file, cmd.id);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
 	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
-				(struct sockaddr *) &cmd.dst_addr,
-				cmd.timeout_ms);
+				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -684,24 +695,23 @@
 				 int in_len, int out_len)
 {
 	struct rdma_ucm_resolve_addr cmd;
-	struct sockaddr *src, *dst;
 	struct ucma_context *ctx;
 	int ret;
 
 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 		return -EFAULT;
 
-	src = (struct sockaddr *) &cmd.src_addr;
-	dst = (struct sockaddr *) &cmd.dst_addr;
-	if (cmd.reserved || (cmd.src_size && (cmd.src_size != rdma_addr_size(src))) ||
-	    !cmd.dst_size || (cmd.dst_size != rdma_addr_size(dst)))
+	if (cmd.reserved ||
+	    (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) ||
+	    !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr)))
 		return -EINVAL;
 
 	ctx = ucma_get_ctx(file, cmd.id);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms);
+	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
+				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -1146,6 +1156,11 @@
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
+	if (!ctx->cm_id->device) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	resp.qp_attr_mask = 0;
 	memset(&qp_attr, 0, sizeof qp_attr);
 	qp_attr.qp_state = cmd.qp_state;
@@ -1216,6 +1231,9 @@
 	if (!optlen)
 		return -EINVAL;
 
+	if (!ctx->cm_id->device)
+		return -EINVAL;
+
 	memset(&sa_path, 0, sizeof(sa_path));
 
 	ib_sa_unpack_path(path_data->path_rec, &sa_path);
@@ -1278,7 +1296,7 @@
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	if (unlikely(cmd.optval > KMALLOC_MAX_SIZE))
+	if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
 		return -EINVAL;
 
 	optval = memdup_user((void __user *) (unsigned long) cmd.optval,
@@ -1302,7 +1320,7 @@
 {
 	struct rdma_ucm_notify cmd;
 	struct ucma_context *ctx;
-	int ret;
+	int ret = -EINVAL;
 
 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 		return -EFAULT;
@@ -1311,7 +1329,9 @@
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
+	if (ctx->cm_id->device)
+		ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);
+
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -1397,7 +1417,7 @@
 	join_cmd.response = cmd.response;
 	join_cmd.uid = cmd.uid;
 	join_cmd.id = cmd.id;
-	join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr);
+	join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr);
 	if (!join_cmd.addr_size)
 		return -EINVAL;
 
@@ -1416,7 +1436,7 @@
 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 		return -EFAULT;
 
-	if (!rdma_addr_size((struct sockaddr *)&cmd.addr))
+	if (!rdma_addr_size_kss(&cmd.addr))
 		return -EINVAL;
 
 	return ucma_process_join(file, &cmd, out_len);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 6512a55..dd18b74 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -488,6 +488,7 @@
 
 	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
 	release_ep_resources(ep);
+	kfree_skb(skb);
 	return 0;
 }
 
@@ -498,6 +499,7 @@
 	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
 	c4iw_put_ep(&ep->parent_ep->com);
 	release_ep_resources(ep);
+	kfree_skb(skb);
 	return 0;
 }
 
@@ -569,11 +571,13 @@
 
 	PDBG("%s rdev %p\n", __func__, rdev);
 	req->cmd = CPL_ABORT_NO_RST;
+	skb_get(skb);
 	ret = c4iw_ofld_send(rdev, skb);
 	if (ret) {
 		__state_set(&ep->com, DEAD);
 		queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
-	}
+	} else
+		kfree_skb(skb);
 }
 
 static int send_flowc(struct c4iw_ep *ep)
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index b85a1a9..9e0f2cc 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -856,6 +856,11 @@
 
 	rdev->status_page->db_off = 0;
 
+	init_completion(&rdev->rqt_compl);
+	init_completion(&rdev->pbl_compl);
+	kref_init(&rdev->rqt_kref);
+	kref_init(&rdev->pbl_kref);
+
 	return 0;
 err_free_status_page:
 	free_page((unsigned long)rdev->status_page);
@@ -872,12 +877,14 @@
 
 static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 {
-	destroy_workqueue(rdev->free_workq);
 	kfree(rdev->wr_log);
 	free_page((unsigned long)rdev->status_page);
 	c4iw_pblpool_destroy(rdev);
 	c4iw_rqtpool_destroy(rdev);
+	wait_for_completion(&rdev->pbl_compl);
+	wait_for_completion(&rdev->rqt_compl);
 	c4iw_destroy_resource(&rdev->resource);
+	destroy_workqueue(rdev->free_workq);
 }
 
 static void c4iw_dealloc(struct uld_ctx *ctx)
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 7d54066..896dff7 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -186,6 +186,10 @@
 	struct wr_log_entry *wr_log;
 	int wr_log_size;
 	struct workqueue_struct *free_workq;
+	struct completion rqt_compl;
+	struct completion pbl_compl;
+	struct kref rqt_kref;
+	struct kref pbl_kref;
 };
 
 static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
index 67df71a..803c677 100644
--- a/drivers/infiniband/hw/cxgb4/resource.c
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -260,12 +260,22 @@
 		rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
 		if (rdev->stats.pbl.cur > rdev->stats.pbl.max)
 			rdev->stats.pbl.max = rdev->stats.pbl.cur;
+		kref_get(&rdev->pbl_kref);
 	} else
 		rdev->stats.pbl.fail++;
 	mutex_unlock(&rdev->stats.lock);
 	return (u32)addr;
 }
 
+static void destroy_pblpool(struct kref *kref)
+{
+	struct c4iw_rdev *rdev;
+
+	rdev = container_of(kref, struct c4iw_rdev, pbl_kref);
+	gen_pool_destroy(rdev->pbl_pool);
+	complete(&rdev->pbl_compl);
+}
+
 void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 {
 	PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
@@ -273,6 +283,7 @@
 	rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
 	mutex_unlock(&rdev->stats.lock);
 	gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
+	kref_put(&rdev->pbl_kref, destroy_pblpool);
 }
 
 int c4iw_pblpool_create(struct c4iw_rdev *rdev)
@@ -312,7 +323,7 @@
 
 void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
 {
-	gen_pool_destroy(rdev->pbl_pool);
+	kref_put(&rdev->pbl_kref, destroy_pblpool);
 }
 
 /*
@@ -333,12 +344,22 @@
 		rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
 		if (rdev->stats.rqt.cur > rdev->stats.rqt.max)
 			rdev->stats.rqt.max = rdev->stats.rqt.cur;
+		kref_get(&rdev->rqt_kref);
 	} else
 		rdev->stats.rqt.fail++;
 	mutex_unlock(&rdev->stats.lock);
 	return (u32)addr;
 }
 
+static void destroy_rqtpool(struct kref *kref)
+{
+	struct c4iw_rdev *rdev;
+
+	rdev = container_of(kref, struct c4iw_rdev, rqt_kref);
+	gen_pool_destroy(rdev->rqt_pool);
+	complete(&rdev->rqt_compl);
+}
+
 void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 {
 	PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
@@ -346,6 +367,7 @@
 	rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
 	mutex_unlock(&rdev->stats.lock);
 	gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
+	kref_put(&rdev->rqt_kref, destroy_rqtpool);
 }
 
 int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
@@ -383,7 +405,7 @@
 
 void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
 {
-	gen_pool_destroy(rdev->rqt_pool);
+	kref_put(&rdev->rqt_kref, destroy_rqtpool);
 }
 
 /*
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 7853b0c..148b313 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -5860,6 +5860,7 @@
 	u64 status;
 	u32 sw_index;
 	int i = 0;
+	unsigned long irq_flags;
 
 	sw_index = dd->hw_to_sw[hw_context];
 	if (sw_index >= dd->num_send_contexts) {
@@ -5869,10 +5870,12 @@
 		return;
 	}
 	sci = &dd->send_contexts[sw_index];
+	spin_lock_irqsave(&dd->sc_lock, irq_flags);
 	sc = sci->sc;
 	if (!sc) {
 		dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
 			   sw_index, hw_context);
+		spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
 		return;
 	}
 
@@ -5894,6 +5897,7 @@
 	 */
 	if (sc->type != SC_USER)
 		queue_work(dd->pport->hfi1_wq, &sc->halt_work);
+	spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
 
 	/*
 	 * Update the counters for the corresponding status bits.
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index 632ba21..0022660 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -67,13 +67,13 @@
 	loff_t *ppos)
 {
 	struct dentry *d = file->f_path.dentry;
-	int srcu_idx;
 	ssize_t r;
 
-	r = debugfs_use_file_start(d, &srcu_idx);
-	if (likely(!r))
-		r = seq_read(file, buf, size, ppos);
-	debugfs_use_file_finish(srcu_idx);
+	r = debugfs_file_get(d);
+	if (unlikely(r))
+		return r;
+	r = seq_read(file, buf, size, ppos);
+	debugfs_file_put(d);
 	return r;
 }
 
@@ -83,13 +83,13 @@
 	int whence)
 {
 	struct dentry *d = file->f_path.dentry;
-	int srcu_idx;
 	loff_t r;
 
-	r = debugfs_use_file_start(d, &srcu_idx);
-	if (likely(!r))
-		r = seq_lseek(file, offset, whence);
-	debugfs_use_file_finish(srcu_idx);
+	r = debugfs_file_get(d);
+	if (unlikely(r))
+		return r;
+	r = seq_lseek(file, offset, whence);
+	debugfs_file_put(d);
 	return r;
 }
 
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 84a97f3..ae1f90d 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1049,6 +1049,8 @@
 		return ERR_PTR(-ENOMEM);
 	dd->num_pports = nports;
 	dd->pport = (struct hfi1_pportdata *)(dd + 1);
+	dd->pcidev = pdev;
+	pci_set_drvdata(pdev, dd);
 
 	INIT_LIST_HEAD(&dd->list);
 	idr_preload(GFP_KERNEL);
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 335613a1..7176260 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -162,9 +162,6 @@
 	unsigned long len;
 	resource_size_t addr;
 
-	dd->pcidev = pdev;
-	pci_set_drvdata(pdev, dd);
-
 	addr = pci_resource_start(pdev, 0);
 	len = pci_resource_len(pdev, 0);
 
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index 919a547..621b60a 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -196,7 +196,8 @@
 };
 
 static struct attribute *port_cc_default_attributes[] = {
-	&cc_prescan_attr.attr
+	&cc_prescan_attr.attr,
+	NULL
 };
 
 static struct kobj_type port_cc_ktype = {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index 2c4b4d0..3b973cb 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -3644,8 +3644,10 @@
 		hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].cnt = 1;
 		hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt = mrwanted;
 
-		hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt = I40IW_MAX_WQ_ENTRIES * qpwanted;
-		hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt = 4 * I40IW_MAX_IRD_SIZE * qpwanted;
+		hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt =
+			roundup_pow_of_two(I40IW_MAX_WQ_ENTRIES * qpwanted);
+		hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt =
+			roundup_pow_of_two(2 * I40IW_MAX_IRD_SIZE * qpwanted);
 		hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].cnt =
 			hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
 		hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].cnt =
diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
index d1328a6..8ce599e 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
@@ -86,6 +86,7 @@
 #define RDMA_OPCODE_MASK        0x0f
 #define RDMA_READ_REQ_OPCODE    1
 #define Q2_BAD_FRAME_OFFSET     72
+#define Q2_FPSN_OFFSET          64
 #define CQE_MAJOR_DRV           0x8000
 
 #define I40IW_TERM_SENT 0x01
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index c62d354..3ed4914 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -1320,7 +1320,7 @@
 	u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
 	u32 rcv_wnd = hw_host_ctx[23];
 	/* first partial seq # in q2 */
-	u32 fps = qp->q2_buf[16];
+	u32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET);
 	struct list_head *rxlist = &pfpdu->rxlist;
 	struct list_head *plist;
 
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 4b892ca..095912fb 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -1515,6 +1515,7 @@
 		err_code = -EOVERFLOW;
 		goto err;
 	}
+	stag &= ~I40IW_CQPSQ_STAG_KEY_MASK;
 	iwmr->stag = stag;
 	iwmr->ibmr.rkey = stag;
 	iwmr->ibmr.lkey = stag;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 19bc1c2..8d59a59 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -216,8 +216,6 @@
 			gid_tbl[i].version = 2;
 			if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
 				gid_tbl[i].type = 1;
-			else
-				memset(&gid_tbl[i].gid, 0, 12);
 		}
 	}
 
@@ -363,8 +361,13 @@
 		if (!gids) {
 			ret = -ENOMEM;
 		} else {
-			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
-				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
+			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
+				memcpy(&gids[i].gid,
+				       &port_gid_table->gids[i].gid,
+				       sizeof(union ib_gid));
+				gids[i].gid_type =
+				    port_gid_table->gids[i].gid_type;
+			}
 		}
 	}
 	spin_unlock_bh(&iboe->lock);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 403df35..abb47e7 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -253,7 +253,11 @@
 	} else {
 		if (ucmd) {
 			qp->rq.wqe_cnt = ucmd->rq_wqe_count;
+			if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))
+				return -EINVAL;
 			qp->rq.wqe_shift = ucmd->rq_wqe_shift;
+			if ((1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) < qp->wq_sig)
+				return -EINVAL;
 			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
 			qp->rq.max_post = qp->rq.wqe_cnt;
 		} else {
@@ -2164,18 +2168,18 @@
 
 static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
 {
-	if (rate == IB_RATE_PORT_CURRENT) {
+	if (rate == IB_RATE_PORT_CURRENT)
 		return 0;
-	} else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
-		return -EINVAL;
-	} else {
-		while (rate != IB_RATE_2_5_GBPS &&
-		       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
-			 MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
-			--rate;
-	}
 
-	return rate + MLX5_STAT_RATE_OFFSET;
+	if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS)
+		return -EINVAL;
+
+	while (rate != IB_RATE_PORT_CURRENT &&
+	       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
+		 MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
+		--rate;
+
+	return rate ? rate + MLX5_STAT_RATE_OFFSET : rate;
 }
 
 static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
@@ -2805,8 +2809,10 @@
 		mlx5_ib_qp_disable_pagefaults(qp);
 
 	if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
-	    !optab[mlx5_cur][mlx5_new])
+	    !optab[mlx5_cur][mlx5_new]) {
+		err = -EINVAL;
 		goto out;
+	}
 
 	op = optab[mlx5_cur][mlx5_new];
 	optpar = ib_mask_to_mlx5_opt(attr_mask);
@@ -2848,7 +2854,8 @@
 	 * If we moved a kernel QP to RESET, clean up all old CQ
 	 * entries and reinitialize the QP.
 	 */
-	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
+	if (new_state == IB_QPS_RESET &&
+	    !ibqp->uobject && ibqp->qp_type != IB_QPT_XRC_TGT) {
 		mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
 				 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
 		if (send_cq != recv_cq)
@@ -4605,13 +4612,10 @@
 	int err;
 
 	err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
-	if (err) {
+	if (err)
 		mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
-		return err;
-	}
 
 	kfree(xrcd);
-
 	return 0;
 }
 
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 58e92bc..f937873 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -762,7 +762,8 @@
 
 	dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
 	if (!dev->num_cnq) {
-		DP_ERR(dev, "not enough CNQ resources.\n");
+		DP_ERR(dev, "Failed. At least one CNQ is required.\n");
+		rc = -ENOMEM;
 		goto init_err;
 	}
 
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 35d5b89..cd0408c 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1888,18 +1888,23 @@
 		SET_FIELD(qp_params.modify_flags,
 			  QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
 
-		qp_params.ack_timeout = attr->timeout;
-		if (attr->timeout) {
-			u32 temp;
-
-			temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
-			/* FW requires [msec] */
-			qp_params.ack_timeout = temp;
-		} else {
-			/* Infinite */
+		/* The received timeout value is an exponent used like this:
+		 *    "12.7.34 LOCAL ACK TIMEOUT
+		 *    Value representing the transport (ACK) timeout for use by
+		 *    the remote, expressed as: 4.096 * 2^timeout [usec]"
+		 * The FW expects timeout in msec so we need to divide the usec
+		 * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2,
+		 * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8).
+		 * The value of zero means infinite so we use a 'max_t' to make
+		 * sure that sub 1 msec values will be configured as 1 msec.
+		 */
+		if (attr->timeout)
+			qp_params.ack_timeout =
+					1 << max_t(int, attr->timeout - 8, 0);
+		else
 			qp_params.ack_timeout = 0;
-		}
 	}
+
 	if (attr_mask & IB_QP_RETRY_CNT) {
 		SET_FIELD(qp_params.modify_flags,
 			  QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
@@ -2807,6 +2812,11 @@
 
 	switch (wr->opcode) {
 	case IB_WR_SEND_WITH_IMM:
+		if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
+			rc = -EINVAL;
+			*bad_wr = wr;
+			break;
+		}
 		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
 		swqe = (struct rdma_sq_send_wqe_1st *)wqe;
 		swqe->wqe_size = 2;
@@ -2848,6 +2858,11 @@
 		break;
 
 	case IB_WR_RDMA_WRITE_WITH_IMM:
+		if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
+			rc = -EINVAL;
+			*bad_wr = wr;
+			break;
+		}
 		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
 		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
 
@@ -3467,7 +3482,7 @@
 {
 	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
 	struct qedr_cq *cq = get_qedr_cq(ibcq);
-	union rdma_cqe *cqe = cq->latest_cqe;
+	union rdma_cqe *cqe;
 	u32 old_cons, new_cons;
 	unsigned long flags;
 	int update = 0;
@@ -3477,6 +3492,7 @@
 		return qedr_gsi_poll_cq(ibcq, num_entries, wc);
 
 	spin_lock_irqsave(&cq->cq_lock, flags);
+	cqe = cq->latest_cqe;
 	old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
 	while (num_entries && is_valid_cqe(cq, cqe)) {
 		struct qedr_qp *qp;
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index 6d9904a..29dc527 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -197,7 +197,7 @@
 		return ERR_PTR(-EINVAL);
 
 	/* Allocate the completion queue structure. */
-	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+	cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, rdi->dparms.node);
 	if (!cq)
 		return ERR_PTR(-ENOMEM);
 
@@ -213,7 +213,9 @@
 		sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
 	else
 		sz += sizeof(struct ib_wc) * (entries + 1);
-	wc = vmalloc_user(sz);
+	wc = udata ?
+		vmalloc_user(sz) :
+		vzalloc_node(sz, rdi->dparms.node);
 	if (!wc) {
 		ret = ERR_PTR(-ENOMEM);
 		goto bail_cq;
@@ -368,7 +370,9 @@
 		sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
 	else
 		sz += sizeof(struct ib_wc) * (cqe + 1);
-	wc = vmalloc_user(sz);
+	wc = udata ?
+		vmalloc_user(sz) :
+		vzalloc_node(sz, rdi->dparms.node);
 	if (!wc)
 		return -ENOMEM;
 
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 59f37f4..ced416f 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -747,9 +747,8 @@
 		memcpy(wqe->dma.sge, ibwr->sg_list,
 		       num_sge * sizeof(struct ib_sge));
 
-	wqe->iova		= (mask & WR_ATOMIC_MASK) ?
-					atomic_wr(ibwr)->remote_addr :
-					rdma_wr(ibwr)->remote_addr;
+	wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
+		mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
 	wqe->mask		= mask;
 	wqe->dma.length		= length;
 	wqe->dma.resid		= length;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 0df7d45..17c5bc7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -2119,6 +2119,9 @@
 		goto event_failed;
 	}
 
+	/* call event handler to ensure pkey in sync */
+	queue_work(ipoib_workqueue, &priv->flush_heavy);
+
 	result = register_netdev(priv->dev);
 	if (result) {
 		printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 84f9185..463ea59 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2626,9 +2626,11 @@
 		ret = FAST_IO_FAIL;
 	else
 		ret = FAILED;
-	srp_free_req(ch, req, scmnd, 0);
-	scmnd->result = DID_ABORT << 16;
-	scmnd->scsi_done(scmnd);
+	if (ret == SUCCESS) {
+		srp_free_req(ch, req, scmnd, 0);
+		scmnd->result = DID_ABORT << 16;
+		scmnd->scsi_done(scmnd);
+	}
 
 	return ret;
 }
@@ -3395,12 +3397,10 @@
 				      num_online_nodes());
 		const int ch_end = ((node_idx + 1) * target->ch_count /
 				    num_online_nodes());
-		const int cv_start = (node_idx * ibdev->num_comp_vectors /
-				      num_online_nodes() + target->comp_vector)
-				     % ibdev->num_comp_vectors;
-		const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
-				    num_online_nodes() + target->comp_vector)
-				   % ibdev->num_comp_vectors;
+		const int cv_start = node_idx * ibdev->num_comp_vectors /
+				     num_online_nodes();
+		const int cv_end = (node_idx + 1) * ibdev->num_comp_vectors /
+				   num_online_nodes();
 		int cpu_idx = 0;
 
 		for_each_online_cpu(cpu) {
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 7cf468a..9888c9b 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2292,12 +2292,8 @@
 	}
 	spin_unlock_irqrestore(&ioctx->spinlock, flags);
 
-	if (unlikely(transport_check_aborted_status(&ioctx->cmd, false)
-		     || WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) {
-		atomic_inc(&ch->req_lim_delta);
-		srpt_abort_cmd(ioctx);
+	if (unlikely(WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT)))
 		return;
-	}
 
 	/* For read commands, transfer the data to the initiator. */
 	if (ioctx->cmd.data_direction == DMA_FROM_DEVICE &&
@@ -2670,7 +2666,8 @@
 	struct srpt_rdma_ch *ch = ioctx->ch;
 	unsigned long flags;
 
-	WARN_ON(ioctx->state != SRPT_STATE_DONE);
+	WARN_ON_ONCE(ioctx->state != SRPT_STATE_DONE &&
+		     !(ioctx->cmd.transport_state & CMD_T_ABORTED));
 
 	if (ioctx->n_rw_ctx) {
 		srpt_free_rw_ctxs(ch, ioctx);
diff --git a/drivers/input/input-leds.c b/drivers/input/input-leds.c
index 766bf26..5f04b2d 100644
--- a/drivers/input/input-leds.c
+++ b/drivers/input/input-leds.c
@@ -88,6 +88,7 @@
 			      const struct input_device_id *id)
 {
 	struct input_leds *leds;
+	struct input_led *led;
 	unsigned int num_leds;
 	unsigned int led_code;
 	int led_no;
@@ -119,14 +120,13 @@
 
 	led_no = 0;
 	for_each_set_bit(led_code, dev->ledbit, LED_CNT) {
-		struct input_led *led = &leds->leds[led_no];
-
-		led->handle = &leds->handle;
-		led->code = led_code;
-
 		if (!input_led_info[led_code].name)
 			continue;
 
+		led = &leds->leds[led_no];
+		led->handle = &leds->handle;
+		led->code = led_code;
+
 		led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s",
 					   dev_name(&dev->dev),
 					   input_led_info[led_code].name);
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 3026c06..f8a987a 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -398,6 +398,18 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called keychord.
 
+
+config STMVL53L0X
+	tristate "STM VL53L0X Ranging Sensor"
+	depends on I2C
+	help
+	  Say Y here if you want to enable the STM VL53L0X FlightSense
+	  Time-of-Flight (ToF) laser-ranging sensor driver, which reports
+	  the distance to an obstacle.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called stmvl53l0x.
+
 config INPUT_KEYSPAN_REMOTE
 	tristate "Keyspan DMR USB remote control"
 	depends on USB_ARCH_HAS_HCD
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index c3af7c2..149273c 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -82,3 +82,5 @@
 obj-$(CONFIG_INPUT_XEN_KBDDEV_FRONTEND)	+= xen-kbdfront.o
 obj-$(CONFIG_INPUT_YEALINK)		+= yealink.o
 obj-$(CONFIG_INPUT_IDEAPAD_SLIDEBAR)	+= ideapad_slidebar.o
+obj-$(CONFIG_INPUT_PIXART_OTS_PAT9125_SWITCH)	+= ots_pat9125/
+obj-$(CONFIG_STMVL53L0X)	+= vl53l0x/
diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
index 930424e..251d64c 100644
--- a/drivers/input/misc/drv260x.c
+++ b/drivers/input/misc/drv260x.c
@@ -521,7 +521,7 @@
 	if (!haptics)
 		return -ENOMEM;
 
-	haptics->rated_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
+	haptics->overdrive_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
 	haptics->rated_voltage = DRV260X_DEF_RATED_VOLT;
 
 	if (pdata) {
diff --git a/drivers/input/misc/vl53l0x/Makefile b/drivers/input/misc/vl53l0x/Makefile
new file mode 100644
index 0000000..af00414
--- /dev/null
+++ b/drivers/input/misc/vl53l0x/Makefile
@@ -0,0 +1,20 @@
+#
+# Makefile for the VL53L0X drivers.
+#
+
+# Each configuration option enables a list of files.
+FEATURE_USE_CCI := false
+#FEATURE_USE_CCI := true
+
+ifeq ($(FEATURE_USE_CCI), true)
+ccflags-y	+= -Idrivers/input/misc/vl53l0x/inc -DCAMERA_CCI
+else
+ccflags-y	+= -Idrivers/input/misc/vl53l0x/inc -DSTM_TEST
+endif
+
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/cci
+obj-$(CONFIG_STMVL53L0X)			+= stmvl53l0x.o
+stmvl53l0x-objs				:= stmvl53l0x_module.o \
+					   stmvl53l0x_module-i2c.o \
+					   stmvl53l0x_module-cci.o \
+					   src/vl53l0x_api_calibration.o \
+					   src/vl53l0x_api_core.o \
+					   src/vl53l0x_api_ranging.o \
+					   src/vl53l0x_api_strings.o \
+					   src/vl53l0x_api.o \
+					   src/vl53l0x_platform.o \
+					   src/vl53l0x_i2c_platform.o \
+					   src/vl53l0x_port_i2c.o
diff --git a/drivers/input/misc/vl53l0x/inc/vl53l0x_api.h b/drivers/input/misc/vl53l0x/inc/vl53l0x_api.h
new file mode 100644
index 0000000..a0e0e79
--- /dev/null
+++ b/drivers/input/misc/vl53l0x/inc/vl53l0x_api.h
@@ -0,0 +1,1943 @@
+/*
+ *  vl53l0x_api.h - Linux kernel modules for
+ *  STM VL53L0 FlightSense TOF sensor
+ *
+ *  Copyright (C) 2016 STMicroelectronics Imaging Division.
+ *  Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#ifndef _VL_API_H_
+#define _VL_API_H_
+
+#include "vl53l0x_api_strings.h"
+#include "vl53l0x_def.h"
+#include "vl53l0x_platform.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#ifdef _MSC_VER
+#   ifdef VL_API_EXPORTS
+#       define VL_API  __declspec(dllexport)
+#   else
+#       define VL_API
+#   endif
+#else
+#   define VL_API
+#endif
+
+/** @defgroup VL_cut11_group VL53L0X cut1.1 Function Definition
+ *  @brief    VL53L0X cut1.1 Function Definition
+ *  @{
+ */
+
+/** @defgroup VL_general_group VL53L0X General Functions
+ *  @brief    General functions and definitions
+ *  @{
+ */
+
+/**
+ * @brief Return the VL53L0X PAL Implementation Version
+ *
+ * @note This function doesn't access to the device
+ *
+ * @param   pVersion              Pointer to current PAL Implementation Version
+ * @return  VL_ERROR_NONE     Success
+ * @return  "Other error code"    See ::int8_t
+ */
+VL_API int8_t VL_GetVersion(struct VL_Version_t *pVersion);
+
+/**
+ * @brief Return the PAL Specification Version used for the current
+ * implementation.
+ *
+ * @note This function doesn't access to the device
+ *
+ * @param   pPalSpecVersion       Pointer to current PAL Specification Version
+ * @return  VL_ERROR_NONE        Success
+ * @return  "Other error code"    See ::int8_t
+ */
+VL_API int8_t VL_GetPalSpecVersion(
+	struct VL_Version_t *pPalSpecVersion);
+
+/**
+ * @brief Reads the Product Revision for a given Device
+ * This function can be used to distinguish cut1.0 from cut1.1.
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                 Device Handle
+ * @param   pProductRevisionMajor  Pointer to Product Revision Major
+ * for a given Device
+ * @param   pProductRevisionMinor  Pointer to Product Revision Minor
+ * for a given Device
+ * @return  VL_ERROR_NONE      Success
+ * @return  "Other error code"  See ::int8_t
+ */
+VL_API int8_t VL_GetProductRevision(struct vl_data *Dev,
+	uint8_t *pProductRevisionMajor, uint8_t *pProductRevisionMinor);
+
+/**
+ * @brief Reads the Device information for given Device
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                 Device Handle
+ * @param   pVL_DeviceInfo  Pointer to current device info for a given
+ *  Device
+ * @return  VL_ERROR_NONE   Success
+ * @return  "Other error code"  See ::int8_t
+ */
+VL_API int8_t VL_GetDeviceInfo(struct vl_data *Dev,
+	struct VL_DeviceInfo_t *pVL_DeviceInfo);
+
+/**
+ * @brief Read current status of the error register for the selected device
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                   Device Handle
+ * @param   pDeviceErrorStatus    Pointer to current error code of the device
+ * @return  VL_ERROR_NONE     Success
+ * @return  "Other error code"    See ::int8_t
+ */
+VL_API int8_t VL_GetDeviceErrorStatus(struct vl_data *Dev,
+	uint8_t *pDeviceErrorStatus);
+
+/**
+ * @brief Human readable Range Status string for a given RangeStatus
+ *
+ * @note This function doesn't access to the device
+ *
+ * @param   RangeStatus         The RangeStatus code as stored on
+ * @a struct VL_RangingMeasurementData_t
+ * @param   pRangeStatusString  The returned RangeStatus string.
+ * @return  VL_ERROR_NONE   Success
+ * @return  "Other error code"  See ::int8_t
+ */
+VL_API int8_t VL_GetRangeStatusString(uint8_t RangeStatus,
+	char *pRangeStatusString);
+
+/**
+ * @brief Human readable error string for a given Error Code
+ *
+ * @note This function doesn't access to the device
+ *
+ * @param   ErrorCode           The error code as stored on ::uint8_t
+ * @param   pDeviceErrorString  The error string corresponding to the ErrorCode
+ * @return  VL_ERROR_NONE   Success
+ * @return  "Other error code"  See ::int8_t
+ */
+VL_API int8_t VL_GetDeviceErrorString(
+	uint8_t ErrorCode, char *pDeviceErrorString);
+
+/**
+ * @brief Human readable error string for current PAL error status
+ *
+ * @note This function doesn't access to the device
+ *
+ * @param   PalErrorCode       The error code as stored on @a int8_t
+ * @param   pPalErrorString    The error string corresponding to the
+ * PalErrorCode
+ * @return  VL_ERROR_NONE  Success
+ * @return  "Other error code" See ::int8_t
+ */
+VL_API int8_t VL_GetPalErrorString(int8_t PalErrorCode,
+	char *pPalErrorString);
+
+/**
+ * @brief Human readable PAL State string
+ *
+ * @note This function doesn't access to the device
+ *
+ * @param   PalStateCode          The State code as stored on @a uint8_t
+ * @param   pPalStateString       The State string corresponding to the
+ * PalStateCode
+ * @return  VL_ERROR_NONE     Success
+ * @return  "Other error code"    See ::int8_t
+ */
+VL_API int8_t VL_GetPalStateString(uint8_t PalStateCode,
+	char *pPalStateString);
+
+/**
+ * @brief Reads the internal state of the PAL for a given Device
+ *
+ * @note This function doesn't access to the device
+ *
+ * @param   Dev                   Device Handle
+ * @param   pPalState             Pointer to current state of the PAL for a
+ * given Device
+ * @return  VL_ERROR_NONE     Success
+ * @return  "Other error code"    See ::int8_t
+ */
+VL_API int8_t VL_GetPalState(struct vl_data *Dev,
+	uint8_t *pPalState);
+
+/**
+ * @brief Set the power mode for a given Device
+ * The power mode can be Standby or Idle. Different level of both Standby and
+ * Idle can exists.
+ * This function should not be used when device is in Ranging state.
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                   Device Handle
+ * @param   PowerMode             The value of the power mode to set.
+ * see ::uint8_t
+ *                                Valid values are:
+ *                                VL_POWERMODE_STANDBY_LEVEL1,
+ *                                VL_POWERMODE_IDLE_LEVEL1
+ * @return  VL_ERROR_NONE                  Success
+ * @return  VL_ERROR_MODE_NOT_SUPPORTED    This error occurs when PowerMode
+ * is not in the supported list
+ * @return  "Other error code"    See ::int8_t
+ */
+VL_API int8_t VL_SetPowerMode(struct vl_data *Dev,
+	uint8_t PowerMode);
+
+/**
+ * @brief Get the power mode for a given Device
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                   Device Handle
+ * @param   pPowerMode            Pointer to the current value of the power
+ * mode. see ::uint8_t
+ *                                Valid values are:
+ *                                VL_POWERMODE_STANDBY_LEVEL1,
+ *                                VL_POWERMODE_IDLE_LEVEL1
+ * @return  VL_ERROR_NONE     Success
+ * @return  "Other error code"    See ::int8_t
+ */
+VL_API int8_t VL_GetPowerMode(struct vl_data *Dev,
+	uint8_t *pPowerMode);
+
+/**
+ * Set or override the part-to-part calibration offset
+ * \sa VL_DataInit()   VL_GetOffsetCalibrationDataMicroMeter()
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                                Device Handle
+ * @param   OffsetCalibrationDataMicroMeter    Offset (microns)
+ * @return  VL_ERROR_NONE                  Success
+ * @return  "Other error code"                 See ::int8_t
+ */
+VL_API int8_t VL_SetOffsetCalibrationDataMicroMeter(
+	struct vl_data *Dev, int32_t OffsetCalibrationDataMicroMeter);
+
+/**
+ * @brief Get part to part calibration offset
+ *
+ * @par Function Description
+ * Should only be used after a successful call to @a VL_DataInit to backup
+ * device NVM value
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                                Device Handle
+ * @param   pOffsetCalibrationDataMicroMeter   Return part to part
+ * calibration offset from device (microns)
+ * @return  VL_ERROR_NONE                  Success
+ * @return  "Other error code"                 See ::int8_t
+ */
+VL_API int8_t VL_GetOffsetCalibrationDataMicroMeter(
+	struct vl_data *Dev, int32_t *pOffsetCalibrationDataMicroMeter);
+
+/**
+ * Set the linearity corrective gain
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                                Device Handle
+ * @param   LinearityCorrectiveGain            Linearity corrective
+ * gain in x1000
+ * if value is 1000 then no modification is applied.
+ * @return  VL_ERROR_NONE                  Success
+ * @return  "Other error code"                 See ::int8_t
+ */
+VL_API int8_t VL_SetLinearityCorrectiveGain(struct vl_data *Dev,
+	int16_t LinearityCorrectiveGain);
+
+/**
+ * @brief Get the linearity corrective gain
+ *
+ * @par Function Description
+ * Should only be used after a successful call to @a VL_DataInit to backup
+ * device NVM value
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                                Device Handle
+ * @param   pLinearityCorrectiveGain           Pointer to the linearity
+ * corrective gain in x1000
+ * if value is 1000 then no modification is applied.
+ * @return  VL_ERROR_NONE                  Success
+ * @return  "Other error code"                 See ::int8_t
+ */
+VL_API int8_t VL_GetLinearityCorrectiveGain(struct vl_data *Dev,
+	uint16_t *pLinearityCorrectiveGain);
+
+/**
+ * Set Group parameter Hold state
+ *
+ * @par Function Description
+ * Set or remove device internal group parameter hold
+ *
+ * @note This function is not Implemented
+ *
+ * @param   Dev      Device Handle
+ * @param   GroupParamHold   Group parameter Hold state to be set (on/off)
+ * @return  VL_ERROR_NOT_IMPLEMENTED        Not implemented
+ */
+VL_API int8_t VL_SetGroupParamHold(struct vl_data *Dev,
+	uint8_t GroupParamHold);
+
+/**
+ * @brief Get the maximal distance for actual setup
+ * @par Function Description
+ * Device must be initialized through @a VL_SetParameters() prior to calling
+ * this function.
+ *
+ * Any range value more than the value returned is to be considered as
+ * "no target detected" or
+ * "no target in detectable range"\n
+ * @warning The maximal distance depends on the setup
+ *
+ * @note This function is not Implemented
+ *
+ * @param   Dev      Device Handle
+ * @param   pUpperLimitMilliMeter   The maximal range limit for actual setup
+ * (in millimeter)
+ * @return  VL_ERROR_NOT_IMPLEMENTED        Not implemented
+ */
+VL_API int8_t VL_GetUpperLimitMilliMeter(struct vl_data *Dev,
+	uint16_t *pUpperLimitMilliMeter);
+
+
+/**
+ * @brief Get the Total Signal Rate
+ * @par Function Description
+ * This function will return the Total Signal Rate after a good ranging is done.
+ *
+ * @note This function access to Device
+ *
+ * @param   Dev      Device Handle
+ * @param   pTotalSignalRate   Total Signal Rate value in Mega count per second
+ * @return  VL_ERROR_NONE     Success
+ * @return  "Other error code"    See ::int8_t
+ */
+int8_t VL_GetTotalSignalRate(struct vl_data *Dev,
+	unsigned int *pTotalSignalRate);
+
+/** @} VL_general_group */
+
+/** @defgroup VL_init_group VL53L0X Init Functions
+ *  @brief    VL53L0X Init Functions
+ *  @{
+ */
+
+/**
+ * @brief Set new device address
+ *
+ * After completion the device will answer to the new address programmed.
+ * This function should be called when several devices are used in parallel,
+ * before starting to program the sensors.
+ * When a single device is used, there is no need to call this function.
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                   Device Handle
+ * @param   DeviceAddress         The new Device address
+ * @return  VL_ERROR_NONE     Success
+ * @return  "Other error code"    See ::int8_t
+ */
+VL_API int8_t VL_SetDeviceAddress(struct vl_data *Dev,
+	uint8_t DeviceAddress);
+
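+/*
+ * Example (illustrative sketch, not part of the original ST documentation):
+ * assigning unique bus addresses to two sensors that share one I2C bus.
+ * Releasing each sensor's XSHUT line, so that only the part being programmed
+ * answers at the default address, is assumed to be handled by board code
+ * outside this API; the board helper name and the address values below are
+ * hypothetical.
+ *
+ *	int8_t example_assign_addresses(struct vl_data *first,
+ *		struct vl_data *second)
+ *	{
+ *		int8_t Status;
+ *
+ *		board_release_xshut(0);		// hypothetical board helper
+ *		Status = VL_SetDeviceAddress(first, 0x54);
+ *		if (Status != VL_ERROR_NONE)
+ *			return Status;
+ *		board_release_xshut(1);		// hypothetical board helper
+ *		return VL_SetDeviceAddress(second, 0x56);
+ *	}
+ */
+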
+/**
+ *
+ * @brief One time device initialization
+ *
+ * To be called once and only once after device is brought out of reset
+ * (Chip enable) and booted see @a VL_WaitDeviceBooted()
+ *
+ * @par Function Description
+ * When not used after a fresh device "power up" or reset, it may return
+ * @a #VL_ERROR_CALIBRATION_WARNING meaning wrong calibration data
+ * may have been fetched from device that can result in ranging offset error\n
+ * If the application cannot execute a device reset, or needs to run
+ * VL_DataInit multiple times, then it must ensure proper offset calibration
+ * saving and restoring on its own, by using @a VL_GetOffsetCalibrationData()
+ * on the first power up and then @a VL_SetOffsetCalibrationData() in all
+ * subsequent inits.
+ * This function will change the PAL state from VL_STATE_POWERDOWN to
+ * VL_STATE_WAIT_STATICINIT.
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                   Device Handle
+ * @return  VL_ERROR_NONE     Success
+ * @return  "Other error code"    See ::int8_t
+ */
+VL_API int8_t VL_DataInit(struct vl_data *Dev);
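+
+/*
+ * Example (illustrative sketch, not part of the original ST documentation):
+ * one way a caller could preserve the offset calibration across repeated
+ * VL_DataInit() calls, as recommended above.  Only the MicroMeter accessors
+ * declared in this header are used; the static flag is illustrative only.
+ *
+ *	static int32_t saved_offset_um;
+ *	static int have_saved_offset;
+ *
+ *	int8_t example_init(struct vl_data *Dev)
+ *	{
+ *		int8_t Status = VL_DataInit(Dev);
+ *
+ *		if (Status != VL_ERROR_NONE)
+ *			return Status;
+ *		if (!have_saved_offset) {
+ *			// First power up: back up the NVM offset.
+ *			Status = VL_GetOffsetCalibrationDataMicroMeter(Dev,
+ *					&saved_offset_um);
+ *			have_saved_offset = (Status == VL_ERROR_NONE);
+ *		} else {
+ *			// Re-init without a device reset: restore it.
+ *			Status = VL_SetOffsetCalibrationDataMicroMeter(Dev,
+ *					saved_offset_um);
+ *		}
+ *		return Status;
+ *	}
+ */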
+
+/**
+ * @brief Set the tuning settings pointer
+ *
+ * This function is used to specify the Tuning settings buffer to be used
+ * for a given device. The buffer contains all the necessary data to permit
+ * the API to write tuning settings.
+ * This function permits forcing the use of either external or internal
+ * tuning settings.
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                             Device Handle
+ * @param   pTuningSettingBuffer            Pointer to tuning settings buffer.
+ * @param   UseInternalTuningSettings       Use internal tuning settings value.
+ * @return  VL_ERROR_NONE     Success
+ * @return  "Other error code"    See ::int8_t
+ */
+VL_API int8_t VL_SetTuningSettingBuffer(struct vl_data *Dev,
+	uint8_t *pTuningSettingBuffer, uint8_t UseInternalTuningSettings);
+
+/**
+ * @brief Get the tuning settings pointer and the internal external switch
+ * value.
+ *
+ * This function is used to get the Tuning settings buffer pointer and the
+ * value of the switch that selects either external or internal tuning
+ * settings.
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                        Device Handle
+ * @param   ppTuningSettingBuffer      Pointer to tuning settings buffer.
+ * @param   pUseInternalTuningSettings Pointer to store Use internal tuning
+ *                                     settings value.
+ * @return  VL_ERROR_NONE          Success
+ * @return  "Other error code"         See ::int8_t
+ */
+VL_API int8_t VL_GetTuningSettingBuffer(struct vl_data *Dev,
+	uint8_t **ppTuningSettingBuffer, uint8_t *pUseInternalTuningSettings);
+
+/**
+ * @brief Do basic device init (and, if needed, patch loading)
+ * This function will change the PAL state from
+ * VL_STATE_WAIT_STATICINIT to VL_STATE_IDLE.
+ * In this stage all default settings will be applied.
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                   Device Handle
+ * @return  VL_ERROR_NONE     Success
+ * @return  "Other error code"    See ::int8_t
+ */
+VL_API int8_t VL_StaticInit(struct vl_data *Dev);
+
+/**
+ * @brief Wait for the device to boot after chip enable (hardware standby)
+ * This function can be run only when the PAL state is VL_STATE_POWERDOWN.
+ *
+ * @note This function is not Implemented
+ *
+ * @param   Dev      Device Handle
+ * @return  VL_ERROR_NOT_IMPLEMENTED Not implemented
+ *
+ */
+VL_API int8_t VL_WaitDeviceBooted(struct vl_data *Dev);
+
+/**
+ * @brief Do a hard reset or soft reset (depending on implementation) of the
+ * device. \nAfter this function is called, the device must be in the same
+ * state as right after a power-up sequence. This function will change the
+ * PAL state to VL_STATE_POWERDOWN.
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                   Device Handle
+ * @return  VL_ERROR_NONE     Success
+ * @return  "Other error code"    See ::int8_t
+ */
+VL_API int8_t VL_ResetDevice(struct vl_data *Dev);
+
+/** @} VL_init_group */
+
+/** @defgroup VL_parameters_group VL53L0X Parameters Functions
+ *  @brief    Functions used to prepare and setup the device
+ *  @{
+ */
+
+/**
+ * @brief  Prepare device for operation
+ * @par Function Description
+ * Update device with provided parameters
+ * @li Then start ranging operation.
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                   Device Handle
+ * @param   pDeviceParameters     Pointer to store current device parameters.
+ * @return  VL_ERROR_NONE     Success
+ * @return  "Other error code"    See ::int8_t
+ */
+VL_API int8_t VL_SetDeviceParameters(struct vl_data *Dev,
+	const struct VL_DeviceParameters_t *pDeviceParameters);
+
+/**
+ * @brief  Retrieve current device parameters
+ * @par Function Description
+ * Get actual parameters of the device
+ * @li Then start ranging operation.
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                   Device Handle
+ * @param   pDeviceParameters     Pointer to store current device parameters.
+ * @return  VL_ERROR_NONE     Success
+ * @return  "Other error code"    See ::int8_t
+ */
+VL_API int8_t VL_GetDeviceParameters(struct vl_data *Dev,
+	struct VL_DeviceParameters_t *pDeviceParameters);
+
+/**
+ * @brief  Set a new device mode
+ * @par Function Description
+ * Set device to a new mode (ranging, histogram ...)
+ *
+ * @note This function doesn't Access to the device
+ *
+ * @param   Dev                   Device Handle
+ * @param   DeviceMode            New device mode to apply
+ *                                Valid values are:
+ *                                VL_DEVICEMODE_SINGLE_RANGING
+ *                                VL_DEVICEMODE_CONTINUOUS_RANGING
+ *                                VL_DEVICEMODE_CONTINUOUS_TIMED_RANGING
+ *                                VL_DEVICEMODE_SINGLE_HISTOGRAM
+ *                                VL_HISTOGRAMMODE_REFERENCE_ONLY
+ *                                VL_HISTOGRAMMODE_RETURN_ONLY
+ *                                VL_HISTOGRAMMODE_BOTH
+ *
+ *
+ * @return  VL_ERROR_NONE               Success
+ * @return  VL_ERROR_MODE_NOT_SUPPORTED This error occurs when DeviceMode is
+ *                                          not in the supported list
+ */
+VL_API int8_t VL_SetDeviceMode(struct vl_data *Dev,
+	uint8_t DeviceMode);
+
+/**
+ * @brief  Get the current device mode
+ * @par Function Description
+ * Get the actual mode of the device (ranging, histogram ...)
+ *
+ * @note This function doesn't Access to the device
+ *
+ * @param   Dev                   Device Handle
+ * @param   pDeviceMode           Pointer to current apply mode value
+ *                                Valid values are:
+ *                                VL_DEVICEMODE_SINGLE_RANGING
+ *                                VL_DEVICEMODE_CONTINUOUS_RANGING
+ *                                VL_DEVICEMODE_CONTINUOUS_TIMED_RANGING
+ *                                VL_DEVICEMODE_SINGLE_HISTOGRAM
+ *                                VL_HISTOGRAMMODE_REFERENCE_ONLY
+ *                                VL_HISTOGRAMMODE_RETURN_ONLY
+ *                                VL_HISTOGRAMMODE_BOTH
+ *
+ * @return  VL_ERROR_NONE                   Success
+ * @return  VL_ERROR_MODE_NOT_SUPPORTED     This error occurs when
+ * DeviceMode is not in the supported list
+ */
+VL_API int8_t VL_GetDeviceMode(struct vl_data *Dev,
+	uint8_t *pDeviceMode);
+
+/**
+ * @brief  Sets the resolution of range measurements.
+ * @par Function Description
+ * Set the resolution of range measurements to either 0.25mm if fraction
+ * is enabled, or 1mm if not.
+ *
+ * @note This function Accesses the device
+ *
+ * @param   Dev               Device Handle
+ * @param   Enable            Enable high resolution
+ *
+ * @return  VL_ERROR_NONE               Success
+ * @return  "Other error code"              See ::int8_t
+ */
+VL_API int8_t VL_SetRangeFractionEnable(struct vl_data *Dev,
+	uint8_t Enable);
+
+/**
+ * @brief  Gets the fraction enable parameter indicating the resolution of
+ * range measurements.
+ *
+ * @par Function Description
+ * Gets the fraction enable state, which translates to the resolution of
+ * range measurements as follows: Enabled = 0.25mm resolution,
+ * Not Enabled = 1mm resolution.
+ *
+ * @note This function Accesses the device
+ *
+ * @param   Dev       Device Handle
+ * @param   pEnable   Output Parameter reporting the fraction enable state.
+ *
+ * @return  VL_ERROR_NONE                   Success
+ * @return  "Other error code"                  See ::int8_t
+ */
+VL_API int8_t VL_GetFractionEnable(struct vl_data *Dev,
+	uint8_t *pEnable);
+
+/**
+ * @brief  Set a new Histogram mode
+ * @par Function Description
+ * Set device to a new Histogram mode
+ *
+ * @note This function doesn't Access to the device
+ *
+ * @param   Dev                   Device Handle
+ * @param   HistogramMode         New device mode to apply
+ *                                Valid values are:
+ *                                VL_HISTOGRAMMODE_DISABLED
+ *                                VL_DEVICEMODE_SINGLE_HISTOGRAM
+ *                                VL_HISTOGRAMMODE_REFERENCE_ONLY
+ *                                VL_HISTOGRAMMODE_RETURN_ONLY
+ *                                VL_HISTOGRAMMODE_BOTH
+ *
+ * @return  VL_ERROR_NONE                   Success
+ * @return  VL_ERROR_MODE_NOT_SUPPORTED     This error occurs when
+ * HistogramMode is not in the supported list
+ * @return  "Other error code"    See ::int8_t
+ */
+VL_API int8_t VL_SetHistogramMode(struct vl_data *Dev,
+	uint8_t HistogramMode);
+
+/**
+ * @brief  Get the current Histogram mode
+ * @par Function Description
+ * Get the current Histogram mode of a Device
+ *
+ * @note This function doesn't Access to the device
+ *
+ * @param   Dev                   Device Handle
+ * @param   pHistogramMode        Pointer to current Histogram Mode value
+ *                                Valid values are:
+ *                                VL_HISTOGRAMMODE_DISABLED
+ *                                VL_DEVICEMODE_SINGLE_HISTOGRAM
+ *                                VL_HISTOGRAMMODE_REFERENCE_ONLY
+ *                                VL_HISTOGRAMMODE_RETURN_ONLY
+ *                                VL_HISTOGRAMMODE_BOTH
+ * @return  VL_ERROR_NONE     Success
+ * @return  "Other error code"    See ::int8_t
+ */
+VL_API int8_t VL_GetHistogramMode(struct vl_data *Dev,
+	uint8_t *pHistogramMode);
+
+/**
+ * @brief Set Ranging Timing Budget in microseconds
+ *
+ * @par Function Description
+ * Defines the maximum time allowed by the user to the device to run a
+ * full ranging sequence for the current mode (ranging, histogram, ASL ...)
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                                Device Handle
+ * @param MeasurementTimingBudgetMicroSeconds  Max measurement time in
+ * microseconds.
+ *                                   Valid values are:
+ *                                   >= 17000 microsecs when wraparound enabled
+ *                                   >= 12000 microsecs when wraparound disabled
+ * @return  VL_ERROR_NONE             Success
+ * @return  VL_ERROR_INVALID_PARAMS   This error is returned if
+ MeasurementTimingBudgetMicroSeconds out of range
+ * @return  "Other error code"            See ::int8_t
+ */
+VL_API int8_t VL_SetMeasurementTimingBudgetMicroSeconds(
+	struct vl_data *Dev, uint32_t MeasurementTimingBudgetMicroSeconds);
+
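+/*
+ * Example (illustrative sketch, not part of the original ST documentation):
+ * trading measurement rate for accuracy by programming a 33 ms budget, which
+ * is above the documented 17000 us minimum that applies when wraparound is
+ * enabled.
+ *
+ *	int8_t example_set_budget(struct vl_data *Dev)
+ *	{
+ *		// 33000 us: longer budget, better accuracy, lower rate.
+ *		return VL_SetMeasurementTimingBudgetMicroSeconds(Dev, 33000);
+ *	}
+ */
+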
+/**
+ * @brief Get Ranging Timing Budget in microseconds
+ *
+ * @par Function Description
+ * Returns the programmed maximum time allowed by the user to the
+ * device to run a full ranging sequence for the current mode
+ * (ranging, histogram, ASL ...)
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                                    Device Handle
+ * @param   pMeasurementTimingBudgetMicroSeconds   Max measurement time in
+ * microseconds.
+ *                                   Valid values are:
+ *                                   >= 17000 microsecs when wraparound enabled
+ *                                   >= 12000 microsecs when wraparound disabled
+ * @return  VL_ERROR_NONE                      Success
+ * @return  "Other error code"                     See ::int8_t
+ */
+VL_API int8_t VL_GetMeasurementTimingBudgetMicroSeconds(
+	struct vl_data *Dev, uint32_t *pMeasurementTimingBudgetMicroSeconds);
+
+/**
+ * @brief Gets the VCSEL pulse period.
+ *
+ * @par Function Description
+ * This function retrieves the VCSEL pulse period for the given period type.
+ *
+ * @note This function Accesses the device
+ *
+ * @param   Dev                      Device Handle
+ * @param   VcselPeriodType          VCSEL period identifier (pre-range|final).
+ * @param   pVCSELPulsePeriod        Pointer to VCSEL period value.
+ * @return  VL_ERROR_NONE        Success
+ * @return  VL_ERROR_INVALID_PARAMS  Error VcselPeriodType parameter not
+ *                                       supported.
+ * @return  "Other error code"           See ::int8_t
+ */
+VL_API int8_t VL_GetVcselPulsePeriod(struct vl_data *Dev,
+	uint8_t VcselPeriodType, uint8_t *pVCSELPulsePeriod);
+
+/**
+ * @brief Sets the VCSEL pulse period.
+ *
+ * @par Function Description
+ * This function sets the VCSEL pulse period for the given period type.
+ *
+ * @note This function Accesses the device
+ *
+ * @param   Dev                       Device Handle
+ * @param   VcselPeriodType	      VCSEL period identifier (pre-range|final).
+ * @param   VCSELPulsePeriod          VCSEL period value
+ * @return  VL_ERROR_NONE            Success
+ * @return  VL_ERROR_INVALID_PARAMS  Error VcselPeriodType parameter not
+ *                                       supported.
+ * @return  "Other error code"           See ::int8_t
+ */
+VL_API int8_t VL_SetVcselPulsePeriod(struct vl_data *Dev,
+	uint8_t VcselPeriodType, uint8_t VCSELPulsePeriod);
+
+/**
+ * @brief Sets the (on/off) state of a requested sequence step.
+ *
+ * @par Function Description
+ * This function enables/disables a requested sequence step.
+ *
+ * @note This function Accesses the device
+ *
+ * @param   Dev                          Device Handle
+ * @param   SequenceStepId	         Sequence step identifier.
+ * @param   SequenceStepEnabled          Demanded state {0=Off,1=On}.
+ * @return  VL_ERROR_NONE            Success
+ * @return  VL_ERROR_INVALID_PARAMS  Error SequenceStepId parameter not
+ *                                       supported.
+ * @return  "Other error code"           See ::int8_t
+ */
+VL_API int8_t VL_SetSequenceStepEnable(struct vl_data *Dev,
+	uint8_t SequenceStepId, uint8_t SequenceStepEnabled);
+
+/**
+ * @brief Gets the (on/off) state of a requested sequence step.
+ *
+ * @par Function Description
+ * This function retrieves the state of a requested sequence step, i.e. on/off.
+ *
+ * @note This function Accesses the device
+ *
+ * @param   Dev                    Device Handle
+ * @param   SequenceStepId         Sequence step identifier.
+ * @param   pSequenceStepEnabled   Out parameter reporting if the sequence step
+ *                                 is enabled {0=Off,1=On}.
+ * @return  VL_ERROR_NONE            Success
+ * @return  VL_ERROR_INVALID_PARAMS  Error SequenceStepId parameter not
+ *                                       supported.
+ * @return  "Other error code"           See ::int8_t
+ */
+VL_API int8_t VL_GetSequenceStepEnable(struct vl_data *Dev,
+	uint8_t SequenceStepId, uint8_t *pSequenceStepEnabled);
+
+/**
+ * @brief Gets the (on/off) state of all sequence steps.
+ *
+ * @par Function Description
+ * This function retrieves the state of all sequence step in the scheduler.
+ *
+ * @note This function Accesses the device
+ *
+ * @param   Dev                          Device Handle
+ * @param   pSchedulerSequenceSteps      Pointer to struct containing result.
+ * @return  VL_ERROR_NONE            Success
+ * @return  "Other error code"           See ::int8_t
+ */
+VL_API int8_t VL_GetSequenceStepEnables(struct vl_data *Dev,
+	struct VL_SchedulerSequenceSteps_t *pSchedulerSequenceSteps);
+
+/**
+ * @brief Sets the timeout of a requested sequence step.
+ *
+ * @par Function Description
+ * This function sets the timeout of a requested sequence step.
+ *
+ * @note This function Accesses the device
+ *
+ * @param   Dev                          Device Handle
+ * @param   SequenceStepId               Sequence step identifier.
+ * @param   TimeOutMilliSecs             Demanded timeout
+ * @return  VL_ERROR_NONE            Success
+ * @return  VL_ERROR_INVALID_PARAMS  Error SequenceStepId parameter not
+ *                                       supported.
+ * @return  "Other error code"           See ::int8_t
+ */
+VL_API int8_t VL_SetSequenceStepTimeout(struct vl_data *Dev,
+	uint8_t SequenceStepId, unsigned int TimeOutMilliSecs);
+
+/**
+ * @brief Gets the timeout of a requested sequence step.
+ *
+ * @par Function Description
+ * This function retrieves the timeout of a requested sequence step.
+ *
+ * @note This function Accesses the device
+ *
+ * @param   Dev                          Device Handle
+ * @param   SequenceStepId               Sequence step identifier.
+ * @param   pTimeOutMilliSecs            Timeout value.
+ * @return  VL_ERROR_NONE            Success
+ * @return  VL_ERROR_INVALID_PARAMS  Error SequenceStepId parameter not
+ *                                       supported.
+ * @return  "Other error code"           See ::int8_t
+ */
+VL_API int8_t VL_GetSequenceStepTimeout(struct vl_data *Dev,
+	uint8_t SequenceStepId,
+	unsigned int *pTimeOutMilliSecs);
+
+/**
+ * @brief Gets number of sequence steps managed by the API.
+ *
+ * @par Function Description
+ * This function retrieves the number of sequence steps currently managed
+ * by the API
+ *
+ * @note This function Accesses the device
+ *
+ * @param   Dev                          Device Handle
+ * @param   pNumberOfSequenceSteps       Out parameter reporting the number of
+ *                                       sequence steps.
+ * @return  VL_ERROR_NONE            Success
+ * @return  "Other error code"           See ::int8_t
+ */
+VL_API int8_t VL_GetNumberOfSequenceSteps(struct vl_data *Dev,
+	uint8_t *pNumberOfSequenceSteps);
+
+/**
+ * @brief Gets the name of a given sequence step.
+ *
+ * @par Function Description
+ * This function retrieves the name of the sequence step corresponding to
+ * SequenceStepId.
+ *
+ * @note This function doesn't access the device
+ *
+ * @param   SequenceStepId               Sequence step identifier.
+ * @param   pSequenceStepsString         Pointer to Info string
+ *
+ * @return  VL_ERROR_NONE            Success
+ * @return  "Other error code"           See ::int8_t
+ */
+VL_API int8_t VL_GetSequenceStepsInfo(
+	uint8_t SequenceStepId, char *pSequenceStepsString);
+
+/**
+ * Program continuous mode Inter-Measurement period in milliseconds
+ *
+ * @par Function Description
+ * Trying to set a time shorter than the minimal value returns INVALID_PARAMS.
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                                  Device Handle
+ * @param   InterMeasurementPeriodMilliSeconds   Inter-Measurement Period in ms.
+ * @return  VL_ERROR_NONE                    Success
+ * @return  "Other error code"                   See ::int8_t
+ */
+VL_API int8_t VL_SetInterMeasurementPeriodMilliSeconds(
+	struct vl_data *Dev, uint32_t InterMeasurementPeriodMilliSeconds);
+
+/**
+ * Get continuous mode Inter-Measurement period in milliseconds
+ *
+ * @par Function Description
+ * When trying to set too short time return  INVALID_PARAMS minimal value
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                                  Device Handle
+ * @param   pInterMeasurementPeriodMilliSeconds  Pointer to programmed
+ *  Inter-Measurement Period in milliseconds.
+ * @return  VL_ERROR_NONE                    Success
+ * @return  "Other error code"                   See ::int8_t
+ */
+VL_API int8_t VL_GetInterMeasurementPeriodMilliSeconds(
+	struct vl_data *Dev, uint32_t *pInterMeasurementPeriodMilliSeconds);
+
+/**
+ * @brief Enable/Disable Cross talk compensation feature
+ *
+ * @note This function is not Implemented.
+ * Enable/Disable Cross Talk by setting the Cross Talk value to zero
+ * using @a VL_SetXTalkCompensationRateMegaCps().
+ *
+ * @param   Dev                       Device Handle
+ * @param   XTalkCompensationEnable   Cross talk compensation
+ *  to be set 0=disabled else = enabled
+ * @return  VL_ERROR_NOT_IMPLEMENTED   Not implemented
+ */
+VL_API int8_t VL_SetXTalkCompensationEnable(struct vl_data *Dev,
+	uint8_t XTalkCompensationEnable);
+
+/**
+ * @brief Get Cross talk compensation rate
+ *
+ * @note This function is not Implemented.
+ * Enable/Disable Cross Talk by setting the Cross Talk value to zero
+ * using @a VL_SetXTalkCompensationRateMegaCps().
+ *
+ * @param   Dev                        Device Handle
+ * @param   pXTalkCompensationEnable   Pointer to the Cross talk compensation
+ *  state 0=disabled or 1 = enabled
+ * @return  VL_ERROR_NOT_IMPLEMENTED   Not implemented
+ */
+VL_API int8_t VL_GetXTalkCompensationEnable(struct vl_data *Dev,
+	uint8_t *pXTalkCompensationEnable);
+
+/**
+ * @brief Set Cross talk compensation rate
+ *
+ * @par Function Description
+ * Set Cross talk compensation rate.
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                            Device Handle
+ * @param   XTalkCompensationRateMegaCps   Compensation rate in
+ *  Mega counts per second (16.16 fix point) see datasheet for details
+ * @return  VL_ERROR_NONE              Success
+ * @return  "Other error code"             See ::int8_t
+ */
+VL_API int8_t VL_SetXTalkCompensationRateMegaCps(
+	struct vl_data *Dev, unsigned int XTalkCompensationRateMegaCps);
+
+/**
+ * @brief Get Cross talk compensation rate
+ *
+ * @par Function Description
+ * Get Cross talk compensation rate.
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                            Device Handle
+ * @param   pXTalkCompensationRateMegaCps  Pointer to Compensation rate
+ in Mega counts per second (16.16 fix point) see datasheet for details
+ * @return  VL_ERROR_NONE              Success
+ * @return  "Other error code"             See ::int8_t
+ */
+VL_API int8_t VL_GetXTalkCompensationRateMegaCps(
+	struct vl_data *Dev, unsigned int *pXTalkCompensationRateMegaCps);
+
+/**
+ * @brief Set Reference Calibration Parameters
+ *
+ * @par Function Description
+ * Set Reference Calibration Parameters.
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                            Device Handle
+ * @param   VhvSettings                    Parameter for VHV
+ * @param   PhaseCal                       Parameter for PhaseCal
+ * @return  VL_ERROR_NONE              Success
+ * @return  "Other error code"             See ::int8_t
+ */
+VL_API int8_t VL_SetRefCalibration(struct vl_data *Dev,
+	uint8_t VhvSettings, uint8_t PhaseCal);
+
+/**
+ * @brief Get Reference Calibration Parameters
+ *
+ * @par Function Description
+ * Get Reference Calibration Parameters.
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                            Device Handle
+ * @param   pVhvSettings                   Pointer to VHV parameter
+ * @param   pPhaseCal                      Pointer to PhaseCal Parameter
+ * @return  VL_ERROR_NONE              Success
+ * @return  "Other error code"             See ::int8_t
+ */
+VL_API int8_t VL_GetRefCalibration(struct vl_data *Dev,
+	uint8_t *pVhvSettings, uint8_t *pPhaseCal);
+
+/**
+ * @brief  Get the number of the check limit managed by a given Device
+ *
+ * @par Function Description
+ * This function gives the number of check limits managed by the Device
+ *
+ * @note This function doesn't Access to the device
+ *
+ * @param   pNumberOfLimitCheck           Pointer to the number of check limit.
+ * @return  VL_ERROR_NONE             Success
+ * @return  "Other error code"            See ::int8_t
+ */
+VL_API int8_t VL_GetNumberOfLimitCheck(
+	uint16_t *pNumberOfLimitCheck);
+
+/**
+ * @brief  Return a description string for a given limit check number
+ *
+ * @par Function Description
+ * This function returns a description string for a given limit check number.
+ * The limit check is identified with the LimitCheckId.
+ *
+ * @note This function doesn't Access to the device
+ *
+ * @param   Dev                           Device Handle
+ * @param   LimitCheckId                  Limit Check ID
+ (0<= LimitCheckId < VL_GetNumberOfLimitCheck() ).
+ * @param   pLimitCheckString             Pointer to the
+ description string of the given check limit.
+ * @return  VL_ERROR_NONE             Success
+ * @return  VL_ERROR_INVALID_PARAMS   This error is
+ returned when LimitCheckId value is out of range.
+ * @return  "Other error code"            See ::int8_t
+ */
+VL_API int8_t VL_GetLimitCheckInfo(struct vl_data *Dev,
+	uint16_t LimitCheckId, char *pLimitCheckString);
+
+/**
+ * @brief  Return the Status of the specified check limit
+ *
+ * @par Function Description
+ * This function returns the Status of the specified check limit.
+ * The value indicates whether the check has failed.
+ * The limit check is identified with the LimitCheckId.
+ *
+ * @note This function doesn't Access to the device
+ *
+ * @param   Dev                           Device Handle
+ * @param   LimitCheckId                  Limit Check ID
+ (0<= LimitCheckId < VL_GetNumberOfLimitCheck() ).
+ * @param   pLimitCheckStatus             Pointer to the
+ Limit Check Status of the given check limit.
+ * LimitCheckStatus:
+ * 0 the check did not fail
+ * 1 the check failed or is not enabled
+ *
+ * @return  VL_ERROR_NONE             Success
+ * @return  VL_ERROR_INVALID_PARAMS   This error is
+ returned when LimitCheckId value is out of range.
+ * @return  "Other error code"            See ::int8_t
+ */
+VL_API int8_t VL_GetLimitCheckStatus(struct vl_data *Dev,
+	uint16_t LimitCheckId, uint8_t *pLimitCheckStatus);
+
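+/*
+ * Example (illustrative sketch, not part of the original ST documentation):
+ * enumerating all limit checks and reporting their status after a ranging,
+ * using only the functions declared above.  The 64-byte buffer size is an
+ * assumption; the real maximum string length is defined elsewhere in the API.
+ *
+ *	void example_dump_limit_checks(struct vl_data *Dev)
+ *	{
+ *		uint16_t count = 0;
+ *		unsigned int id;
+ *		uint8_t failed;
+ *		char name[64];
+ *
+ *		if (VL_GetNumberOfLimitCheck(&count) != VL_ERROR_NONE)
+ *			return;
+ *		for (id = 0; id < count; id++) {
+ *			if (VL_GetLimitCheckInfo(Dev, id, name) !=
+ *					VL_ERROR_NONE)
+ *				continue;
+ *			if (VL_GetLimitCheckStatus(Dev, id, &failed) !=
+ *					VL_ERROR_NONE)
+ *				continue;
+ *			pr_info("limit check %u (%s): %s\n", id, name,
+ *				failed ? "failed or disabled" : "ok");
+ *		}
+ *	}
+ */
+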
+/**
+ * @brief  Enable/Disable a specific limit check
+ *
+ * @par Function Description
+ * This function Enable/Disable a specific limit check.
+ * The limit check is identified with the LimitCheckId.
+ *
+ * @note This function doesn't Access to the device
+ *
+ * @param   Dev                           Device Handle
+ * @param   LimitCheckId                  Limit Check ID
+ *  (0<= LimitCheckId < VL_GetNumberOfLimitCheck() ).
+ * @param   LimitCheckEnable              if 1 the check limit
+ *  corresponding to LimitCheckId is Enabled
+ *                                        if 0 the check limit
+ *  corresponding to LimitCheckId is disabled
+ * @return  VL_ERROR_NONE             Success
+ * @return  VL_ERROR_INVALID_PARAMS   This error is returned
+ *  when LimitCheckId value is out of range.
+ * @return  "Other error code"            See ::int8_t
+ */
+VL_API int8_t VL_SetLimitCheckEnable(struct vl_data *Dev,
+	uint16_t LimitCheckId, uint8_t LimitCheckEnable);
+
+/**
+ * @brief  Get specific limit check enable state
+ *
+ * @par Function Description
+ * This function gets the enable state of a specific limit check.
+ * The limit check is identified with the LimitCheckId.
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                           Device Handle
+ * @param   LimitCheckId                  Limit Check ID
+ *  (0<= LimitCheckId < VL_GetNumberOfLimitCheck() ).
+ * @param   pLimitCheckEnable             Pointer to the check limit enable
+ * value.
+ *  if 1 the check limit
+ *        corresponding to LimitCheckId is Enabled
+ *  if 0 the check limit
+ *        corresponding to LimitCheckId is disabled
+ * @return  VL_ERROR_NONE             Success
+ * @return  VL_ERROR_INVALID_PARAMS   This error is returned
+ *  when LimitCheckId value is out of range.
+ * @return  "Other error code"            See ::int8_t
+ */
+VL_API int8_t VL_GetLimitCheckEnable(struct vl_data *Dev,
+	uint16_t LimitCheckId, uint8_t *pLimitCheckEnable);
+
+/**
+ * @brief  Set a specific limit check value
+ *
+ * @par Function Description
+ * This function sets a specific limit check value.
+ * The limit check is identified with the LimitCheckId.
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                           Device Handle
+ * @param   LimitCheckId                  Limit Check ID
+ *  (0<= LimitCheckId < VL_GetNumberOfLimitCheck() ).
+ * @param   LimitCheckValue               Limit check Value for a given
+ * LimitCheckId
+ * @return  VL_ERROR_NONE             Success
+ * @return  VL_ERROR_INVALID_PARAMS   This error is returned when either
+ *  LimitCheckId or LimitCheckValue value is out of range.
+ * @return  "Other error code"            See ::int8_t
+ */
+VL_API int8_t VL_SetLimitCheckValue(struct vl_data *Dev,
+	uint16_t LimitCheckId, unsigned int LimitCheckValue);
+
+/**
+ * @brief  Get a specific limit check value
+ *
+ * @par Function Description
+ * This function gets a specific limit check value from the device, then
+ * updates internal values and check enables.
+ * The limit check is identified with the LimitCheckId.
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                           Device Handle
+ * @param   LimitCheckId                  Limit Check ID
+ *  (0<= LimitCheckId < VL_GetNumberOfLimitCheck() ).
+ * @param   pLimitCheckValue              Pointer to Limit
+ *  check Value for a given LimitCheckId.
+ * @return  VL_ERROR_NONE             Success
+ * @return  VL_ERROR_INVALID_PARAMS   This error is returned
+ *  when LimitCheckId value is out of range.
+ * @return  "Other error code"            See ::int8_t
+ */
+VL_API int8_t VL_GetLimitCheckValue(struct vl_data *Dev,
+	uint16_t LimitCheckId, unsigned int *pLimitCheckValue);
+
+/**
+ * @brief  Get the current value of the signal used for the limit check
+ *
+ * @par Function Description
+ * This function gets the current value of the signal used for the limit
+ * check. To obtain the latest value you should run a ranging measurement
+ * first.
+ * The value reported is linked to the limit check identified with the
+ * LimitCheckId.
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                           Device Handle
+ * @param   LimitCheckId                  Limit Check ID
+ *  (0<= LimitCheckId < VL_GetNumberOfLimitCheck() ).
+ * @param   pLimitCheckCurrent            Pointer to current Value for a
+ * given LimitCheckId.
+ * @return  VL_ERROR_NONE             Success
+ * @return  VL_ERROR_INVALID_PARAMS   This error is returned when
+ * LimitCheckId value is out of range.
+ * @return  "Other error code"            See ::int8_t
+ */
+VL_API int8_t VL_GetLimitCheckCurrent(struct vl_data *Dev,
+	uint16_t LimitCheckId, unsigned int *pLimitCheckCurrent);
+
+/**
+ * @brief  Enable (or disable) Wrap around Check
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                    Device Handle
+ * @param   WrapAroundCheckEnable  Wrap around Check to be set
+ *                                 0=disabled, other = enabled
+ * @return  VL_ERROR_NONE      Success
+ * @return  "Other error code"     See ::int8_t
+ */
+VL_API int8_t VL_SetWrapAroundCheckEnable(struct vl_data *Dev,
+		uint8_t WrapAroundCheckEnable);
+
+/**
+ * @brief  Get setup of Wrap around Check
+ *
+ * @par Function Description
+ * This function get the wrapAround check enable parameters
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                     Device Handle
+ * @param   pWrapAroundCheckEnable  Pointer to the Wrap around Check state
+ *                                  0=disabled or 1 = enabled
+ * @return  VL_ERROR_NONE       Success
+ * @return  "Other error code"      See ::int8_t
+ */
+VL_API int8_t VL_GetWrapAroundCheckEnable(struct vl_data *Dev,
+		uint8_t *pWrapAroundCheckEnable);
+
+/**
+ * @brief   Set Dmax Calibration Parameters for a given device
+ * When one of the parameters is zero, this function will get the parameter
+ * from NVM.
+ * @note This function doesn't Access to the device
+ *
+ * @param   Dev                    Device Handle
+ * @param   RangeMilliMeter        Calibration Distance
+ * @param   SignalRateRtnMegaCps   Signal rate return read at CalDistance
+ * @return  VL_ERROR_NONE      Success
+ * @return  "Other error code"     See ::int8_t
+ */
+VL_API int8_t VL_SetDmaxCalParameters(struct vl_data *Dev,
+		uint16_t RangeMilliMeter, unsigned int SignalRateRtnMegaCps);
+
+/**
+ * @brief  Get Dmax Calibration Parameters for a given device
+ *
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                     Device Handle
+ * @param   pRangeMilliMeter        Pointer to Calibration Distance
+ * @param   pSignalRateRtnMegaCps   Pointer to Signal rate return
+ * @return  VL_ERROR_NONE       Success
+ * @return  "Other error code"      See ::int8_t
+ */
+VL_API int8_t VL_GetDmaxCalParameters(struct vl_data *Dev,
+	uint16_t *pRangeMilliMeter, unsigned int *pSignalRateRtnMegaCps);
+
+/** @} VL_parameters_group */
+
+/** @defgroup VL_measurement_group VL53L0X Measurement Functions
+ *  @brief    Functions used for the measurements
+ *  @{
+ */
+
+/**
+ * @brief Single shot measurement.
+ *
+ * @par Function Description
+ * Perform a simple measurement sequence (start a measurement, wait for it
+ * to end, and return when the measurement is done).
+ * Once the function returns, the user can get valid data by calling
+ * VL_GetRangingMeasurement or VL_GetHistogramMeasurement
+ * depending on the defined measurement mode.
+ * The user should clear the interrupt, in case interrupts are enabled, by
+ * using the function VL_ClearInterruptMask().
+ *
+ * @warning This function is a blocking function
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                  Device Handle
+ * @return  VL_ERROR_NONE    Success
+ * @return  "Other error code"   See ::int8_t
+ */
+VL_API int8_t VL_PerformSingleMeasurement(struct vl_data *Dev);
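+
+/*
+ * Example (illustrative sketch, not part of the original ST documentation) of
+ * the single-shot sequence described above.  The RangeMilliMeter field and
+ * VL_ClearInterruptMask() are assumed from the rest of this API (they are not
+ * declared in this header); adjust to the actual definitions in
+ * vl53l0x_def.h.
+ *
+ *	int8_t example_single_shot(struct vl_data *Dev, uint16_t *range_mm)
+ *	{
+ *		struct VL_RangingMeasurementData_t data;
+ *		int8_t Status;
+ *
+ *		Status = VL_PerformSingleMeasurement(Dev);	// blocking
+ *		if (Status != VL_ERROR_NONE)
+ *			return Status;
+ *		Status = VL_GetRangingMeasurementData(Dev, &data);
+ *		if (Status == VL_ERROR_NONE)
+ *			*range_mm = data.RangeMilliMeter;  // assumed field
+ *		// Clear the interrupt if interrupts are enabled (see above).
+ *		VL_ClearInterruptMask(Dev, 0);	// assumed signature
+ *		return Status;
+ *	}
+ */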
+
+/**
+ * @brief Perform Reference Calibration
+ *
+ * @details Perform a reference calibration of the Device.
+ * This function should be run from time to time before doing
+ * a ranging measurement.
+ * This function will launch a special ranging measurement, so
+ * if interrupts are enabled an interrupt will be generated.
+ * This function will clear the generated interrupt automatically.
+ *
+ * @warning This function is a blocking function
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                  Device Handle
+ * @param   pVhvSettings         Pointer to vhv settings parameter.
+ * @param   pPhaseCal            Pointer to PhaseCal parameter.
+ * @return  VL_ERROR_NONE    Success
+ * @return  "Other error code"   See ::int8_t
+ */
+VL_API int8_t VL_PerformRefCalibration(struct vl_data *Dev,
+	uint8_t *pVhvSettings, uint8_t *pPhaseCal);
+
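+/*
+ * Example (illustrative sketch, not part of the original ST documentation):
+ * running the blocking reference calibration once, keeping the results, and
+ * re-applying them later through VL_SetRefCalibration() after a re-init so
+ * the calibration does not have to be repeated.
+ *
+ *	static uint8_t saved_vhv, saved_phase;
+ *
+ *	int8_t example_calibrate_once(struct vl_data *Dev)
+ *	{
+ *		return VL_PerformRefCalibration(Dev, &saved_vhv, &saved_phase);
+ *	}
+ *
+ *	int8_t example_restore_after_reinit(struct vl_data *Dev)
+ *	{
+ *		return VL_SetRefCalibration(Dev, saved_vhv, saved_phase);
+ *	}
+ */
+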
+/**
+ * @brief Perform XTalk Measurement
+ *
+ * @details Measures the current cross talk from glass in front
+ * of the sensor.
+ * This function performs a histogram measurement and uses the results
+ * to measure the crosstalk. For the function to be successful, there
+ * must be no target in front of the sensor.
+ *
+ * @warning This function is a blocking function
+ *
+ * @warning This function is not supported when the final range
+ * vcsel clock period is set below 10 PCLKS.
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                  Device Handle
+ * @param   TimeoutMs            Histogram measurement duration.
+ * @param   pXtalkPerSpad        Output parameter containing the crosstalk
+ * measurement result, in MCPS/Spad. Format fixpoint 16:16.
+ * @param   pAmbientTooHigh      Output parameter which indicate that
+ * pXtalkPerSpad is not good if the Ambient is too high.
+ * @return  VL_ERROR_NONE    Success
+ * @return  VL_ERROR_INVALID_PARAMS vcsel clock period not supported
+ * for this operation. Must not be less than 10PCLKS.
+ * @return  "Other error code"   See ::int8_t
+ */
+VL_API int8_t VL_PerformXTalkMeasurement(struct vl_data *Dev,
+	uint32_t TimeoutMs, unsigned int *pXtalkPerSpad,
+	uint8_t *pAmbientTooHigh);
+
+/**
+ * @brief Perform XTalk Calibration
+ *
+ * @details Perform a XTalk calibration of the Device.
+ * This function will launch a ranging measurement, if interrupts
+ * are enabled an interrupt will be generated.
+ * This function will clear the generated interrupt automatically.
+ * This function will program a new value for the XTalk compensation
+ * and it will enable the cross talk before exit.
+ * This function will disable the VL_CHECKENABLE_RANGE_IGNORE_THRESHOLD.
+ *
+ * @warning This function is a blocking function
+ *
+ * @note This function Access to the device
+ *
+ * @note This function change the device mode to
+ * VL_DEVICEMODE_SINGLE_RANGING
+ *
+ * @param   Dev                  Device Handle
+ * @param   XTalkCalDistance     XTalkCalDistance value used for the XTalk
+ * computation.
+ * @param   pXTalkCompensationRateMegaCps  Pointer to new
+ * XTalkCompensation value.
+ * @return  VL_ERROR_NONE    Success
+ * @return  "Other error code"   See ::int8_t
+ */
+VL_API int8_t VL_PerformXTalkCalibration(struct vl_data *Dev,
+	unsigned int XTalkCalDistance,
+	unsigned int *pXTalkCompensationRateMegaCps);
+
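+/*
+ * Example (illustrative sketch, not part of the original ST documentation):
+ * running the blocking crosstalk calibration against a target placed at a
+ * known distance and letting the API program the new compensation rate.
+ * Whether XTalkCalDistance is expressed in plain millimeters or in 16.16
+ * fixed point is not stated in this header; check vl53l0x_def.h before
+ * reusing the value passed below.
+ *
+ *	int8_t example_xtalk_cal(struct vl_data *Dev, unsigned int cal_distance)
+ *	{
+ *		unsigned int xtalk_rate_mcps = 0;
+ *
+ *		// cal_distance: distance of the calibration target, in the
+ *		// encoding expected by the API.
+ *		return VL_PerformXTalkCalibration(Dev, cal_distance,
+ *				&xtalk_rate_mcps);
+ *	}
+ */
+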
+/**
+ * @brief Perform Offset Calibration
+ *
+ * @details Perform an Offset calibration of the Device.
+ * This function will launch a ranging measurement; if interrupts are
+ * enabled an interrupt will be generated.
+ * This function will clear the generated interrupt automatically.
+ * This function will program a new value for the Offset calibration.
+ * This function will disable the VL_CHECKENABLE_RANGE_IGNORE_THRESHOLD.
+ *
+ * @warning This function is a blocking function
+ *
+ * @note This function Access to the device
+ *
+ * @note This function does not change the device mode.
+ *
+ * @param   Dev                  Device Handle
+ * @param   CalDistanceMilliMeter     Calibration distance value used for the
+ * offset compensation.
+ * @param   pOffsetMicroMeter  Pointer to new Offset value computed by the
+ * function.
+ *
+ * @return  VL_ERROR_NONE    Success
+ * @return  "Other error code"   See ::int8_t
+ */
+VL_API int8_t VL_PerformOffsetCalibration(struct vl_data *Dev,
+	unsigned int CalDistanceMilliMeter, int32_t *pOffsetMicroMeter);
+
+/**
+ * @brief Start device measurement
+ *
+ * @details Started measurement will depend on device parameters set through
+ * @a VL_SetParameters()
+ * This is a non-blocking function.
+ * This function will change the PAL state from VL_STATE_IDLE to
+ * VL_STATE_RUNNING.
+ *
+ * @note This function Access to the device
+ *
+ *
+ * @param   Dev                  Device Handle
+ * @return  VL_ERROR_NONE                  Success
+ * @return  VL_ERROR_MODE_NOT_SUPPORTED    This error occurs when
+ * DeviceMode programmed with @a VL_SetDeviceMode is not in the supported
+ * list:
+ *	Supported mode are:
+ *	VL_DEVICEMODE_SINGLE_RANGING,
+ *	VL_DEVICEMODE_CONTINUOUS_RANGING,
+ *	VL_DEVICEMODE_CONTINUOUS_TIMED_RANGING
+ * @return  VL_ERROR_TIME_OUT    Time out on start measurement
+ * @return  "Other error code"   See ::int8_t
+ */
+VL_API int8_t VL_StartMeasurement(struct vl_data *Dev);
+
+/**
+ * @brief Stop device measurement
+ *
+ * @details Will set the device in standby mode at the end of the current
+ *          measurement.\n
+ *          Not necessary in single mode, as the device automatically returns
+ *          to standby mode at the end of the measurement.
+ *          This function will change the PAL state from
+ *          VL_STATE_RUNNING to VL_STATE_IDLE.
+ *
+ * @note This function Access to the device
+ *
+ * @param   Dev                  Device Handle
+ * @return  VL_ERROR_NONE    Success
+ * @return  "Other error code"   See ::int8_t
+ */
+VL_API int8_t VL_StopMeasurement(struct vl_data *Dev);
+
+/**
+ * @brief Return Measurement Data Ready
+ *
+ * @par Function Description
+ * This function indicates whether measurement data is ready.
+ * If interrupt mode is used, the check is done accordingly.
+ * If a perform function clears the interrupt, this function will not work,
+ * as is the case with @a VL_PerformSingleRangingMeasurement().
+ * That function is blocking; VL_GetMeasurementDataReady() is used for
+ * non-blocking capture.
+ *
+ * @note This function accesses the device
+ *
+ * @param   Dev                    Device Handle
+ * @param   pMeasurementDataReady  Pointer to Measurement Data Ready.
+ *  0=data not ready, 1 = data ready
+ * @return  VL_ERROR_NONE      Success
+ * @return  "Other error code"     See ::int8_t
+ */
+VL_API int8_t VL_GetMeasurementDataReady(struct vl_data *Dev,
+	uint8_t *pMeasurementDataReady);
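+
+/**
+ * Illustrative usage sketch (editorial addition): a bounded non-blocking
+ * polling loop built from the functions declared in this header, assuming a
+ * device handle Dev is in scope and a continuous device mode has already
+ * been selected. msleep() is the usual kernel delay helper and the interrupt
+ * mask value passed to VL_ClearInterruptMask() is only a placeholder.
+ *
+ * @code
+ *	struct VL_RangingMeasurementData_t data;
+ *	uint32_t retries = VL_DEFAULT_MAX_LOOP;
+ *	uint8_t ready = 0;
+ *	int8_t status;
+ *
+ *	status = VL_StartMeasurement(Dev);
+ *	if (status != VL_ERROR_NONE)
+ *		return status;
+ *
+ *	do {
+ *		status = VL_GetMeasurementDataReady(Dev, &ready);
+ *		if (status != VL_ERROR_NONE || ready)
+ *			break;
+ *		msleep(5);	// poll period chosen arbitrarily
+ *	} while (--retries);
+ *
+ *	if (status == VL_ERROR_NONE && ready) {
+ *		status = VL_GetRangingMeasurementData(Dev, &data);
+ *		// Re-arm for the next sample in continuous mode.
+ *		VL_ClearInterruptMask(Dev, 0);
+ *	}
+ *
+ *	VL_StopMeasurement(Dev);
+ * @endcode
+ */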
+
+/**
+ * @brief Wait for device ready for a new measurement command.
+ * Blocking function.
+ *
+ * @note This function is not Implemented
+ *
+ * @param   Dev      Device Handle
+ * @param   MaxLoop    Max Number of polling loop (timeout).
+ * @return  VL_ERROR_NOT_IMPLEMENTED   Not implemented
+ */
+VL_API int8_t VL_WaitDeviceReadyForNewMeasurement(
+	struct vl_data *Dev, uint32_t MaxLoop);
+
+/**
+ * @brief Retrieve the Reference Signal after a measurements
+ *
+ * @par Function Description
+ * Get Reference Signal from last successful Ranging measurement
+ * This function returns a valid value only after
+ * @a VL_GetRangingMeasurementData() has been called.
+ *
+ * @note This function accesses the device
+ *
+ * @param   Dev                      Device Handle
+ * @param   pMeasurementRefSignal    Pointer to the Ref Signal to fill up.
+ * @return  VL_ERROR_NONE        Success
+ * @return  "Other error code"       See ::int8_t
+ */
+VL_API int8_t VL_GetMeasurementRefSignal(struct vl_data *Dev,
+	unsigned int *pMeasurementRefSignal);
+
+/**
+ * @brief Retrieve the measurements from device for a given setup
+ *
+ * @par Function Description
+ * Get data from last successful Ranging measurement
+ * @warning USER should check @a VL_GetNumberOfROIZones()
+ * before getting data.
+ * The PAL will fill the corresponding data structure used in the
+ * measurement function NumberOfROIZones times.
+ *
+ * @note This function accesses the device
+ *
+ * @param   Dev                      Device Handle
+ * @param   pRangingMeasurementData  Pointer to the data structure to fill up.
+ * @return  VL_ERROR_NONE        Success
+ * @return  "Other error code"       See ::int8_t
+ */
+VL_API int8_t VL_GetRangingMeasurementData(struct vl_data *Dev,
+	struct VL_RangingMeasurementData_t *pRangingMeasurementData);
+
+/**
+ * @brief Retrieve the measurements from device for a given setup
+ *
+ * @par Function Description
+ * Get data from last successful Histogram measurement
+ * @warning USER should check @a VL_GetNumberOfROIZones()
+ * before getting data.
+ * The PAL will fill the corresponding data structure used in the
+ * measurement function NumberOfROIZones times.
+ *
+ * @note This function is not Implemented
+ *
+ * @param   Dev                         Device Handle
+ * @param   pHistogramMeasurementData   Pointer to the histogram data structure.
+ * @return  VL_ERROR_NOT_IMPLEMENTED   Not implemented
+ */
+VL_API int8_t VL_GetHistogramMeasurementData(struct vl_data *Dev,
+	struct VL_HistogramMeasurementData_t *pHistogramMeasurementData);
+
+/**
+ * @brief Performs a single ranging measurement and retrieve the ranging
+ * measurement data
+ *
+ * @par Function Description
+ * This function will change the device mode to
+ * VL_DEVICEMODE_SINGLE_RANGING with @a VL_SetDeviceMode().
+ * It performs the measurement with @a VL_PerformSingleMeasurement().
+ * It gets the data from the last successful Ranging measurement with
+ * @a VL_GetRangingMeasurementData().
+ * Finally it clears the interrupt with @a VL_ClearInterruptMask().
+ *
+ * @note This function accesses the device
+ *
+ * @note This function changes the device mode to
+ * VL_DEVICEMODE_SINGLE_RANGING
+ *
+ * @param   Dev                       Device Handle
+ * @param   pRangingMeasurementData   Pointer to the data structure to fill up.
+ * @return  VL_ERROR_NONE         Success
+ * @return  "Other error code"        See ::int8_t
+ */
+VL_API int8_t VL_PerformSingleRangingMeasurement(
+	struct vl_data *Dev,
+	struct VL_RangingMeasurementData_t *pRangingMeasurementData);
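+
+/**
+ * Illustrative usage sketch (editorial addition): a minimal blocking
+ * single-shot read using the helper above, assuming a device handle Dev is
+ * in scope; pr_info() is used purely for illustration. RangeStatus == 0
+ * marks a valid measurement, as documented in vl53l0x_def.h.
+ *
+ * @code
+ *	struct VL_RangingMeasurementData_t meas;
+ *	int8_t status;
+ *
+ *	status = VL_PerformSingleRangingMeasurement(Dev, &meas);
+ *	if (status == VL_ERROR_NONE && meas.RangeStatus == 0)
+ *		pr_info("range: %u mm\n", (unsigned int)meas.RangeMilliMeter);
+ * @endcode
+ */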
+
+/**
+ * @brief Performs a single histogram measurement and retrieve the histogram
+ * measurement data
+ *   Is equivalent to VL_PerformSingleMeasurement +
+ *   VL_GetHistogramMeasurementData
+ *
+ * @par Function Description
+ * Get data from last successful Ranging measurement.
+ * This function will clear the interrupt if interrupts are enabled.
+ *
+ * @note This function is not Implemented
+ *
+ * @param   Dev                        Device Handle
+ * @param   pHistogramMeasurementData  Pointer to the data structure to fill up.
+ * @return  VL_ERROR_NOT_IMPLEMENTED   Not implemented
+ */
+VL_API int8_t VL_PerformSingleHistogramMeasurement(
+	struct vl_data *Dev,
+	struct VL_HistogramMeasurementData_t *pHistogramMeasurementData);
+
+/**
+ * @brief Set the number of ROI Zones to be used for a specific Device
+ *
+ * @par Function Description
+ * Set the number of ROI Zones to be used for a specific Device.
+ * The programmed value should be less than the max number of ROI Zones given
+ * with @a VL_GetMaxNumberOfROIZones().
+ * This version of the API manages only one zone.
+ *
+ * @param   Dev                           Device Handle
+ * @param   NumberOfROIZones              Number of ROI Zones to be used for a
+ *  specific Device.
+ * @return  VL_ERROR_NONE             Success
+ * @return  VL_ERROR_INVALID_PARAMS   This error is returned if
+ * NumberOfROIZones != 1
+ */
+VL_API int8_t VL_SetNumberOfROIZones(struct vl_data *Dev,
+	uint8_t NumberOfROIZones);
+
+/**
+ * @brief Get the number of ROI Zones managed by the Device
+ *
+ * @par Function Description
+ * Get number of ROI Zones managed by the Device
+ * USER should check @a VL_GetNumberOfROIZones()
+ * before getting data after a measurement has been performed.
+ * The PAL will fill the corresponding data structure used in the
+ * measurement function NumberOfROIZones times.
+ *
+ * @note This function does not access the device
+ *
+ * @param   Dev                   Device Handle
+ * @param   pNumberOfROIZones     Pointer to the Number of ROI Zones value.
+ * @return  VL_ERROR_NONE     Success
+ */
+VL_API int8_t VL_GetNumberOfROIZones(struct vl_data *Dev,
+	uint8_t *pNumberOfROIZones);
+
+/**
+ * @brief Get the Maximum number of ROI Zones managed by the Device
+ *
+ * @par Function Description
+ * Get Maximum number of ROI Zones managed by the Device.
+ *
+ * @note This function does not access the device
+ *
+ * @param   Dev                    Device Handle
+ * @param   pMaxNumberOfROIZones   Pointer to the Maximum Number
+ *  of ROI Zones value.
+ * @return  VL_ERROR_NONE      Success
+ */
+VL_API int8_t VL_GetMaxNumberOfROIZones(struct vl_data *Dev,
+	uint8_t *pMaxNumberOfROIZones);
+
+/** @} VL_measurement_group */
+
+/** @defgroup VL_interrupt_group VL53L0X Interrupt Functions
+ *  @brief    Functions used for interrupt managements
+ *  @{
+ */
+
+/**
+ * @brief Set the configuration of GPIO pin for a given device
+ *
+ * @note This function accesses the device
+ *
+ * @param   Dev                   Device Handle
+ * @param   Pin                   ID of the GPIO Pin
+ * @param   Functionality         Select Pin functionality.
+ *  Refer to ::uint8_t
+ * @param   DeviceMode            Device Mode associated to the Gpio.
+ * @param   Polarity              Set interrupt polarity. Active high
+ *   or active low see ::uint8_t
+ * @return  VL_ERROR_NONE                            Success
+ * @return  VL_ERROR_GPIO_NOT_EXISTING               Only Pin=0 is accepted.
+ * @return  VL_ERROR_GPIO_FUNCTIONALITY_NOT_SUPPORTED    This error occurs
+ * when the programmed Functionality is not in the supported list:
+ *                             Supported values are:
+ *                             VL_GPIOFUNCTIONALITY_OFF,
+ *                             VL_GPIOFUNCTIONALITY_THRESHOLD_CROSSED_LOW,
+ *                             VL_GPIOFUNCTIONALITY_THRESHOLD_CROSSED_HIGH,
+ *                             VL_GPIOFUNCTIONALITY_THRESHOLD_CROSSED_OUT,
+ *                             VL_GPIOFUNCTIONALITY_NEW_MEASURE_READY
+ * @return  "Other error code"    See ::int8_t
+ */
+VL_API int8_t VL_SetGpioConfig(struct vl_data *Dev, uint8_t Pin,
+	uint8_t DeviceMode, uint8_t Functionality,
+	uint8_t Polarity);
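+
+/**
+ * Illustrative usage sketch (editorial addition): routing the "new
+ * measurement ready" event to GPIO pin 0 as an active-low interrupt, assuming
+ * a device handle Dev is in scope. The VL_GPIOFUNCTIONALITY_* constant is
+ * assumed to be provided by vl53l0x_device.h, as referenced in the
+ * description above.
+ *
+ * @code
+ *	int8_t status;
+ *
+ *	status = VL_SetGpioConfig(Dev, 0,
+ *			VL_DEVICEMODE_CONTINUOUS_RANGING,
+ *			VL_GPIOFUNCTIONALITY_NEW_MEASURE_READY,
+ *			VL_INTERRUPTPOLARITY_LOW);
+ * @endcode
+ */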
+
+/**
+ * @brief Get current configuration for GPIO pin for a given device
+ *
+ * @note This function accesses the device
+ *
+ * @param   Dev                   Device Handle
+ * @param   Pin                   ID of the GPIO Pin
+ * @param   pDeviceMode           Pointer to Device Mode associated to the Gpio.
+ * @param   pFunctionality        Pointer to Pin functionality.
+ *  Refer to ::uint8_t
+ * @param   pPolarity             Pointer to interrupt polarity.
+ *  Active high or active low see ::uint8_t
+ * @return  VL_ERROR_NONE                            Success
+ * @return  VL_ERROR_GPIO_NOT_EXISTING               Only Pin=0 is accepted.
+ * @return  VL_ERROR_GPIO_FUNCTIONALITY_NOT_SUPPORTED   This error occurs
+ * when the programmed Functionality is not in the supported list:
+ *                      Supported values are:
+ *                      VL_GPIOFUNCTIONALITY_OFF,
+ *                      VL_GPIOFUNCTIONALITY_THRESHOLD_CROSSED_LOW,
+ *                      VL_GPIOFUNCTIONALITY_THRESHOLD_CROSSED_HIGH,
+ *                      VL_GPIOFUNCTIONALITY_THRESHOLD_CROSSED_OUT,
+ *                      VL_GPIOFUNCTIONALITY_NEW_MEASURE_READY
+ * @return  "Other error code"    See ::int8_t
+ */
+VL_API int8_t VL_GetGpioConfig(struct vl_data *Dev, uint8_t Pin,
+	uint8_t *pDeviceMode,
+	uint8_t *pFunctionality,
+	uint8_t *pPolarity);
+
+/**
+ * @brief Set low and high Interrupt thresholds for a given mode
+ * (ranging, ALS, ...) for a given device
+ *
+ * @par Function Description
+ * Set low and high Interrupt thresholds for a given mode (ranging, ALS, ...)
+ * for a given device
+ *
+ * @note This function accesses the device
+ *
+ * @note DeviceMode is ignored for the current device
+ *
+ * @param   Dev              Device Handle
+ * @param   DeviceMode       Device Mode for which change thresholds
+ * @param   ThresholdLow     Low threshold (mm, lux ..., depending on the mode)
+ * @param   ThresholdHigh    High threshold (mm, lux ..., depending on the mode)
+ * @return  VL_ERROR_NONE    Success
+ * @return  "Other error code"   See ::int8_t
+ */
+VL_API int8_t VL_SetInterruptThresholds(struct vl_data *Dev,
+	uint8_t DeviceMode, unsigned int ThresholdLow,
+	unsigned int ThresholdHigh);
+
+/**
+ * @brief  Get high and low Interrupt thresholds for a given mode
+ *  (ranging, ALS, ...) for a given device
+ *
+ * @par Function Description
+ * Get high and low Interrupt thresholds for a given mode (ranging, ALS, ...)
+ * for a given device
+ *
+ * @note This function accesses the device
+ *
+ * @note DeviceMode is ignored for the current device
+ *
+ * @param   Dev              Device Handle
+ * @param   DeviceMode       Device Mode from which read thresholds
+ * @param   pThresholdLow    Low threshold (mm, lux ..., depending on the mode)
+ * @param   pThresholdHigh   High threshold (mm, lux ..., depending on the mode)
+ * @return  VL_ERROR_NONE   Success
+ * @return  "Other error code"  See ::int8_t
+ */
+VL_API int8_t VL_GetInterruptThresholds(struct vl_data *Dev,
+	uint8_t DeviceMode, unsigned int *pThresholdLow,
+	unsigned int *pThresholdHigh);
+
+/**
+ * @brief Return device stop completion status
+ *
+ * @par Function Description
+ * Returns the stop completion status.
+ * The user shall call this function after a stop command.
+ *
+ * @note This function accesses the device
+ *
+ * @param   Dev                    Device Handle
+ * @param   pStopStatus            Pointer to status variable to update
+ * @return  VL_ERROR_NONE      Success
+ * @return  "Other error code"     See ::int8_t
+ */
+VL_API int8_t VL_GetStopCompletedStatus(struct vl_data *Dev,
+	uint32_t *pStopStatus);
+
+
+/**
+ * @brief Clear given system interrupt condition
+ *
+ * @par Function Description
+ * Clear given interrupt(s).
+ *
+ * @note This function accesses the device
+ *
+ * @param   Dev                  Device Handle
+ * @param   InterruptMask        Mask of interrupts to clear
+ * @return  VL_ERROR_NONE    Success
+ * @return  VL_ERROR_INTERRUPT_NOT_CLEARED    Cannot clear interrupts
+ *
+ * @return  "Other error code"   See ::int8_t
+ */
+VL_API int8_t VL_ClearInterruptMask(struct vl_data *Dev,
+	uint32_t InterruptMask);
+
+/**
+ * @brief Return device interrupt status
+ *
+ * @par Function Description
+ * Returns the interrupts currently raised by the device.
+ * The user can activate/deactivate interrupts through
+ * @a VL_SetGpioConfig()
+ *
+ * @note This function accesses the device
+ *
+ * @param   Dev                    Device Handle
+ * @param   pInterruptMaskStatus   Pointer to status variable to update
+ * @return  VL_ERROR_NONE      Success
+ * @return  "Other error code"     See ::int8_t
+ */
+VL_API int8_t VL_GetInterruptMaskStatus(struct vl_data *Dev,
+	uint32_t *pInterruptMaskStatus);
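+
+/**
+ * Illustrative usage sketch (editorial addition): a possible threaded
+ * interrupt handler body, pairing VL_GetInterruptMaskStatus() with
+ * VL_GetRangingMeasurementData() and VL_ClearInterruptMask(), assuming a
+ * device handle Dev is in scope. The non-zero mask test and the mask passed
+ * to the clear call are placeholders.
+ *
+ * @code
+ *	struct VL_RangingMeasurementData_t meas;
+ *	uint32_t mask = 0;
+ *
+ *	if (VL_GetInterruptMaskStatus(Dev, &mask) == VL_ERROR_NONE && mask) {
+ *		VL_GetRangingMeasurementData(Dev, &meas);
+ *		VL_ClearInterruptMask(Dev, mask);
+ *	}
+ * @endcode
+ */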
+
+/**
+ * @brief Configure ranging interrupt reported to system
+ *
+ * @note This function is not Implemented
+ *
+ * @param   Dev                  Device Handle
+ * @param   InterruptMask         Mask of interrupts to enable/disable
+ *  (0:interrupt disabled or 1: interrupt enabled)
+ * @return  VL_ERROR_NOT_IMPLEMENTED   Not implemented
+ */
+VL_API int8_t VL_EnableInterruptMask(struct vl_data *Dev,
+	uint32_t InterruptMask);
+
+/** @} VL_interrupt_group */
+
+/** @defgroup VL_SPADfunctions_group VL53L0X SPAD Functions
+ *  @brief    Functions used for SPAD managements
+ *  @{
+ */
+
+/**
+ * @brief  Set the SPAD Ambient Damper Threshold value
+ *
+ * @par Function Description
+ * This function sets the SPAD Ambient Damper Threshold value
+ *
+ * @note This function accesses the device
+ *
+ * @param   Dev                           Device Handle
+ * @param   SpadAmbientDamperThreshold    SPAD Ambient Damper Threshold value
+ * @return  VL_ERROR_NONE             Success
+ * @return  "Other error code"            See ::int8_t
+ */
+VL_API int8_t VL_SetSpadAmbientDamperThreshold(struct vl_data *Dev,
+	uint16_t SpadAmbientDamperThreshold);
+
+/**
+ * @brief  Get the current SPAD Ambient Damper Threshold value
+ *
+ * @par Function Description
+ * This function gets the SPAD Ambient Damper Threshold value
+ *
+ * @note This function accesses the device
+ *
+ * @param   Dev                           Device Handle
+ * @param   pSpadAmbientDamperThreshold   Pointer to programmed
+ *                                        SPAD Ambient Damper Threshold value
+ * @return  VL_ERROR_NONE             Success
+ * @return  "Other error code"            See ::int8_t
+ */
+VL_API int8_t VL_GetSpadAmbientDamperThreshold(struct vl_data *Dev,
+	uint16_t *pSpadAmbientDamperThreshold);
+
+/**
+ * @brief  Set the SPAD Ambient Damper Factor value
+ *
+ * @par Function Description
+ * This function sets the SPAD Ambient Damper Factor value
+ *
+ * @note This function accesses the device
+ *
+ * @param   Dev                           Device Handle
+ * @param   SpadAmbientDamperFactor       SPAD Ambient Damper Factor value
+ * @return  VL_ERROR_NONE             Success
+ * @return  "Other error code"            See ::int8_t
+ */
+VL_API int8_t VL_SetSpadAmbientDamperFactor(struct vl_data *Dev,
+	uint16_t SpadAmbientDamperFactor);
+
+/**
+ * @brief  Get the current SPAD Ambient Damper Factor value
+ *
+ * @par Function Description
+ * This function gets the SPAD Ambient Damper Factor value
+ *
+ * @note This function accesses the device
+ *
+ * @param   Dev                           Device Handle
+ * @param   pSpadAmbientDamperFactor      Pointer to programmed SPAD Ambient
+ * Damper Factor value
+ * @return  VL_ERROR_NONE             Success
+ * @return  "Other error code"            See ::int8_t
+ */
+VL_API int8_t VL_GetSpadAmbientDamperFactor(struct vl_data *Dev,
+	uint16_t *pSpadAmbientDamperFactor);
+
+/**
+ * @brief Performs Reference Spad Management
+ *
+ * @par Function Description
+ * The reference SPAD initialization procedure determines the minimum number
+ * of reference SPADs to be enabled to achieve a target reference signal rate,
+ * and should be performed once during initialization.
+ *
+ * @note This function accesses the device
+ *
+ * @note This function changes the device mode to
+ * VL_DEVICEMODE_SINGLE_RANGING
+ *
+ * @param   Dev                          Device Handle
+ * @param   refSpadCount                 Reports ref Spad Count
+ * @param   isApertureSpads              Reports if spads are of type
+ *                                       aperture or non-aperture.
+ *                                       1:=aperture, 0:=Non-Aperture
+ * @return  VL_ERROR_NONE            Success
+ * @return  VL_ERROR_REF_SPAD_INIT   Error in the Ref Spad procedure.
+ * @return  "Other error code"           See ::int8_t
+ */
+VL_API int8_t VL_PerformRefSpadManagement(struct vl_data *Dev,
+	uint32_t *refSpadCount, uint8_t *isApertureSpads);
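+
+/**
+ * Illustrative usage sketch (editorial addition): running the reference SPAD
+ * procedure once during initialization and caching the result so it can later
+ * be re-applied with VL_SetReferenceSpads() (declared below) instead of being
+ * re-measured; Dev is assumed to be a valid device handle.
+ *
+ * @code
+ *	uint32_t spad_count = 0;
+ *	uint8_t is_aperture = 0;
+ *	int8_t status;
+ *
+ *	status = VL_PerformRefSpadManagement(Dev, &spad_count, &is_aperture);
+ *	if (status == VL_ERROR_NONE) {
+ *		// Persist spad_count/is_aperture (e.g. in NVM or driver data)
+ *		// and restore them on later boots:
+ *		status = VL_SetReferenceSpads(Dev, spad_count, is_aperture);
+ *	}
+ * @endcode
+ */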
+
+/**
+ * @brief Applies Reference SPAD configuration
+ *
+ * @par Function Description
+ * This function applies a given number of reference spads, identified as
+ * either Aperture or Non-Aperture.
+ * The requested spad count and type are stored within the device specific
+ * parameters data for access by the host.
+ *
+ * @note This function accesses the device
+ *
+ * @param   Dev                          Device Handle
+ * @param   refSpadCount                 Number of ref spads.
+ * @param   isApertureSpads              Defines if spads are of type
+ *                                       aperture or non-aperture.
+ *                                       1:=aperture, 0:=Non-Aperture
+ * @return  VL_ERROR_NONE            Success
+ * @return  VL_ERROR_REF_SPAD_INIT   Error in the reference
+ *                                       spad configuration.
+ * @return  "Other error code"           See ::int8_t
+ */
+VL_API int8_t VL_SetReferenceSpads(struct vl_data *Dev,
+	uint32_t refSpadCount, uint8_t isApertureSpads);
+
+/**
+ * @brief Retrieves SPAD configuration
+ *
+ * @par Function Description
+ * This function retrieves the current number of applied reference spads
+ * and also their type: Aperture or Non-Aperture.
+ *
+ * @note This function accesses the device
+ *
+ * @param   Dev                          Device Handle
+ * @param   refSpadCount                 Number ref Spad Count
+ * @param   isApertureSpads              Reports if spads are of type
+ *                                       aperture or non-aperture.
+ *                                       1:=aperture, 0:=Non-Aperture
+ * @return  VL_ERROR_NONE            Success
+ * @return  VL_ERROR_REF_SPAD_INIT   Error in the reference
+ *                                       spad configuration.
+ * @return  "Other error code"           See ::int8_t
+ */
+VL_API int8_t VL_GetReferenceSpads(struct vl_data *Dev,
+	uint32_t *refSpadCount, uint8_t *isApertureSpads);
+
+/** @} VL_SPADfunctions_group */
+
+/** @} VL_cut11_group */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _VL_API_H_ */
diff --git a/drivers/input/misc/vl53l0x/inc/vl53l0x_api_calibration.h b/drivers/input/misc/vl53l0x/inc/vl53l0x_api_calibration.h
new file mode 100644
index 0000000..5964e0b
--- /dev/null
+++ b/drivers/input/misc/vl53l0x/inc/vl53l0x_api_calibration.h
@@ -0,0 +1,75 @@
+/*
+ *  vl53l0x_api_calibration.h - Linux kernel modules for
+ *  STM VL53L0 FlightSense TOF sensor
+ *
+ *  Copyright (C) 2016 STMicroelectronics Imaging Division.
+ *  Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#ifndef _VL_API_CALIBRATION_H_
+#define _VL_API_CALIBRATION_H_
+
+#include "vl53l0x_def.h"
+#include "vl53l0x_platform.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int8_t VL_perform_xtalk_calibration(struct vl_data *Dev,
+		unsigned int XTalkCalDistance,
+		unsigned int *pXTalkCompensationRateMegaCps);
+
+int8_t VL_perform_offset_calibration(struct vl_data *Dev,
+		unsigned int CalDistanceMilliMeter,
+		int32_t *pOffsetMicroMeter);
+
+int8_t VL_set_offset_calibration_data_micro_meter(struct vl_data *Dev,
+		int32_t OffsetCalibrationDataMicroMeter);
+
+int8_t VL_get_offset_calibration_data_micro_meter(struct vl_data *Dev,
+		int32_t *pOffsetCalibrationDataMicroMeter);
+
+int8_t VL_apply_offset_adjustment(struct vl_data *Dev);
+
+int8_t VL_perform_ref_spad_management(struct vl_data *Dev,
+		uint32_t *refSpadCount, uint8_t *isApertureSpads);
+
+int8_t VL_set_reference_spads(struct vl_data *Dev,
+		uint32_t count, uint8_t isApertureSpads);
+
+int8_t VL_get_reference_spads(struct vl_data *Dev,
+		uint32_t *pSpadCount, uint8_t *pIsApertureSpads);
+
+int8_t VL_perform_phase_calibration(struct vl_data *Dev,
+	uint8_t *pPhaseCal, const uint8_t get_data_enable,
+	const uint8_t restore_config);
+
+int8_t VL_perform_ref_calibration(struct vl_data *Dev,
+	uint8_t *pVhvSettings, uint8_t *pPhaseCal, uint8_t get_data_enable);
+
+int8_t VL_set_ref_calibration(struct vl_data *Dev,
+		uint8_t VhvSettings, uint8_t PhaseCal);
+
+int8_t VL_get_ref_calibration(struct vl_data *Dev,
+		uint8_t *pVhvSettings, uint8_t *pPhaseCal);
+
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _VL_API_CALIBRATION_H_ */
diff --git a/drivers/input/misc/vl53l0x/inc/vl53l0x_api_core.h b/drivers/input/misc/vl53l0x/inc/vl53l0x_api_core.h
new file mode 100644
index 0000000..ac368cf
--- /dev/null
+++ b/drivers/input/misc/vl53l0x/inc/vl53l0x_api_core.h
@@ -0,0 +1,98 @@
+/*
+ *  vl53l0x_api_core.h - Linux kernel modules for
+ *  STM VL53L0 FlightSense TOF sensor
+ *
+ *  Copyright (C) 2016 STMicroelectronics Imaging Division.
+ *  Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#ifndef _VL_API_CORE_H_
+#define _VL_API_CORE_H_
+
+#include "vl53l0x_def.h"
+#include "vl53l0x_platform.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+int8_t VL_reverse_bytes(uint8_t *data, uint32_t size);
+
+int8_t VL_measurement_poll_for_completion(struct vl_data *Dev);
+
+uint8_t VL_encode_vcsel_period(uint8_t vcsel_period_pclks);
+
+uint8_t VL_decode_vcsel_period(uint8_t vcsel_period_reg);
+
+uint32_t VL_isqrt(uint32_t num);
+
+uint32_t VL_quadrature_sum(uint32_t a, uint32_t b);
+
+int8_t VL_get_info_from_device(struct vl_data *Dev, uint8_t option);
+
+int8_t VL_set_vcsel_pulse_period(struct vl_data *Dev,
+	uint8_t VcselPeriodType, uint8_t VCSELPulsePeriodPCLK);
+
+int8_t VL_get_vcsel_pulse_period(struct vl_data *Dev,
+	uint8_t VcselPeriodType, uint8_t *pVCSELPulsePeriodPCLK);
+
+uint32_t VL_decode_timeout(uint16_t encoded_timeout);
+
+int8_t get_sequence_step_timeout(struct vl_data *Dev,
+			uint8_t SequenceStepId,
+			uint32_t *pTimeOutMicroSecs);
+
+int8_t set_sequence_step_timeout(struct vl_data *Dev,
+			uint8_t SequenceStepId,
+			uint32_t TimeOutMicroSecs);
+
+int8_t VL_set_measurement_timing_budget_micro_seconds(
+	struct vl_data *Dev, uint32_t MeasurementTimingBudgetMicroSeconds);
+
+int8_t VL_get_measurement_timing_budget_micro_seconds(
+	struct vl_data *Dev, uint32_t *pMeasurementTimingBudgetMicroSeconds);
+
+int8_t VL_load_tuning_settings(struct vl_data *Dev,
+		uint8_t *pTuningSettingBuffer);
+
+int8_t VL_calc_sigma_estimate(struct vl_data *Dev,
+		struct VL_RangingMeasurementData_t *pRangingMeasurementData,
+		unsigned int *pSigmaEstimate, uint32_t *pDmax_mm);
+
+int8_t VL_get_total_xtalk_rate(struct vl_data *Dev,
+	struct VL_RangingMeasurementData_t *pRangingMeasurementData,
+	unsigned int *ptotal_xtalk_rate_mcps);
+
+int8_t VL_get_total_signal_rate(struct vl_data *Dev,
+	struct VL_RangingMeasurementData_t *pRangingMeasurementData,
+	unsigned int *ptotal_signal_rate_mcps);
+
+int8_t VL_get_pal_range_status(struct vl_data *Dev,
+		 uint8_t DeviceRangeStatus,
+		 unsigned int SignalRate,
+		 uint16_t EffectiveSpadRtnCount,
+		 struct VL_RangingMeasurementData_t *pRangingMeasurementData,
+		 uint8_t *pPalRangeStatus);
+
+uint32_t VL_calc_timeout_mclks(struct vl_data *Dev,
+	uint32_t timeout_period_us, uint8_t vcsel_period_pclks);
+
+uint16_t VL_encode_timeout(uint32_t timeout_macro_clks);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _VL_API_CORE_H_ */
diff --git a/drivers/input/misc/vl53l0x/inc/vl53l0x_api_ranging.h b/drivers/input/misc/vl53l0x/inc/vl53l0x_api_ranging.h
new file mode 100644
index 0000000..e6b48fc
--- /dev/null
+++ b/drivers/input/misc/vl53l0x/inc/vl53l0x_api_ranging.h
@@ -0,0 +1,37 @@
+/*
+ *  vl53l0x_api_ranging.h - Linux kernel modules for
+ *  STM VL53L0 FlightSense TOF sensor
+ *
+ *  Copyright (C) 2016 STMicroelectronics Imaging Division.
+ *  Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#ifndef _VL_API_RANGING_H_
+#define _VL_API_RANGING_H_
+
+#include "vl53l0x_def.h"
+#include "vl53l0x_platform.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _VL_API_RANGING_H_ */
diff --git a/drivers/input/misc/vl53l0x/inc/vl53l0x_api_strings.h b/drivers/input/misc/vl53l0x/inc/vl53l0x_api_strings.h
new file mode 100644
index 0000000..78a2dcd
--- /dev/null
+++ b/drivers/input/misc/vl53l0x/inc/vl53l0x_api_strings.h
@@ -0,0 +1,268 @@
+/*
+ *  vl53l0x_api_strings.h - Linux kernel modules for
+ *  STM VL53L0 FlightSense TOF sensor
+ *
+ *  Copyright (C) 2016 STMicroelectronics Imaging Division.
+ *  Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+
+#ifndef VL_API_STRINGS_H_
+#define VL_API_STRINGS_H_
+
+#include "vl53l0x_def.h"
+#include "vl53l0x_platform.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+int8_t VL_get_device_info(struct vl_data *Dev,
+			struct VL_DeviceInfo_t *pVL_DeviceInfo);
+
+int8_t VL_get_device_error_string(uint8_t ErrorCode,
+		char *pDeviceErrorString);
+
+int8_t VL_get_range_status_string(uint8_t RangeStatus,
+		char *pRangeStatusString);
+
+int8_t VL_get_pal_error_string(int8_t PalErrorCode,
+		char *pPalErrorString);
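+
+/**
+ * Illustrative usage sketch (editorial addition): turning a PAL status code
+ * into a log message. VL_MAX_STRING_LENGTH (from vl53l0x_def.h) is assumed to
+ * be large enough for every PAL error string defined below, and pr_err() is
+ * used purely for illustration.
+ *
+ * @code
+ *	char buf[VL_MAX_STRING_LENGTH];
+ *
+ *	if (status != VL_ERROR_NONE &&
+ *	    VL_get_pal_error_string(status, buf) == VL_ERROR_NONE)
+ *		pr_err("vl53l0x: %s\n", buf);
+ * @endcode
+ */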
+
+int8_t VL_get_pal_state_string(uint8_t PalStateCode,
+		char *pPalStateString);
+
+int8_t VL_get_sequence_steps_info(
+		uint8_t SequenceStepId,
+		char *pSequenceStepsString);
+
+int8_t VL_get_limit_check_info(struct vl_data *Dev,
+	uint16_t LimitCheckId, char *pLimitCheckString);
+
+
+#ifdef USE_EMPTY_STRING
+	#define  VL_STRING_DEVICE_INFO_NAME                             ""
+	#define  VL_STRING_DEVICE_INFO_NAME_TS0                         ""
+	#define  VL_STRING_DEVICE_INFO_NAME_TS1                         ""
+	#define  VL_STRING_DEVICE_INFO_NAME_TS2                         ""
+	#define  VL_STRING_DEVICE_INFO_NAME_ES1                         ""
+	#define  VL_STRING_DEVICE_INFO_TYPE                             ""
+
+	/* PAL ERROR strings */
+	#define  VL_STRING_ERROR_NONE                                   ""
+	#define  VL_STRING_ERROR_CALIBRATION_WARNING                    ""
+	#define  VL_STRING_ERROR_MIN_CLIPPED                            ""
+	#define  VL_STRING_ERROR_UNDEFINED                              ""
+	#define  VL_STRING_ERROR_INVALID_PARAMS                         ""
+	#define  VL_STRING_ERROR_NOT_SUPPORTED                          ""
+	#define  VL_STRING_ERROR_RANGE_ERROR                            ""
+	#define  VL_STRING_ERROR_TIME_OUT                               ""
+	#define  VL_STRING_ERROR_MODE_NOT_SUPPORTED                     ""
+	#define  VL_STRING_ERROR_BUFFER_TOO_SMALL                       ""
+	#define  VL_STRING_ERROR_GPIO_NOT_EXISTING                      ""
+	#define  VL_STRING_ERROR_GPIO_FUNCTIONALITY_NOT_SUPPORTED       ""
+	#define  VL_STRING_ERROR_CONTROL_INTERFACE                      ""
+	#define  VL_STRING_ERROR_INVALID_COMMAND                        ""
+	#define  VL_STRING_ERROR_DIVISION_BY_ZERO                       ""
+	#define  VL_STRING_ERROR_REF_SPAD_INIT                          ""
+	#define  VL_STRING_ERROR_NOT_IMPLEMENTED                        ""
+
+	#define  VL_STRING_UNKNOWN_ERROR_CODE                            ""
+
+
+
+	/* Range Status */
+	#define  VL_STRING_RANGESTATUS_NONE                             ""
+	#define  VL_STRING_RANGESTATUS_RANGEVALID                       ""
+	#define  VL_STRING_RANGESTATUS_SIGMA                            ""
+	#define  VL_STRING_RANGESTATUS_SIGNAL                           ""
+	#define  VL_STRING_RANGESTATUS_MINRANGE                         ""
+	#define  VL_STRING_RANGESTATUS_PHASE                            ""
+	#define  VL_STRING_RANGESTATUS_HW                               ""
+
+
+	/* PAL State */
+	#define  VL_STRING_STATE_POWERDOWN                              ""
+	#define  VL_STRING_STATE_WAIT_STATICINIT                        ""
+	#define  VL_STRING_STATE_STANDBY                                ""
+	#define  VL_STRING_STATE_IDLE                                   ""
+	#define  VL_STRING_STATE_RUNNING                                ""
+	#define  VL_STRING_STATE_UNKNOWN                                ""
+	#define  VL_STRING_STATE_ERROR                                  ""
+
+
+	/* Device Specific */
+	#define  VL_STRING_DEVICEERROR_NONE                             ""
+	#define  VL_STRING_DEVICEERROR_VCSELCONTINUITYTESTFAILURE       ""
+	#define  VL_STRING_DEVICEERROR_VCSELWATCHDOGTESTFAILURE         ""
+	#define  VL_STRING_DEVICEERROR_NOVHVVALUEFOUND                  ""
+	#define  VL_STRING_DEVICEERROR_MSRCNOTARGET                     ""
+	#define  VL_STRING_DEVICEERROR_SNRCHECK                         ""
+	#define  VL_STRING_DEVICEERROR_RANGEPHASECHECK                  ""
+	#define  VL_STRING_DEVICEERROR_SIGMATHRESHOLDCHECK              ""
+	#define  VL_STRING_DEVICEERROR_TCC                              ""
+	#define  VL_STRING_DEVICEERROR_PHASECONSISTENCY                 ""
+	#define  VL_STRING_DEVICEERROR_MINCLIP                          ""
+	#define  VL_STRING_DEVICEERROR_RANGECOMPLETE                    ""
+	#define  VL_STRING_DEVICEERROR_ALGOUNDERFLOW                    ""
+	#define  VL_STRING_DEVICEERROR_ALGOOVERFLOW                     ""
+	#define  VL_STRING_DEVICEERROR_RANGEIGNORETHRESHOLD             ""
+	#define  VL_STRING_DEVICEERROR_UNKNOWN                          ""
+
+	/* Check Enable */
+	#define  VL_STRING_CHECKENABLE_SIGMA_FINAL_RANGE                ""
+	#define  VL_STRING_CHECKENABLE_SIGNAL_RATE_FINAL_RANGE          ""
+	#define  VL_STRING_CHECKENABLE_SIGNAL_REF_CLIP                  ""
+	#define  VL_STRING_CHECKENABLE_RANGE_IGNORE_THRESHOLD           ""
+
+	/* Sequence Step */
+	#define  VL_STRING_SEQUENCESTEP_TCC                             ""
+	#define  VL_STRING_SEQUENCESTEP_DSS                             ""
+	#define  VL_STRING_SEQUENCESTEP_MSRC                            ""
+	#define  VL_STRING_SEQUENCESTEP_PRE_RANGE                       ""
+	#define  VL_STRING_SEQUENCESTEP_FINAL_RANGE                     ""
+#else
+	#define  VL_STRING_DEVICE_INFO_NAME          "VL53L0X cut1.0"
+	#define  VL_STRING_DEVICE_INFO_NAME_TS0      "VL53L0X TS0"
+	#define  VL_STRING_DEVICE_INFO_NAME_TS1      "VL53L0X TS1"
+	#define  VL_STRING_DEVICE_INFO_NAME_TS2      "VL53L0X TS2"
+	#define  VL_STRING_DEVICE_INFO_NAME_ES1      "VL53L0X ES1 or later"
+	#define  VL_STRING_DEVICE_INFO_TYPE          "VL53L0X"
+
+	/* PAL ERROR strings */
+	#define  VL_STRING_ERROR_NONE \
+			"No Error"
+	#define  VL_STRING_ERROR_CALIBRATION_WARNING \
+			"Calibration Warning Error"
+	#define  VL_STRING_ERROR_MIN_CLIPPED \
+			"Min clipped error"
+	#define  VL_STRING_ERROR_UNDEFINED \
+			"Undefined error"
+	#define  VL_STRING_ERROR_INVALID_PARAMS \
+			"Invalid parameters error"
+	#define  VL_STRING_ERROR_NOT_SUPPORTED \
+			"Not supported error"
+	#define  VL_STRING_ERROR_RANGE_ERROR \
+			"Range error"
+	#define  VL_STRING_ERROR_TIME_OUT \
+			"Time out error"
+	#define  VL_STRING_ERROR_MODE_NOT_SUPPORTED \
+			"Mode not supported error"
+	#define  VL_STRING_ERROR_BUFFER_TOO_SMALL \
+			"Buffer too small"
+	#define  VL_STRING_ERROR_GPIO_NOT_EXISTING \
+			"GPIO not existing"
+	#define  VL_STRING_ERROR_GPIO_FUNCTIONALITY_NOT_SUPPORTED \
+			"GPIO funct not supported"
+	#define  VL_STRING_ERROR_INTERRUPT_NOT_CLEARED \
+			"Interrupt not Cleared"
+	#define  VL_STRING_ERROR_CONTROL_INTERFACE \
+			"Control Interface Error"
+	#define  VL_STRING_ERROR_INVALID_COMMAND \
+			"Invalid Command Error"
+	#define  VL_STRING_ERROR_DIVISION_BY_ZERO \
+			"Division by zero Error"
+	#define  VL_STRING_ERROR_REF_SPAD_INIT \
+			"Reference Spad Init Error"
+	#define  VL_STRING_ERROR_NOT_IMPLEMENTED \
+			"Not implemented error"
+
+	#define  VL_STRING_UNKNOWN_ERROR_CODE \
+			"Unknown Error Code"
+
+
+
+	/* Range Status */
+	#define  VL_STRING_RANGESTATUS_NONE                 "No Update"
+	#define  VL_STRING_RANGESTATUS_RANGEVALID           "Range Valid"
+	#define  VL_STRING_RANGESTATUS_SIGMA                "Sigma Fail"
+	#define  VL_STRING_RANGESTATUS_SIGNAL               "Signal Fail"
+	#define  VL_STRING_RANGESTATUS_MINRANGE          "Min Range Fail"
+	#define  VL_STRING_RANGESTATUS_PHASE                "Phase Fail"
+	#define  VL_STRING_RANGESTATUS_HW                   "Hardware Fail"
+
+
+	/* PAL State */
+	#define  VL_STRING_STATE_POWERDOWN               "POWERDOWN State"
+	#define  VL_STRING_STATE_WAIT_STATICINIT \
+			"Wait for staticinit State"
+	#define  VL_STRING_STATE_STANDBY                 "STANDBY State"
+	#define  VL_STRING_STATE_IDLE                    "IDLE State"
+	#define  VL_STRING_STATE_RUNNING                 "RUNNING State"
+	#define  VL_STRING_STATE_UNKNOWN                 "UNKNOWN State"
+	#define  VL_STRING_STATE_ERROR                   "ERROR State"
+
+
+	/* Device Specific */
+	#define  VL_STRING_DEVICEERROR_NONE                   "No Update"
+	#define  VL_STRING_DEVICEERROR_VCSELCONTINUITYTESTFAILURE \
+			"VCSEL Continuity Test Failure"
+	#define  VL_STRING_DEVICEERROR_VCSELWATCHDOGTESTFAILURE \
+			"VCSEL Watchdog Test Failure"
+	#define  VL_STRING_DEVICEERROR_NOVHVVALUEFOUND \
+			"No VHV Value found"
+	#define  VL_STRING_DEVICEERROR_MSRCNOTARGET \
+			"MSRC No Target Error"
+	#define  VL_STRING_DEVICEERROR_SNRCHECK \
+			"SNR Check Exit"
+	#define  VL_STRING_DEVICEERROR_RANGEPHASECHECK \
+			"Range Phase Check Error"
+	#define  VL_STRING_DEVICEERROR_SIGMATHRESHOLDCHECK \
+			"Sigma Threshold Check Error"
+	#define  VL_STRING_DEVICEERROR_TCC \
+			"TCC Error"
+	#define  VL_STRING_DEVICEERROR_PHASECONSISTENCY \
+			"Phase Consistency Error"
+	#define  VL_STRING_DEVICEERROR_MINCLIP \
+			"Min Clip Error"
+	#define  VL_STRING_DEVICEERROR_RANGECOMPLETE \
+			"Range Complete"
+	#define  VL_STRING_DEVICEERROR_ALGOUNDERFLOW \
+			"Range Algo Underflow Error"
+	#define  VL_STRING_DEVICEERROR_ALGOOVERFLOW \
+			"Range Algo Overflow Error"
+	#define  VL_STRING_DEVICEERROR_RANGEIGNORETHRESHOLD \
+			"Range Ignore Threshold Error"
+	#define  VL_STRING_DEVICEERROR_UNKNOWN \
+			"Unknown error code"
+
+	/* Check Enable */
+	#define  VL_STRING_CHECKENABLE_SIGMA_FINAL_RANGE \
+			"SIGMA FINAL RANGE"
+	#define  VL_STRING_CHECKENABLE_SIGNAL_RATE_FINAL_RANGE \
+			"SIGNAL RATE FINAL RANGE"
+	#define  VL_STRING_CHECKENABLE_SIGNAL_REF_CLIP \
+			"SIGNAL REF CLIP"
+	#define  VL_STRING_CHECKENABLE_RANGE_IGNORE_THRESHOLD \
+			"RANGE IGNORE THRESHOLD"
+	#define  VL_STRING_CHECKENABLE_SIGNAL_RATE_MSRC \
+			"SIGNAL RATE MSRC"
+	#define  VL_STRING_CHECKENABLE_SIGNAL_RATE_PRE_RANGE \
+			"SIGNAL RATE PRE RANGE"
+
+	/* Sequence Step */
+	#define  VL_STRING_SEQUENCESTEP_TCC                   "TCC"
+	#define  VL_STRING_SEQUENCESTEP_DSS                   "DSS"
+	#define  VL_STRING_SEQUENCESTEP_MSRC                  "MSRC"
+	#define  VL_STRING_SEQUENCESTEP_PRE_RANGE             "PRE RANGE"
+	#define  VL_STRING_SEQUENCESTEP_FINAL_RANGE           "FINAL RANGE"
+#endif /* USE_EMPTY_STRING */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
diff --git a/drivers/input/misc/vl53l0x/inc/vl53l0x_def.h b/drivers/input/misc/vl53l0x/inc/vl53l0x_def.h
new file mode 100644
index 0000000..50495a0
--- /dev/null
+++ b/drivers/input/misc/vl53l0x/inc/vl53l0x_def.h
@@ -0,0 +1,619 @@
+/*
+ *  vl53l0x_def.h - Linux kernel modules for
+ *  STM VL53L0 FlightSense TOF sensor
+ *
+ *  Copyright (C) 2016 STMicroelectronics Imaging Division.
+ *  Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+/**
+ * @file vl53l0x_def.h
+ *
+ * @brief Type definitions for VL53L0X API.
+ *
+ */
+
+
+#ifndef _VL_DEF_H_
+#define _VL_DEF_H_
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @defgroup VL_globaldefine_group VL53L0X Defines
+ *	@brief	  VL53L0X Defines
+ *	@{
+ */
+
+
+/** PAL SPECIFICATION major version */
+#define VL53L0X10_SPECIFICATION_VER_MAJOR   1
+/** PAL SPECIFICATION minor version */
+#define VL53L0X10_SPECIFICATION_VER_MINOR   2
+/** PAL SPECIFICATION sub version */
+#define VL53L0X10_SPECIFICATION_VER_SUB	   7
+/** PAL SPECIFICATION sub version */
+#define VL53L0X10_SPECIFICATION_VER_REVISION 1440
+
+/** VL53L0X PAL IMPLEMENTATION major version */
+#define VL53L0X10_IMPLEMENTATION_VER_MAJOR	1
+/** VL53L0X PAL IMPLEMENTATION minor version */
+#define VL53L0X10_IMPLEMENTATION_VER_MINOR	0
+/** VL53L0X PAL IMPLEMENTATION sub version */
+#define VL53L0X10_IMPLEMENTATION_VER_SUB		9
+/** VL53L0X PAL IMPLEMENTATION sub version */
+#define VL53L0X10_IMPLEMENTATION_VER_REVISION	3673
+
+/** PAL SPECIFICATION major version */
+#define VL_SPECIFICATION_VER_MAJOR	 1
+/** PAL SPECIFICATION minor version */
+#define VL_SPECIFICATION_VER_MINOR	 2
+/** PAL SPECIFICATION sub version */
+#define VL_SPECIFICATION_VER_SUB	 7
+/** PAL SPECIFICATION sub version */
+#define VL_SPECIFICATION_VER_REVISION 1440
+
+/** VL53L0X PAL IMPLEMENTATION major version */
+#define VL_IMPLEMENTATION_VER_MAJOR	  1
+/** VL53L0X PAL IMPLEMENTATION minor version */
+#define VL_IMPLEMENTATION_VER_MINOR	  0
+/** VL53L0X PAL IMPLEMENTATION sub version */
+#define VL_IMPLEMENTATION_VER_SUB	  2
+/** VL53L0X PAL IMPLEMENTATION sub version */
+#define VL_IMPLEMENTATION_VER_REVISION	  4823
+#define VL_DEFAULT_MAX_LOOP 2000
+#define VL_MAX_STRING_LENGTH 32
+
+
+#include "vl53l0x_device.h"
+#include "vl53l0x_types.h"
+
+
+/****************************************
+ * PRIVATE define do not edit
+ ****************************************/
+
+/** @brief Defines the parameters of the Get Version Functions
+ */
+struct VL_Version_t {
+	uint32_t	 revision; /*!< revision number */
+	uint8_t		 major;	   /*!< major number */
+	uint8_t		 minor;	   /*!< minor number */
+	uint8_t		 build;	   /*!< build number */
+};
+
+
+/** @brief Defines the parameters of the Get Device Info Functions
+ */
+struct VL_DeviceInfo_t {
+	char Name[VL_MAX_STRING_LENGTH];
+		/*!< Name of the Device e.g. Left_Distance */
+	char Type[VL_MAX_STRING_LENGTH];
+		/*!< Type of the Device e.g VL53L0X */
+	char ProductId[VL_MAX_STRING_LENGTH];
+		/*!< Product Identifier String	*/
+	uint8_t ProductType;
+		/*!< Product Type, VL53L0X = 1, VL53L1 = 2 */
+	uint8_t ProductRevisionMajor;
+		/*!< Product revision major */
+	uint8_t ProductRevisionMinor;
+		/*!< Product revision minor */
+};
+
+
+/** @defgroup VL_define_Error_group Error and Warning codes returned by the API
+ *	The following DEFINEs are used to identify the PAL ERROR
+ *	@{
+ */
+
+#define VL_ERROR_NONE		((int8_t)	0)
+#define VL_ERROR_CALIBRATION_WARNING	((int8_t) -1)
+	/*!< Warning: invalid calibration data may be in use*/
+		/*\a VL_InitData()*/
+		/*\a VL_GetOffsetCalibrationData*/
+		/*\a VL_SetOffsetCalibrationData */
+#define VL_ERROR_MIN_CLIPPED			((int8_t) -2)
+	/*!< Warning: parameter passed was clipped to min before being applied */
+
+#define VL_ERROR_UNDEFINED				((int8_t) -3)
+	/*!< Unqualified error */
+#define VL_ERROR_INVALID_PARAMS			((int8_t) -4)
+	/*!< Parameter passed is invalid or out of range */
+#define VL_ERROR_NOT_SUPPORTED			((int8_t) -5)
+	/*!< Function is not supported in current mode or configuration */
+#define VL_ERROR_RANGE_ERROR			((int8_t) -6)
+	/*!< Device reported a ranging error interrupt status */
+#define VL_ERROR_TIME_OUT				((int8_t) -7)
+	/*!< Aborted due to time out */
+#define VL_ERROR_MODE_NOT_SUPPORTED		((int8_t) -8)
+	/*!< Requested mode is not supported by the device */
+#define VL_ERROR_BUFFER_TOO_SMALL			((int8_t) -9)
+	/*!< ... */
+#define VL_ERROR_GPIO_NOT_EXISTING			((int8_t) -10)
+	/*!< User tried to setup a non-existing GPIO pin */
+#define VL_ERROR_GPIO_FUNCTIONALITY_NOT_SUPPORTED  ((int8_t) -11)
+	/*!< unsupported GPIO functionality */
+#define VL_ERROR_INTERRUPT_NOT_CLEARED		((int8_t) -12)
+	/*!< Error during interrupt clear */
+#define VL_ERROR_CONTROL_INTERFACE			((int8_t) -20)
+	/*!< error reported from IO functions */
+#define VL_ERROR_INVALID_COMMAND			((int8_t) -30)
+	/*!< The command is not allowed in the current device state*/
+	/* *	(power down) */
+#define VL_ERROR_DIVISION_BY_ZERO			((int8_t) -40)
+	/*!< In the function a division by zero occurs */
+#define VL_ERROR_REF_SPAD_INIT			((int8_t) -50)
+	/*!< Error during reference SPAD initialization */
+#define VL_ERROR_NOT_IMPLEMENTED			((int8_t) -99)
+	/*!< Tells that the requested functionality has not been implemented*/
+	/* * yet or is not compatible with the device */
+/** @} VL_define_Error_group */
+
+
+/** @defgroup VL_define_DeviceModes_group Defines Device modes
+ *	Defines all possible modes for the device
+ *	@{
+ */
+
+#define VL_DEVICEMODE_SINGLE_RANGING	((uint8_t)  0)
+#define VL_DEVICEMODE_CONTINUOUS_RANGING	((uint8_t)  1)
+#define VL_DEVICEMODE_SINGLE_HISTOGRAM	((uint8_t)  2)
+#define VL_DEVICEMODE_CONTINUOUS_TIMED_RANGING ((uint8_t) 3)
+#define VL_DEVICEMODE_SINGLE_ALS		((uint8_t) 10)
+#define VL_DEVICEMODE_GPIO_DRIVE		((uint8_t) 20)
+#define VL_DEVICEMODE_GPIO_OSC		((uint8_t) 21)
+	/* ... Modes to be added depending on device */
+/** @} VL_define_DeviceModes_group */
+
+
+
+/** @defgroup VL_define_HistogramModes_group Defines Histogram modes
+ *	Defines all possible Histogram modes for the device
+ *	@{
+ */
+
+#define VL_HISTOGRAMMODE_DISABLED		((uint8_t) 0)
+	/*!< Histogram Disabled */
+#define VL_HISTOGRAMMODE_REFERENCE_ONLY	((uint8_t) 1)
+	/*!< Histogram Reference array only */
+#define VL_HISTOGRAMMODE_RETURN_ONLY	((uint8_t) 2)
+	/*!< Histogram Return array only */
+#define VL_HISTOGRAMMODE_BOTH		((uint8_t) 3)
+	/*!< Histogram both Reference and Return Arrays */
+	/* ... Modes to be added depending on device */
+/** @} VL_define_HistogramModes_group */
+
+
+/** @defgroup VL_define_PowerModes_group List of available Power Modes
+ *	List of available Power Modes
+ *	@{
+ */
+
+#define VL_POWERMODE_STANDBY_LEVEL1 ((uint8_t) 0)
+	/*!< Standby level 1 */
+#define VL_POWERMODE_STANDBY_LEVEL2 ((uint8_t) 1)
+	/*!< Standby level 2 */
+#define VL_POWERMODE_IDLE_LEVEL1	((uint8_t) 2)
+	/*!< Idle level 1 */
+#define VL_POWERMODE_IDLE_LEVEL2	((uint8_t) 3)
+	/*!< Idle level 2 */
+
+/** @} VL_define_PowerModes_group */
+
+
+/** @brief Defines all parameters for the device
+ */
+struct VL_DeviceParameters_t {
+	uint8_t DeviceMode;
+	/*!< Defines type of measurement to be done for the next measure */
+	uint8_t HistogramMode;
+	/*!< Defines type of histogram measurement to be done for the next*/
+	/* *	measure */
+	uint32_t MeasurementTimingBudgetMicroSeconds;
+	/*!< Defines the allowed total time for a single measurement */
+	uint32_t InterMeasurementPeriodMilliSeconds;
+	/*!< Defines time between two consecutive measurements (between two*/
+	/* *	measurement starts). A value of 0 means back-to-back mode */
+	uint8_t XTalkCompensationEnable;
+	/*!< Tells if Crosstalk compensation shall be enabled or not */
+	uint16_t XTalkCompensationRangeMilliMeter;
+	/*!< CrossTalk compensation range in millimeter	 */
+	unsigned int XTalkCompensationRateMegaCps;
+	/*!< CrossTalk compensation rate in Mega counts per seconds.*/
+	/* *	Expressed in 16.16 fixed point format.	*/
+	int32_t RangeOffsetMicroMeters;
+	/*!< Range offset adjustment (mm).	*/
+
+	uint8_t LimitChecksEnable[VL_CHECKENABLE_NUMBER_OF_CHECKS];
+	/*!< This array stores all the Limit Check enables for this device. */
+	uint8_t LimitChecksStatus[VL_CHECKENABLE_NUMBER_OF_CHECKS];
+	/*!< This array stores the status of each check linked to the last*/
+	/** measurement. */
+	unsigned int LimitChecksValue[VL_CHECKENABLE_NUMBER_OF_CHECKS];
+	/*!< This array stores all the Limit Check values for this device */
+
+	uint8_t WrapAroundCheckEnable;
+	/*!< Tells if the Wrap Around Check shall be enabled or not */
+};
+
+
+/** @defgroup VL_define_State_group Defines the current status
+ *	of the device Defines the current status of the device
+ *	@{
+ */
+
+#define VL_STATE_POWERDOWN		 ((uint8_t)  0)
+	/*!< Device is in HW reset	*/
+#define VL_STATE_WAIT_STATICINIT ((uint8_t)  1)
+	/*!< Device is initialized and wait for static initialization  */
+#define VL_STATE_STANDBY		 ((uint8_t)  2)
+	/*!< Device is in Low power Standby mode   */
+#define VL_STATE_IDLE			 ((uint8_t)  3)
+	/*!< Device has been initialized and ready to do measurements  */
+#define VL_STATE_RUNNING		 ((uint8_t)  4)
+	/*!< Device is performing measurement */
+#define VL_STATE_UNKNOWN		 ((uint8_t)  98)
+	/*!< Device is in unknown state and need to be rebooted	 */
+#define VL_STATE_ERROR			 ((uint8_t)  99)
+	/*!< Device is in error state and need to be rebooted  */
+
+/** @} VL_define_State_group */
+
+
+/** @brief Structure containing the Dmax computation parameters and data
+ */
+struct VL_DMaxData_t {
+	int32_t AmbTuningWindowFactor_K;
+		/*!<  internal algo tuning (*1000) */
+	int32_t RetSignalAt0mm;
+		/*!< intermediate dmax computation value caching */
+};
+
+/**
+ * @struct VL_RangingMeasurementData_t
+ * @brief Range measurement data.
+ */
+struct VL_RangingMeasurementData_t {
+	uint32_t TimeStamp;		/*!< 32-bit time stamp. */
+	uint32_t MeasurementTimeUsec;
+		/*!< Gives the measurement time needed by the device to do */
+		/** the measurement.*/
+
+
+	uint16_t RangeMilliMeter;	/*!< range distance in millimeter. */
+
+	uint16_t RangeDMaxMilliMeter;
+		/*!< Tells what is the maximum detection distance of */
+		/* the device */
+		/* * in current setup and environment conditions (Filled when */
+		/* *	applicable) */
+
+	unsigned int SignalRateRtnMegaCps;
+		/*!< Return signal rate (MCPS)\n this is a 16.16 fixed point */
+		/* *	value, which is effectively a measure of target */
+		/* *	reflectance.*/
+	unsigned int AmbientRateRtnMegaCps;
+		/*!< Return ambient rate (MCPS)\n this is a 16.16 fixed point */
+		/* *	value, which is effectively a measure of the ambient */
+		/* *	light.*/
+
+	uint16_t EffectiveSpadRtnCount;
+		/*!< Return the effective SPAD count for the return signal. */
+		/* *	To obtain the real value it should be divided by 256 */
+
+	uint8_t ZoneId;
+		/*!< Denotes which zone and range scheduler stage the range */
+		/* *	data relates to. */
+	uint8_t RangeFractionalPart;
+		/*!< Fractional part of range distance. Final value is a */
+		/* *	FixPoint168 value. */
+	uint8_t RangeStatus;
+		/*!< Range Status for the current measurement. This is device */
+		/* *	dependent. Value = 0 means value is valid. */
+		/* *	See \ref RangeStatusPage */
+};
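+
+/**
+ * Illustrative usage sketch (editorial addition): interpreting the fields
+ * above. RangeStatus == 0 marks a valid sample; SignalRateRtnMegaCps is a
+ * 16.16 fixed-point value, so the integer MCPS part is obtained by shifting
+ * right by 16; the divide-by-256 rule for EffectiveSpadRtnCount comes from
+ * the field description above. pr_info() is used purely for illustration.
+ *
+ * @code
+ *	void show_range(const struct VL_RangingMeasurementData_t *m)
+ *	{
+ *		if (m->RangeStatus != 0)
+ *			return;	// not a valid measurement
+ *		pr_info("range %u mm, signal %u MCPS, spads %u\n",
+ *			(unsigned int)m->RangeMilliMeter,
+ *			m->SignalRateRtnMegaCps >> 16,
+ *			(unsigned int)(m->EffectiveSpadRtnCount / 256));
+ *	}
+ * @endcode
+ */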
+
+
+#define VL_HISTOGRAM_BUFFER_SIZE 24
+
+/**
+ * @struct VL_HistogramMeasurementData_t
+ * @brief Histogram measurement data.
+ */
+struct VL_HistogramMeasurementData_t {
+	/* Histogram Measurement data */
+	uint32_t HistogramData[VL_HISTOGRAM_BUFFER_SIZE];
+	/*!< Histogram data */
+	uint8_t HistogramType; /*!< Indicates the type of histogram data: */
+	/* Return only, Reference only, or both Return and Reference */
+	uint8_t FirstBin; /*!< First Bin value */
+	uint8_t BufferSize; /*!< Buffer Size - Set by the user.*/
+	uint8_t NumberOfBins;
+	/*!< Number of bins filled by the histogram measurement */
+
+	uint8_t ErrorStatus;
+	/*!< Error status of the current measurement. \n */
+	/* see @a ::uint8_t @a VL_GetStatusErrorString() */
+};
+
+#define VL_REF_SPAD_BUFFER_SIZE 6
+
+/**
+ * @struct VL_SpadData_t
+ * @brief Spad Configuration Data.
+ */
+struct VL_SpadData_t {
+	uint8_t RefSpadEnables[VL_REF_SPAD_BUFFER_SIZE];
+	/*!< Reference Spad Enables */
+	uint8_t RefGoodSpadMap[VL_REF_SPAD_BUFFER_SIZE];
+	/*!< Reference Spad Good Spad Map */
+};
+
+struct VL_DeviceSpecificParameters_t {
+	unsigned int OscFrequencyMHz; /* Frequency used */
+
+	uint16_t LastEncodedTimeout;
+	/* last encoded Time out used for timing budget*/
+
+	uint8_t Pin0GpioFunctionality;
+	/* store the functionality of the GPIO: pin0 */
+
+	uint32_t FinalRangeTimeoutMicroSecs;
+	 /*!< Execution time of the final range*/
+	uint8_t FinalRangeVcselPulsePeriod;
+	 /*!< Vcsel pulse period (pll clocks) for the final range measurement*/
+	uint32_t PreRangeTimeoutMicroSecs;
+	 /*!< Execution time of the pre-range*/
+	uint8_t PreRangeVcselPulsePeriod;
+	 /*!< Vcsel pulse period (pll clocks) for the pre-range measurement*/
+
+	uint16_t SigmaEstRefArray;
+	 /*!< Reference array sigma value in 1/100th of [mm] e.g. 100 = 1mm */
+	uint16_t SigmaEstEffPulseWidth;
+	 /*!< Effective Pulse width for sigma estimate in 1/100th */
+	 /* * of ns e.g. 900 = 9.0ns */
+	uint16_t SigmaEstEffAmbWidth;
+	 /*!< Effective Ambient width for sigma estimate in 1/100th of ns */
+	 /* * e.g. 500 = 5.0ns */
+
+
+	uint8_t ReadDataFromDeviceDone; /* Indicate if read from device has */
+	/*been done (==1) or not (==0) */
+	uint8_t ModuleId; /* Module ID */
+	uint8_t Revision; /* test Revision */
+	char ProductId[VL_MAX_STRING_LENGTH];
+		/* Product Identifier String  */
+	uint8_t ReferenceSpadCount; /* used for ref spad management */
+	uint8_t ReferenceSpadType;	/* used for ref spad management */
+	uint8_t RefSpadsInitialised; /* reports if ref spads are initialised. */
+	uint32_t PartUIDUpper; /*!< Unique Part ID Upper */
+	uint32_t PartUIDLower; /*!< Unique Part ID Lower */
+	unsigned int SignalRateMeasFixed400mm; /*!< Peak Signal rate at 400 mm*/
+
+};
+
+/**
+ * @struct VL_DevData_t
+ *
+ * @brief VL53L0X PAL device ST private data structure \n
+ * End user should never access any of these field directly
+ *
+ * These must never access directly but only via macro
+ */
+struct VL_DevData_t {
+	struct VL_DMaxData_t DMaxData;
+	/*!< Dmax Data */
+	int32_t	 Part2PartOffsetNVMMicroMeter;
+	/*!< backed up NVM value */
+	int32_t	 Part2PartOffsetAdjustmentNVMMicroMeter;
+	/*!< backed up NVM value representing additional offset adjustment */
+	struct VL_DeviceParameters_t CurrentParameters;
+	/*!< Current Device Parameter */
+	struct VL_RangingMeasurementData_t LastRangeMeasure;
+	/*!< Ranging Data */
+	struct VL_HistogramMeasurementData_t LastHistogramMeasure;
+	/*!< Histogram Data */
+	struct VL_DeviceSpecificParameters_t DeviceSpecificParameters;
+	/*!< Parameters specific to the device */
+	struct VL_SpadData_t SpadData;
+	/*!< Spad Data */
+	uint8_t SequenceConfig;
+	/*!< Internal value for the sequence config */
+	uint8_t RangeFractionalEnable;
+	/*!< Enable/Disable fractional part of ranging data */
+	uint8_t PalState;
+	/*!< Current state of the PAL for this device */
+	uint8_t PowerMode;
+	/*!< Current Power Mode	 */
+	uint16_t SigmaEstRefArray;
+	/*!< Reference array sigma value in 1/100th of [mm] e.g. 100 = 1mm */
+	uint16_t SigmaEstEffPulseWidth;
+	/*!< Effective Pulse width for sigma estimate in 1/100th */
+	/* of ns e.g. 900 = 9.0ns */
+	uint16_t SigmaEstEffAmbWidth;
+	/*!< Effective Ambient width for sigma estimate in 1/100th of ns */
+	/* * e.g. 500 = 5.0ns */
+	uint8_t StopVariable;
+	/*!< StopVariable used during the stop sequence */
+	uint16_t targetRefRate;
+	/*!< Target Ambient Rate for Ref spad management */
+	unsigned int SigmaEstimate;
+	/*!< Sigma Estimate - based on ambient & VCSEL rates and */
+	/** signal_total_events */
+	unsigned int SignalEstimate;
+	/*!< Signal Estimate - based on ambient & VCSEL rates and cross talk */
+	unsigned int LastSignalRefMcps;
+	/*!< Latest Signal ref in Mcps */
+	uint8_t *pTuningSettingsPointer;
+	/*!< Pointer for Tuning Settings table */
+	uint8_t UseInternalTuningSettings;
+	/*!< Indicate if we use	 Tuning Settings table */
+	uint16_t LinearityCorrectiveGain;
+	/*!< Linearity Corrective Gain value in x1000 */
+	uint16_t DmaxCalRangeMilliMeter;
+	/*!< Dmax Calibration Range millimeter */
+	unsigned int DmaxCalSignalRateRtnMegaCps;
+	/*!< Dmax Calibration Signal Rate Return MegaCps */
+
+};
+
+
+/** @defgroup VL_define_InterruptPolarity_group Defines the Polarity
+ * of the Interrupt
+ *	Defines the Polarity of the Interrupt
+ *	@{
+ */
+
+#define VL_INTERRUPTPOLARITY_LOW	   ((uint8_t)	0)
+/*!< Set active low polarity best setup for falling edge. */
+#define VL_INTERRUPTPOLARITY_HIGH	   ((uint8_t)	1)
+/*!< Set active high polarity best setup for rising edge. */
+
+/** @} VL_define_InterruptPolarity_group */
+
+
+/** @defgroup VL_define_VcselPeriod_group Vcsel Period Defines
+ *	Defines the range measurement for which to access the vcsel period.
+ *	@{
+ */
+
+#define VL_VCSEL_PERIOD_PRE_RANGE	((uint8_t) 0)
+/*!<Identifies the pre-range vcsel period. */
+#define VL_VCSEL_PERIOD_FINAL_RANGE ((uint8_t) 1)
+/*!<Identifies the final range vcsel period. */
+
+/** @} VL_define_VcselPeriod_group */
+
+/** @defgroup VL_define_SchedulerSequence_group Defines the steps
+ * carried out by the scheduler during a range measurement.
+ *	@{
+ *	Defines the states of all the steps in the scheduler
+ *	i.e. enabled/disabled.
+ */
+struct VL_SchedulerSequenceSteps_t {
+	uint8_t		 TccOn;	   /*!<Reports if Target Centre Check On  */
+	uint8_t		 MsrcOn;	   /*!<Reports if MSRC On  */
+	uint8_t		 DssOn;		   /*!<Reports if DSS On  */
+	uint8_t		 PreRangeOn;   /*!<Reports if Pre-Range On	*/
+	uint8_t		 FinalRangeOn; /*!<Reports if Final-Range On  */
+};
+
+/** @} VL_define_SchedulerSequence_group */
+
+/** @defgroup VL_define_SequenceStepId_group Defines the Sequence Step Id
+ *	Defines the sequence steps performed during ranging.
+ *	@{
+ */
+
+#define	 VL_SEQUENCESTEP_TCC		 ((uint8_t) 0)
+/*!<Target CentreCheck identifier. */
+#define	 VL_SEQUENCESTEP_DSS		 ((uint8_t) 1)
+/*!<Dynamic Spad Selection function Identifier. */
+#define	 VL_SEQUENCESTEP_MSRC		 ((uint8_t) 2)
+/*!<Minimum Signal Rate Check function Identifier. */
+#define	 VL_SEQUENCESTEP_PRE_RANGE	 ((uint8_t) 3)
+/*!<Pre-Range check Identifier. */
+#define	 VL_SEQUENCESTEP_FINAL_RANGE ((uint8_t) 4)
+/*!<Final Range Check Identifier. */
+
+#define	 VL_SEQUENCESTEP_NUMBER_OF_CHECKS			 5
+/*!<Number of Sequence Step Managed by the API. */
+
+/** @} VL_define_SequenceStepId_group */
+
+
+/* MACRO Definitions */
+/** @defgroup VL_define_GeneralMacro_group General Macro Defines
+ *	General Macro Defines
+ *	@{
+ */
+
+/* Defines */
+#define VL_SETPARAMETERFIELD(Dev, field, value) \
+	PALDevDataSet(Dev, CurrentParameters.field, value)
+
+#define VL_GETPARAMETERFIELD(Dev, field, variable) \
+	(variable = PALDevDataGet(Dev, CurrentParameters).field)
+
+
+#define VL_SETARRAYPARAMETERFIELD(Dev, field, index, value) \
+	PALDevDataSet(Dev, CurrentParameters.field[index], value)
+
+#define VL_GETARRAYPARAMETERFIELD(Dev, field, index, variable) \
+	(variable = PALDevDataGet(Dev, CurrentParameters).field[index])
+
+
+#define VL_SETDEVICESPECIFICPARAMETER(Dev, field, value) \
+		PALDevDataSet(Dev, DeviceSpecificParameters.field, value)
+
+#define VL_GETDEVICESPECIFICPARAMETER(Dev, field) \
+		PALDevDataGet(Dev, DeviceSpecificParameters).field
+
+
+#define VL_FIXPOINT1616TOFIXPOINT97(Value) \
+	(uint16_t)((Value>>9)&0xFFFF)
+#define VL_FIXPOINT97TOFIXPOINT1616(Value) \
+	(unsigned int)(Value<<9)
+
+#define VL_FIXPOINT1616TOFIXPOINT88(Value) \
+	(uint16_t)((Value>>8)&0xFFFF)
+#define VL_FIXPOINT88TOFIXPOINT1616(Value) \
+	(unsigned int)(Value<<8)
+
+#define VL_FIXPOINT1616TOFIXPOINT412(Value) \
+	(uint16_t)((Value>>4)&0xFFFF)
+#define VL_FIXPOINT412TOFIXPOINT1616(Value) \
+	(unsigned int)(Value<<4)
+
+#define VL_FIXPOINT1616TOFIXPOINT313(Value) \
+	(uint16_t)((Value>>3)&0xFFFF)
+#define VL_FIXPOINT313TOFIXPOINT1616(Value) \
+	(unsigned int)(Value<<3)
+
+#define VL_FIXPOINT1616TOFIXPOINT08(Value) \
+	(uint8_t)((Value>>8)&0x00FF)
+#define VL_FIXPOINT08TOFIXPOINT1616(Value) \
+	(unsigned int)(Value<<8)
+
+#define VL_FIXPOINT1616TOFIXPOINT53(Value) \
+	(uint8_t)((Value>>13)&0x00FF)
+#define VL_FIXPOINT53TOFIXPOINT1616(Value) \
+	(unsigned int)(Value<<13)
+
+#define VL_FIXPOINT1616TOFIXPOINT102(Value) \
+	(uint16_t)((Value>>14)&0x0FFF)
+#define VL_FIXPOINT102TOFIXPOINT1616(Value) \
+	(unsigned int)(Value<<12)
+
+#define VL_MAKEUINT16(lsb, msb) (uint16_t)((((uint16_t)msb)<<8) + \
+		(uint16_t)lsb)
+
+/** @} VL_define_GeneralMacro_group */
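The fixed-point helpers above convert between the driver's internal 16.16 format and the narrower register formats purely by shifting. The following is a minimal standalone sketch of the 16.16 to 9.7 round trip; the macro and variable names here are illustrative only and not part of the driver:

#include <stdint.h>
#include <stdio.h>

/* Same shifts as VL_FIXPOINT1616TOFIXPOINT97 / VL_FIXPOINT97TOFIXPOINT1616. */
#define FP1616_TO_FP97(v)	((uint16_t)(((v) >> 9) & 0xFFFF))
#define FP97_TO_FP1616(v)	((unsigned int)((v) << 9))

int main(void)
{
	unsigned int rate_fp1616 = 20u << 16;	/* 20.0 in 16.16 */
	uint16_t rate_fp97 = FP1616_TO_FP97(rate_fp1616);

	/* 20.0 in 9.7 is 20 * 128 = 0x0A00, the targetRefRate default that
	 * VL_DataInit() programs later in this patch. */
	printf("9.7: 0x%04X, back to 16.16: 0x%08X\n",
	       (unsigned int)rate_fp97, FP97_TO_FP1616(rate_fp97));
	return 0;
}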
+
+/** @} VL_globaldefine_group */
+
+
+
+
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* _VL_DEF_H_ */
diff --git a/drivers/input/misc/vl53l0x/inc/vl53l0x_device.h b/drivers/input/misc/vl53l0x/inc/vl53l0x_device.h
new file mode 100644
index 0000000..b296ff8
--- /dev/null
+++ b/drivers/input/misc/vl53l0x/inc/vl53l0x_device.h
@@ -0,0 +1,246 @@
+/*
+ *  vl53l0x_device.h - Linux kernel modules for
+ *  STM VL53L0 FlightSense TOF sensor
+ *
+ *  Copyright (C) 2016 STMicroelectronics Imaging Division.
+ *  Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+/**
+ * Device specific defines. To be adapted by implementer for the targeted
+ * device.
+ */
+
+#ifndef _VL_DEVICE_H_
+#define _VL_DEVICE_H_
+
+#include "vl53l0x_types.h"
+
+
+/** @defgroup VL_DevSpecDefines_group VL53L0X cut1.1
+ *  Device Specific Defines
+ *  @brief VL53L0X cut1.1 Device Specific Defines
+ *  @{
+ */
+
+
+/** @defgroup uint8_t_group Device Error
+ *  @brief Device Error code
+ *
+ *  This enum is device specific; it should be updated in the implementation.
+ *  Use @a VL_GetStatusErrorString() to get the string.
+ *  It is related to the Status Register of the device.
+ *  @{
+ */
+
+#define VL_DEVICEERROR_NONE                     ((uint8_t) 0)
+	/*!< 0  NoError  */
+#define VL_DEVICEERROR_VCSELCONTINUITYTESTFAILURE ((uint8_t) 1)
+#define VL_DEVICEERROR_VCSELWATCHDOGTESTFAILURE ((uint8_t) 2)
+#define VL_DEVICEERROR_NOVHVVALUEFOUND         ((uint8_t) 3)
+#define VL_DEVICEERROR_MSRCNOTARGET            ((uint8_t) 4)
+#define VL_DEVICEERROR_SNRCHECK                ((uint8_t) 5)
+#define VL_DEVICEERROR_RANGEPHASECHECK         ((uint8_t) 6)
+#define VL_DEVICEERROR_SIGMATHRESHOLDCHECK     ((uint8_t) 7)
+#define VL_DEVICEERROR_TCC                     ((uint8_t) 8)
+#define VL_DEVICEERROR_PHASECONSISTENCY        ((uint8_t) 9)
+#define VL_DEVICEERROR_MINCLIP                 ((uint8_t) 10)
+#define VL_DEVICEERROR_RANGECOMPLETE           ((uint8_t) 11)
+#define VL_DEVICEERROR_ALGOUNDERFLOW           ((uint8_t) 12)
+#define VL_DEVICEERROR_ALGOOVERFLOW            ((uint8_t) 13)
+#define VL_DEVICEERROR_RANGEIGNORETHRESHOLD    ((uint8_t) 14)
+
+/** @} end of uint8_t_group */
+
+
+/** @defgroup VL_CheckEnable_group Check Enable list
+ *  @brief Check Enable code
+ *
+ *  Define used to specify the LimitCheckId.
+ *  Use @a VL_GetLimitCheckInfo() to get the string.
+ *  @{
+ */
+
+#define VL_CHECKENABLE_SIGMA_FINAL_RANGE           0
+#define VL_CHECKENABLE_SIGNAL_RATE_FINAL_RANGE     1
+#define VL_CHECKENABLE_SIGNAL_REF_CLIP             2
+#define VL_CHECKENABLE_RANGE_IGNORE_THRESHOLD      3
+#define VL_CHECKENABLE_SIGNAL_RATE_MSRC            4
+#define VL_CHECKENABLE_SIGNAL_RATE_PRE_RANGE       5
+
+#define VL_CHECKENABLE_NUMBER_OF_CHECKS            6
+
+/** @}  end of VL_CheckEnable_group */
+
+
+/** @defgroup uint8_t_group Gpio Functionality
+ *  @brief Defines the different functionalities for the device GPIO(s)
+ *  @{
+ */
+
+#define VL_GPIOFUNCTIONALITY_OFF                     \
+	((uint8_t)  0) /*!< NO Interrupt  */
+#define VL_GPIOFUNCTIONALITY_THRESHOLD_CROSSED_LOW   \
+	((uint8_t)  1) /*!< Level Low (value < thresh_low) */
+#define VL_GPIOFUNCTIONALITY_THRESHOLD_CROSSED_HIGH   \
+	((uint8_t)  2)/*!< Level High (value > thresh_high)*/
+#define VL_GPIOFUNCTIONALITY_THRESHOLD_CROSSED_OUT    \
+	((uint8_t)  3)
+	/*!< Out Of Window (value < thresh_low OR value > thresh_high)  */
+#define VL_GPIOFUNCTIONALITY_NEW_MEASURE_READY        \
+	((uint8_t)  4) /*!< New Sample Ready  */
+
+/** @} end of uint8_t_group */
+
+
+/* Device register map */
+
+/** @defgroup VL_DefineRegisters_group Define Registers
+ *  @brief List of all the defined registers
+ *  @{
+ */
+#define VL_REG_SYSRANGE_START                        0x000
+	/** mask existing bit in #VL_REG_SYSRANGE_START */
+	#define VL_REG_SYSRANGE_MODE_MASK          0x0F
+	/** bit 0 in #VL_REG_SYSRANGE_START write 1 toggle state in
+	 *  continuous mode and arm next shot in single shot mode */
+	#define VL_REG_SYSRANGE_MODE_START_STOP    0x01
+	/** bit 1 write 0 in #VL_REG_SYSRANGE_START set single shot mode */
+	#define VL_REG_SYSRANGE_MODE_SINGLESHOT    0x00
+	/** bit 1 write 1 in #VL_REG_SYSRANGE_START set back-to-back
+	 *  operation mode */
+	#define VL_REG_SYSRANGE_MODE_BACKTOBACK    0x02
+	/** bit 2 write 1 in #VL_REG_SYSRANGE_START set timed operation
+	 *  mode */
+	#define VL_REG_SYSRANGE_MODE_TIMED         0x04
+	/** bit 3 write 1 in #VL_REG_SYSRANGE_START set histogram operation
+	 *  mode */
+	#define VL_REG_SYSRANGE_MODE_HISTOGRAM     0x08
+
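The SYSRANGE mode bits above are flags written to VL_REG_SYSRANGE_START. As a hedged sketch only, using the VL_WrByte accessor declared later in vl53l0x_platform.h, arming one single-shot measurement could look like the snippet below; this is not the driver's actual start routine, which performs additional setup around the write:

/* Illustrative only: arm a single ranging shot. */
static int8_t example_start_single_shot(struct vl_data *Dev)
{
	/* Bit 1 = 0 selects single-shot mode, bit 0 = 1 arms the next shot. */
	return VL_WrByte(Dev, VL_REG_SYSRANGE_START,
			 VL_REG_SYSRANGE_MODE_SINGLESHOT |
			 VL_REG_SYSRANGE_MODE_START_STOP);
}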
+
+#define VL_REG_SYSTEM_THRESH_HIGH               0x000C
+#define VL_REG_SYSTEM_THRESH_LOW                0x000E
+
+
+#define VL_REG_SYSTEM_SEQUENCE_CONFIG		0x0001
+#define VL_REG_SYSTEM_RANGE_CONFIG			0x0009
+#define VL_REG_SYSTEM_INTERMEASUREMENT_PERIOD	0x0004
+
+
+#define VL_REG_SYSTEM_INTERRUPT_CONFIG_GPIO               0x000A
+	#define VL_REG_SYSTEM_INTERRUPT_GPIO_DISABLED	0x00
+	#define VL_REG_SYSTEM_INTERRUPT_GPIO_LEVEL_LOW	0x01
+	#define VL_REG_SYSTEM_INTERRUPT_GPIO_LEVEL_HIGH	0x02
+	#define VL_REG_SYSTEM_INTERRUPT_GPIO_OUT_OF_WINDOW	0x03
+	#define VL_REG_SYSTEM_INTERRUPT_GPIO_NEW_SAMPLE_READY	0x04
+
+#define VL_REG_GPIO_HV_MUX_ACTIVE_HIGH          0x0084
+
+
+#define VL_REG_SYSTEM_INTERRUPT_CLEAR           0x000B
+
+/* Result registers */
+#define VL_REG_RESULT_INTERRUPT_STATUS          0x0013
+#define VL_REG_RESULT_RANGE_STATUS              0x0014
+
+#define VL_REG_RESULT_CORE_PAGE  1
+#define VL_REG_RESULT_CORE_AMBIENT_WINDOW_EVENTS_RTN   0x00BC
+#define VL_REG_RESULT_CORE_RANGING_TOTAL_EVENTS_RTN    0x00C0
+#define VL_REG_RESULT_CORE_AMBIENT_WINDOW_EVENTS_REF   0x00D0
+#define VL_REG_RESULT_CORE_RANGING_TOTAL_EVENTS_REF    0x00D4
+#define VL_REG_RESULT_PEAK_SIGNAL_RATE_REF             0x00B6
+
+/* Algo register */
+
+#define VL_REG_ALGO_PART_TO_PART_RANGE_OFFSET_MM       0x0028
+
+#define VL_REG_I2C_SLAVE_DEVICE_ADDRESS                0x008a
+
+/* Check Limit registers */
+#define VL_REG_MSRC_CONFIG_CONTROL                     0x0060
+
+#define VL_REG_PRE_RANGE_CONFIG_MIN_SNR                      0X0027
+#define VL_REG_PRE_RANGE_CONFIG_VALID_PHASE_LOW              0x0056
+#define VL_REG_PRE_RANGE_CONFIG_VALID_PHASE_HIGH             0x0057
+#define VL_REG_PRE_RANGE_MIN_COUNT_RATE_RTN_LIMIT            0x0064
+
+#define VL_REG_FINAL_RANGE_CONFIG_MIN_SNR                    0X0067
+#define VL_REG_FINAL_RANGE_CONFIG_VALID_PHASE_LOW            0x0047
+#define VL_REG_FINAL_RANGE_CONFIG_VALID_PHASE_HIGH           0x0048
+#define VL_REG_FINAL_RANGE_CONFIG_MIN_COUNT_RATE_RTN_LIMIT   0x0044
+
+
+#define VL_REG_PRE_RANGE_CONFIG_SIGMA_THRESH_HI              0X0061
+#define VL_REG_PRE_RANGE_CONFIG_SIGMA_THRESH_LO              0X0062
+
+/* PRE RANGE registers */
+#define VL_REG_PRE_RANGE_CONFIG_VCSEL_PERIOD                 0x0050
+#define VL_REG_PRE_RANGE_CONFIG_TIMEOUT_MACROP_HI            0x0051
+#define VL_REG_PRE_RANGE_CONFIG_TIMEOUT_MACROP_LO            0x0052
+
+#define VL_REG_SYSTEM_HISTOGRAM_BIN                          0x0081
+#define VL_REG_HISTOGRAM_CONFIG_INITIAL_PHASE_SELECT         0x0033
+#define VL_REG_HISTOGRAM_CONFIG_READOUT_CTRL                 0x0055
+
+#define VL_REG_FINAL_RANGE_CONFIG_VCSEL_PERIOD               0x0070
+#define VL_REG_FINAL_RANGE_CONFIG_TIMEOUT_MACROP_HI          0x0071
+#define VL_REG_FINAL_RANGE_CONFIG_TIMEOUT_MACROP_LO          0x0072
+#define VL_REG_CROSSTALK_COMPENSATION_PEAK_RATE_MCPS         0x0020
+
+#define VL_REG_MSRC_CONFIG_TIMEOUT_MACROP                    0x0046
+
+
+#define VL_REG_SOFT_RESET_GO2_SOFT_RESET_N	                 0x00bf
+#define VL_REG_IDENTIFICATION_MODEL_ID                       0x00c0
+#define VL_REG_IDENTIFICATION_REVISION_ID                    0x00c2
+
+#define VL_REG_OSC_CALIBRATE_VAL                             0x00f8
+
+
+#define VL_SIGMA_ESTIMATE_MAX_VALUE                          65535
+/* equivalent to a range sigma of 655.35mm */
+
+#define VL_REG_GLOBAL_CONFIG_VCSEL_WIDTH          0x032
+#define VL_REG_GLOBAL_CONFIG_SPAD_ENABLES_REF_0   0x0B0
+#define VL_REG_GLOBAL_CONFIG_SPAD_ENABLES_REF_1   0x0B1
+#define VL_REG_GLOBAL_CONFIG_SPAD_ENABLES_REF_2   0x0B2
+#define VL_REG_GLOBAL_CONFIG_SPAD_ENABLES_REF_3   0x0B3
+#define VL_REG_GLOBAL_CONFIG_SPAD_ENABLES_REF_4   0x0B4
+#define VL_REG_GLOBAL_CONFIG_SPAD_ENABLES_REF_5   0x0B5
+
+#define VL_REG_GLOBAL_CONFIG_REF_EN_START_SELECT   0xB6
+#define VL_REG_DYNAMIC_SPAD_NUM_REQUESTED_REF_SPAD 0x4E /* 0x14E */
+#define VL_REG_DYNAMIC_SPAD_REF_EN_START_OFFSET    0x4F /* 0x14F */
+#define VL_REG_POWER_MANAGEMENT_GO1_POWER_FORCE    0x80
+
+/*
+ * Speed of light in um per 1E-10 Seconds
+ */
+
+#define VL_SPEED_OF_LIGHT_IN_AIR 2997
+
+#define VL_REG_VHV_CONFIG_PAD_SCL_SDA__EXTSUP_HV     0x0089
+
+#define VL_REG_ALGO_PHASECAL_LIM                         0x0030 /* 0x130 */
+#define VL_REG_ALGO_PHASECAL_CONFIG_TIMEOUT              0x0030
+
+/** @} VL_DefineRegisters_group */
+
+/** @} VL_DevSpecDefines_group */
+
+
+#endif
+
+/* _VL_DEVICE_H_ */
+
+
diff --git a/drivers/input/misc/vl53l0x/inc/vl53l0x_i2c_platform.h b/drivers/input/misc/vl53l0x/inc/vl53l0x_i2c_platform.h
new file mode 100644
index 0000000..3cd5d69
--- /dev/null
+++ b/drivers/input/misc/vl53l0x/inc/vl53l0x_i2c_platform.h
@@ -0,0 +1,397 @@
+/*
+ *  vl53l0x_i2c_platform.h - Linux kernel modules for
+ *  STM VL53L0 FlightSense TOF sensor
+ *
+ *  Copyright (C) 2016 STMicroelectronics Imaging Division.
+ *  Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+/**
+ * @file   VL_i2c_platform.h
+ * @brief  Function prototype definitions for EWOK Platform layer.
+ *
+ */
+
+
+#ifndef _VL_I2C_PLATFORM_H_
+#define _VL_I2C_PLATFORM_H_
+
+#include "vl53l0x_def.h"
+
+
+/** Maximum buffer size to be used in i2c */
+#define VL_MAX_I2C_XFER_SIZE   64
+
+/**
+ * @brief Comms type selection defines (I2C or SPI).\n
+ * The developer should modify these to suit the platform being deployed.
+ *
+ */
+#define	   I2C                0x01
+#define	   SPI                0x00
+
+#define    COMMS_BUFFER_SIZE    64
+/*MUST be the same size as the SV task buffer */
+
+#define    BYTES_PER_WORD        2
+#define    BYTES_PER_DWORD       4
+
+#define    VL_MAX_STRING_LENGTH_PLT       256
+
+/**
+ * @brief  Initialise platform comms.
+ *
+ * @param  comms_type      - selects between I2C and SPI
+ * @param  comms_speed_khz - unsigned short containing the I2C speed in kHz
+ *
+ * @return status - status 0 = ok, 1 = error
+ *
+ */
+
+int32_t VL_comms_initialise(uint8_t  comms_type,
+			uint16_t comms_speed_khz);
+
+/**
+ * @brief  Close platform comms.
+ *
+ * @return status - status 0 = ok, 1 = error
+ *
+ */
+
+int32_t VL_comms_close(void);
+
+/**
+ * @brief  Cycle Power to Device
+ *
+ * @return status - status 0 = ok, 1 = error
+ *
+ */
+
+int32_t VL_cycle_power(void);
+
+int32_t VL_set_page(struct vl_data *dev, uint8_t page_data);
+
+/**
+ * @brief Writes the supplied byte buffer to the device
+ *
+ * Wrapper for SystemVerilog Write Multi task
+ *
+ * @code
+ *
+ * Example:
+ *
+ * uint8_t *spad_enables;
+ *
+ * int status = VL_write_multi(RET_SPAD_EN_0, spad_enables, 36);
+ *
+ * @endcode
+ *
+ * @param  address - uint8_t device address value
+ * @param  index - uint8_t register index value
+ * @param  pdata - pointer to uint8_t buffer containing the data to be written
+ * @param  count - number of bytes in the supplied byte buffer
+ *
+ * @return status - SystemVerilog status 0 = ok, 1 = error
+ *
+ */
+
+int32_t VL_write_multi(struct vl_data *dev, uint8_t index, uint8_t  *pdata,
+		int32_t count);
+
+
+/**
+ * @brief  Reads the requested number of bytes from the device
+ *
+ * Wrapper for SystemVerilog Read Multi task
+ *
+ * @code
+ *
+ * Example:
+ *
+ * uint8_t buffer[COMMS_BUFFER_SIZE];
+ *
+ * int status = VL_read_multi(DEVICE_ID, buffer, 2);
+ *
+ * @endcode
+ *
+ * @param  address - uint8_t device address value
+ * @param  index - uint8_t register index value
+ * @param  pdata - pointer to the uint8_t buffer to store read data
+ * @param  count - number of uint8_t's to read
+ *
+ * @return status - SystemVerilog status 0 = ok, 1 = error
+ *
+ */
+
+int32_t VL_read_multi(struct vl_data *dev,  uint8_t index, uint8_t  *pdata,
+		int32_t count);
+
+
+/**
+ * @brief  Writes a single byte to the device
+ *
+ * Wrapper for SystemVerilog Write Byte task
+ *
+ * @code
+ *
+ * Example:
+ *
+ * uint8_t page_number = MAIN_SELECT_PAGE;
+ *
+ * int status = VL_write_byte(PAGE_SELECT, page_number);
+ *
+ * @endcode
+ *
+ * @param  address - uint8_t device address value
+ * @param  index - uint8_t register index value
+ * @param  data  - uint8_t data value to write
+ *
+ * @return status - SystemVerilog status 0 = ok, 1 = error
+ *
+ */
+
+int32_t VL_write_byte(struct vl_data *dev,  uint8_t index, uint8_t   data);
+
+
+/**
+ * @brief  Writes a single word (16-bit unsigned) to the device
+ *
+ * Manages the big-endian nature of the device (first byte written is the
+ * MS byte).
+ * Uses SystemVerilog Write Multi task.
+ *
+ * @code
+ *
+ * Example:
+ *
+ * uint16_t nvm_ctrl_pulse_width = 0x0004;
+ *
+ * int status = VL_write_word(NVM_CTRL__PULSE_WIDTH_MSB,
+ * nvm_ctrl_pulse_width);
+ *
+ * @endcode
+ *
+ * @param  address - uint8_t device address value
+ * @param  index - uint8_t register index value
+ * @param  data  - uint16_t data value to write
+ *
+ * @return status - SystemVerilog status 0 = ok, 1 = error
+ *
+ */
+
+int32_t VL_write_word(struct vl_data *dev,  uint8_t index, uint16_t  data);
+
+
+/**
+ * @brief  Writes a single dword (32-bit unsigned) to the device
+ *
+ * Manages the big-endian nature of the device (first byte written is the
+ * MS byte).
+ * Uses SystemVerilog Write Multi task.
+ *
+ * @code
+ *
+ * Example:
+ *
+ * uint32_t nvm_data = 0x0004;
+ *
+ * int status = VL_write_dword(NVM_CTRL__DATAIN_MMM, nvm_data);
+ *
+ * @endcode
+ *
+ * @param  address - uint8_t device address value
+ * @param  index - uint8_t register index value
+ * @param  data  - uint32_t data value to write
+ *
+ * @return status - SystemVerilog status 0 = ok, 1 = error
+ *
+ */
+
+int32_t VL_write_dword(struct vl_data *dev, uint8_t index, uint32_t  data);
+
+
+
+/**
+ * @brief  Reads a single byte from the device
+ *
+ * Uses SystemVerilog Read Byte task.
+ *
+ * @code
+ *
+ * Example:
+ *
+ * uint8_t device_status = 0;
+ *
+ * int status = VL_read_byte(STATUS, &device_status);
+ *
+ * @endcode
+ *
+ * @param  address - uint8_t device address value
+ * @param  index  - uint8_t register index value
+ * @param  pdata  - pointer to uint8_t data value
+ *
+ * @return status - SystemVerilog status 0 = ok, 1 = error
+ *
+ */
+
+int32_t VL_read_byte(struct vl_data *dev,  uint8_t index, uint8_t  *pdata);
+
+
+/**
+ * @brief  Reads a single word (16-bit unsigned) from the device
+ *
+ * Manages the big-endian nature of the device (first byte read is the MS byte).
+ * Uses SystemVerilog Read Multi task.
+ *
+ * @code
+ *
+ * Example:
+ *
+ * uint16_t timeout = 0;
+ *
+ * int status = VL_read_word(TIMEOUT_OVERALL_PERIODS_MSB, &timeout);
+ *
+ * @endcode
+ *
+ * @param  address - uint8_t device address value
+ * @param  index  - uint8_t register index value
+ * @param  pdata  - pointer to uint16_t data value
+ *
+ * @return status - SystemVerilog status 0 = ok, 1 = error
+ *
+ */
+
+int32_t VL_read_word(struct vl_data *dev,  uint8_t index, uint16_t *pdata);
+
+
+/**
+ * @brief  Reads a single dword (32-bit unsigned) from the device
+ *
+ * Manages the big-endian nature of the device (first byte read is the MS byte).
+ * Uses SystemVerilog Read Multi task.
+ *
+ * @code
+ *
+ * Example:
+ *
+ * uint32_t range_1 = 0;
+ *
+ * int status = VL_read_dword(RANGE_1_MMM, &range_1);
+ *
+ * @endcode
+ *
+ * @param  address - uint8_t device address value
+ * @param  index - uint8_t register index value
+ * @param  pdata - pointer to uint32_t data value
+ *
+ * @return status - SystemVerilog status 0 = ok, 1 = error
+ *
+ */
+
+int32_t VL_read_dword(struct vl_data *dev, uint8_t index, uint32_t *pdata);
+
+
+/**
+ * @brief  Implements a programmable wait in us
+ *
+ * Wrapper for SystemVerilog Wait in micro seconds task
+ *
+ * @param  wait_us - integer wait in micro seconds
+ *
+ * @return status - SystemVerilog status 0 = ok, 1 = error
+ *
+ */
+
+int32_t VL_platform_wait_us(int32_t wait_us);
+
+
+/**
+ * @brief  Implements a programmable wait in ms
+ *
+ * Wrapper for SystemVerilog Wait in milli seconds task
+ *
+ * @param  wait_ms - integer wait in milli seconds
+ *
+ * @return status - SystemVerilog status 0 = ok, 1 = error
+ *
+ */
+
+int32_t VL_wait_ms(int32_t wait_ms);
+
+
+/**
+ * @brief Set GPIO value
+ *
+ * @param  level  - input  level - either 0 or 1
+ *
+ * @return status - SystemVerilog status 0 = ok, 1 = error
+ *
+ */
+
+int32_t VL_set_gpio(uint8_t  level);
+
+
+/**
+ * @brief Get GPIO value
+ *
+ * @param  plevel - uint8_t pointer to store GPIO level (0 or 1)
+ *
+ * @return status - SystemVerilog status 0 = ok, 1 = error
+ *
+ */
+
+int32_t VL_get_gpio(uint8_t *plevel);
+
+/**
+ * @brief Release force on GPIO
+ *
+ * @return status - SystemVerilog status 0 = ok, 1 = error
+ *
+ */
+
+int32_t VL_release_gpio(void);
+
+
+/**
+ * @brief Get the frequency of the timer used for ranging results time stamps
+ *
+ * @param[out] ptimer_freq_hz : pointer for timer frequency
+ *
+ * @return status : 0 = ok, 1 = error
+ *
+ */
+
+int32_t VL_get_timer_frequency(int32_t *ptimer_freq_hz);
+
+/**
+ * @brief Get the timer value in units of timer_freq_hz
+ * (see VL_get_timer_frequency())
+ *
+ * @param[out] ptimer_count : pointer for timer count value
+ *
+ * @return status : 0 = ok, 1 = error
+ *
+ */
+
+int32_t VL_get_timer_value(int32_t *ptimer_count);
+int VL_I2CWrite(struct vl_data *dev, uint8_t *buff, uint8_t len);
+int VL_I2CRead(struct vl_data *dev, uint8_t *buff, uint8_t len);
+
+#endif /* _VL_I2C_PLATFORM_H_ */
+
diff --git a/drivers/input/misc/vl53l0x/inc/vl53l0x_interrupt_threshold_settings.h b/drivers/input/misc/vl53l0x/inc/vl53l0x_interrupt_threshold_settings.h
new file mode 100644
index 0000000..ecfc994
--- /dev/null
+++ b/drivers/input/misc/vl53l0x/inc/vl53l0x_interrupt_threshold_settings.h
@@ -0,0 +1,183 @@
+/*
+ *  vl53l0x_interrupt_threshold_settings.h - Linux kernel modules for
+ *  STM VL53L0 FlightSense TOF sensor
+ *
+ *  Copyright (C) 2016 STMicroelectronics Imaging Division.
+ *  Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#ifndef _VL_INTERRUPT_THRESHOLD_SETTINGS_H_
+#define _VL_INTERRUPT_THRESHOLD_SETTINGS_H_
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+uint8_t InterruptThresholdSettings[] = {
+
+	/* Start of Interrupt Threshold Settings */
+	0x1, 0xff, 0x00,
+	0x1, 0x80, 0x01,
+	0x1, 0xff, 0x01,
+	0x1, 0x00, 0x00,
+	0x1, 0xff, 0x01,
+	0x1, 0x4f, 0x02,
+	0x1, 0xFF, 0x0E,
+	0x1, 0x00, 0x03,
+	0x1, 0x01, 0x84,
+	0x1, 0x02, 0x0A,
+	0x1, 0x03, 0x03,
+	0x1, 0x04, 0x08,
+	0x1, 0x05, 0xC8,
+	0x1, 0x06, 0x03,
+	0x1, 0x07, 0x8D,
+	0x1, 0x08, 0x08,
+	0x1, 0x09, 0xC6,
+	0x1, 0x0A, 0x01,
+	0x1, 0x0B, 0x02,
+	0x1, 0x0C, 0x00,
+	0x1, 0x0D, 0xD5,
+	0x1, 0x0E, 0x18,
+	0x1, 0x0F, 0x12,
+	0x1, 0x10, 0x01,
+	0x1, 0x11, 0x82,
+	0x1, 0x12, 0x00,
+	0x1, 0x13, 0xD5,
+	0x1, 0x14, 0x18,
+	0x1, 0x15, 0x13,
+	0x1, 0x16, 0x03,
+	0x1, 0x17, 0x86,
+	0x1, 0x18, 0x0A,
+	0x1, 0x19, 0x09,
+	0x1, 0x1A, 0x08,
+	0x1, 0x1B, 0xC2,
+	0x1, 0x1C, 0x03,
+	0x1, 0x1D, 0x8F,
+	0x1, 0x1E, 0x0A,
+	0x1, 0x1F, 0x06,
+	0x1, 0x20, 0x01,
+	0x1, 0x21, 0x02,
+	0x1, 0x22, 0x00,
+	0x1, 0x23, 0xD5,
+	0x1, 0x24, 0x18,
+	0x1, 0x25, 0x22,
+	0x1, 0x26, 0x01,
+	0x1, 0x27, 0x82,
+	0x1, 0x28, 0x00,
+	0x1, 0x29, 0xD5,
+	0x1, 0x2A, 0x18,
+	0x1, 0x2B, 0x0B,
+	0x1, 0x2C, 0x28,
+	0x1, 0x2D, 0x78,
+	0x1, 0x2E, 0x28,
+	0x1, 0x2F, 0x91,
+	0x1, 0x30, 0x00,
+	0x1, 0x31, 0x0B,
+	0x1, 0x32, 0x00,
+	0x1, 0x33, 0x0B,
+	0x1, 0x34, 0x00,
+	0x1, 0x35, 0xA1,
+	0x1, 0x36, 0x00,
+	0x1, 0x37, 0xA0,
+	0x1, 0x38, 0x00,
+	0x1, 0x39, 0x04,
+	0x1, 0x3A, 0x28,
+	0x1, 0x3B, 0x30,
+	0x1, 0x3C, 0x0C,
+	0x1, 0x3D, 0x04,
+	0x1, 0x3E, 0x0F,
+	0x1, 0x3F, 0x79,
+	0x1, 0x40, 0x28,
+	0x1, 0x41, 0x1E,
+	0x1, 0x42, 0x2F,
+	0x1, 0x43, 0x87,
+	0x1, 0x44, 0x00,
+	0x1, 0x45, 0x0B,
+	0x1, 0x46, 0x00,
+	0x1, 0x47, 0x0B,
+	0x1, 0x48, 0x00,
+	0x1, 0x49, 0xA7,
+	0x1, 0x4A, 0x00,
+	0x1, 0x4B, 0xA6,
+	0x1, 0x4C, 0x00,
+	0x1, 0x4D, 0x04,
+	0x1, 0x4E, 0x01,
+	0x1, 0x4F, 0x00,
+	0x1, 0x50, 0x00,
+	0x1, 0x51, 0x80,
+	0x1, 0x52, 0x09,
+	0x1, 0x53, 0x08,
+	0x1, 0x54, 0x01,
+	0x1, 0x55, 0x00,
+	0x1, 0x56, 0x0F,
+	0x1, 0x57, 0x79,
+	0x1, 0x58, 0x09,
+	0x1, 0x59, 0x05,
+	0x1, 0x5A, 0x00,
+	0x1, 0x5B, 0x60,
+	0x1, 0x5C, 0x05,
+	0x1, 0x5D, 0xD1,
+	0x1, 0x5E, 0x0C,
+	0x1, 0x5F, 0x3C,
+	0x1, 0x60, 0x00,
+	0x1, 0x61, 0xD0,
+	0x1, 0x62, 0x0B,
+	0x1, 0x63, 0x03,
+	0x1, 0x64, 0x28,
+	0x1, 0x65, 0x10,
+	0x1, 0x66, 0x2A,
+	0x1, 0x67, 0x39,
+	0x1, 0x68, 0x0B,
+	0x1, 0x69, 0x02,
+	0x1, 0x6A, 0x28,
+	0x1, 0x6B, 0x10,
+	0x1, 0x6C, 0x2A,
+	0x1, 0x6D, 0x61,
+	0x1, 0x6E, 0x0C,
+	0x1, 0x6F, 0x00,
+	0x1, 0x70, 0x0F,
+	0x1, 0x71, 0x79,
+	0x1, 0x72, 0x00,
+	0x1, 0x73, 0x0B,
+	0x1, 0x74, 0x00,
+	0x1, 0x75, 0x0B,
+	0x1, 0x76, 0x00,
+	0x1, 0x77, 0xA1,
+	0x1, 0x78, 0x00,
+	0x1, 0x79, 0xA0,
+	0x1, 0x7A, 0x00,
+	0x1, 0x7B, 0x04,
+	0x1, 0xFF, 0x04,
+	0x1, 0x79, 0x1D,
+	0x1, 0x7B, 0x27,
+	0x1, 0x96, 0x0E,
+	0x1, 0x97, 0xFE,
+	0x1, 0x98, 0x03,
+	0x1, 0x99, 0xEF,
+	0x1, 0x9A, 0x02,
+	0x1, 0x9B, 0x44,
+	0x1, 0x73, 0x07,
+	0x1, 0x70, 0x01,
+	0x1, 0xff, 0x01,
+	0x1, 0x00, 0x01,
+	0x1, 0xff, 0x00,
+	0x00, 0x00, 0x00
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _VL_INTERRUPT_THRESHOLD_SETTINGS_H_ */
diff --git a/drivers/input/misc/vl53l0x/inc/vl53l0x_platform.h b/drivers/input/misc/vl53l0x/inc/vl53l0x_platform.h
new file mode 100644
index 0000000..d896fe8
--- /dev/null
+++ b/drivers/input/misc/vl53l0x/inc/vl53l0x_platform.h
@@ -0,0 +1,213 @@
+/*
+ *  vl53l0x_platform.h - Linux kernel modules for
+ *  STM VL53L0 FlightSense TOF sensor
+ *
+ *  Copyright (C) 2016 STMicroelectronics Imaging Division.
+ *  Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#ifndef _VL_PLATFORM_H_
+#define _VL_PLATFORM_H_
+
+#include <linux/delay.h>
+#include "vl53l0x_def.h"
+#include "vl53l0x_platform_log.h"
+
+#include "stmvl53l0x-i2c.h"
+#include "stmvl53l0x-cci.h"
+#include "stmvl53l0x.h"
+
+/**
+ * @file vl53l0x_platform.h
+ *
+ * @brief All end user OS/platform/application porting
+ */
+
+/**
+ * @defgroup VL_platform_group VL53L0 Platform Functions
+ * @brief    VL53L0 Platform Functions
+ *  @{
+ */
+
+/**
+ * @def PALDevDataGet
+ * @brief Get ST private structure @a struct VL_DevData_t data access
+ *
+ * @param Dev       Device Handle
+ * @param field     ST structure field name
+ * It may also be used as a real data reference ("ref"), not just as a "get",
+ * for sub-structure items, e.g. PALDevDataGet(Dev, FilterData.field)[i] or
+ * PALDevDataGet(Dev, FilterData.MeasurementIndex)++
+ */
+#define PALDevDataGet(Dev, field) (Dev->Data.field)
+
+/**
+ * @def PALDevDataSet(Dev, field, data)
+ * @brief  Set ST private structure @a struct VL_DevData_t data field
+ * @param Dev       Device Handle
+ * @param field     ST structure field name
+ * @param data      Data to be set
+ */
+#define PALDevDataSet(Dev, field, data) ((Dev->Data.field) = (data))
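Since both accessors are thin wrappers around Dev->Data fields, they compose directly with the parameter macros from vl53l0x_def.h. A small illustration follows, using field names from struct VL_DevData_t shown earlier; it is a sketch only, not driver code:

/* Illustrative only: read, then cache back, a per-device field. */
static void example_dev_data_access(struct vl_data *Dev)
{
	uint8_t seq_cfg = PALDevDataGet(Dev, SequenceConfig);

	PALDevDataSet(Dev, SequenceConfig, seq_cfg);

	/* The device-specific variant reaches one structure level deeper. */
	VL_SETDEVICESPECIFICPARAMETER(Dev, RefSpadsInitialised, 0);
}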
+
+
+/**
+ * @defgroup VL_registerAccess_group PAL Register Access Functions
+ * @brief    PAL Register Access Functions
+ *  @{
+ */
+
+/**
+ * Lock comms interface to serialize all commands to a shared I2C interface
+ * for a specific device
+ * @param   Dev       Device Handle
+ * @return  VL_ERROR_NONE        Success
+ * @return  "Other error code"    See ::int8_t
+ */
+int8_t VL_LockSequenceAccess(struct vl_data *Dev);
+
+/**
+ * Unlock comms interface to serialize all commands to a shared I2C interface
+ * for a specific device
+ * @param   Dev       Device Handle
+ * @return  VL_ERROR_NONE        Success
+ * @return  "Other error code"    See ::int8_t
+ */
+int8_t VL_UnlockSequenceAccess(struct vl_data *Dev);
+
+
+/**
+ * Writes the supplied byte buffer to the device
+ * @param   Dev       Device Handle
+ * @param   index     The register index
+ * @param   pdata     Pointer to uint8_t buffer containing the data
+ * to be written
+ * @param   count     Number of bytes in the supplied byte buffer
+ * @return  VL_ERROR_NONE        Success
+ * @return  "Other error code"    See ::int8_t
+ */
+int8_t VL_WriteMulti(struct vl_data *Dev, uint8_t index,
+		uint8_t *pdata, uint32_t count);
+
+/**
+ * Reads the requested number of bytes from the device
+ * @param   Dev       Device Handle
+ * @param   index     The register index
+ * @param   pdata     Pointer to the uint8_t buffer to store read data
+ * @param   count     Number of uint8_t's to read
+ * @return  VL_ERROR_NONE        Success
+ * @return  "Other error code"    See ::int8_t
+ */
+int8_t VL_ReadMulti(struct vl_data *Dev, uint8_t index,
+		uint8_t *pdata, uint32_t count);
+
+/**
+ * Write single byte register
+ * @param   Dev       Device Handle
+ * @param   index     The register index
+ * @param   data      8 bit register data
+ * @return  VL_ERROR_NONE        Success
+ * @return  "Other error code"    See ::int8_t
+ */
+int8_t VL_WrByte(struct vl_data *Dev, uint8_t index, uint8_t data);
+
+/**
+ * Write word register
+ * @param   Dev       Device Handle
+ * @param   index     The register index
+ * @param   data      16 bit register data
+ * @return  VL_ERROR_NONE        Success
+ * @return  "Other error code"    See ::int8_t
+ */
+int8_t VL_WrWord(struct vl_data *Dev, uint8_t index, uint16_t data);
+
+/**
+ * Write double word (4 byte) register
+ * @param   Dev       Device Handle
+ * @param   index     The register index
+ * @param   data      32 bit register data
+ * @return  VL_ERROR_NONE        Success
+ * @return  "Other error code"    See ::int8_t
+ */
+int8_t VL_WrDWord(struct vl_data *Dev, uint8_t index, uint32_t data);
+
+/**
+ * Read single byte register
+ * @param   Dev       Device Handle
+ * @param   index     The register index
+ * @param   data      pointer to 8 bit data
+ * @return  VL_ERROR_NONE        Success
+ * @return  "Other error code"    See ::int8_t
+ */
+int8_t VL_RdByte(struct vl_data *Dev, uint8_t index, uint8_t *data);
+
+/**
+ * Read word (2byte) register
+ * @param   Dev       Device Handle
+ * @param   index     The register index
+ * @param   data      pointer to 16 bit data
+ * @return  VL_ERROR_NONE        Success
+ * @return  "Other error code"    See ::int8_t
+ */
+int8_t VL_RdWord(struct vl_data *Dev, uint8_t index, uint16_t *data);
+
+/**
+ * Read dword (4byte) register
+ * @param   Dev       Device Handle
+ * @param   index     The register index
+ * @param   data      pointer to 32 bit data
+ * @return  VL_ERROR_NONE        Success
+ * @return  "Other error code"    See ::int8_t
+ */
+int8_t VL_RdDWord(struct vl_data *Dev, uint8_t index, uint32_t *data);
+
+/**
+ * Thread safe update (read/modify/write) of a single byte register
+ *
+ * Final_reg = (Initial_reg & and_data) | or_data
+ *
+ * @param   Dev        Device Handle
+ * @param   index      The register index
+ * @param   AndData    8 bit and data
+ * @param   OrData     8 bit or data
+ * @return  VL_ERROR_NONE        Success
+ * @return  "Other error code"    See ::int8_t
+ */
+int8_t VL_UpdateByte(struct vl_data *Dev, uint8_t index,
+		uint8_t AndData, uint8_t OrData);
+
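VL_UpdateByte is the read-modify-write primitive the API uses, for instance, to switch the pads to 2V8 in VL_DataInit() (AndData 0xFE, OrData 0x01). The sketch below shows the equivalent explicit sequence built from VL_RdByte and VL_WrByte, without the locking a thread safe version needs:

static int8_t example_update_byte(struct vl_data *Dev, uint8_t index,
		uint8_t AndData, uint8_t OrData)
{
	uint8_t reg;
	int8_t Status = VL_RdByte(Dev, index, &reg);

	if (Status == VL_ERROR_NONE)
		Status = VL_WrByte(Dev, index, (reg & AndData) | OrData);
	return Status;
}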
+/** @} end of VL_registerAccess_group */
+
+
+/**
+ * @brief execute delay in all polling API calls
+ *
+ * A typical multi-thread or RTOS implementation is to sleep the task for
+ * about 5 ms (with a 100 Hz maximum rate, faster polling is not needed).
+ * If nothing specific is needed you can define it as an empty/void macro
+ * @code
+ * #define VL_PollingDelay(...) (void)0
+ * @endcode
+ * @param Dev       Device Handle
+ * @return  VL_ERROR_NONE        Success
+ * @return  "Other error code"    See ::int8_t
+ */
+int8_t VL_PollingDelay(struct vl_data *Dev);
+/* usually best implemented as a real function */
+
+/** @} end of VL_platform_group */
+
+#endif  /* _VL_PLATFORM_H_ */
+
+
+
diff --git a/drivers/input/misc/vl53l0x/inc/vl53l0x_platform_log.h b/drivers/input/misc/vl53l0x/inc/vl53l0x_platform_log.h
new file mode 100644
index 0000000..a8f22c5
--- /dev/null
+++ b/drivers/input/misc/vl53l0x/inc/vl53l0x_platform_log.h
@@ -0,0 +1,99 @@
+/*
+ *  vl53l0x_platform_log.h - Linux kernel modules for
+ *  STM VL53L0 FlightSense TOF sensor
+ *
+ *  Copyright (C) 2016 STMicroelectronics Imaging Division.
+ *  Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#ifndef _VL_PLATFORM_LOG_H_
+#define _VL_PLATFORM_LOG_H_
+
+#include <linux/string.h>
+/* LOG Functions */
+
+
+/**
+ * @file vl53l0x_platform_log.h
+ *
+ * @brief platform log function definition
+ */
+
+/* #define VL_LOG_ENABLE */
+
+enum {
+	TRACE_LEVEL_NONE,
+	TRACE_LEVEL_ERRORS,
+	TRACE_LEVEL_WARNING,
+	TRACE_LEVEL_INFO,
+	TRACE_LEVEL_DEBUG,
+	TRACE_LEVEL_ALL,
+	TRACE_LEVEL_IGNORE
+};
+
+enum {
+	TRACE_FUNCTION_NONE = 0,
+	TRACE_FUNCTION_I2C  = 1,
+	TRACE_FUNCTION_ALL  = 0x7fffffff /* all bits except sign */
+};
+
+enum {
+	TRACE_MODULE_NONE              = 0x0,
+	TRACE_MODULE_API               = 0x1,
+	TRACE_MODULE_PLATFORM          = 0x2,
+	TRACE_MODULE_ALL               = 0x7fffffff /* all bits except sign */
+};
+
+
+#ifdef VL_LOG_ENABLE
+
+#include <linux/module.h>
+
+
+extern uint32_t _trace_level;
+
+
+
+int32_t VL_trace_config(char *filename, uint32_t modules,
+			uint32_t level, uint32_t functions);
+
+#define trace_print_module_function(...)
+
+#define LOG_GET_TIME()	0
+#define _LOG_FUNCTION_START(module, fmt, ...) \
+		dbg("beg %s start @%d\t" fmt "\n", \
+		__func__, LOG_GET_TIME(), ##__VA_ARGS__)
+
+#define _LOG_FUNCTION_END(module, status, ...)\
+		dbg("end %s start @%d Status %d\n", \
+		 __func__, LOG_GET_TIME(), (int)status)
+
+#define _LOG_FUNCTION_END_FMT(module, status, fmt, ...)\
+		dbg("End %s @%d %d\t"fmt"\n", \
+		__func__, LOG_GET_TIME(), (int)status, ##__VA_ARGS__)
+
+
+#else /* VL_LOG_ENABLE no logging */
+	#define VL_ErrLog(...) (void)0
+	#define _LOG_FUNCTION_START(module, fmt, ...) (void)0
+	#define _LOG_FUNCTION_END(module, status, ...) (void)0
+	#define _LOG_FUNCTION_END_FMT(module, status, fmt, ...) (void)0
+#endif /* else */
+
+#define VL_COPYSTRING(str, ...) strlcpy(str, ##__VA_ARGS__, sizeof(str))
+
+
+#endif  /* _VL_PLATFORM_LOG_H_ */
+
+
+
diff --git a/drivers/input/misc/vl53l0x/inc/vl53l0x_tuning.h b/drivers/input/misc/vl53l0x/inc/vl53l0x_tuning.h
new file mode 100644
index 0000000..ba2998a
--- /dev/null
+++ b/drivers/input/misc/vl53l0x/inc/vl53l0x_tuning.h
@@ -0,0 +1,135 @@
+/*
+ *  vl53l0x_tuning.h - Linux kernel modules for
+ *  STM VL53L0 FlightSense TOF sensor
+ *
+ *  Copyright (C) 2016 STMicroelectronics Imaging Division.
+ *  Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#ifndef _VL_TUNING_H_
+#define _VL_TUNING_H_
+
+#include "vl53l0x_def.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+uint8_t DefaultTuningSettings[] = {
+
+	/* update 02/11/2015_v36 */
+	0x01, 0xFF, 0x01,
+	0x01, 0x00, 0x00,
+
+	0x01, 0xFF, 0x00,
+	0x01, 0x09, 0x00,
+	0x01, 0x10, 0x00,
+	0x01, 0x11, 0x00,
+
+	0x01, 0x24, 0x01,
+	0x01, 0x25, 0xff,
+	0x01, 0x75, 0x00,
+
+	0x01, 0xFF, 0x01,
+	0x01, 0x4e, 0x2c,
+	0x01, 0x48, 0x00,
+	0x01, 0x30, 0x20,
+
+	0x01, 0xFF, 0x00,
+	0x01, 0x30, 0x09, /* mja changed from 0x64. */
+	0x01, 0x54, 0x00,
+	0x01, 0x31, 0x04,
+	0x01, 0x32, 0x03,
+	0x01, 0x40, 0x83,
+	0x01, 0x46, 0x25,
+	0x01, 0x60, 0x00,
+	0x01, 0x27, 0x00,
+	0x01, 0x50, 0x06,
+	0x01, 0x51, 0x00,
+	0x01, 0x52, 0x96,
+	0x01, 0x56, 0x08,
+	0x01, 0x57, 0x30,
+	0x01, 0x61, 0x00,
+	0x01, 0x62, 0x00,
+	0x01, 0x64, 0x00,
+	0x01, 0x65, 0x00,
+	0x01, 0x66, 0xa0,
+
+	0x01, 0xFF, 0x01,
+	0x01, 0x22, 0x32,
+	0x01, 0x47, 0x14,
+	0x01, 0x49, 0xff,
+	0x01, 0x4a, 0x00,
+
+	0x01, 0xFF, 0x00,
+	0x01, 0x7a, 0x0a,
+	0x01, 0x7b, 0x00,
+	0x01, 0x78, 0x21,
+
+	0x01, 0xFF, 0x01,
+	0x01, 0x23, 0x34,
+	0x01, 0x42, 0x00,
+	0x01, 0x44, 0xff,
+	0x01, 0x45, 0x26,
+	0x01, 0x46, 0x05,
+	0x01, 0x40, 0x40,
+	0x01, 0x0E, 0x06,
+	0x01, 0x20, 0x1a,
+	0x01, 0x43, 0x40,
+
+	0x01, 0xFF, 0x00,
+	0x01, 0x34, 0x03,
+	0x01, 0x35, 0x44,
+
+	0x01, 0xFF, 0x01,
+	0x01, 0x31, 0x04,
+	0x01, 0x4b, 0x09,
+	0x01, 0x4c, 0x05,
+	0x01, 0x4d, 0x04,
+
+
+	0x01, 0xFF, 0x00,
+	0x01, 0x44, 0x00,
+	0x01, 0x45, 0x20,
+	0x01, 0x47, 0x08,
+	0x01, 0x48, 0x28,
+	0x01, 0x67, 0x00,
+	0x01, 0x70, 0x04,
+	0x01, 0x71, 0x01,
+	0x01, 0x72, 0xfe,
+	0x01, 0x76, 0x00,
+	0x01, 0x77, 0x00,
+
+	0x01, 0xFF, 0x01,
+	0x01, 0x0d, 0x01,
+
+	0x01, 0xFF, 0x00,
+	0x01, 0x80, 0x01,
+	0x01, 0x01, 0xF8,
+
+	0x01, 0xFF, 0x01,
+	0x01, 0x8e, 0x01,
+	0x01, 0x00, 0x01,
+	0x01, 0xFF, 0x00,
+	0x01, 0x80, 0x00,
+
+	0x00, 0x00, 0x00
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _VL_TUNING_H_ */
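DefaultTuningSettings, like the InterruptThresholdSettings table earlier, is a flat list of {byte count, register index, data byte} entries terminated by an all-zero entry; it is consumed by VL_load_tuning_settings() in the API core, which is not part of this hunk. The loop below is only a sketch of how such a table could be walked, under the assumption that every entry carries exactly one data byte, as all entries in these tables do:

/* Sketch only: write each {0x01, index, data} entry until the terminator. */
static int8_t example_load_table(struct vl_data *Dev, const uint8_t *tbl)
{
	int8_t Status = VL_ERROR_NONE;

	while (tbl[0] == 0x01 && Status == VL_ERROR_NONE) {
		Status = VL_WrByte(Dev, tbl[1], tbl[2]);
		tbl += 3;
	}
	return Status;	/* a leading 0x00 marks the end of the table */
}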
diff --git a/drivers/input/misc/vl53l0x/inc/vl53l0x_types.h b/drivers/input/misc/vl53l0x/inc/vl53l0x_types.h
new file mode 100644
index 0000000..be40e83
--- /dev/null
+++ b/drivers/input/misc/vl53l0x/inc/vl53l0x_types.h
@@ -0,0 +1,54 @@
+/*
+ *  vl53l0x_types.h - Linux kernel modules for
+ *  STM VL53L0 FlightSense TOF sensor
+ *
+ *  Copyright (C) 2016 STMicroelectronics Imaging Division.
+ *  Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#ifndef VL_TYPES_H_
+#define VL_TYPES_H_
+
+#include <linux/types.h>
+
+#ifndef NULL
+#error "TODO review  NULL definition or add required include "
+#define NULL 0
+#endif
+
+#if !defined(STDINT_H) &&  !defined(_GCC_STDINT_H) \
+	&& !defined(_STDINT_H) && !defined(_LINUX_TYPES_H)
+
+#pragma message( \
+"Review type definition of STDINT define for your platform and add to above")
+
+/*
+ *  The target platform does not provide stdint or uses a different #define
+ *  than those above. To avoid seeing the message above, adapt the #define
+ *  list above or implement all the types and delete this pragma.
+ */
+
+typedef unsigned int uint32_t;
+typedef int int32_t;
+
+typedef unsigned short uint16_t;
+typedef short int16_t;
+
+typedef unsigned char uint8_t;
+
+typedef signed char int8_t;
+
+
+#endif /* _STDINT_H */
+
+#endif /* VL_TYPES_H_ */
diff --git a/drivers/input/misc/vl53l0x/src/vl53l0x_api.c b/drivers/input/misc/vl53l0x/src/vl53l0x_api.c
new file mode 100644
index 0000000..888947f
--- /dev/null
+++ b/drivers/input/misc/vl53l0x/src/vl53l0x_api.c
@@ -0,0 +1,3096 @@
+/*
+ *  vl53l0x_api.c - Linux kernel modules for
+ *  STM VL53L0 FlightSense TOF sensor
+ *
+ *  Copyright (C) 2016 STMicroelectronics Imaging Division.
+ *  Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#include "vl53l0x_api.h"
+#include "vl53l0x_tuning.h"
+#include "vl53l0x_interrupt_threshold_settings.h"
+#include "vl53l0x_api_core.h"
+#include "vl53l0x_api_calibration.h"
+#include "vl53l0x_api_strings.h"
+
+#ifndef __KERNEL__
+#include <stdlib.h>
+#endif
+#define LOG_FUNCTION_START(fmt, ...) \
+	_LOG_FUNCTION_START(TRACE_MODULE_API, fmt, ##__VA_ARGS__)
+#define LOG_FUNCTION_END(status, ...) \
+	_LOG_FUNCTION_END(TRACE_MODULE_API, status, ##__VA_ARGS__)
+#define LOG_FUNCTION_END_FMT(status, fmt, ...) \
+	_LOG_FUNCTION_END_FMT(TRACE_MODULE_API, status, fmt, ##__VA_ARGS__)
+
+#ifdef VL_LOG_ENABLE
+#define trace_print(level, ...) trace_print_module_function(TRACE_MODULE_API, \
+	level, TRACE_FUNCTION_NONE, ##__VA_ARGS__)
+#endif
+
+/* Group PAL General Functions */
+
+int8_t VL_GetVersion(struct VL_Version_t *pVersion)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	pVersion->major = VL_IMPLEMENTATION_VER_MAJOR;
+	pVersion->minor = VL_IMPLEMENTATION_VER_MINOR;
+	pVersion->build = VL_IMPLEMENTATION_VER_SUB;
+
+	pVersion->revision = VL_IMPLEMENTATION_VER_REVISION;
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetPalSpecVersion(struct VL_Version_t *pPalSpecVersion)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	pPalSpecVersion->major = VL_SPECIFICATION_VER_MAJOR;
+	pPalSpecVersion->minor = VL_SPECIFICATION_VER_MINOR;
+	pPalSpecVersion->build = VL_SPECIFICATION_VER_SUB;
+
+	pPalSpecVersion->revision = VL_SPECIFICATION_VER_REVISION;
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetProductRevision(struct vl_data *Dev,
+	uint8_t *pProductRevisionMajor, uint8_t *pProductRevisionMinor)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t revision_id;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_RdByte(Dev, VL_REG_IDENTIFICATION_REVISION_ID,
+		&revision_id);
+	*pProductRevisionMajor = 1;
+	*pProductRevisionMinor = (revision_id & 0xF0) >> 4;
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+
+}
+
+int8_t VL_GetDeviceInfo(struct vl_data *Dev,
+	struct VL_DeviceInfo_t *pVL_DeviceInfo)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_get_device_info(Dev, pVL_DeviceInfo);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetDeviceErrorStatus(struct vl_data *Dev,
+	uint8_t *pDeviceErrorStatus)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t RangeStatus;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_RdByte(Dev, VL_REG_RESULT_RANGE_STATUS,
+		&RangeStatus);
+
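+	/* The device error code occupies bits [6:3] of the range status byte,
+	 * hence the 0x78 mask and the shift by three below.
+	 */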
+	*pDeviceErrorStatus = (uint8_t)((RangeStatus & 0x78) >> 3);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+
+int8_t VL_GetDeviceErrorString(uint8_t ErrorCode,
+	char *pDeviceErrorString)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_get_device_error_string(ErrorCode, pDeviceErrorString);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetRangeStatusString(uint8_t RangeStatus,
+	char *pRangeStatusString)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_get_range_status_string(RangeStatus,
+		pRangeStatusString);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetPalErrorString(int8_t PalErrorCode,
+	char *pPalErrorString)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_get_pal_error_string(PalErrorCode, pPalErrorString);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetPalStateString(uint8_t PalStateCode,
+	char *pPalStateString)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_get_pal_state_string(PalStateCode, pPalStateString);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetPalState(struct vl_data *Dev, uint8_t *pPalState)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	*pPalState = PALDevDataGet(Dev, PalState);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_SetPowerMode(struct vl_data *Dev,
+	uint8_t PowerMode)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	/* Only level1 of Power mode exists */
+	if ((PowerMode != VL_POWERMODE_STANDBY_LEVEL1)
+		&& (PowerMode != VL_POWERMODE_IDLE_LEVEL1)) {
+		Status = VL_ERROR_MODE_NOT_SUPPORTED;
+	} else if (PowerMode == VL_POWERMODE_STANDBY_LEVEL1) {
+		/* set the standby level1 of power mode */
+		Status = VL_WrByte(Dev, 0x80, 0x00);
+		if (Status == VL_ERROR_NONE) {
+			/* Set PAL State to standby */
+			PALDevDataSet(Dev, PalState, VL_STATE_STANDBY);
+			PALDevDataSet(Dev, PowerMode,
+				VL_POWERMODE_STANDBY_LEVEL1);
+		}
+
+	} else {
+		/* VL_POWERMODE_IDLE_LEVEL1 */
+		Status = VL_WrByte(Dev, 0x80, 0x00);
+		if (Status == VL_ERROR_NONE)
+			Status = VL_StaticInit(Dev);
+
+		if (Status == VL_ERROR_NONE)
+			PALDevDataSet(Dev, PowerMode,
+				VL_POWERMODE_IDLE_LEVEL1);
+
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetPowerMode(struct vl_data *Dev,
+	uint8_t *pPowerMode)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t Byte;
+
+	LOG_FUNCTION_START("");
+
+	/* Only level1 of Power mode exists */
+	Status = VL_RdByte(Dev, 0x80, &Byte);
+
+	if (Status == VL_ERROR_NONE) {
+		if (Byte == 1) {
+			PALDevDataSet(Dev, PowerMode,
+				VL_POWERMODE_IDLE_LEVEL1);
+		} else {
+			PALDevDataSet(Dev, PowerMode,
+				VL_POWERMODE_STANDBY_LEVEL1);
+		}
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_SetOffsetCalibrationDataMicroMeter(struct vl_data *Dev,
+	int32_t OffsetCalibrationDataMicroMeter)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_set_offset_calibration_data_micro_meter(Dev,
+		OffsetCalibrationDataMicroMeter);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetOffsetCalibrationDataMicroMeter(struct vl_data *Dev,
+	int32_t *pOffsetCalibrationDataMicroMeter)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_get_offset_calibration_data_micro_meter(Dev,
+		pOffsetCalibrationDataMicroMeter);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_SetLinearityCorrectiveGain(struct vl_data *Dev,
+	int16_t LinearityCorrectiveGain)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	if ((LinearityCorrectiveGain < 0) || (LinearityCorrectiveGain > 1000))
+		Status = VL_ERROR_INVALID_PARAMS;
+	else {
+		PALDevDataSet(Dev, LinearityCorrectiveGain,
+			LinearityCorrectiveGain);
+
+		if (LinearityCorrectiveGain != 1000) {
+			/* Disable FW Xtalk */
+			Status = VL_WrWord(Dev,
+			VL_REG_CROSSTALK_COMPENSATION_PEAK_RATE_MCPS, 0);
+		}
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetLinearityCorrectiveGain(struct vl_data *Dev,
+	uint16_t *pLinearityCorrectiveGain)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	*pLinearityCorrectiveGain = PALDevDataGet(Dev, LinearityCorrectiveGain);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_SetGroupParamHold(struct vl_data *Dev, uint8_t GroupParamHold)
+{
+	int8_t Status = VL_ERROR_NOT_IMPLEMENTED;
+
+	LOG_FUNCTION_START("");
+
+	/* not implemented on VL53L0X */
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetUpperLimitMilliMeter(struct vl_data *Dev,
+	uint16_t *pUpperLimitMilliMeter)
+{
+	int8_t Status = VL_ERROR_NOT_IMPLEMENTED;
+
+	LOG_FUNCTION_START("");
+
+	/* not implemented on VL53L0X */
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetTotalSignalRate(struct vl_data *Dev,
+	unsigned int *pTotalSignalRate)
+{
+	int8_t Status = VL_ERROR_NONE;
+	struct VL_RangingMeasurementData_t LastRangeDataBuffer;
+
+	LOG_FUNCTION_START("");
+
+	LastRangeDataBuffer = PALDevDataGet(Dev, LastRangeMeasure);
+
+	Status = VL_get_total_signal_rate(
+		Dev, &LastRangeDataBuffer, pTotalSignalRate);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+/* End Group PAL General Functions */
+
+/* Group PAL Init Functions */
+int8_t VL_SetDeviceAddress(struct vl_data *Dev, uint8_t DeviceAddress)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
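+	/* The register stores a 7-bit address, so the 8-bit address passed in
+	 * is halved before the write.
+	 */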
+	Status = VL_WrByte(Dev, VL_REG_I2C_SLAVE_DEVICE_ADDRESS,
+		DeviceAddress / 2);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_DataInit(struct vl_data *Dev)
+{
+	int8_t Status = VL_ERROR_NONE;
+	struct VL_DeviceParameters_t CurrentParameters;
+	int i;
+	uint8_t StopVariable;
+
+	LOG_FUNCTION_START("");
+
+	/* By default the I2C is running at 1V8. If you want to change it, */
+	/* you need to include this define at compilation level. */
+#ifdef USE_I2C_2V8
+	Status = VL_UpdateByte(Dev,
+		VL_REG_VHV_CONFIG_PAD_SCL_SDA__EXTSUP_HV,
+		0xFE,
+		0x01);
+#endif
+
+	/* Set I2C standard mode */
+	if (Status == VL_ERROR_NONE)
+		Status = VL_WrByte(Dev, 0x88, 0x00);
+
+	VL_SETDEVICESPECIFICPARAMETER(Dev, ReadDataFromDeviceDone, 0);
+
+#ifdef USE_IQC_STATION
+	if (Status == VL_ERROR_NONE)
+		Status = VL_apply_offset_adjustment(Dev);
+#endif
+
+	/* Default value is 1000 for Linearity Corrective Gain */
+	PALDevDataSet(Dev, LinearityCorrectiveGain, 1000);
+
+	/* Dmax default Parameter */
+	PALDevDataSet(Dev, DmaxCalRangeMilliMeter, 400);
+	PALDevDataSet(Dev, DmaxCalSignalRateRtnMegaCps,
+		(unsigned int)((0x00016B85))); /* 1.42 No Cover Glass*/
+
+	/* Set Default static parameters */
+	/* set first temporary value: 9.44MHz * 65536 = 618660 */
+	VL_SETDEVICESPECIFICPARAMETER(Dev, OscFrequencyMHz, 618660);
+
+	/* Set Default XTalkCompensationRateMegaCps to 0  */
+	VL_SETPARAMETERFIELD(Dev, XTalkCompensationRateMegaCps, 0);
+
+	/* Get default parameters */
+	Status = VL_GetDeviceParameters(Dev, &CurrentParameters);
+	if (Status == VL_ERROR_NONE) {
+		/* initialize PAL values */
+		CurrentParameters.DeviceMode = VL_DEVICEMODE_SINGLE_RANGING;
+		CurrentParameters.HistogramMode = VL_HISTOGRAMMODE_DISABLED;
+		PALDevDataSet(Dev, CurrentParameters, CurrentParameters);
+	}
+
+	/* Sigma estimator variable */
+	PALDevDataSet(Dev, SigmaEstRefArray, 100);
+	PALDevDataSet(Dev, SigmaEstEffPulseWidth, 900);
+	PALDevDataSet(Dev, SigmaEstEffAmbWidth, 500);
+	PALDevDataSet(Dev, targetRefRate, 0x0A00); /* 20 MCPS in 9:7 format */
+
+	/* Use internal default settings */
+	PALDevDataSet(Dev, UseInternalTuningSettings, 1);
+
+	Status |= VL_WrByte(Dev, 0x80, 0x01);
+	Status |= VL_WrByte(Dev, 0xFF, 0x01);
+	Status |= VL_WrByte(Dev, 0x00, 0x00);
+	Status |= VL_RdByte(Dev, 0x91, &StopVariable);
+	PALDevDataSet(Dev, StopVariable, StopVariable);
+	Status |= VL_WrByte(Dev, 0x00, 0x01);
+	Status |= VL_WrByte(Dev, 0xFF, 0x00);
+	Status |= VL_WrByte(Dev, 0x80, 0x00);
+
+	/* Enable all checks */
+	for (i = 0; i < VL_CHECKENABLE_NUMBER_OF_CHECKS; i++) {
+		if (Status == VL_ERROR_NONE)
+			Status |= VL_SetLimitCheckEnable(Dev, i, 1);
+		else
+			break;
+
+	}
+
+	/* Disable the following checks */
+	if (Status == VL_ERROR_NONE)
+		Status = VL_SetLimitCheckEnable(Dev,
+			VL_CHECKENABLE_SIGNAL_REF_CLIP, 0);
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_SetLimitCheckEnable(Dev,
+			VL_CHECKENABLE_RANGE_IGNORE_THRESHOLD, 0);
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_SetLimitCheckEnable(Dev,
+			VL_CHECKENABLE_SIGNAL_RATE_MSRC, 0);
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_SetLimitCheckEnable(Dev,
+			VL_CHECKENABLE_SIGNAL_RATE_PRE_RANGE, 0);
+
+	/* Limit default values */
+	if (Status == VL_ERROR_NONE) {
+		Status = VL_SetLimitCheckValue(Dev,
+			VL_CHECKENABLE_SIGMA_FINAL_RANGE,
+				(unsigned int)(18 * 65536));
+	}
+	if (Status == VL_ERROR_NONE) {
+		Status = VL_SetLimitCheckValue(Dev,
+			VL_CHECKENABLE_SIGNAL_RATE_FINAL_RANGE,
+				(unsigned int)(25 * 65536 / 100));
+				/* 0.25 * 65536 */
+	}
+
+	if (Status == VL_ERROR_NONE) {
+		Status = VL_SetLimitCheckValue(Dev,
+			VL_CHECKENABLE_SIGNAL_REF_CLIP,
+				(unsigned int)(35 * 65536));
+	}
+
+	if (Status == VL_ERROR_NONE) {
+		Status = VL_SetLimitCheckValue(Dev,
+			VL_CHECKENABLE_RANGE_IGNORE_THRESHOLD,
+				(unsigned int)(0 * 65536));
+	}
+
+	if (Status == VL_ERROR_NONE) {
+
+		PALDevDataSet(Dev, SequenceConfig, 0xFF);
+		Status = VL_WrByte(Dev, VL_REG_SYSTEM_SEQUENCE_CONFIG,
+			0xFF);
+
+		/* Set PAL state to tell that we are waiting for a call to */
+		/* VL_StaticInit */
+		PALDevDataSet(Dev, PalState, VL_STATE_WAIT_STATICINIT);
+	}
+
+	if (Status == VL_ERROR_NONE)
+		VL_SETDEVICESPECIFICPARAMETER(Dev, RefSpadsInitialised, 0);
+
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_SetTuningSettingBuffer(struct vl_data *Dev,
+	uint8_t *pTuningSettingBuffer, uint8_t UseInternalTuningSettings)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	if (UseInternalTuningSettings == 1) {
+		/* Force use internal settings */
+		PALDevDataSet(Dev, UseInternalTuningSettings, 1);
+	} else {
+
+		/* check that the first byte is not 0 */
+		if (*pTuningSettingBuffer != 0) {
+			PALDevDataSet(Dev, pTuningSettingsPointer,
+				pTuningSettingBuffer);
+			PALDevDataSet(Dev, UseInternalTuningSettings, 0);
+
+		} else {
+			Status = VL_ERROR_INVALID_PARAMS;
+		}
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetTuningSettingBuffer(struct vl_data *Dev,
+	uint8_t **ppTuningSettingBuffer, uint8_t *pUseInternalTuningSettings)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	*ppTuningSettingBuffer = PALDevDataGet(Dev, pTuningSettingsPointer);
+	*pUseInternalTuningSettings = PALDevDataGet(Dev,
+		UseInternalTuningSettings);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_StaticInit(struct vl_data *Dev)
+{
+	int8_t Status = VL_ERROR_NONE;
+	struct VL_DeviceParameters_t CurrentParameters = {0};
+	uint8_t *pTuningSettingBuffer;
+	uint16_t tempword = 0;
+	uint8_t tempbyte = 0;
+	uint8_t UseInternalTuningSettings = 0;
+	uint32_t count = 0;
+	uint8_t isApertureSpads = 0;
+	uint32_t refSpadCount = 0;
+	uint8_t ApertureSpads = 0;
+	uint8_t vcselPulsePeriodPCLK;
+	uint32_t seqTimeoutMicroSecs;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_get_info_from_device(Dev, 1);
+
+	/* set the ref spad from NVM */
+	count	= (uint32_t)VL_GETDEVICESPECIFICPARAMETER(Dev,
+		ReferenceSpadCount);
+	ApertureSpads = VL_GETDEVICESPECIFICPARAMETER(Dev,
+		ReferenceSpadType);
+
+	/* NVM value invalid */
+	if ((ApertureSpads > 1) ||
+		((ApertureSpads == 1) && (count > 32)) ||
+		((ApertureSpads == 0) && (count > 12)))
+		Status = VL_perform_ref_spad_management(Dev, &refSpadCount,
+			&isApertureSpads);
+	else
+		Status = VL_set_reference_spads(Dev, count, ApertureSpads);
+
+
+	/* Initialize tuning settings buffer to prevent compiler warning. */
+	pTuningSettingBuffer = DefaultTuningSettings;
+
+	if (Status == VL_ERROR_NONE) {
+		UseInternalTuningSettings = PALDevDataGet(Dev,
+			UseInternalTuningSettings);
+
+		if (UseInternalTuningSettings == 0)
+			pTuningSettingBuffer = PALDevDataGet(Dev,
+				pTuningSettingsPointer);
+		else
+			pTuningSettingBuffer = DefaultTuningSettings;
+
+	}
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_load_tuning_settings(Dev,
+			pTuningSettingBuffer);
+
+
+	/* Set interrupt config to new sample ready */
+	if (Status == VL_ERROR_NONE) {
+		Status = VL_SetGpioConfig(Dev, 0, 0,
+		VL_REG_SYSTEM_INTERRUPT_GPIO_NEW_SAMPLE_READY,
+		VL_INTERRUPTPOLARITY_LOW);
+	}
+
+	if (Status == VL_ERROR_NONE) {
+		Status = VL_WrByte(Dev, 0xFF, 0x01);
+		Status |= VL_RdWord(Dev, 0x84, &tempword);
+		Status |= VL_WrByte(Dev, 0xFF, 0x00);
+	}
+
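+	/* tempword holds the oscillator frequency read back in 4.12 fixed
+	 * point; widen it to the 16.16 representation used elsewhere.
+	 */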
+	if (Status == VL_ERROR_NONE) {
+		VL_SETDEVICESPECIFICPARAMETER(Dev, OscFrequencyMHz,
+			VL_FIXPOINT412TOFIXPOINT1616(tempword));
+	}
+
+	/* After static init, some device parameters may be changed, */
+	/* so update them */
+	if (Status == VL_ERROR_NONE)
+		Status = VL_GetDeviceParameters(Dev, &CurrentParameters);
+
+
+	if (Status == VL_ERROR_NONE) {
+		Status = VL_GetFractionEnable(Dev, &tempbyte);
+		if (Status == VL_ERROR_NONE)
+			PALDevDataSet(Dev, RangeFractionalEnable, tempbyte);
+
+	}
+
+	if (Status == VL_ERROR_NONE)
+		PALDevDataSet(Dev, CurrentParameters, CurrentParameters);
+
+
+	/* read the sequence config and save it */
+	if (Status == VL_ERROR_NONE) {
+		Status = VL_RdByte(Dev,
+		VL_REG_SYSTEM_SEQUENCE_CONFIG, &tempbyte);
+		if (Status == VL_ERROR_NONE)
+			PALDevDataSet(Dev, SequenceConfig, tempbyte);
+
+	}
+
+	/* Disable MSRC and TCC by default */
+	if (Status == VL_ERROR_NONE)
+		Status = VL_SetSequenceStepEnable(Dev,
+					VL_SEQUENCESTEP_TCC, 0);
+
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_SetSequenceStepEnable(Dev,
+		VL_SEQUENCESTEP_MSRC, 0);
+
+
+	/* Set PAL State to standby */
+	if (Status == VL_ERROR_NONE)
+		PALDevDataSet(Dev, PalState, VL_STATE_IDLE);
+
+
+
+	/* Store pre-range vcsel period */
+	if (Status == VL_ERROR_NONE) {
+		Status = VL_GetVcselPulsePeriod(
+			Dev,
+			VL_VCSEL_PERIOD_PRE_RANGE,
+			&vcselPulsePeriodPCLK);
+	}
+
+	if (Status == VL_ERROR_NONE) {
+		VL_SETDEVICESPECIFICPARAMETER(
+		Dev, PreRangeVcselPulsePeriod,
+		vcselPulsePeriodPCLK);
+	}
+
+	/* Store final-range vcsel period */
+	if (Status == VL_ERROR_NONE) {
+		Status = VL_GetVcselPulsePeriod(
+			Dev,
+			VL_VCSEL_PERIOD_FINAL_RANGE,
+			&vcselPulsePeriodPCLK);
+	}
+
+	if (Status == VL_ERROR_NONE) {
+		VL_SETDEVICESPECIFICPARAMETER(
+		Dev, FinalRangeVcselPulsePeriod,
+		vcselPulsePeriodPCLK);
+	}
+
+	/* Store pre-range timeout */
+	if (Status == VL_ERROR_NONE) {
+		Status = get_sequence_step_timeout(
+			Dev,
+			VL_SEQUENCESTEP_PRE_RANGE,
+			&seqTimeoutMicroSecs);
+	}
+
+	if (Status == VL_ERROR_NONE) {
+		VL_SETDEVICESPECIFICPARAMETER(
+			Dev,
+			PreRangeTimeoutMicroSecs,
+			seqTimeoutMicroSecs);
+	}
+
+	/* Store final-range timeout */
+	if (Status == VL_ERROR_NONE) {
+		Status = get_sequence_step_timeout(
+			Dev,
+			VL_SEQUENCESTEP_FINAL_RANGE,
+			&seqTimeoutMicroSecs);
+	}
+
+	if (Status == VL_ERROR_NONE) {
+		VL_SETDEVICESPECIFICPARAMETER(
+			Dev,
+			FinalRangeTimeoutMicroSecs,
+			seqTimeoutMicroSecs);
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_WaitDeviceBooted(struct vl_data *Dev)
+{
+	int8_t Status = VL_ERROR_NOT_IMPLEMENTED;
+
+	LOG_FUNCTION_START("");
+
+	/* not implemented on VL53L0X */
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_ResetDevice(struct vl_data *Dev)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t Byte;
+
+	LOG_FUNCTION_START("");
+
+	/* Set reset bit */
+	Status = VL_WrByte(Dev, VL_REG_SOFT_RESET_GO2_SOFT_RESET_N,
+		0x00);
+
+	/* Wait until the device acknowledges the reset (model ID reads 0) */
+	if (Status == VL_ERROR_NONE) {
+		do {
+			Status = VL_RdByte(Dev,
+			VL_REG_IDENTIFICATION_MODEL_ID, &Byte);
+		} while (Byte != 0x00);
+	}
+
+	VL_PollingDelay(Dev);
+
+	/* Release reset */
+	Status = VL_WrByte(Dev, VL_REG_SOFT_RESET_GO2_SOFT_RESET_N,
+		0x01);
+
+	/* Wait until correct boot-up of the device */
+	if (Status == VL_ERROR_NONE) {
+		do {
+			Status = VL_RdByte(Dev,
+			VL_REG_IDENTIFICATION_MODEL_ID, &Byte);
+		} while (Byte == 0x00);
+	}
+
+	VL_PollingDelay(Dev);
+
+	/* Set PAL State to VL_STATE_POWERDOWN */
+	if (Status == VL_ERROR_NONE)
+		PALDevDataSet(Dev, PalState, VL_STATE_POWERDOWN);
+
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+/* End Group PAL Init Functions */
+
+/* Group PAL Parameters Functions */
+int8_t VL_SetDeviceParameters(struct vl_data *Dev,
+	const struct VL_DeviceParameters_t *pDeviceParameters)
+{
+	int8_t Status = VL_ERROR_NONE;
+	int i;
+
+	LOG_FUNCTION_START("");
+	Status = VL_SetDeviceMode(Dev, pDeviceParameters->DeviceMode);
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_SetInterMeasurementPeriodMilliSeconds(Dev,
+			pDeviceParameters->InterMeasurementPeriodMilliSeconds);
+
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_SetXTalkCompensationRateMegaCps(Dev,
+			pDeviceParameters->XTalkCompensationRateMegaCps);
+
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_SetOffsetCalibrationDataMicroMeter(Dev,
+			pDeviceParameters->RangeOffsetMicroMeters);
+
+
+	for (i = 0; i < VL_CHECKENABLE_NUMBER_OF_CHECKS; i++) {
+		if (Status == VL_ERROR_NONE)
+			Status |= VL_SetLimitCheckEnable(Dev, i,
+				pDeviceParameters->LimitChecksEnable[i]);
+		else
+			break;
+
+		if (Status == VL_ERROR_NONE)
+			Status |= VL_SetLimitCheckValue(Dev, i,
+				pDeviceParameters->LimitChecksValue[i]);
+		else
+			break;
+
+	}
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_SetWrapAroundCheckEnable(Dev,
+			pDeviceParameters->WrapAroundCheckEnable);
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_SetMeasurementTimingBudgetMicroSeconds(Dev,
+			pDeviceParameters->MeasurementTimingBudgetMicroSeconds);
+
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetDeviceParameters(struct vl_data *Dev,
+	struct VL_DeviceParameters_t *pDeviceParameters)
+{
+	int8_t Status = VL_ERROR_NONE;
+	int i;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_GetDeviceMode(Dev, &(pDeviceParameters->DeviceMode));
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_GetInterMeasurementPeriodMilliSeconds(Dev,
+		&(pDeviceParameters->InterMeasurementPeriodMilliSeconds));
+
+
+	if (Status == VL_ERROR_NONE)
+		pDeviceParameters->XTalkCompensationEnable = 0;
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_GetXTalkCompensationRateMegaCps(Dev,
+			&(pDeviceParameters->XTalkCompensationRateMegaCps));
+
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_GetOffsetCalibrationDataMicroMeter(Dev,
+			&(pDeviceParameters->RangeOffsetMicroMeters));
+
+
+	if (Status == VL_ERROR_NONE) {
+		for (i = 0; i < VL_CHECKENABLE_NUMBER_OF_CHECKS; i++) {
+			/* Get the values first, then the enables:
+			 * VL_GetLimitCheckValue modifies the enable flags.
+			 */
+			if (Status == VL_ERROR_NONE) {
+				Status |= VL_GetLimitCheckValue(Dev, i,
+				&(pDeviceParameters->LimitChecksValue[i]));
+			} else {
+				break;
+			}
+			if (Status == VL_ERROR_NONE) {
+				Status |= VL_GetLimitCheckEnable(Dev, i,
+				&(pDeviceParameters->LimitChecksEnable[i]));
+			} else {
+				break;
+			}
+		}
+	}
+
+	if (Status == VL_ERROR_NONE) {
+		Status = VL_GetWrapAroundCheckEnable(Dev,
+			&(pDeviceParameters->WrapAroundCheckEnable));
+	}
+
+	/* This needs to be done last, as it uses VCSELPulsePeriod */
+	if (Status == VL_ERROR_NONE) {
+		Status = VL_GetMeasurementTimingBudgetMicroSeconds(Dev,
+		&(pDeviceParameters->MeasurementTimingBudgetMicroSeconds));
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_SetDeviceMode(struct vl_data *Dev,
+	uint8_t DeviceMode)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("%d", (int)DeviceMode);
+
+	switch (DeviceMode) {
+	case VL_DEVICEMODE_SINGLE_RANGING:
+	case VL_DEVICEMODE_CONTINUOUS_RANGING:
+	case VL_DEVICEMODE_CONTINUOUS_TIMED_RANGING:
+	case VL_DEVICEMODE_GPIO_DRIVE:
+	case VL_DEVICEMODE_GPIO_OSC:
+		/* Supported modes */
+		VL_SETPARAMETERFIELD(Dev, DeviceMode, DeviceMode);
+		break;
+	default:
+		/* Unsupported mode */
+		Status = VL_ERROR_MODE_NOT_SUPPORTED;
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetDeviceMode(struct vl_data *Dev,
+	uint8_t *pDeviceMode)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	VL_GETPARAMETERFIELD(Dev, DeviceMode, *pDeviceMode);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_SetRangeFractionEnable(struct vl_data *Dev,	uint8_t Enable)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("%d", (int)Enable);
+
+	Status = VL_WrByte(Dev, VL_REG_SYSTEM_RANGE_CONFIG, Enable);
+
+	if (Status == VL_ERROR_NONE)
+		PALDevDataSet(Dev, RangeFractionalEnable, Enable);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetFractionEnable(struct vl_data *Dev, uint8_t *pEnabled)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_RdByte(Dev, VL_REG_SYSTEM_RANGE_CONFIG, pEnabled);
+
+	if (Status == VL_ERROR_NONE)
+		*pEnabled = (*pEnabled & 1);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_SetHistogramMode(struct vl_data *Dev,
+	uint8_t HistogramMode)
+{
+	int8_t Status = VL_ERROR_NOT_IMPLEMENTED;
+
+	LOG_FUNCTION_START("");
+
+	/* not implemented on VL53L0X */
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetHistogramMode(struct vl_data *Dev,
+	uint8_t *pHistogramMode)
+{
+	int8_t Status = VL_ERROR_NOT_IMPLEMENTED;
+
+	LOG_FUNCTION_START("");
+
+	/* not implemented on VL53L0X */
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_SetMeasurementTimingBudgetMicroSeconds(struct vl_data *Dev,
+	uint32_t MeasurementTimingBudgetMicroSeconds)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_set_measurement_timing_budget_micro_seconds(Dev,
+		MeasurementTimingBudgetMicroSeconds);
+
+	LOG_FUNCTION_END(Status);
+
+	return Status;
+}
+
+int8_t VL_GetMeasurementTimingBudgetMicroSeconds(struct vl_data *Dev,
+	uint32_t *pMeasurementTimingBudgetMicroSeconds)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_get_measurement_timing_budget_micro_seconds(Dev,
+		pMeasurementTimingBudgetMicroSeconds);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_SetVcselPulsePeriod(struct vl_data *Dev,
+	uint8_t VcselPeriodType, uint8_t VCSELPulsePeriodPCLK)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_set_vcsel_pulse_period(Dev, VcselPeriodType,
+		VCSELPulsePeriodPCLK);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetVcselPulsePeriod(struct vl_data *Dev,
+	uint8_t VcselPeriodType, uint8_t *pVCSELPulsePeriodPCLK)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_get_vcsel_pulse_period(Dev, VcselPeriodType,
+		pVCSELPulsePeriodPCLK);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_SetSequenceStepEnable(struct vl_data *Dev,
+	uint8_t SequenceStepId, uint8_t SequenceStepEnabled)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t SequenceConfig = 0;
+	uint8_t SequenceConfigNew = 0;
+	uint32_t MeasurementTimingBudgetMicroSeconds;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_RdByte(Dev, VL_REG_SYSTEM_SEQUENCE_CONFIG,
+		&SequenceConfig);
+
+	SequenceConfigNew = SequenceConfig;
+
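+	/* SYSTEM_SEQUENCE_CONFIG bit usage, per the masks below: bit 2
+	 * MSRC, bit 3 (and bit 5) DSS, bit 4 TCC, bit 6 pre-range,
+	 * bit 7 final range.
+	 */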
+	if (Status == VL_ERROR_NONE) {
+		if (SequenceStepEnabled == 1) {
+
+			/* Enable requested sequence step
+			 */
+			switch (SequenceStepId) {
+			case VL_SEQUENCESTEP_TCC:
+				SequenceConfigNew |= 0x10;
+				break;
+			case VL_SEQUENCESTEP_DSS:
+				SequenceConfigNew |= 0x28;
+				break;
+			case VL_SEQUENCESTEP_MSRC:
+				SequenceConfigNew |= 0x04;
+				break;
+			case VL_SEQUENCESTEP_PRE_RANGE:
+				SequenceConfigNew |= 0x40;
+				break;
+			case VL_SEQUENCESTEP_FINAL_RANGE:
+				SequenceConfigNew |= 0x80;
+				break;
+			default:
+				Status = VL_ERROR_INVALID_PARAMS;
+			}
+		} else {
+			/* Disable requested sequence step
+			 */
+			switch (SequenceStepId) {
+			case VL_SEQUENCESTEP_TCC:
+				SequenceConfigNew &= 0xef;
+				break;
+			case VL_SEQUENCESTEP_DSS:
+				SequenceConfigNew &= 0xd7;
+				break;
+			case VL_SEQUENCESTEP_MSRC:
+				SequenceConfigNew &= 0xfb;
+				break;
+			case VL_SEQUENCESTEP_PRE_RANGE:
+				SequenceConfigNew &= 0xbf;
+				break;
+			case VL_SEQUENCESTEP_FINAL_RANGE:
+				SequenceConfigNew &= 0x7f;
+				break;
+			default:
+				Status = VL_ERROR_INVALID_PARAMS;
+			}
+		}
+	}
+
+	if (SequenceConfigNew != SequenceConfig) {
+		/* Apply New Setting */
+		if (Status == VL_ERROR_NONE) {
+			Status = VL_WrByte(Dev,
+			VL_REG_SYSTEM_SEQUENCE_CONFIG, SequenceConfigNew);
+		}
+		if (Status == VL_ERROR_NONE)
+			PALDevDataSet(Dev, SequenceConfig, SequenceConfigNew);
+
+
+		/* Recalculate timing budget */
+		if (Status == VL_ERROR_NONE) {
+			VL_GETPARAMETERFIELD(Dev,
+				MeasurementTimingBudgetMicroSeconds,
+				MeasurementTimingBudgetMicroSeconds);
+
+			VL_SetMeasurementTimingBudgetMicroSeconds(Dev,
+				MeasurementTimingBudgetMicroSeconds);
+		}
+	}
+
+	LOG_FUNCTION_END(Status);
+
+	return Status;
+}
+
+int8_t sequence_step_enabled(struct vl_data *Dev,
+	uint8_t SequenceStepId, uint8_t SequenceConfig,
+	uint8_t *pSequenceStepEnabled)
+{
+	int8_t Status = VL_ERROR_NONE;
+	*pSequenceStepEnabled = 0;
+
+	LOG_FUNCTION_START("");
+
+	switch (SequenceStepId) {
+	case VL_SEQUENCESTEP_TCC:
+		*pSequenceStepEnabled = (SequenceConfig & 0x10) >> 4;
+		break;
+	case VL_SEQUENCESTEP_DSS:
+		*pSequenceStepEnabled = (SequenceConfig & 0x08) >> 3;
+		break;
+	case VL_SEQUENCESTEP_MSRC:
+		*pSequenceStepEnabled = (SequenceConfig & 0x04) >> 2;
+		break;
+	case VL_SEQUENCESTEP_PRE_RANGE:
+		*pSequenceStepEnabled = (SequenceConfig & 0x40) >> 6;
+		break;
+	case VL_SEQUENCESTEP_FINAL_RANGE:
+		*pSequenceStepEnabled = (SequenceConfig & 0x80) >> 7;
+		break;
+	default:
+		Status = VL_ERROR_INVALID_PARAMS;
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetSequenceStepEnable(struct vl_data *Dev,
+	uint8_t SequenceStepId, uint8_t *pSequenceStepEnabled)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t SequenceConfig = 0;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_RdByte(Dev, VL_REG_SYSTEM_SEQUENCE_CONFIG,
+		&SequenceConfig);
+
+	if (Status == VL_ERROR_NONE) {
+		Status = sequence_step_enabled(Dev, SequenceStepId,
+			SequenceConfig, pSequenceStepEnabled);
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetSequenceStepEnables(struct vl_data *Dev,
+	struct VL_SchedulerSequenceSteps_t *pSchedulerSequenceSteps)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t SequenceConfig = 0;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_RdByte(Dev, VL_REG_SYSTEM_SEQUENCE_CONFIG,
+		&SequenceConfig);
+
+	if (Status == VL_ERROR_NONE) {
+		Status = sequence_step_enabled(Dev,
+		VL_SEQUENCESTEP_TCC, SequenceConfig,
+			&pSchedulerSequenceSteps->TccOn);
+	}
+	if (Status == VL_ERROR_NONE) {
+		Status = sequence_step_enabled(Dev,
+		VL_SEQUENCESTEP_DSS, SequenceConfig,
+			&pSchedulerSequenceSteps->DssOn);
+	}
+	if (Status == VL_ERROR_NONE) {
+		Status = sequence_step_enabled(Dev,
+		VL_SEQUENCESTEP_MSRC, SequenceConfig,
+			&pSchedulerSequenceSteps->MsrcOn);
+	}
+	if (Status == VL_ERROR_NONE) {
+		Status = sequence_step_enabled(Dev,
+		VL_SEQUENCESTEP_PRE_RANGE, SequenceConfig,
+			&pSchedulerSequenceSteps->PreRangeOn);
+	}
+	if (Status == VL_ERROR_NONE) {
+		Status = sequence_step_enabled(Dev,
+		VL_SEQUENCESTEP_FINAL_RANGE, SequenceConfig,
+			&pSchedulerSequenceSteps->FinalRangeOn);
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetNumberOfSequenceSteps(struct vl_data *Dev,
+	uint8_t *pNumberOfSequenceSteps)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	*pNumberOfSequenceSteps = VL_SEQUENCESTEP_NUMBER_OF_CHECKS;
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetSequenceStepsInfo(
+	uint8_t SequenceStepId, char *pSequenceStepsString)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_get_sequence_steps_info(
+			SequenceStepId,
+			pSequenceStepsString);
+
+	LOG_FUNCTION_END(Status);
+
+	return Status;
+}
+
+int8_t VL_SetSequenceStepTimeout(struct vl_data *Dev,
+	uint8_t SequenceStepId, unsigned int TimeOutMilliSecs)
+{
+	int8_t Status = VL_ERROR_NONE;
+	int8_t Status1 = VL_ERROR_NONE;
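+	/* TimeOutMilliSecs is treated as 16.16 fixed point milliseconds;
+	 * convert it to integer microseconds with rounding.
+	 */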
+	uint32_t TimeoutMicroSeconds = ((TimeOutMilliSecs * 1000) + 0x8000)
+		>> 16;
+	uint32_t MeasurementTimingBudgetMicroSeconds;
+	unsigned int OldTimeOutMicroSeconds;
+
+	LOG_FUNCTION_START("");
+
+	/* Read back the current value in case we need to revert to it. */
+	Status = get_sequence_step_timeout(Dev, SequenceStepId,
+		&OldTimeOutMicroSeconds);
+
+	if (Status == VL_ERROR_NONE) {
+		Status = set_sequence_step_timeout(Dev, SequenceStepId,
+			TimeoutMicroSeconds);
+	}
+
+	if (Status == VL_ERROR_NONE) {
+		VL_GETPARAMETERFIELD(Dev,
+			MeasurementTimingBudgetMicroSeconds,
+			MeasurementTimingBudgetMicroSeconds);
+
+		/* At this point we don't know whether the requested value */
+		/* is valid, so update the entire timing budget and, if */
+		/* that fails, revert to the previous value. */
+		Status = VL_SetMeasurementTimingBudgetMicroSeconds(Dev,
+			MeasurementTimingBudgetMicroSeconds);
+
+		if (Status != VL_ERROR_NONE) {
+			Status1 = set_sequence_step_timeout(Dev, SequenceStepId,
+				OldTimeOutMicroSeconds);
+
+			if (Status1 == VL_ERROR_NONE) {
+				Status1 =
+				VL_SetMeasurementTimingBudgetMicroSeconds(
+					Dev,
+					MeasurementTimingBudgetMicroSeconds);
+			}
+
+			Status = Status1;
+		}
+	}
+
+	LOG_FUNCTION_END(Status);
+
+	return Status;
+}
+
+int8_t VL_GetSequenceStepTimeout(struct vl_data *Dev,
+	uint8_t SequenceStepId,
+	unsigned int *pTimeOutMilliSecs)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint32_t TimeoutMicroSeconds;
+
+	LOG_FUNCTION_START("");
+
+	Status = get_sequence_step_timeout(Dev, SequenceStepId,
+		&TimeoutMicroSeconds);
+	if (Status == VL_ERROR_NONE) {
+		TimeoutMicroSeconds <<= 8;
+		*pTimeOutMilliSecs = (TimeoutMicroSeconds + 500)/1000;
+		*pTimeOutMilliSecs <<= 8;
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_SetInterMeasurementPeriodMilliSeconds(struct vl_data *Dev,
+	uint32_t InterMeasurementPeriodMilliSeconds)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint16_t osc_calibrate_val;
+	uint32_t IMPeriodMilliSeconds;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_RdWord(Dev, VL_REG_OSC_CALIBRATE_VAL,
+		&osc_calibrate_val);
+
+	if (Status == VL_ERROR_NONE) {
+		if (osc_calibrate_val != 0) {
+			IMPeriodMilliSeconds =
+				InterMeasurementPeriodMilliSeconds
+					* osc_calibrate_val;
+		} else {
+			IMPeriodMilliSeconds =
+				InterMeasurementPeriodMilliSeconds;
+		}
+		Status = VL_WrDWord(Dev,
+		VL_REG_SYSTEM_INTERMEASUREMENT_PERIOD,
+			IMPeriodMilliSeconds);
+	}
+
+	if (Status == VL_ERROR_NONE) {
+		VL_SETPARAMETERFIELD(Dev,
+			InterMeasurementPeriodMilliSeconds,
+			InterMeasurementPeriodMilliSeconds);
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetInterMeasurementPeriodMilliSeconds(struct vl_data *Dev,
+	uint32_t *pInterMeasurementPeriodMilliSeconds)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint16_t osc_calibrate_val;
+	uint32_t IMPeriodMilliSeconds;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_RdWord(Dev, VL_REG_OSC_CALIBRATE_VAL,
+		&osc_calibrate_val);
+
+	if (Status == VL_ERROR_NONE) {
+		Status = VL_RdDWord(Dev,
+		VL_REG_SYSTEM_INTERMEASUREMENT_PERIOD,
+			&IMPeriodMilliSeconds);
+	}
+
+	if (Status == VL_ERROR_NONE) {
+		if (osc_calibrate_val != 0) {
+			*pInterMeasurementPeriodMilliSeconds =
+				IMPeriodMilliSeconds / osc_calibrate_val;
+		}
+		VL_SETPARAMETERFIELD(Dev,
+			InterMeasurementPeriodMilliSeconds,
+			*pInterMeasurementPeriodMilliSeconds);
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_SetXTalkCompensationEnable(struct vl_data *Dev,
+	uint8_t XTalkCompensationEnable)
+{
+	int8_t Status = VL_ERROR_NONE;
+	unsigned int TempFix1616;
+	uint16_t LinearityCorrectiveGain;
+
+	LOG_FUNCTION_START("");
+
+	LinearityCorrectiveGain = PALDevDataGet(Dev, LinearityCorrectiveGain);
+
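+	/* The compensation rate is only written to the device when
+	 * compensation is enabled and the linearity corrective gain is at
+	 * its default of 1000; otherwise 0 is written.
+	 */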
+	if ((XTalkCompensationEnable == 0)
+		|| (LinearityCorrectiveGain != 1000)) {
+		TempFix1616 = 0;
+	} else {
+		VL_GETPARAMETERFIELD(Dev, XTalkCompensationRateMegaCps,
+			TempFix1616);
+	}
+
+	/* the following register uses a 3.13 fixed-point format */
+	Status = VL_WrWord(Dev,
+	VL_REG_CROSSTALK_COMPENSATION_PEAK_RATE_MCPS,
+		VL_FIXPOINT1616TOFIXPOINT313(TempFix1616));
+
+	if (Status == VL_ERROR_NONE) {
+		if (XTalkCompensationEnable == 0) {
+			VL_SETPARAMETERFIELD(Dev, XTalkCompensationEnable,
+				0);
+		} else {
+			VL_SETPARAMETERFIELD(Dev, XTalkCompensationEnable,
+				1);
+		}
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetXTalkCompensationEnable(struct vl_data *Dev,
+	uint8_t *pXTalkCompensationEnable)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t Temp8;
+
+	LOG_FUNCTION_START("");
+
+	VL_GETPARAMETERFIELD(Dev, XTalkCompensationEnable, Temp8);
+	*pXTalkCompensationEnable = Temp8;
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_SetXTalkCompensationRateMegaCps(struct vl_data *Dev,
+	unsigned int XTalkCompensationRateMegaCps)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t Temp8;
+	uint16_t LinearityCorrectiveGain;
+	uint16_t data;
+
+	LOG_FUNCTION_START("");
+
+	VL_GETPARAMETERFIELD(Dev, XTalkCompensationEnable, Temp8);
+	LinearityCorrectiveGain = PALDevDataGet(Dev, LinearityCorrectiveGain);
+
+	if (Temp8 == 0) { /* disabled: only update the internal value */
+		VL_SETPARAMETERFIELD(Dev, XTalkCompensationRateMegaCps,
+			XTalkCompensationRateMegaCps);
+	} else {
+		/* the following register uses a 3.13 fixed-point format */
+		if (LinearityCorrectiveGain == 1000) {
+			data = VL_FIXPOINT1616TOFIXPOINT313(
+				XTalkCompensationRateMegaCps);
+		} else {
+			data = 0;
+		}
+
+		Status = VL_WrWord(Dev,
+		VL_REG_CROSSTALK_COMPENSATION_PEAK_RATE_MCPS, data);
+
+		if (Status == VL_ERROR_NONE) {
+			VL_SETPARAMETERFIELD(Dev,
+				XTalkCompensationRateMegaCps,
+				XTalkCompensationRateMegaCps);
+		}
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetXTalkCompensationRateMegaCps(struct vl_data *Dev,
+	unsigned int *pXTalkCompensationRateMegaCps)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint16_t Value;
+	unsigned int TempFix1616;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_RdWord(Dev,
+	VL_REG_CROSSTALK_COMPENSATION_PEAK_RATE_MCPS, (uint16_t *)&Value);
+	if (Status == VL_ERROR_NONE) {
+		if (Value == 0) {
+			/* Xtalk is disabled; return the value stored in memory */
+			VL_GETPARAMETERFIELD(Dev,
+				XTalkCompensationRateMegaCps, TempFix1616);
+			*pXTalkCompensationRateMegaCps = TempFix1616;
+			VL_SETPARAMETERFIELD(Dev, XTalkCompensationEnable,
+				0);
+		} else {
+			TempFix1616 = VL_FIXPOINT313TOFIXPOINT1616(Value);
+			*pXTalkCompensationRateMegaCps = TempFix1616;
+			VL_SETPARAMETERFIELD(Dev,
+				XTalkCompensationRateMegaCps, TempFix1616);
+			VL_SETPARAMETERFIELD(Dev, XTalkCompensationEnable,
+				1);
+		}
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_SetRefCalibration(struct vl_data *Dev, uint8_t VhvSettings,
+	uint8_t PhaseCal)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_set_ref_calibration(Dev, VhvSettings, PhaseCal);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetRefCalibration(struct vl_data *Dev, uint8_t *pVhvSettings,
+	uint8_t *pPhaseCal)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_get_ref_calibration(Dev, pVhvSettings, pPhaseCal);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+/*
+ * CHECK LIMIT FUNCTIONS
+ */
+
+int8_t VL_GetNumberOfLimitCheck(uint16_t *pNumberOfLimitCheck)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	*pNumberOfLimitCheck = VL_CHECKENABLE_NUMBER_OF_CHECKS;
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetLimitCheckInfo(struct vl_data *Dev, uint16_t LimitCheckId,
+	char *pLimitCheckString)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_get_limit_check_info(Dev, LimitCheckId,
+		pLimitCheckString);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetLimitCheckStatus(struct vl_data *Dev,
+	uint16_t LimitCheckId, uint8_t *pLimitCheckStatus)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t Temp8;
+
+	LOG_FUNCTION_START("");
+
+	if (LimitCheckId >= VL_CHECKENABLE_NUMBER_OF_CHECKS) {
+		Status = VL_ERROR_INVALID_PARAMS;
+	} else {
+
+		VL_GETARRAYPARAMETERFIELD(Dev, LimitChecksStatus,
+			LimitCheckId, Temp8);
+
+		*pLimitCheckStatus = Temp8;
+
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_SetLimitCheckEnable(struct vl_data *Dev,
+	uint16_t LimitCheckId, uint8_t LimitCheckEnable)
+{
+	int8_t Status = VL_ERROR_NONE;
+	unsigned int TempFix1616 = 0;
+	uint8_t LimitCheckEnableInt = 0;
+	uint8_t LimitCheckDisable = 0;
+	uint8_t Temp8;
+
+	LOG_FUNCTION_START("");
+
+	if (LimitCheckId >= VL_CHECKENABLE_NUMBER_OF_CHECKS) {
+		Status = VL_ERROR_INVALID_PARAMS;
+	} else {
+		if (LimitCheckEnable == 0) {
+			TempFix1616 = 0;
+			LimitCheckEnableInt = 0;
+			LimitCheckDisable = 1;
+
+		} else {
+			VL_GETARRAYPARAMETERFIELD(Dev, LimitChecksValue,
+				LimitCheckId, TempFix1616);
+			LimitCheckDisable = 0;
+			/* make sure the stored enable is either 0 or 1 */
+			LimitCheckEnableInt = 1;
+		}
+
+		switch (LimitCheckId) {
+
+		case VL_CHECKENABLE_SIGMA_FINAL_RANGE:
+			/* internal computation: */
+			VL_SETARRAYPARAMETERFIELD(Dev, LimitChecksEnable,
+				VL_CHECKENABLE_SIGMA_FINAL_RANGE,
+				LimitCheckEnableInt);
+
+			break;
+
+		case VL_CHECKENABLE_SIGNAL_RATE_FINAL_RANGE:
+
+			Status = VL_WrWord(Dev,
+			VL_REG_FINAL_RANGE_CONFIG_MIN_COUNT_RATE_RTN_LIMIT,
+				VL_FIXPOINT1616TOFIXPOINT97(TempFix1616));
+
+			break;
+
+		case VL_CHECKENABLE_SIGNAL_REF_CLIP:
+
+			/* internal computation: */
+			VL_SETARRAYPARAMETERFIELD(Dev, LimitChecksEnable,
+				VL_CHECKENABLE_SIGNAL_REF_CLIP,
+				LimitCheckEnableInt);
+
+			break;
+
+		case VL_CHECKENABLE_RANGE_IGNORE_THRESHOLD:
+
+			/* internal computation: */
+			VL_SETARRAYPARAMETERFIELD(Dev, LimitChecksEnable,
+				VL_CHECKENABLE_RANGE_IGNORE_THRESHOLD,
+				LimitCheckEnableInt);
+
+			break;
+
+		case VL_CHECKENABLE_SIGNAL_RATE_MSRC:
+
+			Temp8 = (uint8_t)(LimitCheckDisable << 1);
+			Status = VL_UpdateByte(Dev,
+				VL_REG_MSRC_CONFIG_CONTROL,
+				0xFE, Temp8);
+
+			break;
+
+		case VL_CHECKENABLE_SIGNAL_RATE_PRE_RANGE:
+
+			Temp8 = (uint8_t)(LimitCheckDisable << 4);
+			Status = VL_UpdateByte(Dev,
+				VL_REG_MSRC_CONFIG_CONTROL,
+				0xEF, Temp8);
+
+			break;
+
+
+		default:
+			Status = VL_ERROR_INVALID_PARAMS;
+
+		}
+
+	}
+
+	if (Status == VL_ERROR_NONE) {
+		if (LimitCheckEnable == 0) {
+			VL_SETARRAYPARAMETERFIELD(Dev, LimitChecksEnable,
+				LimitCheckId, 0);
+		} else {
+			VL_SETARRAYPARAMETERFIELD(Dev, LimitChecksEnable,
+				LimitCheckId, 1);
+		}
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetLimitCheckEnable(struct vl_data *Dev,
+	uint16_t LimitCheckId, uint8_t *pLimitCheckEnable)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t Temp8;
+
+	LOG_FUNCTION_START("");
+
+	if (LimitCheckId >= VL_CHECKENABLE_NUMBER_OF_CHECKS) {
+		Status = VL_ERROR_INVALID_PARAMS;
+		*pLimitCheckEnable = 0;
+	} else {
+		VL_GETARRAYPARAMETERFIELD(Dev, LimitChecksEnable,
+			LimitCheckId, Temp8);
+		*pLimitCheckEnable = Temp8;
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_SetLimitCheckValue(struct vl_data *Dev, uint16_t LimitCheckId,
+	unsigned int LimitCheckValue)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t Temp8;
+
+	LOG_FUNCTION_START("");
+
+	VL_GETARRAYPARAMETERFIELD(Dev, LimitChecksEnable, LimitCheckId,
+		Temp8);
+
+	if (Temp8 == 0) { /* disabled: only update the internal value */
+		VL_SETARRAYPARAMETERFIELD(Dev, LimitChecksValue,
+			LimitCheckId, LimitCheckValue);
+	} else {
+
+		switch (LimitCheckId) {
+
+		case VL_CHECKENABLE_SIGMA_FINAL_RANGE:
+			/* internal computation: */
+			VL_SETARRAYPARAMETERFIELD(Dev, LimitChecksValue,
+				VL_CHECKENABLE_SIGMA_FINAL_RANGE,
+				LimitCheckValue);
+			break;
+
+		case VL_CHECKENABLE_SIGNAL_RATE_FINAL_RANGE:
+
+			Status = VL_WrWord(Dev,
+			VL_REG_FINAL_RANGE_CONFIG_MIN_COUNT_RATE_RTN_LIMIT,
+				VL_FIXPOINT1616TOFIXPOINT97(
+					LimitCheckValue));
+
+			break;
+
+		case VL_CHECKENABLE_SIGNAL_REF_CLIP:
+
+			/* internal computation: */
+			VL_SETARRAYPARAMETERFIELD(Dev, LimitChecksValue,
+				VL_CHECKENABLE_SIGNAL_REF_CLIP,
+				LimitCheckValue);
+
+			break;
+
+		case VL_CHECKENABLE_RANGE_IGNORE_THRESHOLD:
+
+			/* internal computation: */
+			VL_SETARRAYPARAMETERFIELD(Dev, LimitChecksValue,
+				VL_CHECKENABLE_RANGE_IGNORE_THRESHOLD,
+				LimitCheckValue);
+
+			break;
+
+		case VL_CHECKENABLE_SIGNAL_RATE_MSRC:
+		case VL_CHECKENABLE_SIGNAL_RATE_PRE_RANGE:
+
+			Status = VL_WrWord(Dev,
+				VL_REG_PRE_RANGE_MIN_COUNT_RATE_RTN_LIMIT,
+				VL_FIXPOINT1616TOFIXPOINT97(
+					LimitCheckValue));
+
+			break;
+
+		default:
+			Status = VL_ERROR_INVALID_PARAMS;
+
+		}
+
+		if (Status == VL_ERROR_NONE) {
+			VL_SETARRAYPARAMETERFIELD(Dev, LimitChecksValue,
+				LimitCheckId, LimitCheckValue);
+		}
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetLimitCheckValue(struct vl_data *Dev, uint16_t LimitCheckId,
+	unsigned int *pLimitCheckValue)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t EnableZeroValue = 0;
+	uint16_t Temp16;
+	unsigned int TempFix1616;
+
+	LOG_FUNCTION_START("");
+
+	switch (LimitCheckId) {
+
+	case VL_CHECKENABLE_SIGMA_FINAL_RANGE:
+		/* internal computation: */
+		VL_GETARRAYPARAMETERFIELD(Dev, LimitChecksValue,
+			VL_CHECKENABLE_SIGMA_FINAL_RANGE, TempFix1616);
+		EnableZeroValue = 0;
+		break;
+
+	case VL_CHECKENABLE_SIGNAL_RATE_FINAL_RANGE:
+		Status = VL_RdWord(Dev,
+		VL_REG_FINAL_RANGE_CONFIG_MIN_COUNT_RATE_RTN_LIMIT,
+			&Temp16);
+		if (Status == VL_ERROR_NONE)
+			TempFix1616 = VL_FIXPOINT97TOFIXPOINT1616(Temp16);
+
+
+		EnableZeroValue = 1;
+		break;
+
+	case VL_CHECKENABLE_SIGNAL_REF_CLIP:
+		/* internal computation: */
+		VL_GETARRAYPARAMETERFIELD(Dev, LimitChecksValue,
+			VL_CHECKENABLE_SIGNAL_REF_CLIP, TempFix1616);
+		EnableZeroValue = 0;
+		break;
+
+	case VL_CHECKENABLE_RANGE_IGNORE_THRESHOLD:
+		/* internal computation: */
+		VL_GETARRAYPARAMETERFIELD(Dev, LimitChecksValue,
+		    VL_CHECKENABLE_RANGE_IGNORE_THRESHOLD, TempFix1616);
+		EnableZeroValue = 0;
+		break;
+
+	case VL_CHECKENABLE_SIGNAL_RATE_MSRC:
+	case VL_CHECKENABLE_SIGNAL_RATE_PRE_RANGE:
+		Status = VL_RdWord(Dev,
+			VL_REG_PRE_RANGE_MIN_COUNT_RATE_RTN_LIMIT,
+			&Temp16);
+		if (Status == VL_ERROR_NONE)
+			TempFix1616 = VL_FIXPOINT97TOFIXPOINT1616(Temp16);
+
+
+		EnableZeroValue = 0;
+		break;
+
+	default:
+		Status = VL_ERROR_INVALID_PARAMS;
+
+	}
+
+	if (Status == VL_ERROR_NONE) {
+
+		if (EnableZeroValue == 1) {
+
+			if (TempFix1616 == 0) {
+				/* disabled: return value from memory */
+				VL_GETARRAYPARAMETERFIELD(Dev,
+					LimitChecksValue, LimitCheckId,
+					TempFix1616);
+				*pLimitCheckValue = TempFix1616;
+				VL_SETARRAYPARAMETERFIELD(Dev,
+					LimitChecksEnable, LimitCheckId, 0);
+			} else {
+				*pLimitCheckValue = TempFix1616;
+				VL_SETARRAYPARAMETERFIELD(Dev,
+					LimitChecksValue, LimitCheckId,
+					TempFix1616);
+				VL_SETARRAYPARAMETERFIELD(Dev,
+					LimitChecksEnable, LimitCheckId, 1);
+			}
+		} else {
+			*pLimitCheckValue = TempFix1616;
+		}
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+
+}
+
+int8_t VL_GetLimitCheckCurrent(struct vl_data *Dev,
+	uint16_t LimitCheckId, unsigned int *pLimitCheckCurrent)
+{
+	int8_t Status = VL_ERROR_NONE;
+	struct VL_RangingMeasurementData_t LastRangeDataBuffer;
+
+	LOG_FUNCTION_START("");
+
+	if (LimitCheckId >= VL_CHECKENABLE_NUMBER_OF_CHECKS) {
+		Status = VL_ERROR_INVALID_PARAMS;
+	} else {
+		switch (LimitCheckId) {
+		case VL_CHECKENABLE_SIGMA_FINAL_RANGE:
+			/* Need to run a ranging to have the latest values */
+			*pLimitCheckCurrent = PALDevDataGet(Dev, SigmaEstimate);
+
+			break;
+
+		case VL_CHECKENABLE_SIGNAL_RATE_FINAL_RANGE:
+			/* Need to run a ranging to have the latest values */
+			LastRangeDataBuffer = PALDevDataGet(Dev,
+				LastRangeMeasure);
+			*pLimitCheckCurrent =
+				LastRangeDataBuffer.SignalRateRtnMegaCps;
+
+			break;
+
+		case VL_CHECKENABLE_SIGNAL_REF_CLIP:
+			/* Need to run a ranging to have the latest values */
+			*pLimitCheckCurrent = PALDevDataGet(Dev,
+				LastSignalRefMcps);
+
+			break;
+
+		case VL_CHECKENABLE_RANGE_IGNORE_THRESHOLD:
+			/* Need to run a ranging to have the latest values */
+			LastRangeDataBuffer = PALDevDataGet(Dev,
+				LastRangeMeasure);
+			*pLimitCheckCurrent =
+				LastRangeDataBuffer.SignalRateRtnMegaCps;
+
+			break;
+
+		case VL_CHECKENABLE_SIGNAL_RATE_MSRC:
+			/* Need to run a ranging to have the latest values */
+			LastRangeDataBuffer = PALDevDataGet(Dev,
+				LastRangeMeasure);
+			*pLimitCheckCurrent =
+				LastRangeDataBuffer.SignalRateRtnMegaCps;
+
+			break;
+
+		case VL_CHECKENABLE_SIGNAL_RATE_PRE_RANGE:
+			/* Need to run a ranging to have the latest values */
+			LastRangeDataBuffer = PALDevDataGet(Dev,
+				LastRangeMeasure);
+			*pLimitCheckCurrent =
+				LastRangeDataBuffer.SignalRateRtnMegaCps;
+
+			break;
+
+		default:
+			Status = VL_ERROR_INVALID_PARAMS;
+		}
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+
+}
+
+/*
+ * WRAPAROUND Check
+ */
+int8_t VL_SetWrapAroundCheckEnable(struct vl_data *Dev,
+	uint8_t WrapAroundCheckEnable)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t Byte;
+	uint8_t WrapAroundCheckEnableInt;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_RdByte(Dev, VL_REG_SYSTEM_SEQUENCE_CONFIG, &Byte);
+	if (WrapAroundCheckEnable == 0) {
+		/* Disable wraparound */
+		Byte = Byte & 0x7F;
+		WrapAroundCheckEnableInt = 0;
+	} else {
+		/* Enable wraparound */
+		Byte = Byte | 0x80;
+		WrapAroundCheckEnableInt = 1;
+	}
+
+	Status = VL_WrByte(Dev, VL_REG_SYSTEM_SEQUENCE_CONFIG, Byte);
+
+	if (Status == VL_ERROR_NONE) {
+		PALDevDataSet(Dev, SequenceConfig, Byte);
+		VL_SETPARAMETERFIELD(Dev, WrapAroundCheckEnable,
+			WrapAroundCheckEnableInt);
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetWrapAroundCheckEnable(struct vl_data *Dev,
+	uint8_t *pWrapAroundCheckEnable)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t data;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_RdByte(Dev, VL_REG_SYSTEM_SEQUENCE_CONFIG, &data);
+	if (Status == VL_ERROR_NONE) {
+		PALDevDataSet(Dev, SequenceConfig, data);
+		if (data & (0x01 << 7))
+			*pWrapAroundCheckEnable = 0x01;
+		else
+			*pWrapAroundCheckEnable = 0x00;
+	}
+	if (Status == VL_ERROR_NONE) {
+		VL_SETPARAMETERFIELD(Dev, WrapAroundCheckEnable,
+			*pWrapAroundCheckEnable);
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_SetDmaxCalParameters(struct vl_data *Dev,
+	uint16_t RangeMilliMeter, unsigned int SignalRateRtnMegaCps)
+{
+	int8_t Status = VL_ERROR_NONE;
+	unsigned int SignalRateRtnMegaCpsTemp = 0;
+
+	LOG_FUNCTION_START("");
+
+	/* If either input parameter is zero, the values are taken */
+	/* from NVM. */
+	if ((RangeMilliMeter == 0) || (SignalRateRtnMegaCps == 0)) {
+		/* NVM parameters */
+		/* Run VL_get_info_from_device with option 4 to get the */
+		/* signal rate at 400 mm; if the value has already been */
+		/* read, the call returns without accessing the device. */
+		VL_get_info_from_device(Dev, 4);
+
+		SignalRateRtnMegaCpsTemp = VL_GETDEVICESPECIFICPARAMETER(
+			Dev, SignalRateMeasFixed400mm);
+
+		PALDevDataSet(Dev, DmaxCalRangeMilliMeter, 400);
+		PALDevDataSet(Dev, DmaxCalSignalRateRtnMegaCps,
+			SignalRateRtnMegaCpsTemp);
+	} else {
+		/* User parameters */
+		PALDevDataSet(Dev, DmaxCalRangeMilliMeter, RangeMilliMeter);
+		PALDevDataSet(Dev, DmaxCalSignalRateRtnMegaCps,
+			SignalRateRtnMegaCps);
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetDmaxCalParameters(struct vl_data *Dev,
+	uint16_t *pRangeMilliMeter, unsigned int *pSignalRateRtnMegaCps)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	*pRangeMilliMeter = PALDevDataGet(Dev, DmaxCalRangeMilliMeter);
+	*pSignalRateRtnMegaCps = PALDevDataGet(Dev,
+		DmaxCalSignalRateRtnMegaCps);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+/* End Group PAL Parameters Functions */
+
+/* Group PAL Measurement Functions */
+int8_t VL_PerformSingleMeasurement(struct vl_data *Dev)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t DeviceMode;
+
+	LOG_FUNCTION_START("");
+
+	/* Get Current DeviceMode */
+	Status = VL_GetDeviceMode(Dev, &DeviceMode);
+
+	/* Start immediately to run a single ranging measurement in case of */
+	/* single ranging or single histogram */
+	if (Status == VL_ERROR_NONE
+		&& DeviceMode == VL_DEVICEMODE_SINGLE_RANGING)
+		Status = VL_StartMeasurement(Dev);
+
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_measurement_poll_for_completion(Dev);
+
+
+	/* Change PAL State in case of single ranging or single histogram */
+	if (Status == VL_ERROR_NONE
+		&& DeviceMode == VL_DEVICEMODE_SINGLE_RANGING)
+		PALDevDataSet(Dev, PalState, VL_STATE_IDLE);
+
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_PerformSingleHistogramMeasurement(struct vl_data *Dev,
+	struct VL_HistogramMeasurementData_t *pHistogramMeasurementData)
+{
+	int8_t Status = VL_ERROR_NOT_IMPLEMENTED;
+
+	LOG_FUNCTION_START("");
+
+	/* not implemented on VL53L0X */
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_PerformRefCalibration(struct vl_data *Dev,
+	uint8_t *pVhvSettings, uint8_t *pPhaseCal)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_perform_ref_calibration(Dev, pVhvSettings,
+		pPhaseCal, 1);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_PerformXTalkMeasurement(struct vl_data *Dev,
+	uint32_t TimeoutMs, unsigned int *pXtalkPerSpad,
+	uint8_t *pAmbientTooHigh)
+{
+	int8_t Status = VL_ERROR_NOT_IMPLEMENTED;
+
+	LOG_FUNCTION_START("");
+
+	/* not implemented on VL53L0X */
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_PerformXTalkCalibration(struct vl_data *Dev,
+	unsigned int XTalkCalDistance,
+	unsigned int *pXTalkCompensationRateMegaCps)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_perform_xtalk_calibration(Dev, XTalkCalDistance,
+		pXTalkCompensationRateMegaCps);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_PerformOffsetCalibration(struct vl_data *Dev,
+	unsigned int CalDistanceMilliMeter, int32_t *pOffsetMicroMeter)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_perform_offset_calibration(Dev, CalDistanceMilliMeter,
+		pOffsetMicroMeter);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_CheckAndLoadInterruptSettings(struct vl_data *Dev,
+	uint8_t StartNotStopFlag)
+{
+	uint8_t InterruptConfig;
+	unsigned int ThresholdLow;
+	unsigned int ThresholdHigh;
+	int8_t Status = VL_ERROR_NONE;
+
+	InterruptConfig = VL_GETDEVICESPECIFICPARAMETER(Dev,
+		Pin0GpioFunctionality);
+
+	if ((InterruptConfig ==
+		VL_GPIOFUNCTIONALITY_THRESHOLD_CROSSED_LOW) ||
+		(InterruptConfig ==
+		VL_GPIOFUNCTIONALITY_THRESHOLD_CROSSED_HIGH) ||
+		(InterruptConfig ==
+		VL_GPIOFUNCTIONALITY_THRESHOLD_CROSSED_OUT)) {
+
+		Status = VL_GetInterruptThresholds(Dev,
+			VL_DEVICEMODE_CONTINUOUS_RANGING,
+			&ThresholdLow, &ThresholdHigh);
+
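+		/* Thresholds are 16.16 fixed point, so 255*65536 is 255.0.
+		 * Above that, extra tuning settings are loaded on start and
+		 * a short register sequence is written on stop.
+		 */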
+		if (((ThresholdLow > 255*65536) ||
+			(ThresholdHigh > 255*65536)) &&
+			(Status == VL_ERROR_NONE)) {
+
+			if (StartNotStopFlag != 0) {
+				Status = VL_load_tuning_settings(Dev,
+					InterruptThresholdSettings);
+			} else {
+				Status |= VL_WrByte(Dev, 0xFF, 0x04);
+				Status |= VL_WrByte(Dev, 0x70, 0x00);
+				Status |= VL_WrByte(Dev, 0xFF, 0x00);
+				Status |= VL_WrByte(Dev, 0x80, 0x00);
+			}
+
+		}
+
+
+	}
+
+	return Status;
+
+}
+
+
+int8_t VL_StartMeasurement(struct vl_data *Dev)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t DeviceMode;
+	uint8_t Byte;
+	uint8_t StartStopByte = VL_REG_SYSRANGE_MODE_START_STOP;
+	uint32_t LoopNb;
+
+	LOG_FUNCTION_START("");
+
+	/* Get Current DeviceMode */
+	VL_GetDeviceMode(Dev, &DeviceMode);
+
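+	/* Device-specific start sequence: re-apply the stop variable
+	 * saved in the PAL data before kicking off a measurement.
+	 */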
+	Status = VL_WrByte(Dev, 0x80, 0x01);
+	Status = VL_WrByte(Dev, 0xFF, 0x01);
+	Status = VL_WrByte(Dev, 0x00, 0x00);
+	Status = VL_WrByte(Dev, 0x91, PALDevDataGet(Dev, StopVariable));
+	Status = VL_WrByte(Dev, 0x00, 0x01);
+	Status = VL_WrByte(Dev, 0xFF, 0x00);
+	Status = VL_WrByte(Dev, 0x80, 0x00);
+
+	switch (DeviceMode) {
+	case VL_DEVICEMODE_SINGLE_RANGING:
+		Status = VL_WrByte(Dev, VL_REG_SYSRANGE_START, 0x01);
+
+		Byte = StartStopByte;
+		if (Status == VL_ERROR_NONE) {
+			/* Wait until start bit has been cleared */
+			LoopNb = 0;
+			do {
+				if (LoopNb > 0)
+					Status = VL_RdByte(Dev,
+					VL_REG_SYSRANGE_START, &Byte);
+				LoopNb = LoopNb + 1;
+			} while (((Byte & StartStopByte) == StartStopByte)
+				&& (Status == VL_ERROR_NONE)
+				&& (LoopNb < VL_DEFAULT_MAX_LOOP));
+
+			if (LoopNb >= VL_DEFAULT_MAX_LOOP)
+				Status = VL_ERROR_TIME_OUT;
+
+		}
+
+		break;
+	case VL_DEVICEMODE_CONTINUOUS_RANGING:
+		/* Back-to-back mode */
+
+		/* Check whether interrupt settings need to be applied */
+		if (Status == VL_ERROR_NONE)
+			Status = VL_CheckAndLoadInterruptSettings(Dev, 1);
+
+		Status = VL_WrByte(Dev,
+		VL_REG_SYSRANGE_START,
+		VL_REG_SYSRANGE_MODE_BACKTOBACK);
+		if (Status == VL_ERROR_NONE) {
+			/* Set PAL State to Running */
+			PALDevDataSet(Dev, PalState, VL_STATE_RUNNING);
+		}
+		break;
+	case VL_DEVICEMODE_CONTINUOUS_TIMED_RANGING:
+		/* Continuous mode */
+		/* Check whether interrupt settings need to be applied */
+		if (Status == VL_ERROR_NONE)
+			Status = VL_CheckAndLoadInterruptSettings(Dev, 1);
+
+		Status = VL_WrByte(Dev,
+		VL_REG_SYSRANGE_START,
+		VL_REG_SYSRANGE_MODE_TIMED);
+
+		if (Status == VL_ERROR_NONE) {
+			/* Set PAL State to Running */
+			PALDevDataSet(Dev, PalState, VL_STATE_RUNNING);
+		}
+		break;
+	default:
+		/* Selected mode not supported */
+		Status = VL_ERROR_MODE_NOT_SUPPORTED;
+	}
+
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_StopMeasurement(struct vl_data *Dev)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_WrByte(Dev, VL_REG_SYSRANGE_START,
+	VL_REG_SYSRANGE_MODE_SINGLESHOT);
+
+	Status = VL_WrByte(Dev, 0xFF, 0x01);
+	Status = VL_WrByte(Dev, 0x00, 0x00);
+	Status = VL_WrByte(Dev, 0x91, 0x00);
+	Status = VL_WrByte(Dev, 0x00, 0x01);
+	Status = VL_WrByte(Dev, 0xFF, 0x00);
+
+	if (Status == VL_ERROR_NONE) {
+		/* Set PAL State to Idle */
+		PALDevDataSet(Dev, PalState, VL_STATE_IDLE);
+	}
+
+	/* Check whether interrupt settings need to be applied */
+	if (Status == VL_ERROR_NONE)
+		Status = VL_CheckAndLoadInterruptSettings(Dev, 0);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetMeasurementDataReady(struct vl_data *Dev,
+	uint8_t *pMeasurementDataReady)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t SysRangeStatusRegister;
+	uint8_t InterruptConfig;
+	uint32_t InterruptMask;
+
+	LOG_FUNCTION_START("");
+
+	InterruptConfig = VL_GETDEVICESPECIFICPARAMETER(Dev,
+		Pin0GpioFunctionality);
+
+	if (InterruptConfig ==
+		VL_REG_SYSTEM_INTERRUPT_GPIO_NEW_SAMPLE_READY) {
+		Status = VL_GetInterruptMaskStatus(Dev, &InterruptMask);
+		if (InterruptMask ==
+		VL_REG_SYSTEM_INTERRUPT_GPIO_NEW_SAMPLE_READY)
+			*pMeasurementDataReady = 1;
+		else
+			*pMeasurementDataReady = 0;
+	} else {
+		Status = VL_RdByte(Dev, VL_REG_RESULT_RANGE_STATUS,
+			&SysRangeStatusRegister);
+		if (Status == VL_ERROR_NONE) {
+			if (SysRangeStatusRegister & 0x01)
+				*pMeasurementDataReady = 1;
+			else
+				*pMeasurementDataReady = 0;
+		}
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_WaitDeviceReadyForNewMeasurement(struct vl_data *Dev,
+	uint32_t MaxLoop)
+{
+	int8_t Status = VL_ERROR_NOT_IMPLEMENTED;
+
+	LOG_FUNCTION_START("");
+
+	/* not implemented for VL53L0X */
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+
+int8_t VL_GetRangingMeasurementData(struct vl_data *Dev,
+	struct VL_RangingMeasurementData_t *pRangingMeasurementData)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t DeviceRangeStatus;
+	uint8_t RangeFractionalEnable;
+	uint8_t PalRangeStatus;
+	uint8_t XTalkCompensationEnable;
+	uint16_t AmbientRate;
+	unsigned int SignalRate;
+	uint16_t XTalkCompensationRateMegaCps;
+	uint16_t EffectiveSpadRtnCount;
+	uint16_t tmpuint16;
+	uint16_t XtalkRangeMilliMeter;
+	uint16_t LinearityCorrectiveGain;
+	uint8_t localBuffer[12];
+	struct VL_RangingMeasurementData_t LastRangeDataBuffer;
+
+	LOG_FUNCTION_START("");
+
+	/*
+	 * Use a multi-byte read even though some registers are not needed;
+	 * it is more efficient than individual reads.
+	 * Reading starts at 0x14 (dec 20); 12 bytes are read, so the last
+	 * register read is 0x1F (dec 31).
+	 */
+	Status = VL_ReadMulti(Dev, 0x14, localBuffer, 12);
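+	/* localBuffer[n] holds register 0x14 + n: [0] range status,
+	 * [2..3] effective SPAD count, [6..7] peak signal rate,
+	 * [8..9] ambient rate, [10..11] range, as decoded below.
+	 */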
+
+	if (Status == VL_ERROR_NONE) {
+
+		pRangingMeasurementData->ZoneId = 0; /* Only one zone */
+		pRangingMeasurementData->TimeStamp = 0; /* Not Implemented */
+
+		tmpuint16 = VL_MAKEUINT16(localBuffer[11],
+			localBuffer[10]);
+		/* On cut 1.1, if SYSTEM__RANGE_CONFIG is 1 the range has a
+		 * 2-bit fractional part (format 11.2); otherwise there is no
+		 * fractional part.
+		 */
+
+		pRangingMeasurementData->MeasurementTimeUsec = 0;
+
+		SignalRate = VL_FIXPOINT97TOFIXPOINT1616(
+			VL_MAKEUINT16(localBuffer[7], localBuffer[6]));
+		/* peak_signal_count_rate_rtn_mcps */
+		pRangingMeasurementData->SignalRateRtnMegaCps = SignalRate;
+
+		AmbientRate = VL_MAKEUINT16(localBuffer[9],
+			localBuffer[8]);
+		pRangingMeasurementData->AmbientRateRtnMegaCps =
+			VL_FIXPOINT97TOFIXPOINT1616(AmbientRate);
+
+		EffectiveSpadRtnCount = VL_MAKEUINT16(localBuffer[3],
+			localBuffer[2]);
+		/* EffectiveSpadRtnCount is 8.8 format */
+		pRangingMeasurementData->EffectiveSpadRtnCount =
+			EffectiveSpadRtnCount;
+
+		DeviceRangeStatus = localBuffer[0];
+
+		/* Get Linearity Corrective Gain */
+		LinearityCorrectiveGain = PALDevDataGet(Dev,
+			LinearityCorrectiveGain);
+
+		/* Get ranging configuration */
+		RangeFractionalEnable = PALDevDataGet(Dev,
+			RangeFractionalEnable);
+
+		if (LinearityCorrectiveGain != 1000) {
+
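+			/* Apply the linearity corrective gain (expressed in
+			 * thousandths) to the raw range, with rounding.
+			 */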
+			tmpuint16 = (uint16_t)((LinearityCorrectiveGain
+				* tmpuint16 + 500) / 1000);
+
+			/* Implement Xtalk */
+			VL_GETPARAMETERFIELD(Dev,
+				XTalkCompensationRateMegaCps,
+				XTalkCompensationRateMegaCps);
+			VL_GETPARAMETERFIELD(Dev, XTalkCompensationEnable,
+				XTalkCompensationEnable);
+
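+			/* Crosstalk-corrected range, as computed below:
+			 * range * signal / (signal - xtalk * spads / 256),
+			 * saturated to 8888 when the denominator is <= 0.
+			 */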
+			if (XTalkCompensationEnable) {
+
+				if ((SignalRate
+					- ((XTalkCompensationRateMegaCps
+					* EffectiveSpadRtnCount) >> 8))
+					<= 0) {
+					if (RangeFractionalEnable)
+						XtalkRangeMilliMeter = 8888;
+					else
+						XtalkRangeMilliMeter = 8888
+							<< 2;
+				} else {
+					XtalkRangeMilliMeter =
+					(tmpuint16 * SignalRate)
+						/ (SignalRate
+						- ((XTalkCompensationRateMegaCps
+						* EffectiveSpadRtnCount)
+						>> 8));
+				}
+
+				tmpuint16 = XtalkRangeMilliMeter;
+			}
+
+		}
+
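+		/* With fractional ranging the register value is in units of
+		 * 1/4 mm: the low two bits become the top bits of the 8-bit
+		 * fractional part.
+		 */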
+		if (RangeFractionalEnable) {
+			pRangingMeasurementData->RangeMilliMeter =
+				(uint16_t)((tmpuint16) >> 2);
+			pRangingMeasurementData->RangeFractionalPart =
+				(uint8_t)((tmpuint16 & 0x03) << 6);
+		} else {
+			pRangingMeasurementData->RangeMilliMeter = tmpuint16;
+			pRangingMeasurementData->RangeFractionalPart = 0;
+		}
+
+		/*
+		 * For a standard definition of RangeStatus, this should
+		 * return 0 after a good ranging result. The range status
+		 * depends on the device, so call a device-specific function
+		 * to obtain the right status.
+		 */
+		Status |= VL_get_pal_range_status(Dev, DeviceRangeStatus,
+			SignalRate, EffectiveSpadRtnCount,
+			pRangingMeasurementData, &PalRangeStatus);
+
+		if (Status == VL_ERROR_NONE)
+			pRangingMeasurementData->RangeStatus = PalRangeStatus;
+
+	}
+
+	if (Status == VL_ERROR_NONE) {
+		/* Copy last read data into Dev buffer */
+		LastRangeDataBuffer = PALDevDataGet(Dev, LastRangeMeasure);
+
+		LastRangeDataBuffer.RangeMilliMeter =
+			pRangingMeasurementData->RangeMilliMeter;
+		LastRangeDataBuffer.RangeFractionalPart =
+			pRangingMeasurementData->RangeFractionalPart;
+		LastRangeDataBuffer.RangeDMaxMilliMeter =
+			pRangingMeasurementData->RangeDMaxMilliMeter;
+		LastRangeDataBuffer.MeasurementTimeUsec =
+			pRangingMeasurementData->MeasurementTimeUsec;
+		LastRangeDataBuffer.SignalRateRtnMegaCps =
+			pRangingMeasurementData->SignalRateRtnMegaCps;
+		LastRangeDataBuffer.AmbientRateRtnMegaCps =
+			pRangingMeasurementData->AmbientRateRtnMegaCps;
+		LastRangeDataBuffer.EffectiveSpadRtnCount =
+			pRangingMeasurementData->EffectiveSpadRtnCount;
+		LastRangeDataBuffer.RangeStatus =
+			pRangingMeasurementData->RangeStatus;
+
+		PALDevDataSet(Dev, LastRangeMeasure, LastRangeDataBuffer);
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetMeasurementRefSignal(struct vl_data *Dev,
+	unsigned int *pMeasurementRefSignal)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t SignalRefClipLimitCheckEnable = 0;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_GetLimitCheckEnable(Dev,
+			VL_CHECKENABLE_SIGNAL_REF_CLIP,
+			&SignalRefClipLimitCheckEnable);
+	if (SignalRefClipLimitCheckEnable != 0)
+		*pMeasurementRefSignal = PALDevDataGet(Dev, LastSignalRefMcps);
+	else
+		Status = VL_ERROR_INVALID_COMMAND;
+	LOG_FUNCTION_END(Status);
+
+	return Status;
+}
+
+int8_t VL_GetHistogramMeasurementData(struct vl_data *Dev,
+	struct VL_HistogramMeasurementData_t *pHistogramMeasurementData)
+{
+	int8_t Status = VL_ERROR_NOT_IMPLEMENTED;
+
+	LOG_FUNCTION_START("");
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_PerformSingleRangingMeasurement(struct vl_data *Dev,
+	struct VL_RangingMeasurementData_t *pRangingMeasurementData)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	/* This function performs a complete single ranging, */
+	/* so force single-ranging mode here. */
+	Status = VL_SetDeviceMode(Dev, VL_DEVICEMODE_SINGLE_RANGING);
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_PerformSingleMeasurement(Dev);
+
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_GetRangingMeasurementData(Dev,
+			pRangingMeasurementData);
+
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_ClearInterruptMask(Dev, 0);
+
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_SetNumberOfROIZones(struct vl_data *Dev,
+	uint8_t NumberOfROIZones)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	if (NumberOfROIZones != 1)
+		Status = VL_ERROR_INVALID_PARAMS;
+
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetNumberOfROIZones(struct vl_data *Dev,
+	uint8_t *pNumberOfROIZones)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	*pNumberOfROIZones = 1;
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetMaxNumberOfROIZones(struct vl_data *Dev,
+	uint8_t *pMaxNumberOfROIZones)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	*pMaxNumberOfROIZones = 1;
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+/* End Group PAL Measurement Functions */
+
+int8_t VL_SetGpioConfig(struct vl_data *Dev, uint8_t Pin,
+	uint8_t DeviceMode, uint8_t Functionality,
+	uint8_t Polarity)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t data;
+
+	LOG_FUNCTION_START("");
+
+	if (Pin != 0) {
+		Status = VL_ERROR_GPIO_NOT_EXISTING;
+	} else if (DeviceMode == VL_DEVICEMODE_GPIO_DRIVE) {
+		if (Polarity == VL_INTERRUPTPOLARITY_LOW)
+			data = 0x10;
+		else
+			data = 1;
+
+		Status = VL_WrByte(Dev,
+		VL_REG_GPIO_HV_MUX_ACTIVE_HIGH, data);
+
+	} else if (DeviceMode == VL_DEVICEMODE_GPIO_OSC) {
+
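+		/* Device-specific register sequence that routes the internal
+		 * oscillator to the GPIO pin (GPIO_OSC mode).
+		 */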
+		Status |= VL_WrByte(Dev, 0xff, 0x01);
+		Status |= VL_WrByte(Dev, 0x00, 0x00);
+
+		Status |= VL_WrByte(Dev, 0xff, 0x00);
+		Status |= VL_WrByte(Dev, 0x80, 0x01);
+		Status |= VL_WrByte(Dev, 0x85, 0x02);
+
+		Status |= VL_WrByte(Dev, 0xff, 0x04);
+		Status |= VL_WrByte(Dev, 0xcd, 0x00);
+		Status |= VL_WrByte(Dev, 0xcc, 0x11);
+
+		Status |= VL_WrByte(Dev, 0xff, 0x07);
+		Status |= VL_WrByte(Dev, 0xbe, 0x00);
+
+		Status |= VL_WrByte(Dev, 0xff, 0x06);
+		Status |= VL_WrByte(Dev, 0xcc, 0x09);
+
+		Status |= VL_WrByte(Dev, 0xff, 0x00);
+		Status |= VL_WrByte(Dev, 0xff, 0x01);
+		Status |= VL_WrByte(Dev, 0x00, 0x00);
+
+	} else {
+
+		if (Status == VL_ERROR_NONE) {
+			switch (Functionality) {
+			case VL_GPIOFUNCTIONALITY_OFF:
+				data = 0x00;
+				break;
+			case VL_GPIOFUNCTIONALITY_THRESHOLD_CROSSED_LOW:
+				data = 0x01;
+				break;
+			case VL_GPIOFUNCTIONALITY_THRESHOLD_CROSSED_HIGH:
+				data = 0x02;
+				break;
+			case VL_GPIOFUNCTIONALITY_THRESHOLD_CROSSED_OUT:
+				data = 0x03;
+				break;
+			case VL_GPIOFUNCTIONALITY_NEW_MEASURE_READY:
+				data = 0x04;
+				break;
+			default:
+				Status =
+				VL_ERROR_GPIO_FUNCTIONALITY_NOT_SUPPORTED;
+			}
+		}
+
+		if (Status == VL_ERROR_NONE)
+			Status = VL_WrByte(Dev,
+			VL_REG_SYSTEM_INTERRUPT_CONFIG_GPIO, data);
+
+		if (Status == VL_ERROR_NONE) {
+			if (Polarity == VL_INTERRUPTPOLARITY_LOW)
+				data = 0;
+			else
+				data = (uint8_t)(1 << 4);
+
+			Status = VL_UpdateByte(Dev,
+			VL_REG_GPIO_HV_MUX_ACTIVE_HIGH, 0xEF, data);
+		}
+
+		if (Status == VL_ERROR_NONE)
+			VL_SETDEVICESPECIFICPARAMETER(Dev,
+				Pin0GpioFunctionality, Functionality);
+
+		if (Status == VL_ERROR_NONE)
+			Status = VL_ClearInterruptMask(Dev, 0);
+
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetGpioConfig(struct vl_data *Dev, uint8_t Pin,
+	uint8_t *pDeviceMode,
+	uint8_t *pFunctionality,
+	uint8_t *pPolarity)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t GpioFunctionality;
+	uint8_t data;
+
+	LOG_FUNCTION_START("");
+
+	/* pDeviceMode is not managed by Ewok; it returns the current mode */
+
+	Status = VL_GetDeviceMode(Dev, pDeviceMode);
+
+	if (Status == VL_ERROR_NONE) {
+		if (Pin != 0) {
+			Status = VL_ERROR_GPIO_NOT_EXISTING;
+		} else {
+			Status = VL_RdByte(Dev,
+			VL_REG_SYSTEM_INTERRUPT_CONFIG_GPIO, &data);
+		}
+	}
+
+	if (Status == VL_ERROR_NONE) {
+		switch (data & 0x07) {
+		case 0x00:
+			GpioFunctionality = VL_GPIOFUNCTIONALITY_OFF;
+			break;
+		case 0x01:
+			GpioFunctionality =
+			VL_GPIOFUNCTIONALITY_THRESHOLD_CROSSED_LOW;
+			break;
+		case 0x02:
+			GpioFunctionality =
+			VL_GPIOFUNCTIONALITY_THRESHOLD_CROSSED_HIGH;
+			break;
+		case 0x03:
+			GpioFunctionality =
+			VL_GPIOFUNCTIONALITY_THRESHOLD_CROSSED_OUT;
+			break;
+		case 0x04:
+			GpioFunctionality =
+			VL_GPIOFUNCTIONALITY_NEW_MEASURE_READY;
+			break;
+		default:
+			Status = VL_ERROR_GPIO_FUNCTIONALITY_NOT_SUPPORTED;
+		}
+	}
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_RdByte(Dev,
+			VL_REG_GPIO_HV_MUX_ACTIVE_HIGH, &data);
+
+	if (Status == VL_ERROR_NONE) {
+		if ((data & (uint8_t)(1 << 4)) == 0)
+			*pPolarity = VL_INTERRUPTPOLARITY_LOW;
+		else
+			*pPolarity = VL_INTERRUPTPOLARITY_HIGH;
+	}
+
+	if (Status == VL_ERROR_NONE) {
+		*pFunctionality = GpioFunctionality;
+		VL_SETDEVICESPECIFICPARAMETER(Dev, Pin0GpioFunctionality,
+			GpioFunctionality);
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_SetInterruptThresholds(struct vl_data *Dev,
+	uint8_t DeviceMode, unsigned int ThresholdLow,
+	unsigned int ThresholdHigh)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint16_t Threshold16;
+
+	LOG_FUNCTION_START("");
+
+	/* no dependency on DeviceMode for Ewok */
+	/* Divide by 2 because the FW applies a x2 */
+	Threshold16 = (uint16_t)((ThresholdLow >> 17) & 0x00fff);
+	Status = VL_WrWord(Dev, VL_REG_SYSTEM_THRESH_LOW,
+		Threshold16);
+
+	if (Status == VL_ERROR_NONE) {
+		/* Divide by 2 because the FW applies a x2 */
+		Threshold16 = (uint16_t)((ThresholdHigh >> 17) & 0x00fff);
+		Status = VL_WrWord(Dev, VL_REG_SYSTEM_THRESH_HIGH,
+			Threshold16);
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetInterruptThresholds(struct vl_data *Dev,
+	uint8_t DeviceMode, unsigned int *pThresholdLow,
+	unsigned int *pThresholdHigh)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint16_t Threshold16;
+
+	LOG_FUNCTION_START("");
+
+	/* no dependency on DeviceMode for Ewok */
+
+	Status = VL_RdWord(Dev, VL_REG_SYSTEM_THRESH_LOW,
+		&Threshold16);
+	/* Multiply by 2 because the FW applies a x2 */
+	*pThresholdLow = (unsigned int)((0x00fff & Threshold16) << 17);
+
+	if (Status == VL_ERROR_NONE) {
+		Status = VL_RdWord(Dev, VL_REG_SYSTEM_THRESH_HIGH,
+			&Threshold16);
+		/* Multiply by 2 because the FW applies a x2 */
+		*pThresholdHigh =
+			(unsigned int)((0x00fff & Threshold16) << 17);
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetStopCompletedStatus(struct vl_data *Dev,
+	uint32_t *pStopStatus)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t Byte = 0;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_WrByte(Dev, 0xFF, 0x01);
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_RdByte(Dev, 0x04, &Byte);
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_WrByte(Dev, 0xFF, 0x0);
+
+	*pStopStatus = Byte;
+
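+	/* Once the stop has completed (status reads 0), re-apply the saved
+	 * stop variable using the same device-specific sequence as
+	 * VL_StartMeasurement().
+	 */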
+	if (Byte == 0) {
+		Status = VL_WrByte(Dev, 0x80, 0x01);
+		Status = VL_WrByte(Dev, 0xFF, 0x01);
+		Status = VL_WrByte(Dev, 0x00, 0x00);
+		Status = VL_WrByte(Dev, 0x91,
+			PALDevDataGet(Dev, StopVariable));
+		Status = VL_WrByte(Dev, 0x00, 0x01);
+		Status = VL_WrByte(Dev, 0xFF, 0x00);
+		Status = VL_WrByte(Dev, 0x80, 0x00);
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+/* Group PAL Interrupt Functions */
+int8_t VL_ClearInterruptMask(struct vl_data *Dev,
+	uint32_t InterruptMask)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t LoopCount;
+	uint8_t Byte;
+
+	LOG_FUNCTION_START("");
+
+	/* clear bit 0 (range interrupt) and bit 1 (error interrupt) */
+	LoopCount = 0;
+	do {
+		Status = VL_WrByte(Dev,
+			VL_REG_SYSTEM_INTERRUPT_CLEAR, 0x01);
+		Status |= VL_WrByte(Dev,
+			VL_REG_SYSTEM_INTERRUPT_CLEAR, 0x00);
+		Status |= VL_RdByte(Dev,
+			VL_REG_RESULT_INTERRUPT_STATUS, &Byte);
+		LoopCount++;
+	} while (((Byte & 0x07) != 0x00)
+			&& (LoopCount < 3)
+			&& (Status == VL_ERROR_NONE));
+
+
+	if (LoopCount >= 3)
+		Status = VL_ERROR_INTERRUPT_NOT_CLEARED;
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetInterruptMaskStatus(struct vl_data *Dev,
+	uint32_t *pInterruptMaskStatus)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t Byte;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_RdByte(Dev, VL_REG_RESULT_INTERRUPT_STATUS,
+		&Byte);
+	*pInterruptMaskStatus = Byte & 0x07;
+
+	if (Byte & 0x18)
+		Status = VL_ERROR_RANGE_ERROR;
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_EnableInterruptMask(struct vl_data *Dev,
+	uint32_t InterruptMask)
+{
+	int8_t Status = VL_ERROR_NOT_IMPLEMENTED;
+
+	LOG_FUNCTION_START("");
+
+	/* not implemented for VL53L0X */
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+/* End Group PAL Interrupt Functions */
+
+/* Group SPAD functions */
+
+int8_t VL_SetSpadAmbientDamperThreshold(struct vl_data *Dev,
+	uint16_t SpadAmbientDamperThreshold)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_WrByte(Dev, 0xFF, 0x01);
+	Status |= VL_WrWord(Dev, 0x40, SpadAmbientDamperThreshold);
+	Status |= VL_WrByte(Dev, 0xFF, 0x00);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetSpadAmbientDamperThreshold(struct vl_data *Dev,
+	uint16_t *pSpadAmbientDamperThreshold)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_WrByte(Dev, 0xFF, 0x01);
+	Status |= VL_RdWord(Dev, 0x40, pSpadAmbientDamperThreshold);
+	Status |= VL_WrByte(Dev, 0xFF, 0x00);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_SetSpadAmbientDamperFactor(struct vl_data *Dev,
+	uint16_t SpadAmbientDamperFactor)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t Byte;
+
+	LOG_FUNCTION_START("");
+
+	Byte = (uint8_t)(SpadAmbientDamperFactor & 0x00FF);
+
+	Status = VL_WrByte(Dev, 0xFF, 0x01);
+	Status |= VL_WrByte(Dev, 0x42, Byte);
+	Status |= VL_WrByte(Dev, 0xFF, 0x00);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_GetSpadAmbientDamperFactor(struct vl_data *Dev,
+	uint16_t *pSpadAmbientDamperFactor)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t Byte;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_WrByte(Dev, 0xFF, 0x01);
+	Status |= VL_RdByte(Dev, 0x42, &Byte);
+	Status |= VL_WrByte(Dev, 0xFF, 0x00);
+	*pSpadAmbientDamperFactor = (uint16_t)Byte;
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+/* END Group SPAD functions */
+
+/*****************************************************************************
+ * Internal functions
+ *****************************************************************************/
+
+int8_t VL_SetReferenceSpads(struct vl_data *Dev, uint32_t count,
+	uint8_t isApertureSpads)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_set_reference_spads(Dev, count, isApertureSpads);
+
+	LOG_FUNCTION_END(Status);
+
+	return Status;
+}
+
+int8_t VL_GetReferenceSpads(struct vl_data *Dev, uint32_t *pSpadCount,
+	uint8_t *pIsApertureSpads)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_get_reference_spads(Dev, pSpadCount, pIsApertureSpads);
+
+	LOG_FUNCTION_END(Status);
+
+	return Status;
+}
+
+int8_t VL_PerformRefSpadManagement(struct vl_data *Dev,
+	uint32_t *refSpadCount, uint8_t *isApertureSpads)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_perform_ref_spad_management(Dev, refSpadCount,
+		isApertureSpads);
+
+	LOG_FUNCTION_END(Status);
+
+	return Status;
+}
diff --git a/drivers/input/misc/vl53l0x/src/vl53l0x_api_calibration.c b/drivers/input/misc/vl53l0x/src/vl53l0x_api_calibration.c
new file mode 100644
index 0000000..3905bd2
--- /dev/null
+++ b/drivers/input/misc/vl53l0x/src/vl53l0x_api_calibration.c
@@ -0,0 +1,1266 @@
+/*
+ *  vl53l0x_api_calibration.c - Linux kernel modules for
+ *  STM VL53L0 FlightSense TOF sensor
+ *
+ *  Copyright (C) 2016 STMicroelectronics Imaging Division.
+ *  Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#include "vl53l0x_api.h"
+#include "vl53l0x_api_core.h"
+#include "vl53l0x_api_calibration.h"
+
+#ifndef __KERNEL__
+#include <stdlib.h>
+#endif
+
+#define LOG_FUNCTION_START(fmt, ...) \
+	_LOG_FUNCTION_START(TRACE_MODULE_API, fmt, ##__VA_ARGS__)
+#define LOG_FUNCTION_END(status, ...) \
+	_LOG_FUNCTION_END(TRACE_MODULE_API, status, ##__VA_ARGS__)
+#define LOG_FUNCTION_END_FMT(status, fmt, ...) \
+	_LOG_FUNCTION_END_FMT(TRACE_MODULE_API, status, fmt, ##__VA_ARGS__)
+
+#define REF_ARRAY_SPAD_0  0
+#define REF_ARRAY_SPAD_5  5
+#define REF_ARRAY_SPAD_10 10
+
+uint32_t refArrayQuadrants[4] = {REF_ARRAY_SPAD_10, REF_ARRAY_SPAD_5,
+		REF_ARRAY_SPAD_0, REF_ARRAY_SPAD_5 };
+
+int8_t VL_perform_xtalk_calibration(struct vl_data *Dev,
+			unsigned int XTalkCalDistance,
+			unsigned int *pXTalkCompensationRateMegaCps)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint16_t sum_ranging = 0;
+	uint16_t sum_spads = 0;
+	unsigned int sum_signalRate = 0;
+	unsigned int total_count = 0;
+	uint8_t xtalk_meas = 0;
+	struct VL_RangingMeasurementData_t RangingMeasurementData;
+	unsigned int xTalkStoredMeanSignalRate;
+	unsigned int xTalkStoredMeanRange;
+	unsigned int xTalkStoredMeanRtnSpads;
+	uint32_t signalXTalkTotalPerSpad;
+	uint32_t xTalkStoredMeanRtnSpadsAsInt;
+	uint32_t xTalkCalDistanceAsInt;
+	unsigned int XTalkCompensationRateMegaCps;
+
+	if (XTalkCalDistance <= 0)
+		Status = VL_ERROR_INVALID_PARAMS;
+
+	/* Disable the XTalk compensation */
+	if (Status == VL_ERROR_NONE)
+		Status = VL_SetXTalkCompensationEnable(Dev, 0);
+
+	/* Disable the RIT */
+	if (Status == VL_ERROR_NONE) {
+		Status = VL_SetLimitCheckEnable(Dev,
+				VL_CHECKENABLE_RANGE_IGNORE_THRESHOLD, 0);
+	}
+
+	/* Perform 50 measurements and compute the averages */
+	if (Status == VL_ERROR_NONE) {
+		sum_ranging = 0;
+		sum_spads = 0;
+		sum_signalRate = 0;
+		total_count = 0;
+		for (xtalk_meas = 0; xtalk_meas < 50; xtalk_meas++) {
+			Status = VL_PerformSingleRangingMeasurement(Dev,
+				&RangingMeasurementData);
+
+			if (Status != VL_ERROR_NONE)
+				break;
+
+			/* The range is valid when RangeStatus = 0 */
+			if (RangingMeasurementData.RangeStatus == 0) {
+				sum_ranging = sum_ranging +
+					RangingMeasurementData.RangeMilliMeter;
+				sum_signalRate = sum_signalRate +
+				RangingMeasurementData.SignalRateRtnMegaCps;
+				sum_spads = sum_spads +
+				RangingMeasurementData.EffectiveSpadRtnCount
+					/ 256;
+				total_count = total_count + 1;
+			}
+		}
+
+		/* no valid values found */
+		if (total_count == 0)
+			Status = VL_ERROR_RANGE_ERROR;
+
+	}
+
+
+	if (Status == VL_ERROR_NONE) {
+		/* unsigned int / uint16_t = unsigned int */
+		xTalkStoredMeanSignalRate = sum_signalRate / total_count;
+		xTalkStoredMeanRange = (unsigned int)((uint32_t)(
+			sum_ranging << 16) / total_count);
+		xTalkStoredMeanRtnSpads = (unsigned int)((uint32_t)(
+			sum_spads << 16) / total_count);
+
+		/* Round Mean Spads to Whole Number.
+		 * Typically the calculated mean SPAD count is a whole number
+		 * or very close to a whole number, therefore any truncation
+		 * will not result in a significant loss in accuracy.
+		 * Also, for a grey target at a typical distance of around
+		 * 400mm, around 220 SPADs will be enabled, therefore any
+		 * truncation will result in a loss of accuracy of less than
+		 * 0.5%.
+		 */
+		xTalkStoredMeanRtnSpadsAsInt = (xTalkStoredMeanRtnSpads +
+			0x8000) >> 16;
+
+		/* Round Cal Distance to Whole Number. */
+		/* Note that the cal distance is in mm, */
+		/* therefore no resolution is lost.*/
+		 xTalkCalDistanceAsInt = (XTalkCalDistance + 0x8000) >> 16;
+
+		if (xTalkStoredMeanRtnSpadsAsInt == 0 ||
+		   xTalkCalDistanceAsInt == 0 ||
+		   xTalkStoredMeanRange >= XTalkCalDistance) {
+			XTalkCompensationRateMegaCps = 0;
+		} else {
+			/* Round Cal Distance to Whole Number. */
+			/* Note that the cal distance is in mm, therefore no */
+			/* resolution is lost.*/
+			xTalkCalDistanceAsInt = (XTalkCalDistance +
+				0x8000) >> 16;
+
+			/* Apply division by mean spad count early in the */
+			/* calculation to keep the numbers small. */
+			/* This ensures we can maintain a 32bit calculation. */
+			/* Fixed1616 / int := Fixed1616 */
+			signalXTalkTotalPerSpad = (xTalkStoredMeanSignalRate) /
+				xTalkStoredMeanRtnSpadsAsInt;
+
+			/* Complete the calculation for total Signal */
+			/* XTalk per SPAD */
+			/* Fixed1616 * (Fixed1616 - Fixed1616/int) := */
+			/* (2^16 * Fixed1616) */
+
+			signalXTalkTotalPerSpad *= ((1 << 16) -
+				(xTalkStoredMeanRange / xTalkCalDistanceAsInt));
+
+			/* Round from 2^16 * Fixed1616, to Fixed1616. */
+			XTalkCompensationRateMegaCps = (signalXTalkTotalPerSpad
+				+ 0x8000) >> 16;
+		}
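+
+		/* Illustrative example with assumed averages: a mean signal
+		 * rate of 6.0 MCPS (FixPoint1616 393216), 200 mean SPADs and
+		 * a mean range of 350 mm against a 400 mm cal distance give
+		 * signalXTalkTotalPerSpad = 393216 / 200 = 1966, scaled by
+		 * 65536 - (350 << 16) / 400 = 8192, so the compensation rate
+		 * is (1966 * 8192 + 0x8000) >> 16 = 246, i.e. roughly
+		 * 0.0038 MCPS of crosstalk per SPAD.
+		 */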
+
+		*pXTalkCompensationRateMegaCps = XTalkCompensationRateMegaCps;
+
+		/* Enable the XTalk compensation */
+		if (Status == VL_ERROR_NONE)
+			Status = VL_SetXTalkCompensationEnable(Dev, 1);
+
+		/* Enable the XTalk compensation */
+		if (Status == VL_ERROR_NONE)
+			Status = VL_SetXTalkCompensationRateMegaCps(Dev,
+					XTalkCompensationRateMegaCps);
+
+	}
+
+	return Status;
+}
+
+int8_t VL_perform_offset_calibration(struct vl_data *Dev,
+			unsigned int CalDistanceMilliMeter,
+			int32_t *pOffsetMicroMeter)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint16_t sum_ranging = 0;
+	unsigned int total_count = 0;
+	struct VL_RangingMeasurementData_t RangingMeasurementData;
+	unsigned int StoredMeanRange;
+	uint32_t StoredMeanRangeAsInt;
+	uint32_t CalDistanceAsInt_mm;
+	uint8_t SequenceStepEnabled;
+	int meas = 0;
+
+	if (CalDistanceMilliMeter <= 0)
+		Status = VL_ERROR_INVALID_PARAMS;
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_SetOffsetCalibrationDataMicroMeter(Dev, 0);
+
+
+	/* Get the value of the TCC */
+	if (Status == VL_ERROR_NONE)
+		Status = VL_GetSequenceStepEnable(Dev,
+				VL_SEQUENCESTEP_TCC, &SequenceStepEnabled);
+
+
+	/* Disable the TCC */
+	if (Status == VL_ERROR_NONE)
+		Status = VL_SetSequenceStepEnable(Dev,
+				VL_SEQUENCESTEP_TCC, 0);
+
+
+	/* Disable the RIT */
+	if (Status == VL_ERROR_NONE)
+		Status = VL_SetLimitCheckEnable(Dev,
+				VL_CHECKENABLE_RANGE_IGNORE_THRESHOLD, 0);
+
+	/* Perform 50 measurements and compute the averages */
+	if (Status == VL_ERROR_NONE) {
+		sum_ranging = 0;
+		total_count = 0;
+		for (meas = 0; meas < 50; meas++) {
+			Status = VL_PerformSingleRangingMeasurement(Dev,
+					&RangingMeasurementData);
+
+			if (Status != VL_ERROR_NONE)
+				break;
+
+			/* The range is valid when RangeStatus = 0 */
+			if (RangingMeasurementData.RangeStatus == 0) {
+				sum_ranging = sum_ranging +
+					RangingMeasurementData.RangeMilliMeter;
+				total_count = total_count + 1;
+			}
+		}
+
+		/* no valid values found */
+		if (total_count == 0)
+			Status = VL_ERROR_RANGE_ERROR;
+	}
+
+
+	if (Status == VL_ERROR_NONE) {
+		/* unsigned int / uint16_t = unsigned int */
+		StoredMeanRange = (unsigned int)((uint32_t)(sum_ranging << 16)
+			/ total_count);
+
+		StoredMeanRangeAsInt = (StoredMeanRange + 0x8000) >> 16;
+
+		/* Round Cal Distance to Whole Number. */
+		/* Note that the cal distance is in mm, */
+		/* therefore no resolution is lost.*/
+		 CalDistanceAsInt_mm = (CalDistanceMilliMeter + 0x8000) >> 16;
+
+		 *pOffsetMicroMeter = (CalDistanceAsInt_mm -
+				 StoredMeanRangeAsInt) * 1000;
+
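+		/* For example, with a cal distance of 100 mm (passed as
+		 * 100 << 16 in FixPoint1616) and a 50-measurement mean of
+		 * 103 mm, the offset is (100 - 103) * 1000 = -3000 um.
+		 */
+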
+		/* Apply the calculated offset */
+		if (Status == VL_ERROR_NONE) {
+			VL_SETPARAMETERFIELD(Dev, RangeOffsetMicroMeters,
+					*pOffsetMicroMeter);
+			Status = VL_SetOffsetCalibrationDataMicroMeter(Dev,
+					*pOffsetMicroMeter);
+		}
+
+	}
+
+	/* Restore the TCC */
+	if (Status == VL_ERROR_NONE) {
+		if (SequenceStepEnabled != 0)
+			Status = VL_SetSequenceStepEnable(Dev,
+					VL_SEQUENCESTEP_TCC, 1);
+	}
+
+	return Status;
+}
+
+
+int8_t VL_set_offset_calibration_data_micro_meter(struct vl_data *Dev,
+		int32_t OffsetCalibrationDataMicroMeter)
+{
+	int8_t Status = VL_ERROR_NONE;
+	int32_t cMaxOffsetMicroMeter = 511000;
+	int32_t cMinOffsetMicroMeter = -512000;
+	int16_t cOffsetRange = 4096;
+	uint32_t encodedOffsetVal;
+
+	LOG_FUNCTION_START("");
+
+	if (OffsetCalibrationDataMicroMeter > cMaxOffsetMicroMeter)
+		OffsetCalibrationDataMicroMeter = cMaxOffsetMicroMeter;
+	else if (OffsetCalibrationDataMicroMeter < cMinOffsetMicroMeter)
+		OffsetCalibrationDataMicroMeter = cMinOffsetMicroMeter;
+
+	/* The offset register is 10.2 format and units are mm
+	 * therefore conversion is applied by a division of
+	 * 250.
+	 */
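+	/* One register LSB is therefore 0.25 mm = 250 um: +30000 um encodes
+	 * to 30000 / 250 = 120, and -10000 um encodes to
+	 * 4096 + (-10000 / 250) = 4056.
+	 */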
+	if (OffsetCalibrationDataMicroMeter >= 0) {
+		encodedOffsetVal =
+			OffsetCalibrationDataMicroMeter/250;
+	} else {
+		encodedOffsetVal =
+			cOffsetRange +
+			OffsetCalibrationDataMicroMeter/250;
+	}
+
+	Status = VL_WrWord(Dev,
+		VL_REG_ALGO_PART_TO_PART_RANGE_OFFSET_MM,
+		encodedOffsetVal);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_get_offset_calibration_data_micro_meter(struct vl_data *Dev,
+		int32_t *pOffsetCalibrationDataMicroMeter)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint16_t RangeOffsetRegister;
+	int16_t cMaxOffset = 2047;
+	int16_t cOffsetRange = 4096;
+
+	/* Note that offset has 10.2 format */
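+	/* For example, a register value of 4056 (> 2047) decodes to
+	 * (4056 - 4096) * 250 = -10000 um.
+	 */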
+
+	Status = VL_RdWord(Dev,
+				VL_REG_ALGO_PART_TO_PART_RANGE_OFFSET_MM,
+				&RangeOffsetRegister);
+
+	if (Status == VL_ERROR_NONE) {
+		RangeOffsetRegister = (RangeOffsetRegister & 0x0fff);
+
+		/* Apply 12 bit 2's complement conversion */
+		if (RangeOffsetRegister > cMaxOffset)
+			*pOffsetCalibrationDataMicroMeter =
+				(int16_t)(RangeOffsetRegister - cOffsetRange)
+					* 250;
+		else
+			*pOffsetCalibrationDataMicroMeter =
+				(int16_t)RangeOffsetRegister * 250;
+
+	}
+
+	return Status;
+}
+
+
+int8_t VL_apply_offset_adjustment(struct vl_data *Dev)
+{
+	int8_t Status = VL_ERROR_NONE;
+	int32_t CorrectedOffsetMicroMeters;
+	int32_t CurrentOffsetMicroMeters;
+
+	/* When this function runs, all the NVM info used by the API */
+	/* can be read */
+	Status = VL_get_info_from_device(Dev, 7);
+
+	/* Read back current device offset */
+	if (Status == VL_ERROR_NONE) {
+		Status = VL_GetOffsetCalibrationDataMicroMeter(Dev,
+					&CurrentOffsetMicroMeters);
+	}
+
+	/* Apply Offset Adjustment derived from 400mm measurements */
+	if (Status == VL_ERROR_NONE) {
+
+		/* Store initial device offset */
+		PALDevDataSet(Dev, Part2PartOffsetNVMMicroMeter,
+			CurrentOffsetMicroMeters);
+
+		CorrectedOffsetMicroMeters = CurrentOffsetMicroMeters +
+			(int32_t)PALDevDataGet(Dev,
+				Part2PartOffsetAdjustmentNVMMicroMeter);
+
+		Status = VL_SetOffsetCalibrationDataMicroMeter(Dev,
+					CorrectedOffsetMicroMeters);
+
+		/* store current, adjusted offset */
+		if (Status == VL_ERROR_NONE) {
+			VL_SETPARAMETERFIELD(Dev, RangeOffsetMicroMeters,
+					CorrectedOffsetMicroMeters);
+		}
+	}
+
+	return Status;
+}
+
+void get_next_good_spad(uint8_t goodSpadArray[], uint32_t size,
+			uint32_t curr, int32_t *next)
+{
+	uint32_t startIndex;
+	uint32_t fineOffset;
+	uint32_t cSpadsPerByte = 8;
+	uint32_t coarseIndex;
+	uint32_t fineIndex;
+	uint8_t dataByte;
+	uint8_t success = 0;
+
+	/*
+	 * Starting with the current good spad, loop through the array to find
+	 * the next. i.e. the next bit set in the sequence.
+	 *
+	 * The coarse index is the byte index of the array and the fine index is
+	 * the index of the bit within each byte.
+	 */
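+	/* For example, with curr = 10 and goodSpadArray[1] = 0xF0 (bits 4..7
+	 * set), the search starts at byte 1, bit 2, and returns
+	 * *next = 1 * 8 + 4 = 12.
+	 */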
+
+	*next = -1;
+
+	startIndex = curr / cSpadsPerByte;
+	fineOffset = curr % cSpadsPerByte;
+
+	for (coarseIndex = startIndex; ((coarseIndex < size) && !success);
+				coarseIndex++) {
+		fineIndex = 0;
+		dataByte = goodSpadArray[coarseIndex];
+
+		if (coarseIndex == startIndex) {
+			/* locate the bit position of the provided current */
+			/* spad bit before iterating */
+			dataByte >>= fineOffset;
+			fineIndex = fineOffset;
+		}
+
+		while (fineIndex < cSpadsPerByte) {
+			if ((dataByte & 0x1) == 1) {
+				success = 1;
+				*next = coarseIndex * cSpadsPerByte + fineIndex;
+				break;
+			}
+			dataByte >>= 1;
+			fineIndex++;
+		}
+	}
+}
+
+
+uint8_t is_aperture(uint32_t spadIndex)
+{
+	/*
+	 * This function reports if a given spad index is an aperture SPAD by
+	 * deriving the quadrant.
+	 */
+	uint32_t quadrant;
+	uint8_t isAperture = 1;
+
+	quadrant = spadIndex >> 6;
+	if (refArrayQuadrants[quadrant] == REF_ARRAY_SPAD_0)
+		isAperture = 0;
+
+	return isAperture;
+}
+
+
+int8_t enable_spad_bit(uint8_t spadArray[], uint32_t size,
+	uint32_t spadIndex)
+{
+	int8_t status = VL_ERROR_NONE;
+	uint32_t cSpadsPerByte = 8;
+	uint32_t coarseIndex;
+	uint32_t fineIndex;
+
+	coarseIndex = spadIndex / cSpadsPerByte;
+	fineIndex = spadIndex % cSpadsPerByte;
+	if (coarseIndex >= size)
+		status = VL_ERROR_REF_SPAD_INIT;
+	else
+		spadArray[coarseIndex] |= (1 << fineIndex);
+
+	return status;
+}
+
+int8_t count_enabled_spads(uint8_t spadArray[],
+		uint32_t byteCount, uint32_t maxSpads,
+		uint32_t *pTotalSpadsEnabled, uint8_t *pIsAperture)
+{
+	int8_t status = VL_ERROR_NONE;
+	uint32_t cSpadsPerByte = 8;
+	uint32_t lastByte;
+	uint32_t lastBit;
+	uint32_t byteIndex = 0;
+	uint32_t bitIndex = 0;
+	uint8_t tempByte;
+	uint8_t spadTypeIdentified = 0;
+
+	/* The entire array will not be used for spads, therefore the last
+	 * byte and last bit are determined from the max spads value.
+	 */
+
+	lastByte = maxSpads / cSpadsPerByte;
+	lastBit = maxSpads % cSpadsPerByte;
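+
+	/* For example, maxSpads = 44 gives lastByte = 5 and lastBit = 4, so
+	 * bytes 0..4 are scanned in full and only the low bits of byte 5 are
+	 * examined.
+	 */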
+
+	/* Check that the max spads value does not exceed the array bounds. */
+	if (lastByte >= byteCount)
+		status = VL_ERROR_REF_SPAD_INIT;
+
+	*pTotalSpadsEnabled = 0;
+
+	/* Count the bits enabled in the whole bytes */
+	for (byteIndex = 0; byteIndex <= (lastByte - 1); byteIndex++) {
+		tempByte = spadArray[byteIndex];
+
+		for (bitIndex = 0; bitIndex < cSpadsPerByte; bitIndex++) {
+			if ((tempByte & 0x01) == 1) {
+				(*pTotalSpadsEnabled)++;
+				if (!spadTypeIdentified) {
+					*pIsAperture = 1;
+					if ((byteIndex < 2) && (bitIndex < 4))
+						*pIsAperture = 0;
+					spadTypeIdentified = 1;
+				}
+			}
+			tempByte >>= 1;
+		}
+	}
+
+	/* Count the number of bits enabled in the last byte accounting
+	 * for the fact that not all bits in the byte may be used.
+	 */
+	tempByte = spadArray[lastByte];
+
+	for (bitIndex = 0; bitIndex <= lastBit; bitIndex++) {
+		if ((tempByte & 0x01) == 1)
+			(*pTotalSpadsEnabled)++;
+		tempByte >>= 1;
+	}
+
+	return status;
+}
+
+int8_t set_ref_spad_map(struct vl_data *Dev, uint8_t *refSpadArray)
+{
+	int8_t status = VL_WriteMulti(Dev,
+				VL_REG_GLOBAL_CONFIG_SPAD_ENABLES_REF_0,
+				refSpadArray, 6);
+	return status;
+}
+
+int8_t get_ref_spad_map(struct vl_data *Dev, uint8_t *refSpadArray)
+{
+	int8_t status = VL_ReadMulti(Dev,
+				VL_REG_GLOBAL_CONFIG_SPAD_ENABLES_REF_0,
+				refSpadArray,
+				6);
+	return status;
+}
+
+int8_t enable_ref_spads(struct vl_data *Dev,
+				uint8_t apertureSpads,
+				uint8_t goodSpadArray[],
+				uint8_t spadArray[],
+				uint32_t size,
+				uint32_t start,
+				uint32_t offset,
+				uint32_t spadCount,
+				uint32_t *lastSpad)
+{
+	int8_t status = VL_ERROR_NONE;
+	uint32_t index;
+	uint32_t i;
+	int32_t nextGoodSpad = offset;
+	uint32_t currentSpad;
+	uint8_t checkSpadArray[6];
+
+	/*
+	 * This function takes in a spad array which may or may not have SPADS
+	 * already enabled and appends from a given offset a requested number
+	 * of new SPAD enables. The 'good spad map' is applied to
+	 * determine the next SPADs to enable.
+	 *
+	 * This function applies to only aperture or only non-aperture spads.
+	 * Checks are performed to ensure this.
+	 */
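+	/* With the refArrayQuadrants table above and start = 0xB4 (180),
+	 * array bit indices 0..11 map to device spads 180..191 in the
+	 * non-aperture quadrant, while indices 12..43 map to spads 192..223
+	 * in the adjacent aperture quadrant.
+	 */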
+
+	currentSpad = offset;
+	for (index = 0; index < spadCount; index++) {
+		get_next_good_spad(goodSpadArray, size, currentSpad,
+			&nextGoodSpad);
+
+		if (nextGoodSpad == -1) {
+			status = VL_ERROR_REF_SPAD_INIT;
+			break;
+		}
+
+		/* Confirm that the next good SPAD is of the requested type */
+		if (is_aperture(start + nextGoodSpad) != apertureSpads) {
+			/* if we can't get the required number of good aperture
+			 * spads from the current quadrant then this is an error
+			 */
+			status = VL_ERROR_REF_SPAD_INIT;
+			break;
+		}
+		currentSpad = (uint32_t)nextGoodSpad;
+		enable_spad_bit(spadArray, size, currentSpad);
+		currentSpad++;
+	}
+	*lastSpad = currentSpad;
+
+	if (status == VL_ERROR_NONE)
+		status = set_ref_spad_map(Dev, spadArray);
+
+
+	if (status == VL_ERROR_NONE) {
+		status = get_ref_spad_map(Dev, checkSpadArray);
+
+		i = 0;
+
+		/* Compare spad maps. If not equal report error. */
+		while (i < size) {
+			if (spadArray[i] != checkSpadArray[i]) {
+				status = VL_ERROR_REF_SPAD_INIT;
+				break;
+			}
+			i++;
+		}
+	}
+	return status;
+}
+
+
+int8_t perform_ref_signal_measurement(struct vl_data *Dev,
+		uint16_t *refSignalRate)
+{
+	int8_t status = VL_ERROR_NONE;
+	struct VL_RangingMeasurementData_t rangingMeasurementData;
+
+	uint8_t SequenceConfig = 0;
+
+	/* store the value of the sequence config,
+	 * this will be reset before the end of the function
+	 */
+
+	SequenceConfig = PALDevDataGet(Dev, SequenceConfig);
+
+	/*
+	 * This function performs a reference signal rate measurement.
+	 */
+	if (status == VL_ERROR_NONE)
+		status = VL_WrByte(Dev,
+			VL_REG_SYSTEM_SEQUENCE_CONFIG, 0xC0);
+
+	if (status == VL_ERROR_NONE)
+		status = VL_PerformSingleRangingMeasurement(Dev,
+				&rangingMeasurementData);
+
+	if (status == VL_ERROR_NONE)
+		status = VL_WrByte(Dev, 0xFF, 0x01);
+
+	if (status == VL_ERROR_NONE)
+		status = VL_RdWord(Dev,
+			VL_REG_RESULT_PEAK_SIGNAL_RATE_REF,
+			refSignalRate);
+
+	if (status == VL_ERROR_NONE)
+		status = VL_WrByte(Dev, 0xFF, 0x00);
+
+	if (status == VL_ERROR_NONE) {
+		/* restore the previous Sequence Config */
+		status = VL_WrByte(Dev, VL_REG_SYSTEM_SEQUENCE_CONFIG,
+				SequenceConfig);
+		if (status == VL_ERROR_NONE)
+			PALDevDataSet(Dev, SequenceConfig, SequenceConfig);
+	}
+
+	return status;
+}
+
+int8_t VL_perform_ref_spad_management(struct vl_data *Dev,
+				uint32_t *refSpadCount,
+				uint8_t *isApertureSpads)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t lastSpadArray[6];
+	uint8_t startSelect = 0xB4;
+	uint32_t minimumSpadCount = 3;
+	uint32_t maxSpadCount = 44;
+	uint32_t currentSpadIndex = 0;
+	uint32_t lastSpadIndex = 0;
+	int32_t nextGoodSpad = 0;
+	uint16_t targetRefRate = 0x0A00; /* 20 MCPS in 9:7 format */
+	uint16_t peakSignalRateRef;
+	uint32_t needAptSpads = 0;
+	uint32_t index = 0;
+	uint32_t spadArraySize = 6;
+	uint32_t signalRateDiff = 0;
+	uint32_t lastSignalRateDiff = 0;
+	uint8_t complete = 0;
+	uint8_t VhvSettings = 0;
+	uint8_t PhaseCal = 0;
+	uint32_t refSpadCount_int = 0;
+	uint8_t	 isApertureSpads_int = 0;
+
+	/*
+	 * The reference SPAD initialization procedure determines the minimum
+	 * number of reference spads to be enabled to achieve a target
+	 * reference signal rate and should be performed once during
+	 * initialization.
+	 *
+	 * Either aperture or non-aperture spads are applied but never both.
+	 * Firstly non-aperture spads are set, beginning with the minimum
+	 * spad count, and increased one spad at a time until the closest
+	 * measurement to the target rate is achieved.
+	 *
+	 * If the target rate is already exceeded with the minimum number of
+	 * non-aperture spads enabled, initialization is performed instead
+	 * with aperture spads.
+	 *
+	 * When setting spads, a 'Good Spad Map' is applied.
+	 *
+	 * This procedure operates within a SPAD window of interest of a maximum
+	 * 44 spads.
+	 * The start point is currently fixed to 180, which lies towards the end
+	 * of the non-aperture quadrant and runs in to the adjacent aperture
+	 * quadrant.
+	 */
+
+
+	targetRefRate = PALDevDataGet(Dev, targetRefRate);
+
+	/*
+	 * Initialize Spad arrays.
+	 * Currently the good spad map is initialised to 'All good'.
+	 * This is a short term implementation. The good spad map will be
+	 * provided as an input.
+	 * Note that there are 6 bytes. Only the first 44 bits will be used to
+	 * represent spads.
+	 */
+	for (index = 0; index < spadArraySize; index++)
+		Dev->Data.SpadData.RefSpadEnables[index] = 0;
+
+
+	Status = VL_WrByte(Dev, 0xFF, 0x01);
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_WrByte(Dev,
+			VL_REG_DYNAMIC_SPAD_REF_EN_START_OFFSET, 0x00);
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_WrByte(Dev,
+			VL_REG_DYNAMIC_SPAD_NUM_REQUESTED_REF_SPAD, 0x2C);
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_WrByte(Dev, 0xFF, 0x00);
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_WrByte(Dev,
+			VL_REG_GLOBAL_CONFIG_REF_EN_START_SELECT,
+			startSelect);
+
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_WrByte(Dev,
+			VL_REG_POWER_MANAGEMENT_GO1_POWER_FORCE, 0);
+
+	/* Perform ref calibration */
+	if (Status == VL_ERROR_NONE)
+		Status = VL_perform_ref_calibration(Dev, &VhvSettings,
+			&PhaseCal, 0);
+
+	if (Status == VL_ERROR_NONE) {
+		/* Enable Minimum NON-APERTURE Spads */
+		currentSpadIndex = 0;
+		lastSpadIndex = currentSpadIndex;
+		needAptSpads = 0;
+		Status = enable_ref_spads(Dev,
+					needAptSpads,
+					Dev->Data.SpadData.RefGoodSpadMap,
+					Dev->Data.SpadData.RefSpadEnables,
+					spadArraySize,
+					startSelect,
+					currentSpadIndex,
+					minimumSpadCount,
+					&lastSpadIndex);
+	}
+
+	if (Status == VL_ERROR_NONE) {
+		currentSpadIndex = lastSpadIndex;
+
+		Status = perform_ref_signal_measurement(Dev,
+			&peakSignalRateRef);
+		if ((Status == VL_ERROR_NONE) &&
+			(peakSignalRateRef > targetRefRate)) {
+			/* Signal rate measurement too high, */
+			/* switch to APERTURE SPADs */
+
+			for (index = 0; index < spadArraySize; index++)
+				Dev->Data.SpadData.RefSpadEnables[index] = 0;
+
+
+			/* Increment to the first APERTURE spad */
+			while ((is_aperture(startSelect + currentSpadIndex)
+				== 0) && (currentSpadIndex < maxSpadCount)) {
+				currentSpadIndex++;
+			}
+
+			needAptSpads = 1;
+
+			Status = enable_ref_spads(Dev,
+					needAptSpads,
+					Dev->Data.SpadData.RefGoodSpadMap,
+					Dev->Data.SpadData.RefSpadEnables,
+					spadArraySize,
+					startSelect,
+					currentSpadIndex,
+					minimumSpadCount,
+					&lastSpadIndex);
+
+			if (Status == VL_ERROR_NONE) {
+				currentSpadIndex = lastSpadIndex;
+				Status = perform_ref_signal_measurement(Dev,
+						&peakSignalRateRef);
+
+				if ((Status == VL_ERROR_NONE) &&
+					(peakSignalRateRef > targetRefRate)) {
+					/* Signal rate still too high after
+					 * setting the minimum number of
+					 * APERTURE spads. Can do no more
+					 * therefore set the min number of
+					 * aperture spads as the result.
+					 */
+					isApertureSpads_int = 1;
+					refSpadCount_int = minimumSpadCount;
+				}
+			}
+		} else {
+			needAptSpads = 0;
+		}
+	}
+
+	if ((Status == VL_ERROR_NONE) &&
+		(peakSignalRateRef < targetRefRate)) {
+		/* At this point, the minimum number of either aperture
+		 * or non-aperture spads have been set. Proceed to add
+		 * spads and perform measurements until the target
+		 * reference is reached.
+		 */
+		isApertureSpads_int = needAptSpads;
+		refSpadCount_int	= minimumSpadCount;
+
+		memcpy(lastSpadArray, Dev->Data.SpadData.RefSpadEnables,
+				spadArraySize);
+		lastSignalRateDiff = abs(peakSignalRateRef -
+			targetRefRate);
+		complete = 0;
+
+		while (!complete) {
+			get_next_good_spad(
+				Dev->Data.SpadData.RefGoodSpadMap,
+				spadArraySize, currentSpadIndex,
+				&nextGoodSpad);
+
+			if (nextGoodSpad == -1) {
+				Status = VL_ERROR_REF_SPAD_INIT;
+				break;
+			}
+
+			/* Cannot combine Aperture and Non-Aperture spads, so
+			 * ensure the current spad is of the correct type.
+			 */
+			if (is_aperture((uint32_t)startSelect + nextGoodSpad) !=
+					needAptSpads) {
+				/* At this point we have enabled the maximum
+				 * number of Aperture spads.
+				 */
+				complete = 1;
+				break;
+			}
+
+			(refSpadCount_int)++;
+
+			currentSpadIndex = nextGoodSpad;
+			Status = enable_spad_bit(
+					Dev->Data.SpadData.RefSpadEnables,
+					spadArraySize, currentSpadIndex);
+
+			if (Status == VL_ERROR_NONE) {
+				currentSpadIndex++;
+				/* Proceed to apply the additional spad and */
+				/* perform measurement. */
+				Status = set_ref_spad_map(Dev,
+					Dev->Data.SpadData.RefSpadEnables);
+			}
+
+			if (Status != VL_ERROR_NONE)
+				break;
+
+			Status = perform_ref_signal_measurement(Dev,
+					&peakSignalRateRef);
+
+			if (Status != VL_ERROR_NONE)
+				break;
+
+			signalRateDiff = abs(peakSignalRateRef - targetRefRate);
+
+			if (peakSignalRateRef > targetRefRate) {
+				/* Select the spad map that provides the */
+				/* measurement closest to the target rate, */
+				/* either above or below it. */
+
+				if (signalRateDiff > lastSignalRateDiff) {
+				/* Previous spad map produced a closer */
+				/* measurement, so choose this. */
+					Status = set_ref_spad_map(Dev,
+							lastSpadArray);
+					memcpy(
+					Dev->Data.SpadData.RefSpadEnables,
+					lastSpadArray, spadArraySize);
+
+					(refSpadCount_int)--;
+				}
+				complete = 1;
+			} else {
+				/* Continue to add spads */
+				lastSignalRateDiff = signalRateDiff;
+				memcpy(lastSpadArray,
+					Dev->Data.SpadData.RefSpadEnables,
+					spadArraySize);
+			}
+
+		} /* while */
+	}
+
+	if (Status == VL_ERROR_NONE) {
+		*refSpadCount = refSpadCount_int;
+		*isApertureSpads = isApertureSpads_int;
+
+		VL_SETDEVICESPECIFICPARAMETER(Dev, RefSpadsInitialised, 1);
+		VL_SETDEVICESPECIFICPARAMETER(Dev,
+			ReferenceSpadCount, (uint8_t)(*refSpadCount));
+		VL_SETDEVICESPECIFICPARAMETER(Dev,
+			ReferenceSpadType, *isApertureSpads);
+	}
+
+	return Status;
+}
+
+int8_t VL_set_reference_spads(struct vl_data *Dev,
+				 uint32_t count, uint8_t isApertureSpads)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint32_t currentSpadIndex = 0;
+	uint8_t startSelect = 0xB4;
+	uint32_t spadArraySize = 6;
+	uint32_t maxSpadCount = 44;
+	uint32_t lastSpadIndex;
+	uint32_t index;
+
+	/*
+	 * This function applies a requested number of reference spads,
+	 * either aperture or non-aperture.
+	 * The good spad map will be applied.
+	 */
+
+	Status = VL_WrByte(Dev, 0xFF, 0x01);
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_WrByte(Dev,
+			VL_REG_DYNAMIC_SPAD_REF_EN_START_OFFSET, 0x00);
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_WrByte(Dev,
+			VL_REG_DYNAMIC_SPAD_NUM_REQUESTED_REF_SPAD, 0x2C);
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_WrByte(Dev, 0xFF, 0x00);
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_WrByte(Dev,
+			VL_REG_GLOBAL_CONFIG_REF_EN_START_SELECT,
+			startSelect);
+
+	for (index = 0; index < spadArraySize; index++)
+		Dev->Data.SpadData.RefSpadEnables[index] = 0;
+
+	if (isApertureSpads) {
+		/* Increment to the first APERTURE spad */
+		while ((is_aperture(startSelect + currentSpadIndex) == 0) &&
+			  (currentSpadIndex < maxSpadCount)) {
+			currentSpadIndex++;
+		}
+	}
+	Status = enable_ref_spads(Dev,
+				isApertureSpads,
+				Dev->Data.SpadData.RefGoodSpadMap,
+				Dev->Data.SpadData.RefSpadEnables,
+				spadArraySize,
+				startSelect,
+				currentSpadIndex,
+				count,
+				&lastSpadIndex);
+
+	if (Status == VL_ERROR_NONE) {
+		VL_SETDEVICESPECIFICPARAMETER(Dev, RefSpadsInitialised, 1);
+		VL_SETDEVICESPECIFICPARAMETER(Dev,
+			ReferenceSpadCount, (uint8_t)(count));
+		VL_SETDEVICESPECIFICPARAMETER(Dev,
+			ReferenceSpadType, isApertureSpads);
+	}
+
+	return Status;
+}
+
+int8_t VL_get_reference_spads(struct vl_data *Dev,
+			uint32_t *pSpadCount, uint8_t *pIsApertureSpads)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t refSpadsInitialised;
+	uint8_t refSpadArray[6];
+	uint32_t cMaxSpadCount = 44;
+	uint32_t cSpadArraySize = 6;
+	uint32_t spadsEnabled;
+	uint8_t isApertureSpads = 0;
+
+	refSpadsInitialised = VL_GETDEVICESPECIFICPARAMETER(Dev,
+					RefSpadsInitialised);
+
+	if (refSpadsInitialised == 1) {
+
+		*pSpadCount = (uint32_t)VL_GETDEVICESPECIFICPARAMETER(Dev,
+			ReferenceSpadCount);
+		*pIsApertureSpads = VL_GETDEVICESPECIFICPARAMETER(Dev,
+			ReferenceSpadType);
+	} else {
+
+		/* obtain spad info from device.*/
+		Status = get_ref_spad_map(Dev, refSpadArray);
+
+		if (Status == VL_ERROR_NONE) {
+			/* count enabled spads within spad map array and
+			 * determine if Aperture or Non-Aperture.
+			 */
+			Status = count_enabled_spads(refSpadArray,
+							cSpadArraySize,
+							cMaxSpadCount,
+							&spadsEnabled,
+							&isApertureSpads);
+
+			if (Status == VL_ERROR_NONE) {
+
+				*pSpadCount = spadsEnabled;
+				*pIsApertureSpads = isApertureSpads;
+
+				VL_SETDEVICESPECIFICPARAMETER(Dev,
+					RefSpadsInitialised, 1);
+				VL_SETDEVICESPECIFICPARAMETER(Dev,
+					ReferenceSpadCount,
+					(uint8_t)spadsEnabled);
+				VL_SETDEVICESPECIFICPARAMETER(Dev,
+					ReferenceSpadType, isApertureSpads);
+			}
+		}
+	}
+
+	return Status;
+}
+
+
+int8_t VL_perform_single_ref_calibration(struct vl_data *Dev,
+		uint8_t vhv_init_byte)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_WrByte(Dev, VL_REG_SYSRANGE_START,
+				VL_REG_SYSRANGE_MODE_START_STOP |
+				vhv_init_byte);
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_measurement_poll_for_completion(Dev);
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_ClearInterruptMask(Dev, 0);
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_WrByte(Dev, VL_REG_SYSRANGE_START, 0x00);
+
+	return Status;
+}
+
+
+int8_t VL_ref_calibration_io(struct vl_data *Dev,
+	uint8_t read_not_write, uint8_t VhvSettings, uint8_t PhaseCal,
+	uint8_t *pVhvSettings, uint8_t *pPhaseCal,
+	const uint8_t vhv_enable, const uint8_t phase_enable)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t PhaseCalint = 0;
+
+	/* Read VHV from device */
+	Status |= VL_WrByte(Dev, 0xFF, 0x01);
+	Status |= VL_WrByte(Dev, 0x00, 0x00);
+	Status |= VL_WrByte(Dev, 0xFF, 0x00);
+
+	if (read_not_write) {
+		if (vhv_enable)
+			Status |= VL_RdByte(Dev, 0xCB, pVhvSettings);
+		if (phase_enable)
+			Status |= VL_RdByte(Dev, 0xEE, &PhaseCalint);
+	} else {
+		if (vhv_enable)
+			Status |= VL_WrByte(Dev, 0xCB, VhvSettings);
+		if (phase_enable)
+			Status |= VL_UpdateByte(Dev, 0xEE, 0x80, PhaseCal);
+	}
+
+	Status |= VL_WrByte(Dev, 0xFF, 0x01);
+	Status |= VL_WrByte(Dev, 0x00, 0x01);
+	Status |= VL_WrByte(Dev, 0xFF, 0x00);
+
+	*pPhaseCal = (uint8_t)(PhaseCalint&0xEF);
+
+	return Status;
+}
+
+
+int8_t VL_perform_vhv_calibration(struct vl_data *Dev,
+	uint8_t *pVhvSettings, const uint8_t get_data_enable,
+	const uint8_t restore_config)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t SequenceConfig = 0;
+	uint8_t VhvSettings = 0;
+	uint8_t PhaseCal = 0;
+	uint8_t PhaseCalInt = 0;
+
+	/* store the value of the sequence config,
+	 * this will be reset before the end of the function
+	 */
+
+	if (restore_config)
+		SequenceConfig = PALDevDataGet(Dev, SequenceConfig);
+
+	/* Run VHV */
+	Status = VL_WrByte(Dev, VL_REG_SYSTEM_SEQUENCE_CONFIG, 0x01);
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_perform_single_ref_calibration(Dev, 0x40);
+
+	/* Read VHV from device */
+	if ((Status == VL_ERROR_NONE) && (get_data_enable == 1)) {
+		Status = VL_ref_calibration_io(Dev, 1,
+			VhvSettings, PhaseCal, /* Not used here */
+			pVhvSettings, &PhaseCalInt,
+			1, 0);
+	} else
+		*pVhvSettings = 0;
+
+
+	if ((Status == VL_ERROR_NONE) && restore_config) {
+		/* restore the previous Sequence Config */
+		Status = VL_WrByte(Dev, VL_REG_SYSTEM_SEQUENCE_CONFIG,
+				SequenceConfig);
+		if (Status == VL_ERROR_NONE)
+			PALDevDataSet(Dev, SequenceConfig, SequenceConfig);
+
+	}
+
+	return Status;
+}
+
+int8_t VL_perform_phase_calibration(struct vl_data *Dev,
+	uint8_t *pPhaseCal, const uint8_t get_data_enable,
+	const uint8_t restore_config)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t SequenceConfig = 0;
+	uint8_t VhvSettings = 0;
+	uint8_t PhaseCal = 0;
+	uint8_t VhvSettingsint;
+
+	/* store the value of the sequence config,
+	 * this will be reset before the end of the function
+	 */
+
+	if (restore_config)
+		SequenceConfig = PALDevDataGet(Dev, SequenceConfig);
+
+	/* Run PhaseCal */
+	Status = VL_WrByte(Dev, VL_REG_SYSTEM_SEQUENCE_CONFIG, 0x02);
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_perform_single_ref_calibration(Dev, 0x0);
+
+	/* Read PhaseCal from device */
+	if ((Status == VL_ERROR_NONE) && (get_data_enable == 1)) {
+		Status = VL_ref_calibration_io(Dev, 1,
+			VhvSettings, PhaseCal, /* Not used here */
+			&VhvSettingsint, pPhaseCal,
+			0, 1);
+	} else
+		*pPhaseCal = 0;
+
+
+	if ((Status == VL_ERROR_NONE) && restore_config) {
+		/* restore the previous Sequence Config */
+		Status = VL_WrByte(Dev, VL_REG_SYSTEM_SEQUENCE_CONFIG,
+				SequenceConfig);
+		if (Status == VL_ERROR_NONE)
+			PALDevDataSet(Dev, SequenceConfig, SequenceConfig);
+
+	}
+
+	return Status;
+}
+
+int8_t VL_perform_ref_calibration(struct vl_data *Dev,
+	uint8_t *pVhvSettings, uint8_t *pPhaseCal, uint8_t get_data_enable)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t SequenceConfig = 0;
+
+	/* store the value of the sequence config,
+	 * this will be reset before the end of the function
+	 */
+
+	SequenceConfig = PALDevDataGet(Dev, SequenceConfig);
+
+	/* In the following functions we don't save the config, to optimize */
+	/* writes to the device. Config is saved and restored only once. */
+	Status = VL_perform_vhv_calibration(
+			Dev, pVhvSettings, get_data_enable, 0);
+
+
+	if (Status == VL_ERROR_NONE)
+		Status = VL_perform_phase_calibration(
+			Dev, pPhaseCal, get_data_enable, 0);
+
+
+	if (Status == VL_ERROR_NONE) {
+		/* restore the previous Sequence Config */
+		Status = VL_WrByte(Dev, VL_REG_SYSTEM_SEQUENCE_CONFIG,
+				SequenceConfig);
+		if (Status == VL_ERROR_NONE)
+			PALDevDataSet(Dev, SequenceConfig, SequenceConfig);
+
+	}
+
+	return Status;
+}
+
+int8_t VL_set_ref_calibration(struct vl_data *Dev,
+		uint8_t VhvSettings, uint8_t PhaseCal)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t pVhvSettings;
+	uint8_t pPhaseCal;
+
+	Status = VL_ref_calibration_io(Dev, 0,
+		VhvSettings, PhaseCal,
+		&pVhvSettings, &pPhaseCal,
+		1, 1);
+
+	return Status;
+}
+
+int8_t VL_get_ref_calibration(struct vl_data *Dev,
+		uint8_t *pVhvSettings, uint8_t *pPhaseCal)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t VhvSettings = 0;
+	uint8_t PhaseCal = 0;
+
+	Status = VL_ref_calibration_io(Dev, 1,
+		VhvSettings, PhaseCal,
+		pVhvSettings, pPhaseCal,
+		1, 1);
+
+	return Status;
+}
diff --git a/drivers/input/misc/vl53l0x/src/vl53l0x_api_core.c b/drivers/input/misc/vl53l0x/src/vl53l0x_api_core.c
new file mode 100644
index 0000000..6824c02
--- /dev/null
+++ b/drivers/input/misc/vl53l0x/src/vl53l0x_api_core.c
@@ -0,0 +1,2220 @@
+/*
+ *  vl53l0x_api_core.c - Linux kernel modules for
+ *  STM VL53L0 FlightSense TOF sensor
+ *
+ *  Copyright (C) 2016 STMicroelectronics Imaging Division.
+ *  Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#include "vl53l0x_api.h"
+#include "vl53l0x_api_core.h"
+#include "vl53l0x_api_calibration.h"
+
+
+#ifndef __KERNEL__
+#include <stdlib.h>
+#endif
+#define LOG_FUNCTION_START(fmt, ...) \
+	_LOG_FUNCTION_START(TRACE_MODULE_API, fmt, ##__VA_ARGS__)
+#define LOG_FUNCTION_END(status, ...) \
+	_LOG_FUNCTION_END(TRACE_MODULE_API, status, ##__VA_ARGS__)
+#define LOG_FUNCTION_END_FMT(status, fmt, ...) \
+	_LOG_FUNCTION_END_FMT(TRACE_MODULE_API, status, fmt, ##__VA_ARGS__)
+
+int8_t VL_reverse_bytes(uint8_t *data, uint32_t size)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t tempData;
+	uint32_t mirrorIndex;
+	uint32_t middle = size/2;
+	uint32_t index;
+
+	for (index = 0; index < middle; index++) {
+		mirrorIndex		 = size - index - 1;
+		tempData		 = data[index];
+		data[index]		 = data[mirrorIndex];
+		data[mirrorIndex] = tempData;
+	}
+	return Status;
+}
+
+int8_t VL_measurement_poll_for_completion(struct vl_data *Dev)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t NewDataReady = 0;
+	uint32_t LoopNb;
+
+	LOG_FUNCTION_START("");
+
+	LoopNb = 0;
+
+	do {
+		Status = VL_GetMeasurementDataReady(Dev, &NewDataReady);
+		if (Status != 0)
+			break; /* the error is set */
+
+		if (NewDataReady == 1)
+			break; /* done note that status == 0 */
+
+		LoopNb++;
+		if (LoopNb >= VL_DEFAULT_MAX_LOOP) {
+			Status = VL_ERROR_TIME_OUT;
+			break;
+		}
+
+		VL_PollingDelay(Dev);
+	} while (1);
+
+	LOG_FUNCTION_END(Status);
+
+	return Status;
+}
+
+
+uint8_t VL_decode_vcsel_period(uint8_t vcsel_period_reg)
+{
+	/*!
+	 * Converts the encoded VCSEL period register value into the real
+	 * period in PLL clocks
+	 */
+
+	uint8_t vcsel_period_pclks = 0;
+
+	vcsel_period_pclks = (vcsel_period_reg + 1) << 1;
+
+	return vcsel_period_pclks;
+}
+
+uint8_t VL_encode_vcsel_period(uint8_t vcsel_period_pclks)
+{
+	/*!
+	 * Converts the VCSEL period in PLL clocks into the encoded VCSEL
+	 * period register value
+	 */
+
+	uint8_t vcsel_period_reg = 0;
+
+	vcsel_period_reg = (vcsel_period_pclks >> 1) - 1;
+
+	return vcsel_period_reg;
+}
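+
+/* For example, a VCSEL period of 14 PCLKs is encoded as (14 >> 1) - 1 = 6,
+ * and decoding 6 gives (6 + 1) << 1 = 14 PCLKs again.
+ */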
+
+
+uint32_t VL_isqrt(uint32_t num)
+{
+	/*
+	 * Implements an integer square root
+	 *
+	 * From: http://en.wikipedia.org/wiki/Methods_of_computing_square_roots
+	 */
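+	/* For example, VL_isqrt(1000) returns 31 (31 * 31 = 961) and
+	 * VL_isqrt(65536) returns 256.
+	 */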
+
+	uint32_t  res = 0;
+	uint32_t  bit = 1 << 30;
+	/* The second-to-top bit is set: */
+	/* 1 << 14 for 16-bits, 1 << 30 for 32 bits */
+
+	 /* "bit" starts at the highest power of four <= the argument. */
+	while (bit > num)
+		bit >>= 2;
+
+
+	while (bit != 0) {
+		if (num >= res + bit) {
+			num -= res + bit;
+			res = (res >> 1) + bit;
+		} else
+			res >>= 1;
+
+		bit >>= 2;
+	}
+
+	return res;
+}
+
+
+uint32_t VL_quadrature_sum(uint32_t a, uint32_t b)
+{
+	/*
+	 * Implements a quadrature sum
+	 *
+	 * res = sqrt(a^2 + b^2)
+	 *
+	 * Trap the overflow case: the maximum input value is 65535 (16-bit)
+	 * because the internal calculation is 32-bit wide.
+	 *
+	 * If an overflow would occur, the output is set to the maximum.
+	 */
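+	/* For example, VL_quadrature_sum(300, 400) = 500; if either input
+	 * exceeds 65535 the result saturates at 65535.
+	 */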
+	uint32_t  res = 0;
+
+	if (a > 65535 || b > 65535)
+		res = 65535;
+	else
+		res = VL_isqrt(a * a + b * b);
+
+	return res;
+}
+
+
+int8_t VL_device_read_strobe(struct vl_data *Dev)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t strobe;
+	uint32_t LoopNb;
+
+	LOG_FUNCTION_START("");
+
+	Status |= VL_WrByte(Dev, 0x83, 0x00);
+
+	/* poll with a timeout to avoid a deadlock */
+	if (Status == VL_ERROR_NONE) {
+		LoopNb = 0;
+		do {
+			Status = VL_RdByte(Dev, 0x83, &strobe);
+			if ((strobe != 0x00) || Status != VL_ERROR_NONE)
+				break;
+			LoopNb = LoopNb + 1;
+		} while (LoopNb < VL_DEFAULT_MAX_LOOP);
+
+		if (LoopNb >= VL_DEFAULT_MAX_LOOP)
+			Status = VL_ERROR_TIME_OUT;
+
+	}
+
+	Status |= VL_WrByte(Dev, 0x83, 0x01);
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+
+}
+
+int8_t VL_get_info_from_device(struct vl_data *Dev, uint8_t option)
+{
+
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t byte;
+	uint32_t TmpDWord;
+	uint8_t ModuleId;
+	uint8_t Revision;
+	uint8_t ReferenceSpadCount = 0;
+	uint8_t ReferenceSpadType = 0;
+	uint32_t PartUIDUpper = 0;
+	uint32_t PartUIDLower = 0;
+	uint32_t OffsetFixed1104_mm = 0;
+	int16_t OffsetMicroMeters = 0;
+	uint32_t DistMeasTgtFixed1104_mm = 400 << 4;
+	uint32_t DistMeasFixed1104_400_mm = 0;
+	uint32_t SignalRateMeasFixed1104_400_mm = 0;
+	char ProductId[19];
+	char *ProductId_tmp;
+	uint8_t ReadDataFromDeviceDone;
+	unsigned int SignalRateMeasFixed400mmFix = 0;
+	uint8_t NvmRefGoodSpadMap[VL_REF_SPAD_BUFFER_SIZE];
+	int i;
+
+
+	LOG_FUNCTION_START("");
+
+	ReadDataFromDeviceDone = VL_GETDEVICESPECIFICPARAMETER(Dev,
+			ReadDataFromDeviceDone);
+
+	/* This access is done only once, after a GetDeviceInfo or a */
+	/* DataInit has been done */
+	if (ReadDataFromDeviceDone != 7) {
+
+		Status |= VL_WrByte(Dev, 0x80, 0x01);
+		Status |= VL_WrByte(Dev, 0xFF, 0x01);
+		Status |= VL_WrByte(Dev, 0x00, 0x00);
+
+		Status |= VL_WrByte(Dev, 0xFF, 0x06);
+		Status |= VL_RdByte(Dev, 0x83, &byte);
+		Status |= VL_WrByte(Dev, 0x83, byte|4);
+		Status |= VL_WrByte(Dev, 0xFF, 0x07);
+		Status |= VL_WrByte(Dev, 0x81, 0x01);
+
+		Status |= VL_PollingDelay(Dev);
+
+		Status |= VL_WrByte(Dev, 0x80, 0x01);
+
+		if (((option & 1) == 1) &&
+			((ReadDataFromDeviceDone & 1) == 0)) {
+			Status |= VL_WrByte(Dev, 0x94, 0x6b);
+			Status |= VL_device_read_strobe(Dev);
+			Status |= VL_RdDWord(Dev, 0x90, &TmpDWord);
+
+			ReferenceSpadCount = (uint8_t)((TmpDWord >> 8) & 0x07f);
+			ReferenceSpadType  = (uint8_t)((TmpDWord >> 15) & 0x01);
+
+			Status |= VL_WrByte(Dev, 0x94, 0x24);
+			Status |= VL_device_read_strobe(Dev);
+			Status |= VL_RdDWord(Dev, 0x90, &TmpDWord);
+
+
+			NvmRefGoodSpadMap[0] = (uint8_t)((TmpDWord >> 24)
+				& 0xff);
+			NvmRefGoodSpadMap[1] = (uint8_t)((TmpDWord >> 16)
+				& 0xff);
+			NvmRefGoodSpadMap[2] = (uint8_t)((TmpDWord >> 8)
+				& 0xff);
+			NvmRefGoodSpadMap[3] = (uint8_t)(TmpDWord & 0xff);
+
+			Status |= VL_WrByte(Dev, 0x94, 0x25);
+			Status |= VL_device_read_strobe(Dev);
+			Status |= VL_RdDWord(Dev, 0x90, &TmpDWord);
+
+			NvmRefGoodSpadMap[4] = (uint8_t)((TmpDWord >> 24)
+				& 0xff);
+			NvmRefGoodSpadMap[5] = (uint8_t)((TmpDWord >> 16)
+				& 0xff);
+		}
+
+		if (((option & 2) == 2) &&
+			((ReadDataFromDeviceDone & 2) == 0)) {
+
+			Status |= VL_WrByte(Dev, 0x94, 0x02);
+			Status |= VL_device_read_strobe(Dev);
+			Status |= VL_RdByte(Dev, 0x90, &ModuleId);
+
+			Status |= VL_WrByte(Dev, 0x94, 0x7B);
+			Status |= VL_device_read_strobe(Dev);
+			Status |= VL_RdByte(Dev, 0x90, &Revision);
+
+			Status |= VL_WrByte(Dev, 0x94, 0x77);
+			Status |= VL_device_read_strobe(Dev);
+			Status |= VL_RdDWord(Dev, 0x90, &TmpDWord);
+
+			ProductId[0] = (char)((TmpDWord >> 25) & 0x07f);
+			ProductId[1] = (char)((TmpDWord >> 18) & 0x07f);
+			ProductId[2] = (char)((TmpDWord >> 11) & 0x07f);
+			ProductId[3] = (char)((TmpDWord >> 4) & 0x07f);
+
+			byte = (uint8_t)((TmpDWord & 0x00f) << 3);
+
+			Status |= VL_WrByte(Dev, 0x94, 0x78);
+			Status |= VL_device_read_strobe(Dev);
+			Status |= VL_RdDWord(Dev, 0x90, &TmpDWord);
+
+			ProductId[4] = (char)(byte +
+					((TmpDWord >> 29) & 0x07f));
+			ProductId[5] = (char)((TmpDWord >> 22) & 0x07f);
+			ProductId[6] = (char)((TmpDWord >> 15) & 0x07f);
+			ProductId[7] = (char)((TmpDWord >> 8) & 0x07f);
+			ProductId[8] = (char)((TmpDWord >> 1) & 0x07f);
+
+			byte = (uint8_t)((TmpDWord & 0x001) << 6);
+
+			Status |= VL_WrByte(Dev, 0x94, 0x79);
+
+			Status |= VL_device_read_strobe(Dev);
+
+			Status |= VL_RdDWord(Dev, 0x90, &TmpDWord);
+
+			ProductId[9] = (char)(byte +
+					((TmpDWord >> 26) & 0x07f));
+			ProductId[10] = (char)((TmpDWord >> 19) & 0x07f);
+			ProductId[11] = (char)((TmpDWord >> 12) & 0x07f);
+			ProductId[12] = (char)((TmpDWord >> 5) & 0x07f);
+
+			byte = (uint8_t)((TmpDWord & 0x01f) << 2);
+
+			Status |= VL_WrByte(Dev, 0x94, 0x7A);
+
+			Status |= VL_device_read_strobe(Dev);
+
+			Status |= VL_RdDWord(Dev, 0x90, &TmpDWord);
+
+			ProductId[13] = (char)(byte +
+					((TmpDWord >> 30) & 0x07f));
+			ProductId[14] = (char)((TmpDWord >> 23) & 0x07f);
+			ProductId[15] = (char)((TmpDWord >> 16) & 0x07f);
+			ProductId[16] = (char)((TmpDWord >> 9) & 0x07f);
+			ProductId[17] = (char)((TmpDWord >> 2) & 0x07f);
+			ProductId[18] = '\0';
+
+		}
+
+		if (((option & 4) == 4) &&
+			((ReadDataFromDeviceDone & 4) == 0)) {
+
+			Status |= VL_WrByte(Dev, 0x94, 0x7B);
+			Status |= VL_device_read_strobe(Dev);
+			Status |= VL_RdDWord(Dev, 0x90, &PartUIDUpper);
+
+			Status |= VL_WrByte(Dev, 0x94, 0x7C);
+			Status |= VL_device_read_strobe(Dev);
+			Status |= VL_RdDWord(Dev, 0x90, &PartUIDLower);
+
+			Status |= VL_WrByte(Dev, 0x94, 0x73);
+			Status |= VL_device_read_strobe(Dev);
+			Status |= VL_RdDWord(Dev, 0x90, &TmpDWord);
+
+			SignalRateMeasFixed1104_400_mm = (TmpDWord &
+				0x0000000ff) << 8;
+
+			Status |= VL_WrByte(Dev, 0x94, 0x74);
+			Status |= VL_device_read_strobe(Dev);
+			Status |= VL_RdDWord(Dev, 0x90, &TmpDWord);
+
+			SignalRateMeasFixed1104_400_mm |= ((TmpDWord &
+				0xff000000) >> 24);
+
+			Status |= VL_WrByte(Dev, 0x94, 0x75);
+			Status |= VL_device_read_strobe(Dev);
+			Status |= VL_RdDWord(Dev, 0x90, &TmpDWord);
+
+			DistMeasFixed1104_400_mm = (TmpDWord & 0x0000000ff)
+							<< 8;
+
+			Status |= VL_WrByte(Dev, 0x94, 0x76);
+			Status |= VL_device_read_strobe(Dev);
+			Status |= VL_RdDWord(Dev, 0x90, &TmpDWord);
+
+			DistMeasFixed1104_400_mm |= ((TmpDWord & 0xff000000)
+							>> 24);
+		}
+
+		Status |= VL_WrByte(Dev, 0x81, 0x00);
+		Status |= VL_WrByte(Dev, 0xFF, 0x06);
+		Status |= VL_RdByte(Dev, 0x83, &byte);
+		Status |= VL_WrByte(Dev, 0x83, byte&0xfb);
+		Status |= VL_WrByte(Dev, 0xFF, 0x01);
+		Status |= VL_WrByte(Dev, 0x00, 0x01);
+
+		Status |= VL_WrByte(Dev, 0xFF, 0x00);
+		Status |= VL_WrByte(Dev, 0x80, 0x00);
+	}
+
+	if ((Status == VL_ERROR_NONE) &&
+		(ReadDataFromDeviceDone != 7)) {
+		/* Assign to variable if status is ok */
+		if (((option & 1) == 1) &&
+			((ReadDataFromDeviceDone & 1) == 0)) {
+			VL_SETDEVICESPECIFICPARAMETER(Dev,
+				ReferenceSpadCount, ReferenceSpadCount);
+
+			VL_SETDEVICESPECIFICPARAMETER(Dev,
+				ReferenceSpadType, ReferenceSpadType);
+
+			for (i = 0; i < VL_REF_SPAD_BUFFER_SIZE; i++) {
+				Dev->Data.SpadData.RefGoodSpadMap[i] =
+					NvmRefGoodSpadMap[i];
+			}
+		}
+
+		if (((option & 2) == 2) &&
+			((ReadDataFromDeviceDone & 2) == 0)) {
+			VL_SETDEVICESPECIFICPARAMETER(Dev,
+					ModuleId, ModuleId);
+
+			VL_SETDEVICESPECIFICPARAMETER(Dev,
+					Revision, Revision);
+
+			ProductId_tmp = VL_GETDEVICESPECIFICPARAMETER(Dev,
+					ProductId);
+			VL_COPYSTRING(ProductId_tmp, ProductId);
+
+		}
+
+		if (((option & 4) == 4) &&
+			((ReadDataFromDeviceDone & 4) == 0)) {
+			VL_SETDEVICESPECIFICPARAMETER(Dev,
+						PartUIDUpper, PartUIDUpper);
+
+			VL_SETDEVICESPECIFICPARAMETER(Dev,
+						PartUIDLower, PartUIDLower);
+
+			SignalRateMeasFixed400mmFix =
+				VL_FIXPOINT97TOFIXPOINT1616(
+					SignalRateMeasFixed1104_400_mm);
+
+			VL_SETDEVICESPECIFICPARAMETER(Dev,
+				SignalRateMeasFixed400mm,
+				SignalRateMeasFixed400mmFix);
+
+			OffsetMicroMeters = 0;
+			if (DistMeasFixed1104_400_mm != 0) {
+				OffsetFixed1104_mm = DistMeasFixed1104_400_mm -
+				DistMeasTgtFixed1104_mm;
+				OffsetMicroMeters = (OffsetFixed1104_mm
+				* 1000) >> 4;
+				OffsetMicroMeters *= -1;
+			}
+
+			PALDevDataSet(Dev,
+				Part2PartOffsetAdjustmentNVMMicroMeter,
+				OffsetMicroMeters);
+		}
+		byte = (uint8_t)(ReadDataFromDeviceDone|option);
+		VL_SETDEVICESPECIFICPARAMETER(Dev, ReadDataFromDeviceDone,
+				byte);
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+
+uint32_t VL_calc_macro_period_ps(struct vl_data *Dev,
+	uint8_t vcsel_period_pclks)
+{
+	uint64_t PLL_period_ps;
+	uint32_t macro_period_vclks;
+	uint32_t macro_period_ps;
+
+	LOG_FUNCTION_START("");
+
+	/* Deriving the PLL period by calculation would produce rounding */
+	/* errors, therefore a fixed value is used */
+	PLL_period_ps = 1655;
+
+	macro_period_vclks = 2304;
+	macro_period_ps = (uint32_t)(macro_period_vclks
+			* vcsel_period_pclks * PLL_period_ps);
+
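+	/* For example, a VCSEL period of 14 PCLKs gives
+	 * 2304 * 14 * 1655 = 53383680 ps, i.e. about 53.4 us per macro
+	 * period.
+	 */
+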
+	LOG_FUNCTION_END("");
+	return macro_period_ps;
+}
+
+uint16_t VL_encode_timeout(uint32_t timeout_macro_clks)
+{
+	/*!
+	 * Encode timeout in macro periods in (LSByte * 2^MSByte) + 1 format
+	 */
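+	/* For example, 1000 macro periods: ls_byte starts at 999 and is
+	 * shifted right twice to 249, giving (2 << 8) + 249 = 0x02F9;
+	 * decoding that yields (249 << 2) + 1 = 997, so the encoding is
+	 * slightly lossy.
+	 */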
+
+	uint16_t encoded_timeout = 0;
+	uint32_t ls_byte = 0;
+	uint16_t ms_byte = 0;
+
+	if (timeout_macro_clks > 0) {
+		ls_byte = timeout_macro_clks - 1;
+
+		while ((ls_byte & 0xFFFFFF00) > 0) {
+			ls_byte = ls_byte >> 1;
+			ms_byte++;
+		}
+
+		encoded_timeout = (ms_byte << 8)
+				+ (uint16_t) (ls_byte & 0x000000FF);
+	}
+
+	return encoded_timeout;
+
+}
+
+uint32_t VL_decode_timeout(uint16_t encoded_timeout)
+{
+	/*!
+	 * Decode 16-bit timeout register value - format (LSByte * 2^MSByte) + 1
+	 */
+
+	uint32_t timeout_macro_clks = 0;
+
+	timeout_macro_clks = ((uint32_t) (encoded_timeout & 0x00FF)
+			<< (uint32_t) ((encoded_timeout & 0xFF00) >> 8)) + 1;
+
+	return timeout_macro_clks;
+}
+
+
+/* To convert a timeout in us into a macro period count (MCLKS) */
+uint32_t VL_calc_timeout_mclks(struct vl_data *Dev,
+		uint32_t timeout_period_us,
+		uint8_t vcsel_period_pclks)
+{
+	uint32_t macro_period_ps;
+	uint32_t macro_period_ns;
+	uint32_t timeout_period_mclks = 0;
+
+	macro_period_ps = VL_calc_macro_period_ps(Dev, vcsel_period_pclks);
+	macro_period_ns = (macro_period_ps + 500) / 1000;
+
+	timeout_period_mclks =
+		(uint32_t) (((timeout_period_us * 1000)
+		+ (macro_period_ns / 2)) / macro_period_ns);
+
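+	/* For example, a 30000 us timeout at 14 PCLKs (macro period of
+	 * about 53384 ns) converts to roughly 562 MCLKS.
+	 */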
+	return timeout_period_mclks;
+}
+
+/* To convert a macro period count (MCLKS) into us */
+uint32_t VL_calc_timeout_us(struct vl_data *Dev,
+		uint16_t timeout_period_mclks,
+		uint8_t vcsel_period_pclks)
+{
+	uint32_t macro_period_ps;
+	uint32_t macro_period_ns;
+	uint32_t actual_timeout_period_us = 0;
+
+	macro_period_ps = VL_calc_macro_period_ps(Dev, vcsel_period_pclks);
+	macro_period_ns = (macro_period_ps + 500) / 1000;
+
+	actual_timeout_period_us =
+		((timeout_period_mclks * macro_period_ns) + 500) / 1000;
+
+	return actual_timeout_period_us;
+}
+
+
+int8_t get_sequence_step_timeout(struct vl_data *Dev,
+				uint8_t SequenceStepId,
+				uint32_t *pTimeOutMicroSecs)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t CurrentVCSELPulsePeriodPClk;
+	uint8_t EncodedTimeOutByte = 0;
+	uint32_t TimeoutMicroSeconds = 0;
+	uint16_t PreRangeEncodedTimeOut = 0;
+	uint16_t MsrcTimeOutMClks;
+	uint16_t PreRangeTimeOutMClks;
+	uint16_t FinalRangeTimeOutMClks = 0;
+	uint16_t FinalRangeEncodedTimeOut;
+	struct VL_SchedulerSequenceSteps_t SchedulerSequenceSteps;
+
+	if ((SequenceStepId == VL_SEQUENCESTEP_TCC)	 ||
+		(SequenceStepId == VL_SEQUENCESTEP_DSS)	 ||
+		(SequenceStepId == VL_SEQUENCESTEP_MSRC)) {
+
+		Status = VL_GetVcselPulsePeriod(Dev,
+					VL_VCSEL_PERIOD_PRE_RANGE,
+					&CurrentVCSELPulsePeriodPClk);
+		if (Status == VL_ERROR_NONE) {
+			Status = VL_RdByte(Dev,
+					VL_REG_MSRC_CONFIG_TIMEOUT_MACROP,
+					&EncodedTimeOutByte);
+		}
+		MsrcTimeOutMClks = VL_decode_timeout(EncodedTimeOutByte);
+
+		TimeoutMicroSeconds = VL_calc_timeout_us(Dev,
+						MsrcTimeOutMClks,
+						CurrentVCSELPulsePeriodPClk);
+	} else if (SequenceStepId == VL_SEQUENCESTEP_PRE_RANGE) {
+		/* Retrieve PRE-RANGE VCSEL Period */
+		Status = VL_GetVcselPulsePeriod(Dev,
+						VL_VCSEL_PERIOD_PRE_RANGE,
+						&CurrentVCSELPulsePeriodPClk);
+
+		/* Retrieve PRE-RANGE Timeout in Macro periods (MCLKS) */
+		if (Status == VL_ERROR_NONE) {
+
+			/* Retrieve PRE-RANGE VCSEL Period */
+			Status = VL_GetVcselPulsePeriod(Dev,
+					VL_VCSEL_PERIOD_PRE_RANGE,
+					&CurrentVCSELPulsePeriodPClk);
+
+			if (Status == VL_ERROR_NONE) {
+				Status = VL_RdWord(Dev,
+				VL_REG_PRE_RANGE_CONFIG_TIMEOUT_MACROP_HI,
+				&PreRangeEncodedTimeOut);
+			}
+
+			PreRangeTimeOutMClks = VL_decode_timeout(
+					PreRangeEncodedTimeOut);
+
+			TimeoutMicroSeconds = VL_calc_timeout_us(Dev,
+					PreRangeTimeOutMClks,
+					CurrentVCSELPulsePeriodPClk);
+		}
+	} else if (SequenceStepId == VL_SEQUENCESTEP_FINAL_RANGE) {
+
+		VL_GetSequenceStepEnables(Dev, &SchedulerSequenceSteps);
+		PreRangeTimeOutMClks = 0;
+
+		if (SchedulerSequenceSteps.PreRangeOn) {
+			/* Retrieve PRE-RANGE VCSEL Period */
+			Status = VL_GetVcselPulsePeriod(Dev,
+				VL_VCSEL_PERIOD_PRE_RANGE,
+				&CurrentVCSELPulsePeriodPClk);
+
+			/* Retrieve PRE-RANGE Timeout in Macro periods */
+			/* (MCLKS) */
+			if (Status == VL_ERROR_NONE) {
+				Status = VL_RdWord(Dev,
+				VL_REG_PRE_RANGE_CONFIG_TIMEOUT_MACROP_HI,
+				&PreRangeEncodedTimeOut);
+				PreRangeTimeOutMClks = VL_decode_timeout(
+						PreRangeEncodedTimeOut);
+			}
+		}
+
+		if (Status == VL_ERROR_NONE) {
+			/* Retrieve FINAL-RANGE VCSEL Period */
+			Status = VL_GetVcselPulsePeriod(Dev,
+					VL_VCSEL_PERIOD_FINAL_RANGE,
+					&CurrentVCSELPulsePeriodPClk);
+		}
+
+		/* Retrieve FINAL-RANGE Timeout in Macro periods (MCLKS) */
+		if (Status == VL_ERROR_NONE) {
+			Status = VL_RdWord(Dev,
+			    VL_REG_FINAL_RANGE_CONFIG_TIMEOUT_MACROP_HI,
+				&FinalRangeEncodedTimeOut);
+			FinalRangeTimeOutMClks = VL_decode_timeout(
+					FinalRangeEncodedTimeOut);
+		}
+
+		FinalRangeTimeOutMClks -= PreRangeTimeOutMClks;
+		TimeoutMicroSeconds = VL_calc_timeout_us(Dev,
+						FinalRangeTimeOutMClks,
+						CurrentVCSELPulsePeriodPClk);
+	}
+
+	*pTimeOutMicroSecs = TimeoutMicroSeconds;
+
+	return Status;
+}
+
+
+int8_t set_sequence_step_timeout(struct vl_data *Dev,
+					uint8_t SequenceStepId,
+					uint32_t TimeOutMicroSecs)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t CurrentVCSELPulsePeriodPClk;
+	uint8_t MsrcEncodedTimeOut;
+	uint16_t PreRangeEncodedTimeOut;
+	uint16_t PreRangeTimeOutMClks;
+	uint16_t MsrcRangeTimeOutMClks;
+	uint32_t FinalRangeTimeOutMClks;
+	uint16_t FinalRangeEncodedTimeOut;
+	struct VL_SchedulerSequenceSteps_t SchedulerSequenceSteps;
+
+	if ((SequenceStepId == VL_SEQUENCESTEP_TCC)	 ||
+		(SequenceStepId == VL_SEQUENCESTEP_DSS)	 ||
+		(SequenceStepId == VL_SEQUENCESTEP_MSRC)) {
+
+		Status = VL_GetVcselPulsePeriod(Dev,
+					VL_VCSEL_PERIOD_PRE_RANGE,
+					&CurrentVCSELPulsePeriodPClk);
+
+		if (Status == VL_ERROR_NONE) {
+			MsrcRangeTimeOutMClks = VL_calc_timeout_mclks(Dev,
+					TimeOutMicroSecs,
+					(uint8_t)CurrentVCSELPulsePeriodPClk);
+
+			if (MsrcRangeTimeOutMClks > 256)
+				MsrcEncodedTimeOut = 255;
+			else
+				MsrcEncodedTimeOut =
+					(uint8_t)MsrcRangeTimeOutMClks - 1;
+
+			VL_SETDEVICESPECIFICPARAMETER(Dev,
+				LastEncodedTimeout,
+				MsrcEncodedTimeOut);
+		}
+
+		if (Status == VL_ERROR_NONE) {
+			Status = VL_WrByte(Dev,
+				VL_REG_MSRC_CONFIG_TIMEOUT_MACROP,
+				MsrcEncodedTimeOut);
+		}
+	} else {
+
+		if (SequenceStepId == VL_SEQUENCESTEP_PRE_RANGE) {
+
+			if (Status == VL_ERROR_NONE) {
+				Status = VL_GetVcselPulsePeriod(Dev,
+						VL_VCSEL_PERIOD_PRE_RANGE,
+						&CurrentVCSELPulsePeriodPClk);
+				PreRangeTimeOutMClks =
+					VL_calc_timeout_mclks(Dev,
+					TimeOutMicroSecs,
+					(uint8_t)CurrentVCSELPulsePeriodPClk);
+				PreRangeEncodedTimeOut = VL_encode_timeout(
+					PreRangeTimeOutMClks);
+
+				VL_SETDEVICESPECIFICPARAMETER(Dev,
+					LastEncodedTimeout,
+					PreRangeEncodedTimeOut);
+			}
+
+			if (Status == VL_ERROR_NONE) {
+				Status = VL_WrWord(Dev,
+				VL_REG_PRE_RANGE_CONFIG_TIMEOUT_MACROP_HI,
+				PreRangeEncodedTimeOut);
+			}
+
+			if (Status == VL_ERROR_NONE) {
+				VL_SETDEVICESPECIFICPARAMETER(
+					Dev,
+					PreRangeTimeoutMicroSecs,
+					TimeOutMicroSecs);
+			}
+		} else if (SequenceStepId == VL_SEQUENCESTEP_FINAL_RANGE) {
+
+			/* For the final range timeout, the pre-range timeout
+			 * must be added. To do this both final and pre-range
+			 * timeouts must be expressed in macro periods MClks
+			 * because they have different vcsel periods.
+			 */
+
+			VL_GetSequenceStepEnables(Dev,
+					&SchedulerSequenceSteps);
+			PreRangeTimeOutMClks = 0;
+			if (SchedulerSequenceSteps.PreRangeOn) {
+
+				/* Retrieve PRE-RANGE VCSEL Period */
+				Status = VL_GetVcselPulsePeriod(Dev,
+					VL_VCSEL_PERIOD_PRE_RANGE,
+					&CurrentVCSELPulsePeriodPClk);
+
+				/* Retrieve PRE-RANGE Timeout in Macro */
+				/* periods (MCLKS) */
+				if (Status == VL_ERROR_NONE) {
+					Status = VL_RdWord(Dev, 0x51,
+						&PreRangeEncodedTimeOut);
+					PreRangeTimeOutMClks =
+						VL_decode_timeout(
+							PreRangeEncodedTimeOut);
+				}
+			}
+
+			/* Calculate FINAL RANGE Timeout in Macro Periods */
+			/* (MCLKS) and add PRE-RANGE value */
+			if (Status == VL_ERROR_NONE) {
+
+				Status = VL_GetVcselPulsePeriod(Dev,
+					VL_VCSEL_PERIOD_FINAL_RANGE,
+					&CurrentVCSELPulsePeriodPClk);
+			}
+			if (Status == VL_ERROR_NONE) {
+
+				FinalRangeTimeOutMClks =
+					VL_calc_timeout_mclks(Dev,
+					TimeOutMicroSecs,
+					(uint8_t) CurrentVCSELPulsePeriodPClk);
+
+				FinalRangeTimeOutMClks += PreRangeTimeOutMClks;
+
+				FinalRangeEncodedTimeOut =
+				VL_encode_timeout(FinalRangeTimeOutMClks);
+
+				if (Status == VL_ERROR_NONE) {
+					Status = VL_WrWord(Dev, 0x71,
+					FinalRangeEncodedTimeOut);
+				}
+
+				if (Status == VL_ERROR_NONE) {
+					VL_SETDEVICESPECIFICPARAMETER(
+						Dev,
+						FinalRangeTimeoutMicroSecs,
+						TimeOutMicroSecs);
+				}
+			}
+		} else
+			Status = VL_ERROR_INVALID_PARAMS;
+
+	}
+	return Status;
+}
+
+int8_t VL_set_vcsel_pulse_period(struct vl_data *Dev,
+	uint8_t VcselPeriodType, uint8_t VCSELPulsePeriodPCLK)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t vcsel_period_reg;
+	uint8_t MinPreVcselPeriodPCLK = 12;
+	uint8_t MaxPreVcselPeriodPCLK = 18;
+	uint8_t MinFinalVcselPeriodPCLK = 8;
+	uint8_t MaxFinalVcselPeriodPCLK = 14;
+	uint32_t MeasurementTimingBudgetMicroSeconds;
+	uint32_t FinalRangeTimeoutMicroSeconds;
+	uint32_t PreRangeTimeoutMicroSeconds;
+	uint32_t MsrcTimeoutMicroSeconds;
+	uint8_t PhaseCalInt = 0;
+
+	/* Check if valid clock period requested */
+
+	if ((VCSELPulsePeriodPCLK % 2) != 0) {
+		/* Value must be an even number */
+		Status = VL_ERROR_INVALID_PARAMS;
+	} else if (VcselPeriodType == VL_VCSEL_PERIOD_PRE_RANGE &&
+		(VCSELPulsePeriodPCLK < MinPreVcselPeriodPCLK ||
+		VCSELPulsePeriodPCLK > MaxPreVcselPeriodPCLK)) {
+		Status = VL_ERROR_INVALID_PARAMS;
+	} else if (VcselPeriodType == VL_VCSEL_PERIOD_FINAL_RANGE &&
+		(VCSELPulsePeriodPCLK < MinFinalVcselPeriodPCLK ||
+		 VCSELPulsePeriodPCLK > MaxFinalVcselPeriodPCLK)) {
+
+		Status = VL_ERROR_INVALID_PARAMS;
+	}
+
+	/* Apply specific settings for the requested clock period */
+
+	if (Status != VL_ERROR_NONE)
+		return Status;
+
+
+	if (VcselPeriodType == VL_VCSEL_PERIOD_PRE_RANGE) {
+
+		/* Set phase check limits */
+		if (VCSELPulsePeriodPCLK == 12) {
+
+			Status = VL_WrByte(Dev,
+				VL_REG_PRE_RANGE_CONFIG_VALID_PHASE_HIGH,
+				0x18);
+			Status = VL_WrByte(Dev,
+				VL_REG_PRE_RANGE_CONFIG_VALID_PHASE_LOW,
+				0x08);
+		} else if (VCSELPulsePeriodPCLK == 14) {
+
+			Status = VL_WrByte(Dev,
+				VL_REG_PRE_RANGE_CONFIG_VALID_PHASE_HIGH,
+				0x30);
+			Status = VL_WrByte(Dev,
+				VL_REG_PRE_RANGE_CONFIG_VALID_PHASE_LOW,
+				0x08);
+		} else if (VCSELPulsePeriodPCLK == 16) {
+
+			Status = VL_WrByte(Dev,
+				VL_REG_PRE_RANGE_CONFIG_VALID_PHASE_HIGH,
+				0x40);
+			Status = VL_WrByte(Dev,
+				VL_REG_PRE_RANGE_CONFIG_VALID_PHASE_LOW,
+				0x08);
+		} else if (VCSELPulsePeriodPCLK == 18) {
+
+			Status = VL_WrByte(Dev,
+				VL_REG_PRE_RANGE_CONFIG_VALID_PHASE_HIGH,
+				0x50);
+			Status = VL_WrByte(Dev,
+				VL_REG_PRE_RANGE_CONFIG_VALID_PHASE_LOW,
+				0x08);
+		}
+	} else if (VcselPeriodType == VL_VCSEL_PERIOD_FINAL_RANGE) {
+
+		if (VCSELPulsePeriodPCLK == 8) {
+
+			Status = VL_WrByte(Dev,
+				VL_REG_FINAL_RANGE_CONFIG_VALID_PHASE_HIGH,
+				0x10);
+			Status = VL_WrByte(Dev,
+				VL_REG_FINAL_RANGE_CONFIG_VALID_PHASE_LOW,
+				0x08);
+
+			Status |= VL_WrByte(Dev,
+				VL_REG_GLOBAL_CONFIG_VCSEL_WIDTH, 0x02);
+			Status |= VL_WrByte(Dev,
+				VL_REG_ALGO_PHASECAL_CONFIG_TIMEOUT, 0x0C);
+
+			Status |= VL_WrByte(Dev, 0xff, 0x01);
+			Status |= VL_WrByte(Dev,
+				VL_REG_ALGO_PHASECAL_LIM,
+				0x30);
+			Status |= VL_WrByte(Dev, 0xff, 0x00);
+		} else if (VCSELPulsePeriodPCLK == 10) {
+
+			Status = VL_WrByte(Dev,
+				VL_REG_FINAL_RANGE_CONFIG_VALID_PHASE_HIGH,
+				0x28);
+			Status = VL_WrByte(Dev,
+				VL_REG_FINAL_RANGE_CONFIG_VALID_PHASE_LOW,
+				0x08);
+
+			Status |= VL_WrByte(Dev,
+				VL_REG_GLOBAL_CONFIG_VCSEL_WIDTH, 0x03);
+			Status |= VL_WrByte(Dev,
+				VL_REG_ALGO_PHASECAL_CONFIG_TIMEOUT, 0x09);
+
+			Status |= VL_WrByte(Dev, 0xff, 0x01);
+			Status |= VL_WrByte(Dev,
+				VL_REG_ALGO_PHASECAL_LIM,
+				0x20);
+			Status |= VL_WrByte(Dev, 0xff, 0x00);
+		} else if (VCSELPulsePeriodPCLK == 12) {
+
+			Status = VL_WrByte(Dev,
+				VL_REG_FINAL_RANGE_CONFIG_VALID_PHASE_HIGH,
+				0x38);
+			Status = VL_WrByte(Dev,
+				VL_REG_FINAL_RANGE_CONFIG_VALID_PHASE_LOW,
+				0x08);
+
+			Status |= VL_WrByte(Dev,
+				VL_REG_GLOBAL_CONFIG_VCSEL_WIDTH, 0x03);
+			Status |= VL_WrByte(Dev,
+				VL_REG_ALGO_PHASECAL_CONFIG_TIMEOUT, 0x08);
+
+			Status |= VL_WrByte(Dev, 0xff, 0x01);
+			Status |= VL_WrByte(Dev,
+				VL_REG_ALGO_PHASECAL_LIM,
+				0x20);
+			Status |= VL_WrByte(Dev, 0xff, 0x00);
+		} else if (VCSELPulsePeriodPCLK == 14) {
+
+			Status = VL_WrByte(Dev,
+				VL_REG_FINAL_RANGE_CONFIG_VALID_PHASE_HIGH,
+				0x48);
+			Status = VL_WrByte(Dev,
+				VL_REG_FINAL_RANGE_CONFIG_VALID_PHASE_LOW,
+				0x08);
+
+			Status |= VL_WrByte(Dev,
+				VL_REG_GLOBAL_CONFIG_VCSEL_WIDTH, 0x03);
+			Status |= VL_WrByte(Dev,
+				VL_REG_ALGO_PHASECAL_CONFIG_TIMEOUT, 0x07);
+
+			Status |= VL_WrByte(Dev, 0xff, 0x01);
+			Status |= VL_WrByte(Dev,
+				VL_REG_ALGO_PHASECAL_LIM,
+				0x20);
+			Status |= VL_WrByte(Dev, 0xff, 0x00);
+		}
+	}
+
+
+	/* Re-calculate and apply timeouts, in macro periods */
+
+	if (Status == VL_ERROR_NONE) {
+		vcsel_period_reg = VL_encode_vcsel_period((uint8_t)
+			VCSELPulsePeriodPCLK);
+
+	/* When the VCSEL period for the pre or final range is changed, */
+	/* the corresponding timeout must be read from the device using */
+	/* the current VCSEL period, then the new VCSEL period can be */
+	/* applied. The timeout then must be written back to the device */
+	/* using the new VCSEL period. */
+	/* For the MSRC timeout, the same applies - this timeout being */
+	/* dependent on the pre-range vcsel period. */
+		switch (VcselPeriodType) {
+		case VL_VCSEL_PERIOD_PRE_RANGE:
+			Status = get_sequence_step_timeout(Dev,
+				VL_SEQUENCESTEP_PRE_RANGE,
+				&PreRangeTimeoutMicroSeconds);
+
+			if (Status == VL_ERROR_NONE)
+				Status = get_sequence_step_timeout(Dev,
+					VL_SEQUENCESTEP_MSRC,
+					&MsrcTimeoutMicroSeconds);
+
+			if (Status == VL_ERROR_NONE)
+				Status = VL_WrByte(Dev,
+				VL_REG_PRE_RANGE_CONFIG_VCSEL_PERIOD,
+					vcsel_period_reg);
+
+
+			if (Status == VL_ERROR_NONE)
+				Status = set_sequence_step_timeout(Dev,
+					VL_SEQUENCESTEP_PRE_RANGE,
+					PreRangeTimeoutMicroSeconds);
+
+
+			if (Status == VL_ERROR_NONE)
+				Status = set_sequence_step_timeout(Dev,
+					VL_SEQUENCESTEP_MSRC,
+					MsrcTimeoutMicroSeconds);
+
+			VL_SETDEVICESPECIFICPARAMETER(
+				Dev,
+				PreRangeVcselPulsePeriod,
+				VCSELPulsePeriodPCLK);
+			break;
+		case VL_VCSEL_PERIOD_FINAL_RANGE:
+			Status = get_sequence_step_timeout(Dev,
+				VL_SEQUENCESTEP_FINAL_RANGE,
+				&FinalRangeTimeoutMicroSeconds);
+
+			if (Status == VL_ERROR_NONE)
+				Status = VL_WrByte(Dev,
+				VL_REG_FINAL_RANGE_CONFIG_VCSEL_PERIOD,
+					vcsel_period_reg);
+
+
+			if (Status == VL_ERROR_NONE)
+				Status = set_sequence_step_timeout(Dev,
+					VL_SEQUENCESTEP_FINAL_RANGE,
+					FinalRangeTimeoutMicroSeconds);
+
+			VL_SETDEVICESPECIFICPARAMETER(
+				Dev,
+				FinalRangeVcselPulsePeriod,
+				VCSELPulsePeriodPCLK);
+			break;
+		default:
+			Status = VL_ERROR_INVALID_PARAMS;
+		}
+	}
+
+	/* Finally, the timing budget must be re-applied */
+	if (Status == VL_ERROR_NONE) {
+		VL_GETPARAMETERFIELD(Dev,
+			MeasurementTimingBudgetMicroSeconds,
+			MeasurementTimingBudgetMicroSeconds);
+
+		Status = VL_SetMeasurementTimingBudgetMicroSeconds(Dev,
+				MeasurementTimingBudgetMicroSeconds);
+	}
+
+	/* Perform the phase calibration. This is needed after changing */
+	/* the vcsel period. */
+	/* get_data_enable = 0, restore_config = 1 */
+	if (Status == VL_ERROR_NONE)
+		Status = VL_perform_phase_calibration(
+			Dev, &PhaseCalInt, 0, 1);
+
+	return Status;
+}
+
+int8_t VL_get_vcsel_pulse_period(struct vl_data *Dev,
+	uint8_t VcselPeriodType, uint8_t *pVCSELPulsePeriodPCLK)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t vcsel_period_reg;
+
+	switch (VcselPeriodType) {
+	case VL_VCSEL_PERIOD_PRE_RANGE:
+		Status = VL_RdByte(Dev,
+			VL_REG_PRE_RANGE_CONFIG_VCSEL_PERIOD,
+			&vcsel_period_reg);
+	break;
+	case VL_VCSEL_PERIOD_FINAL_RANGE:
+		Status = VL_RdByte(Dev,
+			VL_REG_FINAL_RANGE_CONFIG_VCSEL_PERIOD,
+			&vcsel_period_reg);
+	break;
+	default:
+		Status = VL_ERROR_INVALID_PARAMS;
+	}
+
+	if (Status == VL_ERROR_NONE)
+		*pVCSELPulsePeriodPCLK =
+			VL_decode_vcsel_period(vcsel_period_reg);
+
+	return Status;
+}
+
+
+
+int8_t VL_set_measurement_timing_budget_micro_seconds(
+	struct vl_data *Dev, uint32_t MeasurementTimingBudgetMicroSeconds)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint32_t FinalRangeTimingBudgetMicroSeconds;
+	struct VL_SchedulerSequenceSteps_t SchedulerSequenceSteps;
+	uint32_t MsrcDccTccTimeoutMicroSeconds	= 2000;
+	uint32_t StartOverheadMicroSeconds		= 1910;
+	uint32_t EndOverheadMicroSeconds		= 960;
+	uint32_t MsrcOverheadMicroSeconds		= 660;
+	uint32_t TccOverheadMicroSeconds		= 590;
+	uint32_t DssOverheadMicroSeconds		= 690;
+	uint32_t PreRangeOverheadMicroSeconds	= 660;
+	uint32_t FinalRangeOverheadMicroSeconds = 550;
+	uint32_t PreRangeTimeoutMicroSeconds	= 0;
+	uint32_t cMinTimingBudgetMicroSeconds	= 20000;
+	uint32_t SubTimeout = 0;
+
+	LOG_FUNCTION_START("");
+
+	if (MeasurementTimingBudgetMicroSeconds
+			< cMinTimingBudgetMicroSeconds) {
+		Status = VL_ERROR_INVALID_PARAMS;
+		return Status;
+	}
+
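+	/* The start and end overheads are always incurred, so remove them */
+	/* from the budget up front; the remainder is then divided between */
+	/* the enabled sequence steps. */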
+	FinalRangeTimingBudgetMicroSeconds =
+		MeasurementTimingBudgetMicroSeconds -
+		(StartOverheadMicroSeconds + EndOverheadMicroSeconds);
+
+	Status = VL_GetSequenceStepEnables(Dev, &SchedulerSequenceSteps);
+
+	if (Status == VL_ERROR_NONE &&
+		(SchedulerSequenceSteps.TccOn  ||
+		SchedulerSequenceSteps.MsrcOn ||
+		SchedulerSequenceSteps.DssOn)) {
+
+		/* TCC, MSRC and DSS all share the same timeout */
+		Status = get_sequence_step_timeout(Dev,
+					VL_SEQUENCESTEP_MSRC,
+					&MsrcDccTccTimeoutMicroSeconds);
+
+		/* Subtract the TCC, MSRC and DSS timeouts if they are */
+		/* enabled. */
+
+		if (Status != VL_ERROR_NONE)
+			return Status;
+
+		/* TCC */
+		if (SchedulerSequenceSteps.TccOn) {
+
+			SubTimeout = MsrcDccTccTimeoutMicroSeconds
+				+ TccOverheadMicroSeconds;
+
+			if (SubTimeout <
+				FinalRangeTimingBudgetMicroSeconds) {
+				FinalRangeTimingBudgetMicroSeconds -=
+							SubTimeout;
+			} else {
+				/* Requested timeout too big. */
+				Status = VL_ERROR_INVALID_PARAMS;
+			}
+		}
+
+		if (Status != VL_ERROR_NONE) {
+			LOG_FUNCTION_END(Status);
+			return Status;
+		}
+
+		/* DSS */
+		if (SchedulerSequenceSteps.DssOn) {
+
+			SubTimeout = 2 * (MsrcDccTccTimeoutMicroSeconds +
+				DssOverheadMicroSeconds);
+
+			if (SubTimeout < FinalRangeTimingBudgetMicroSeconds) {
+				FinalRangeTimingBudgetMicroSeconds
+							-= SubTimeout;
+			} else {
+				/* Requested timeout too big. */
+				Status = VL_ERROR_INVALID_PARAMS;
+			}
+		} else if (SchedulerSequenceSteps.MsrcOn) {
+			/* MSRC */
+			SubTimeout = MsrcDccTccTimeoutMicroSeconds +
+						MsrcOverheadMicroSeconds;
+
+			if (SubTimeout < FinalRangeTimingBudgetMicroSeconds) {
+				FinalRangeTimingBudgetMicroSeconds
+							-= SubTimeout;
+			} else {
+				/* Requested timeout too big. */
+				Status = VL_ERROR_INVALID_PARAMS;
+			}
+		}
+
+	}
+
+	if (Status != VL_ERROR_NONE) {
+		LOG_FUNCTION_END(Status);
+		return Status;
+	}
+
+	if (SchedulerSequenceSteps.PreRangeOn) {
+
+		/* Subtract the Pre-range timeout if enabled. */
+
+		Status = get_sequence_step_timeout(Dev,
+				VL_SEQUENCESTEP_PRE_RANGE,
+				&PreRangeTimeoutMicroSeconds);
+
+		SubTimeout = PreRangeTimeoutMicroSeconds +
+				PreRangeOverheadMicroSeconds;
+
+		if (SubTimeout < FinalRangeTimingBudgetMicroSeconds) {
+			FinalRangeTimingBudgetMicroSeconds -= SubTimeout;
+		} else {
+			/* Requested timeout too big. */
+			Status = VL_ERROR_INVALID_PARAMS;
+		}
+	}
+
+
+	if (Status == VL_ERROR_NONE &&
+		SchedulerSequenceSteps.FinalRangeOn) {
+
+		FinalRangeTimingBudgetMicroSeconds -=
+				FinalRangeOverheadMicroSeconds;
+
+		/* Final Range Timeout
+		 * Note that the final range timeout is determined by the timing
+		 * budget and the sum of all other timeouts within the sequence.
+		 * If there is no room for the final range timeout, then an error
+		 * will be set. Otherwise the remaining time will be applied to
+		 * the final range.
+		 */
+		Status = set_sequence_step_timeout(Dev,
+			VL_SEQUENCESTEP_FINAL_RANGE,
+			FinalRangeTimingBudgetMicroSeconds);
+
+		VL_SETPARAMETERFIELD(Dev,
+			MeasurementTimingBudgetMicroSeconds,
+			MeasurementTimingBudgetMicroSeconds);
+	}
+
+	LOG_FUNCTION_END(Status);
+
+	return Status;
+}
+
+int8_t VL_get_measurement_timing_budget_micro_seconds(
+	struct vl_data *Dev, uint32_t *pMeasurementTimingBudgetMicroSeconds)
+{
+	int8_t Status = VL_ERROR_NONE;
+	struct VL_SchedulerSequenceSteps_t SchedulerSequenceSteps;
+	uint32_t FinalRangeTimeoutMicroSeconds;
+	uint32_t MsrcDccTccTimeoutMicroSeconds	= 2000;
+	uint32_t StartOverheadMicroSeconds		= 1910;
+	uint32_t EndOverheadMicroSeconds		= 960;
+	uint32_t MsrcOverheadMicroSeconds		= 660;
+	uint32_t TccOverheadMicroSeconds		= 590;
+	uint32_t DssOverheadMicroSeconds		= 690;
+	uint32_t PreRangeOverheadMicroSeconds	= 660;
+	uint32_t FinalRangeOverheadMicroSeconds = 550;
+	uint32_t PreRangeTimeoutMicroSeconds	= 0;
+
+	LOG_FUNCTION_START("");
+
+	/* Start and end overhead times always present */
+	*pMeasurementTimingBudgetMicroSeconds
+		= StartOverheadMicroSeconds + EndOverheadMicroSeconds;
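+	/* Each enabled step below then adds its own timeout plus a fixed */
+	/* overhead to the reported budget. */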
+
+	Status = VL_GetSequenceStepEnables(Dev, &SchedulerSequenceSteps);
+
+	if (Status != VL_ERROR_NONE) {
+		LOG_FUNCTION_END(Status);
+		return Status;
+	}
+
+
+	if (SchedulerSequenceSteps.TccOn  ||
+		SchedulerSequenceSteps.MsrcOn ||
+		SchedulerSequenceSteps.DssOn) {
+
+		Status = get_sequence_step_timeout(Dev,
+				VL_SEQUENCESTEP_MSRC,
+				&MsrcDccTccTimeoutMicroSeconds);
+
+		if (Status == VL_ERROR_NONE) {
+			if (SchedulerSequenceSteps.TccOn) {
+				*pMeasurementTimingBudgetMicroSeconds +=
+					MsrcDccTccTimeoutMicroSeconds +
+					TccOverheadMicroSeconds;
+			}
+
+			if (SchedulerSequenceSteps.DssOn) {
+				*pMeasurementTimingBudgetMicroSeconds +=
+				2 * (MsrcDccTccTimeoutMicroSeconds +
+					DssOverheadMicroSeconds);
+			} else if (SchedulerSequenceSteps.MsrcOn) {
+				*pMeasurementTimingBudgetMicroSeconds +=
+					MsrcDccTccTimeoutMicroSeconds +
+					MsrcOverheadMicroSeconds;
+			}
+		}
+	}
+
+	if (Status == VL_ERROR_NONE) {
+		if (SchedulerSequenceSteps.PreRangeOn) {
+			Status = get_sequence_step_timeout(Dev,
+				VL_SEQUENCESTEP_PRE_RANGE,
+				&PreRangeTimeoutMicroSeconds);
+			*pMeasurementTimingBudgetMicroSeconds +=
+				PreRangeTimeoutMicroSeconds +
+				PreRangeOverheadMicroSeconds;
+		}
+	}
+
+	if (Status == VL_ERROR_NONE) {
+		if (SchedulerSequenceSteps.FinalRangeOn) {
+			Status = get_sequence_step_timeout(Dev,
+					VL_SEQUENCESTEP_FINAL_RANGE,
+					&FinalRangeTimeoutMicroSeconds);
+			*pMeasurementTimingBudgetMicroSeconds +=
+				(FinalRangeTimeoutMicroSeconds +
+				FinalRangeOverheadMicroSeconds);
+		}
+	}
+
+	if (Status == VL_ERROR_NONE) {
+		VL_SETPARAMETERFIELD(Dev,
+			MeasurementTimingBudgetMicroSeconds,
+			*pMeasurementTimingBudgetMicroSeconds);
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+
+
+int8_t VL_load_tuning_settings(struct vl_data *Dev,
+		uint8_t *pTuningSettingBuffer)
+{
+	int8_t Status = VL_ERROR_NONE;
+	int i;
+	int Index;
+	uint8_t msb;
+	uint8_t lsb;
+	uint8_t SelectParam;
+	uint8_t NumberOfWrites;
+	uint8_t Address;
+	uint8_t localBuffer[4]; /* max */
+	uint16_t Temp16;
+
+	LOG_FUNCTION_START("");
+
+	Index = 0;
+
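+	/* The tuning buffer is a byte stream of records: */
+	/*   0x00                     end of buffer */
+	/*   0xFF, param, msb, lsb    internal PAL parameter */
+	/*   N (1..4), addr, data[N]  N-byte register write at addr */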
+	while ((*(pTuningSettingBuffer + Index) != 0) &&
+			(Status == VL_ERROR_NONE)) {
+		NumberOfWrites = *(pTuningSettingBuffer + Index);
+		Index++;
+		if (NumberOfWrites == 0xFF) {
+			/* internal parameters */
+			SelectParam = *(pTuningSettingBuffer + Index);
+			Index++;
+			switch (SelectParam) {
+			case 0: /* uint16_t SigmaEstRefArray -> 2 bytes */
+				msb = *(pTuningSettingBuffer + Index);
+				Index++;
+				lsb = *(pTuningSettingBuffer + Index);
+				Index++;
+				Temp16 = VL_MAKEUINT16(lsb, msb);
+				PALDevDataSet(Dev, SigmaEstRefArray, Temp16);
+				break;
+			case 1: /* uint16_t SigmaEstEffPulseWidth -> 2 bytes */
+				msb = *(pTuningSettingBuffer + Index);
+				Index++;
+				lsb = *(pTuningSettingBuffer + Index);
+				Index++;
+				Temp16 = VL_MAKEUINT16(lsb, msb);
+				PALDevDataSet(Dev, SigmaEstEffPulseWidth,
+					Temp16);
+				break;
+			case 2: /* uint16_t SigmaEstEffAmbWidth -> 2 bytes */
+				msb = *(pTuningSettingBuffer + Index);
+				Index++;
+				lsb = *(pTuningSettingBuffer + Index);
+				Index++;
+				Temp16 = VL_MAKEUINT16(lsb, msb);
+				PALDevDataSet(Dev, SigmaEstEffAmbWidth, Temp16);
+				break;
+			case 3: /* uint16_t targetRefRate -> 2 bytes */
+				msb = *(pTuningSettingBuffer + Index);
+				Index++;
+				lsb = *(pTuningSettingBuffer + Index);
+				Index++;
+				Temp16 = VL_MAKEUINT16(lsb, msb);
+				PALDevDataSet(Dev, targetRefRate, Temp16);
+				break;
+			default: /* invalid parameter */
+				Status = VL_ERROR_INVALID_PARAMS;
+			}
+
+		} else if (NumberOfWrites <= 4) {
+			Address = *(pTuningSettingBuffer + Index);
+			Index++;
+
+			for (i = 0; i < NumberOfWrites; i++) {
+				localBuffer[i] = *(pTuningSettingBuffer +
+							Index);
+				Index++;
+			}
+
+			Status = VL_WriteMulti(Dev, Address, localBuffer,
+					NumberOfWrites);
+
+		} else {
+			Status = VL_ERROR_INVALID_PARAMS;
+		}
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_get_total_xtalk_rate(struct vl_data *Dev,
+	struct VL_RangingMeasurementData_t *pRangingMeasurementData,
+	unsigned int *ptotal_xtalk_rate_mcps)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	uint8_t xtalkCompEnable;
+	unsigned int totalXtalkMegaCps;
+	unsigned int xtalkPerSpadMegaCps;
+
+	*ptotal_xtalk_rate_mcps = 0;
+
+	Status = VL_GetXTalkCompensationEnable(Dev, &xtalkCompEnable);
+	if (Status == VL_ERROR_NONE) {
+
+		if (xtalkCompEnable) {
+
+			VL_GETPARAMETERFIELD(
+				Dev,
+				XTalkCompensationRateMegaCps,
+				xtalkPerSpadMegaCps);
+
+			/* FixPoint1616 * FixPoint 8:8 = FixPoint0824 */
+			totalXtalkMegaCps =
+				pRangingMeasurementData->EffectiveSpadRtnCount *
+				xtalkPerSpadMegaCps;
+
+			/* FixPoint0824 >> 8 = FixPoint1616 */
+			*ptotal_xtalk_rate_mcps =
+				(totalXtalkMegaCps + 0x80) >> 8;
+		}
+	}
+
+	return Status;
+}
+
+int8_t VL_get_total_signal_rate(struct vl_data *Dev,
+	struct VL_RangingMeasurementData_t *pRangingMeasurementData,
+	unsigned int *ptotal_signal_rate_mcps)
+{
+	int8_t Status = VL_ERROR_NONE;
+	unsigned int totalXtalkMegaCps;
+
+	LOG_FUNCTION_START("");
+
+	*ptotal_signal_rate_mcps =
+		pRangingMeasurementData->SignalRateRtnMegaCps;
+
+	Status = VL_get_total_xtalk_rate(
+		Dev, pRangingMeasurementData, &totalXtalkMegaCps);
+
+	if (Status == VL_ERROR_NONE)
+		*ptotal_signal_rate_mcps += totalXtalkMegaCps;
+
+	return Status;
+}
+
+int8_t VL_calc_dmax(
+	struct vl_data *Dev,
+	unsigned int totalSignalRate_mcps,
+	unsigned int totalCorrSignalRate_mcps,
+	unsigned int pwMult,
+	uint32_t sigmaEstimateP1,
+	unsigned int sigmaEstimateP2,
+	uint32_t peakVcselDuration_us,
+	uint32_t *pdmax_mm)
+{
+	const uint32_t cSigmaLimit		= 18;
+	const unsigned int cSignalLimit	= 0x4000; /* 0.25 */
+	const unsigned int cSigmaEstRef	= 0x00000042; /* 0.001 */
+	const uint32_t cAmbEffWidthSigmaEst_ns = 6;
+	const uint32_t cAmbEffWidthDMax_ns	   = 7;
+	uint32_t dmaxCalRange_mm;
+	unsigned int dmaxCalSignalRateRtn_mcps;
+	unsigned int minSignalNeeded;
+	unsigned int minSignalNeeded_p1;
+	unsigned int minSignalNeeded_p2;
+	unsigned int minSignalNeeded_p3;
+	unsigned int minSignalNeeded_p4;
+	unsigned int sigmaLimitTmp;
+	unsigned int sigmaEstSqTmp;
+	unsigned int signalLimitTmp;
+	unsigned int SignalAt0mm;
+	unsigned int dmaxDark;
+	unsigned int dmaxAmbient;
+	unsigned int dmaxDarkTmp;
+	unsigned int sigmaEstP2Tmp;
+	uint32_t signalRateTemp_mcps;
+
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	dmaxCalRange_mm =
+		PALDevDataGet(Dev, DmaxCalRangeMilliMeter);
+
+	dmaxCalSignalRateRtn_mcps =
+		PALDevDataGet(Dev, DmaxCalSignalRateRtnMegaCps);
+
+	/* uint32 * FixPoint1616 = FixPoint1616 */
+	SignalAt0mm = dmaxCalRange_mm * dmaxCalSignalRateRtn_mcps;
+
+	/* FixPoint1616 >> 8 = FixPoint2408 */
+	SignalAt0mm = (SignalAt0mm + 0x80) >> 8;
+	SignalAt0mm *= dmaxCalRange_mm;
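+	/* SignalAt0mm is now the calibration signal rate scaled by the */
+	/* square of the calibration range; the dmax values below are */
+	/* obtained from it by an inverse square extrapolation. */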
+
+	minSignalNeeded_p1 = 0;
+	if (totalCorrSignalRate_mcps > 0) {
+
+		/* Shift by 10 bits to increase resolution prior to the */
+		/* division */
+		signalRateTemp_mcps = totalSignalRate_mcps << 10;
+
+		/* Add rounding value prior to division */
+		minSignalNeeded_p1 = signalRateTemp_mcps +
+			(totalCorrSignalRate_mcps/2);
+
+		/* FixPoint0626/FixPoint1616 = FixPoint2210 */
+		minSignalNeeded_p1 /= totalCorrSignalRate_mcps;
+
+		/* Apply a factored version of the speed of light. */
+		/* Correction to be applied at the end */
+		minSignalNeeded_p1 *= 3;
+
+		/* FixPoint2210 * FixPoint2210 = FixPoint1220 */
+		minSignalNeeded_p1 *= minSignalNeeded_p1;
+
+		/* FixPoint1220 >> 16 = FixPoint2804 */
+		minSignalNeeded_p1 = (minSignalNeeded_p1 + 0x8000) >> 16;
+	}
+
+	minSignalNeeded_p2 = pwMult * sigmaEstimateP1;
+
+	/* FixPoint1616 >> 16 =	 uint32 */
+	minSignalNeeded_p2 = (minSignalNeeded_p2 + 0x8000) >> 16;
+
+	/* uint32 * uint32	=  uint32 */
+	minSignalNeeded_p2 *= minSignalNeeded_p2;
+
+	/* Check sigmaEstimateP2
+	 * If this value is too high, there is not enough signal rate to
+	 * calculate the dmax value, so set a suitable value to ensure a
+	 * very small dmax.
+	 */
+	sigmaEstP2Tmp = (sigmaEstimateP2 + 0x8000) >> 16;
+	sigmaEstP2Tmp = (sigmaEstP2Tmp + cAmbEffWidthSigmaEst_ns/2)/
+		cAmbEffWidthSigmaEst_ns;
+	sigmaEstP2Tmp *= cAmbEffWidthDMax_ns;
+
+	if (sigmaEstP2Tmp > 0xffff) {
+		minSignalNeeded_p3 = 0xfff00000;
+	} else {
+
+		/* DMAX uses a different ambient width from sigma, so apply
+		 * correction.
+		 * Perform division before multiplication to prevent overflow.
+		 */
+		sigmaEstimateP2 = (sigmaEstimateP2 + cAmbEffWidthSigmaEst_ns/2)/
+			cAmbEffWidthSigmaEst_ns;
+		sigmaEstimateP2 *= cAmbEffWidthDMax_ns;
+
+		/* FixPoint1616 >> 16 = uint32 */
+		minSignalNeeded_p3 = (sigmaEstimateP2 + 0x8000) >> 16;
+
+		minSignalNeeded_p3 *= minSignalNeeded_p3;
+
+	}
+
+	/* FixPoint1814 / uint32 = FixPoint1814 */
+	sigmaLimitTmp = ((cSigmaLimit << 14) + 500) / 1000;
+
+	/* FixPoint1814 * FixPoint1814 = FixPoint3628 := FixPoint0428 */
+	sigmaLimitTmp *= sigmaLimitTmp;
+
+	/* FixPoint1616 * FixPoint1616 = FixPoint3232 */
+	sigmaEstSqTmp = cSigmaEstRef * cSigmaEstRef;
+
+	/* FixPoint3232 >> 4 = FixPoint0428 */
+	sigmaEstSqTmp = (sigmaEstSqTmp + 0x08) >> 4;
+
+	/* FixPoint0428 - FixPoint0428	= FixPoint0428 */
+	sigmaLimitTmp -=  sigmaEstSqTmp;
+
+	/* uint32_t * FixPoint0428 = FixPoint0428 */
+	minSignalNeeded_p4 = 4 * 12 * sigmaLimitTmp;
+
+	/* FixPoint0428 >> 14 = FixPoint1814 */
+	minSignalNeeded_p4 = (minSignalNeeded_p4 + 0x2000) >> 14;
+
+	/* uint32 + uint32 = uint32 */
+	minSignalNeeded = (minSignalNeeded_p2 + minSignalNeeded_p3);
+
+	/* uint32 / uint32 = uint32 */
+	minSignalNeeded += (peakVcselDuration_us/2);
+	minSignalNeeded /= peakVcselDuration_us;
+
+	/* uint32 << 14 = FixPoint1814 */
+	minSignalNeeded <<= 14;
+
+	/* FixPoint1814 / FixPoint1814 = uint32 */
+	minSignalNeeded += (minSignalNeeded_p4/2);
+	minSignalNeeded /= minSignalNeeded_p4;
+
+	/* FixPoint3200 * FixPoint2804 := FixPoint2804 */
+	minSignalNeeded *= minSignalNeeded_p1;
+
+	/* Apply correction by dividing by 1000000.
+	 * This assumes 10E16 on the numerator of the equation
+	 * and 10E-22 on the denominator.
+	 * We do this because 32bit fix point calculation can't
+	 * handle the larger and smaller elements of this equation,
+	 * i.e. speed of light and pulse widths.
+	 */
+	minSignalNeeded = (minSignalNeeded + 500) / 1000;
+	minSignalNeeded <<= 4;
+
+	minSignalNeeded = (minSignalNeeded + 500) / 1000;
+
+	/* FixPoint1616 >> 8 = FixPoint2408 */
+	signalLimitTmp = (cSignalLimit + 0x80) >> 8;
+
+	/* FixPoint2408/FixPoint2408 = uint32 */
+	if (signalLimitTmp != 0)
+		dmaxDarkTmp = (SignalAt0mm + (signalLimitTmp / 2))
+			/ signalLimitTmp;
+	else
+		dmaxDarkTmp = 0;
+
+	dmaxDark = VL_isqrt(dmaxDarkTmp);
+
+	/* FixPoint2408/FixPoint2408 = uint32 */
+	if (minSignalNeeded != 0)
+		dmaxAmbient = (SignalAt0mm + minSignalNeeded/2)
+			/ minSignalNeeded;
+	else
+		dmaxAmbient = 0;
+
+	dmaxAmbient = VL_isqrt(dmaxAmbient);
+
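+	/* Report the smaller (more restrictive) of the dark and ambient */
+	/* dmax estimates. */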
+	*pdmax_mm = dmaxDark;
+	if (dmaxDark > dmaxAmbient)
+		*pdmax_mm = dmaxAmbient;
+
+	LOG_FUNCTION_END(Status);
+
+	return Status;
+}
+
+
+int8_t VL_calc_sigma_estimate(struct vl_data *Dev,
+	struct VL_RangingMeasurementData_t *pRangingMeasurementData,
+	unsigned int *pSigmaEstimate,
+	uint32_t *pDmax_mm)
+{
+	/* Expressed in 100ths of a ns, i.e. centi-ns */
+	const uint32_t cPulseEffectiveWidth_centi_ns   = 800;
+	/* Expressed in 100ths of a ns, i.e. centi-ns */
+	const uint32_t cAmbientEffectiveWidth_centi_ns = 600;
+	/* 25ms */
+	const unsigned int cDfltFinalRangeIntegrationTimeMilliSecs =
+		0x00190000;
+	const uint32_t cVcselPulseWidth_ps	= 4700; /* pico secs */
+	const unsigned int cSigmaEstMax	= 0x028F87AE;
+	const unsigned int cSigmaEstRtnMax	= 0xF000;
+	const unsigned int cAmbToSignalRatioMax = 0xF0000000/
+		cAmbientEffectiveWidth_centi_ns;
+	/* Time Of Flight per mm (6.6 pico secs) */
+	const unsigned int cTOF_per_mm_ps		= 0x0006999A;
+	const uint32_t c16BitRoundingParam		= 0x00008000;
+	const unsigned int cMaxXTalk_kcps		= 0x00320000;
+	const uint32_t cPllPeriod_ps			= 1655;
+
+	uint32_t vcselTotalEventsRtn;
+	uint32_t finalRangeTimeoutMicroSecs;
+	uint32_t preRangeTimeoutMicroSecs;
+	uint32_t finalRangeIntegrationTimeMilliSecs;
+	unsigned int sigmaEstimateP1;
+	unsigned int sigmaEstimateP2;
+	unsigned int sigmaEstimateP3;
+	unsigned int deltaT_ps;
+	unsigned int pwMult;
+	unsigned int sigmaEstRtn;
+	unsigned int sigmaEstimate;
+	unsigned int xTalkCorrection;
+	unsigned int ambientRate_kcps;
+	unsigned int peakSignalRate_kcps;
+	unsigned int xTalkCompRate_mcps;
+	uint32_t xTalkCompRate_kcps;
+	int8_t Status = VL_ERROR_NONE;
+	unsigned int diff1_mcps;
+	unsigned int diff2_mcps;
+	unsigned int sqr1;
+	unsigned int sqr2;
+	unsigned int sqrSum;
+	unsigned int sqrtResult_centi_ns;
+	unsigned int sqrtResult;
+	unsigned int totalSignalRate_mcps;
+	unsigned int correctedSignalRate_mcps;
+	unsigned int sigmaEstRef;
+	uint32_t vcselWidth;
+	uint32_t finalRangeMacroPCLKS;
+	uint32_t preRangeMacroPCLKS;
+	uint32_t peakVcselDuration_us;
+	uint8_t finalRangeVcselPCLKS;
+	uint8_t preRangeVcselPCLKS;
+	/*! \addtogroup calc_sigma_estimate
+	 * @{
+	 *
+	 * Estimates the range sigma
+	 */
+
+	LOG_FUNCTION_START("");
+
+	VL_GETPARAMETERFIELD(Dev, XTalkCompensationRateMegaCps,
+			xTalkCompRate_mcps);
+
+	/*
+	 * We work in kcps rather than mcps as this helps keep within the
+	 * confines of the 32 bit Fix1616 type.
+	 */
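+	/* (FixPoint1616: 16 integer bits and 16 fractional bits in a */
+	/* 32 bit word, e.g. 0x00018000 represents 1.5.) */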
+
+	ambientRate_kcps =
+		(pRangingMeasurementData->AmbientRateRtnMegaCps * 1000) >> 16;
+
+	correctedSignalRate_mcps =
+		pRangingMeasurementData->SignalRateRtnMegaCps;
+
+
+	Status = VL_get_total_signal_rate(
+		Dev, pRangingMeasurementData, &totalSignalRate_mcps);
+	Status = VL_get_total_xtalk_rate(
+		Dev, pRangingMeasurementData, &xTalkCompRate_mcps);
+
+
+	/* Signal rate measurement provided by device is the
+	 * peak signal rate, not average.
+	 */
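+	/* Fix1616 mcps * 1000 gives Fix1616 kcps; adding 0x8000 and */
+	/* shifting right by 16 rounds it to an integer kcps value. */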
+	peakSignalRate_kcps = (totalSignalRate_mcps * 1000);
+	peakSignalRate_kcps = (peakSignalRate_kcps + 0x8000) >> 16;
+
+	xTalkCompRate_kcps = xTalkCompRate_mcps * 1000;
+
+	if (xTalkCompRate_kcps > cMaxXTalk_kcps)
+		xTalkCompRate_kcps = cMaxXTalk_kcps;
+
+	if (Status == VL_ERROR_NONE) {
+
+		/* Calculate final range macro periods */
+		finalRangeTimeoutMicroSecs = VL_GETDEVICESPECIFICPARAMETER(
+			Dev, FinalRangeTimeoutMicroSecs);
+
+		finalRangeVcselPCLKS = VL_GETDEVICESPECIFICPARAMETER(
+			Dev, FinalRangeVcselPulsePeriod);
+
+		finalRangeMacroPCLKS = VL_calc_timeout_mclks(
+			Dev, finalRangeTimeoutMicroSecs, finalRangeVcselPCLKS);
+
+		/* Calculate pre-range macro periods */
+		preRangeTimeoutMicroSecs = VL_GETDEVICESPECIFICPARAMETER(
+			Dev, PreRangeTimeoutMicroSecs);
+
+		preRangeVcselPCLKS = VL_GETDEVICESPECIFICPARAMETER(
+			Dev, PreRangeVcselPulsePeriod);
+
+		preRangeMacroPCLKS = VL_calc_timeout_mclks(
+			Dev, preRangeTimeoutMicroSecs, preRangeVcselPCLKS);
+
+		vcselWidth = 3;
+		if (finalRangeVcselPCLKS == 8)
+			vcselWidth = 2;
+
+
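+		/* Total VCSEL-on time: vcselWidth * 2048 PLL clocks per */
+		/* macro period, converted to ps via cPllPeriod_ps and then */
+		/* down to us with two rounded divisions by 1000. */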
+		peakVcselDuration_us = vcselWidth * 2048 *
+			(preRangeMacroPCLKS + finalRangeMacroPCLKS);
+		peakVcselDuration_us = (peakVcselDuration_us + 500)/1000;
+		peakVcselDuration_us *= cPllPeriod_ps;
+		peakVcselDuration_us = (peakVcselDuration_us + 500)/1000;
+
+		/* Fix1616 >> 8 = Fix2408 */
+		totalSignalRate_mcps = (totalSignalRate_mcps + 0x80) >> 8;
+
+		/* Fix2408 * uint32 = Fix2408 */
+		vcselTotalEventsRtn = totalSignalRate_mcps *
+			peakVcselDuration_us;
+
+		/* Fix2408 >> 8 = uint32 */
+		vcselTotalEventsRtn = (vcselTotalEventsRtn + 0x80) >> 8;
+
+		/* Fix2408 << 8 = Fix1616 = */
+		totalSignalRate_mcps <<= 8;
+	}
+
+	if (Status != VL_ERROR_NONE) {
+		LOG_FUNCTION_END(Status);
+		return Status;
+	}
+
+	if (peakSignalRate_kcps == 0) {
+		*pSigmaEstimate = cSigmaEstMax;
+		PALDevDataSet(Dev, SigmaEstimate, cSigmaEstMax);
+		*pDmax_mm = 0;
+	} else {
+		if (vcselTotalEventsRtn < 1)
+			vcselTotalEventsRtn = 1;
+
+		sigmaEstimateP1 = cPulseEffectiveWidth_centi_ns;
+
+		/* ((FixPoint1616 << 16)* uint32)/uint32 = FixPoint1616 */
+		sigmaEstimateP2 = (ambientRate_kcps << 16)/peakSignalRate_kcps;
+		if (sigmaEstimateP2 > cAmbToSignalRatioMax) {
+			/* Clip to prevent overflow. Will ensure safe */
+			/* max result. */
+			sigmaEstimateP2 = cAmbToSignalRatioMax;
+		}
+		sigmaEstimateP2 *= cAmbientEffectiveWidth_centi_ns;
+
+		sigmaEstimateP3 = 2 * VL_isqrt(vcselTotalEventsRtn * 12);
+
+		/* uint32 * FixPoint1616 = FixPoint1616 */
+		deltaT_ps = pRangingMeasurementData->RangeMilliMeter *
+					cTOF_per_mm_ps;
+
+		/* vcselRate - xtalkCompRate */
+		/* (uint32 << 16) - FixPoint1616 = FixPoint1616. */
+		/* Divide result by 1000 to convert to mcps. */
+		/* 500 is added to ensure rounding when integer division */
+		/* truncates. */
+		diff1_mcps = (((peakSignalRate_kcps << 16) -
+			2 * xTalkCompRate_kcps) + 500)/1000;
+
+		/* vcselRate + xtalkCompRate */
+		diff2_mcps = ((peakSignalRate_kcps << 16) + 500)/1000;
+
+		/* Shift by 8 bits to increase resolution prior to the */
+		/* division */
+		diff1_mcps <<= 8;
+
+		/* FixPoint0824/FixPoint1616 = FixPoint2408 */
+		xTalkCorrection	 = abs(diff1_mcps/diff2_mcps);
+
+		/* FixPoint2408 << 8 = FixPoint1616 */
+		xTalkCorrection <<= 8;
+
+		if (pRangingMeasurementData->RangeStatus != 0) {
+			pwMult = 1 << 16;
+		} else {
+			/* FixPoint1616/uint32 = FixPoint1616 */
+			/* smaller than 1.0f */
+			pwMult = deltaT_ps/cVcselPulseWidth_ps;
+
+		/* FixPoint1616 * FixPoint1616 = FixPoint3232, however both */
+		/* values are small enough such that 32 bits will not be */
+		/* exceeded. */
+			pwMult *= ((1 << 16) - xTalkCorrection);
+
+			/* (FixPoint3232 >> 16) = FixPoint1616 */
+			pwMult =  (pwMult + c16BitRoundingParam) >> 16;
+
+			/* FixPoint1616 + FixPoint1616 = FixPoint1616 */
+			pwMult += (1 << 16);
+
+		/* At this point the value will be 1.xx, therefore if we */
+		/* square the value this will exceed 32 bits. To address */
+		/* this, perform a single shift to the right before the */
+		/* multiplication. */
+			pwMult >>= 1;
+			/* FixPoint1715 * FixPoint1715 = FixPoint3430 */
+			pwMult = pwMult * pwMult;
+
+			/* (FixPoint3430 >> 14) = Fix1616 */
+			pwMult >>= 14;
+		}
+
+		/* FixPoint1616 * uint32 = FixPoint1616 */
+		sqr1 = pwMult * sigmaEstimateP1;
+
+		/* (FixPoint1616 >> 16) = FixPoint3200 */
+		sqr1 = (sqr1 + 0x8000) >> 16;
+
+		/* FixPoint3200 * FixPoint3200 = FixPoint6400 */
+		sqr1 *= sqr1;
+
+		sqr2 = sigmaEstimateP2;
+
+		/* (FixPoint1616 >> 16) = FixPoint3200 */
+		sqr2 = (sqr2 + 0x8000) >> 16;
+
+		/* FixPoint3200 * FixPoint3200 = FixPoint6400 */
+		sqr2 *= sqr2;
+
+		/* FixPoint6400 + FixPoint6400 = FixPoint6400 */
+		sqrSum = sqr1 + sqr2;
+
+		/* SQRT(FixPoint6400) = FixPoint3200 */
+		sqrtResult_centi_ns = VL_isqrt(sqrSum);
+
+		/* (FixPoint3200 << 16) = FixPoint1616 */
+		sqrtResult_centi_ns <<= 16;
+
+		/*
+		 * Note that the Speed Of Light is expressed in um per 1E-10
+		 * seconds (2997). Therefore, to get mm/ns we have to divide
+		 * by 10000.
+		 */
+		sigmaEstRtn = (((sqrtResult_centi_ns+50)/100) /
+				sigmaEstimateP3);
+		sigmaEstRtn		 *= VL_SPEED_OF_LIGHT_IN_AIR;
+
+		/* Add 5000 before dividing by 10000 to ensure rounding. */
+		sigmaEstRtn		 += 5000;
+		sigmaEstRtn		 /= 10000;
+
+		if (sigmaEstRtn > cSigmaEstRtnMax) {
+			/* Clip to prevent overflow. Will ensure safe */
+			/* max result. */
+			sigmaEstRtn = cSigmaEstRtnMax;
+		}
+		finalRangeIntegrationTimeMilliSecs =
+		    (finalRangeTimeoutMicroSecs +
+			preRangeTimeoutMicroSecs + 500)/1000;
+
+		/* sigmaEstRef = 1mm * 25ms/final range integration time */
+		/* (inc pre-range); sqrt(FixPoint1616/int) = FixPoint2408 */
+		sigmaEstRef =
+			VL_isqrt((cDfltFinalRangeIntegrationTimeMilliSecs +
+				finalRangeIntegrationTimeMilliSecs/2)/
+				finalRangeIntegrationTimeMilliSecs);
+
+		/* FixPoint2408 << 8 = FixPoint1616 */
+		sigmaEstRef <<= 8;
+		sigmaEstRef = (sigmaEstRef + 500)/1000;
+
+		/* FixPoint1616 * FixPoint1616 = FixPoint3232 */
+		sqr1 = sigmaEstRtn * sigmaEstRtn;
+		/* FixPoint1616 * FixPoint1616 = FixPoint3232 */
+		sqr2 = sigmaEstRef * sigmaEstRef;
+
+		/* sqrt(FixPoint3232) = FixPoint1616 */
+		sqrtResult = VL_isqrt((sqr1 + sqr2));
+		/* Note that the shift by 4 bits increases resolution prior */
+		/* to the sqrt, therefore the result must be shifted by */
+		/* 2 bits to the right to revert back to the FixPoint1616 */
+		/* format. */
+
+		sigmaEstimate	 = 1000 * sqrtResult;
+
+		if ((peakSignalRate_kcps < 1) || (vcselTotalEventsRtn < 1) ||
+			(sigmaEstimate > cSigmaEstMax)) {
+			sigmaEstimate = cSigmaEstMax;
+		}
+
+		*pSigmaEstimate = (uint32_t)(sigmaEstimate);
+		PALDevDataSet(Dev, SigmaEstimate, *pSigmaEstimate);
+		Status = VL_calc_dmax(
+			Dev,
+			totalSignalRate_mcps,
+			correctedSignalRate_mcps,
+			pwMult,
+			sigmaEstimateP1,
+			sigmaEstimateP2,
+			peakVcselDuration_us,
+			pDmax_mm);
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_get_pal_range_status(struct vl_data *Dev,
+		uint8_t DeviceRangeStatus,
+		unsigned int SignalRate,
+		uint16_t EffectiveSpadRtnCount,
+		struct VL_RangingMeasurementData_t *pRangingMeasurementData,
+		uint8_t *pPalRangeStatus)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t NoneFlag;
+	uint8_t SigmaLimitflag = 0;
+	uint8_t SignalRefClipflag = 0;
+	uint8_t RangeIgnoreThresholdflag = 0;
+	uint8_t SigmaLimitCheckEnable = 0;
+	uint8_t SignalRateFinalRangeLimitCheckEnable = 0;
+	uint8_t SignalRefClipLimitCheckEnable = 0;
+	uint8_t RangeIgnoreThresholdLimitCheckEnable = 0;
+	unsigned int SigmaEstimate;
+	unsigned int SigmaLimitValue;
+	unsigned int SignalRefClipValue;
+	unsigned int RangeIgnoreThresholdValue;
+	unsigned int SignalRatePerSpad;
+	uint8_t DeviceRangeStatusInternal = 0;
+	uint16_t tmpWord = 0;
+	uint8_t Temp8;
+	uint32_t Dmax_mm = 0;
+	unsigned int LastSignalRefMcps;
+
+	LOG_FUNCTION_START("");
+
+
+	/*
+	 * VL53L0X has a good ranging when the value of the
+	 * DeviceRangeStatus = 11. This function will replace the value 0 with
+	 * the value 11 in the DeviceRangeStatus.
+	 * In addition, the SigmaEstimator is not included in the VL53L0X
+	 * DeviceRangeStatus; this will be added in the PalRangeStatus.
+	 */
+
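+	/* Bits 6:3 of the raw device status hold the internal range */
+	/* status code. */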
+	DeviceRangeStatusInternal = ((DeviceRangeStatus & 0x78) >> 3);
+
+	if (DeviceRangeStatusInternal == 0 ||
+		DeviceRangeStatusInternal == 5 ||
+		DeviceRangeStatusInternal == 7 ||
+		DeviceRangeStatusInternal == 12 ||
+		DeviceRangeStatusInternal == 13 ||
+		DeviceRangeStatusInternal == 14 ||
+		DeviceRangeStatusInternal == 15
+			) {
+		NoneFlag = 1;
+	} else {
+		NoneFlag = 0;
+	}
+
+	/*
+	 * Check if Sigma limit is enabled, if yes then do comparison with limit
+	 * value and put the result back into pPalRangeStatus.
+	 */
+	if (Status == VL_ERROR_NONE)
+		Status =  VL_GetLimitCheckEnable(Dev,
+			VL_CHECKENABLE_SIGMA_FINAL_RANGE,
+			&SigmaLimitCheckEnable);
+
+	if ((SigmaLimitCheckEnable != 0) && (Status == VL_ERROR_NONE)) {
+		/* compute the Sigma and check with limit */
+		Status = VL_calc_sigma_estimate(
+			Dev,
+			pRangingMeasurementData,
+			&SigmaEstimate,
+			&Dmax_mm);
+		if (Status == VL_ERROR_NONE)
+			pRangingMeasurementData->RangeDMaxMilliMeter = Dmax_mm;
+
+		if (Status == VL_ERROR_NONE) {
+			Status = VL_GetLimitCheckValue(Dev,
+				VL_CHECKENABLE_SIGMA_FINAL_RANGE,
+				&SigmaLimitValue);
+
+			if ((SigmaLimitValue > 0) &&
+				(SigmaEstimate > SigmaLimitValue))
+				/* Limit Fail */
+				SigmaLimitflag = 1;
+		}
+	}
+
+	/* Check if Signal ref clip limit is enabled, */
+	/* if yes then do comparison */
+	/* with limit value and put the result back into pPalRangeStatus. */
+	if (Status == VL_ERROR_NONE)
+		Status =  VL_GetLimitCheckEnable(Dev,
+				VL_CHECKENABLE_SIGNAL_REF_CLIP,
+				&SignalRefClipLimitCheckEnable);
+
+	if ((SignalRefClipLimitCheckEnable != 0) &&
+			(Status == VL_ERROR_NONE)) {
+
+		Status = VL_GetLimitCheckValue(Dev,
+				VL_CHECKENABLE_SIGNAL_REF_CLIP,
+				&SignalRefClipValue);
+
+		/* Read LastSignalRefMcps from device */
+		if (Status == VL_ERROR_NONE)
+			Status = VL_WrByte(Dev, 0xFF, 0x01);
+
+		if (Status == VL_ERROR_NONE)
+			Status = VL_RdWord(Dev,
+				VL_REG_RESULT_PEAK_SIGNAL_RATE_REF,
+				&tmpWord);
+
+		if (Status == VL_ERROR_NONE)
+			Status = VL_WrByte(Dev, 0xFF, 0x00);
+
+		LastSignalRefMcps = VL_FIXPOINT97TOFIXPOINT1616(tmpWord);
+		PALDevDataSet(Dev, LastSignalRefMcps, LastSignalRefMcps);
+
+		if ((SignalRefClipValue > 0) &&
+				(LastSignalRefMcps > SignalRefClipValue)) {
+			/* Limit Fail */
+			SignalRefClipflag = 1;
+		}
+	}
+
+	/*
+	 * Check if Range ignore threshold limit is enabled, if yes then do
+	 * comparison with limit value and put the result back into
+	 * pPalRangeStatus.
+	 * EffectiveSpadRtnCount has a format 8.8
+	 * If (Return signal rate < (1.5 x Xtalk x number of Spads)) : FAIL
+	 */
+	if (Status == VL_ERROR_NONE)
+		Status =  VL_GetLimitCheckEnable(Dev,
+				VL_CHECKENABLE_RANGE_IGNORE_THRESHOLD,
+				&RangeIgnoreThresholdLimitCheckEnable);
+
+	if ((RangeIgnoreThresholdLimitCheckEnable != 0) &&
+			(Status == VL_ERROR_NONE)) {
+
+		/* Compute the signal rate per spad */
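+		/* EffectiveSpadRtnCount is in 8.8 format, so scale by 256 to */
+		/* keep the per-SPAD rate in the same fixed point format as */
+		/* SignalRate. */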
+		if (EffectiveSpadRtnCount == 0) {
+			SignalRatePerSpad = 0;
+		} else {
+			SignalRatePerSpad = (unsigned int)((256 * SignalRate)
+				/ EffectiveSpadRtnCount);
+		}
+
+		Status = VL_GetLimitCheckValue(Dev,
+				VL_CHECKENABLE_RANGE_IGNORE_THRESHOLD,
+				&RangeIgnoreThresholdValue);
+
+		if ((RangeIgnoreThresholdValue > 0) &&
+			(SignalRatePerSpad < RangeIgnoreThresholdValue)) {
+			/* Limit fail: add 2^6 to range status */
+			RangeIgnoreThresholdflag = 1;
+		}
+	}
+
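+	/* Collapse the internal device status and the limit check flags */
+	/* into the PAL range status (0 means a valid range). */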
+	if (Status == VL_ERROR_NONE) {
+		if (NoneFlag == 1) {
+			*pPalRangeStatus = 255;	 /* NONE */
+		} else if (DeviceRangeStatusInternal == 1 ||
+					DeviceRangeStatusInternal == 2 ||
+					DeviceRangeStatusInternal == 3) {
+			*pPalRangeStatus = 5; /* HW fail */
+		} else if (DeviceRangeStatusInternal == 6 ||
+					DeviceRangeStatusInternal == 9) {
+			*pPalRangeStatus = 4;  /* Phase fail */
+		} else if (DeviceRangeStatusInternal == 8 ||
+					DeviceRangeStatusInternal == 10 ||
+					SignalRefClipflag == 1) {
+			*pPalRangeStatus = 3;  /* Min range */
+		} else if (DeviceRangeStatusInternal == 4 ||
+					RangeIgnoreThresholdflag == 1) {
+			*pPalRangeStatus = 2;  /* Signal Fail */
+		} else if (SigmaLimitflag == 1) {
+			*pPalRangeStatus = 1;  /* Sigma	 Fail */
+		} else {
+			*pPalRangeStatus = 0; /* Range Valid */
+		}
+	}
+
+	/* DMAX only relevant during range error */
+	if (*pPalRangeStatus == 0)
+		pRangingMeasurementData->RangeDMaxMilliMeter = 0;
+
+	/* fill the Limit Check Status */
+
+	Status =  VL_GetLimitCheckEnable(Dev,
+			VL_CHECKENABLE_SIGNAL_RATE_FINAL_RANGE,
+			&SignalRateFinalRangeLimitCheckEnable);
+
+	if (Status == VL_ERROR_NONE) {
+		if ((SigmaLimitCheckEnable == 0) || (SigmaLimitflag == 1))
+			Temp8 = 1;
+		else
+			Temp8 = 0;
+		VL_SETARRAYPARAMETERFIELD(Dev, LimitChecksStatus,
+				VL_CHECKENABLE_SIGMA_FINAL_RANGE, Temp8);
+
+		if ((DeviceRangeStatusInternal == 4) ||
+				(SignalRateFinalRangeLimitCheckEnable == 0))
+			Temp8 = 1;
+		else
+			Temp8 = 0;
+		VL_SETARRAYPARAMETERFIELD(Dev, LimitChecksStatus,
+				VL_CHECKENABLE_SIGNAL_RATE_FINAL_RANGE,
+				Temp8);
+
+		if ((SignalRefClipLimitCheckEnable == 0) ||
+					(SignalRefClipflag == 1))
+			Temp8 = 1;
+		else
+			Temp8 = 0;
+
+		VL_SETARRAYPARAMETERFIELD(Dev, LimitChecksStatus,
+				VL_CHECKENABLE_SIGNAL_REF_CLIP, Temp8);
+
+		if ((RangeIgnoreThresholdLimitCheckEnable == 0) ||
+				(RangeIgnoreThresholdflag == 1))
+			Temp8 = 1;
+		else
+			Temp8 = 0;
+
+		VL_SETARRAYPARAMETERFIELD(Dev, LimitChecksStatus,
+				VL_CHECKENABLE_RANGE_IGNORE_THRESHOLD,
+				Temp8);
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+
+}
diff --git a/drivers/input/misc/vl53l0x/src/vl53l0x_api_ranging.c b/drivers/input/misc/vl53l0x/src/vl53l0x_api_ranging.c
new file mode 100644
index 0000000..a1f4683
--- /dev/null
+++ b/drivers/input/misc/vl53l0x/src/vl53l0x_api_ranging.c
@@ -0,0 +1,32 @@
+/*
+ *  vl53l0x_api_ranging.c - Linux kernel modules for
+ *  STM VL53L0 FlightSense TOF sensor
+ *
+ *  Copyright (C) 2016 STMicroelectronics Imaging Division.
+ *  Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#include "vl53l0x_api.h"
+#include "vl53l0x_api_core.h"
+
+
+#ifndef __KERNEL__
+#include <stdlib.h>
+#endif
+#define LOG_FUNCTION_START(fmt, ...) \
+	_LOG_FUNCTION_START(TRACE_MODULE_API, fmt, ##__VA_ARGS__)
+#define LOG_FUNCTION_END(status, ...) \
+	_LOG_FUNCTION_END(TRACE_MODULE_API, status, ##__VA_ARGS__)
+#define LOG_FUNCTION_END_FMT(status, fmt, ...) \
+	_LOG_FUNCTION_END_FMT(TRACE_MODULE_API, status, fmt, ##__VA_ARGS__)
+
diff --git a/drivers/input/misc/vl53l0x/src/vl53l0x_api_strings.c b/drivers/input/misc/vl53l0x/src/vl53l0x_api_strings.c
new file mode 100644
index 0000000..71fec10
--- /dev/null
+++ b/drivers/input/misc/vl53l0x/src/vl53l0x_api_strings.c
@@ -0,0 +1,455 @@
+/*
+ *  vl53l0x_api_strings.c - Linux kernel modules for
+ *  STM VL53L0 FlightSense TOF sensor
+ *
+ *  Copyright (C) 2016 STMicroelectronics Imaging Division.
+ *  Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#include "vl53l0x_api.h"
+#include "vl53l0x_api_core.h"
+#include "vl53l0x_api_strings.h"
+
+#ifndef __KERNEL__
+#include <stdlib.h>
+#endif
+
+#define LOG_FUNCTION_START(fmt, ...) \
+	_LOG_FUNCTION_START(TRACE_MODULE_API, fmt, ##__VA_ARGS__)
+#define LOG_FUNCTION_END(status, ...) \
+	_LOG_FUNCTION_END(TRACE_MODULE_API, status, ##__VA_ARGS__)
+#define LOG_FUNCTION_END_FMT(status, fmt, ...) \
+	_LOG_FUNCTION_END_FMT(TRACE_MODULE_API, status, fmt, ##__VA_ARGS__)
+
+
+int8_t VL_check_part_used(struct vl_data *Dev,
+		uint8_t *Revision,
+		struct VL_DeviceInfo_t *pVL_DeviceInfo)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t ModuleIdInt;
+	char *ProductId_tmp;
+
+	LOG_FUNCTION_START("");
+
+	Status = VL_get_info_from_device(Dev, 2);
+
+	if (Status == VL_ERROR_NONE) {
+		ModuleIdInt = VL_GETDEVICESPECIFICPARAMETER(Dev, ModuleId);
+
+	if (ModuleIdInt == 0) {
+		*Revision = 0;
+		VL_COPYSTRING(pVL_DeviceInfo->ProductId, "");
+	} else {
+		*Revision = VL_GETDEVICESPECIFICPARAMETER(Dev, Revision);
+		ProductId_tmp = VL_GETDEVICESPECIFICPARAMETER(Dev,
+			ProductId);
+		VL_COPYSTRING(pVL_DeviceInfo->ProductId,
+			ProductId_tmp);
+	}
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+
+int8_t VL_get_device_info(struct vl_data *Dev,
+				struct VL_DeviceInfo_t *pVL_DeviceInfo)
+{
+	int8_t Status = VL_ERROR_NONE;
+	uint8_t revision_id;
+	uint8_t Revision;
+
+	Status = VL_check_part_used(Dev, &Revision, pVL_DeviceInfo);
+
+	if (Status == VL_ERROR_NONE) {
+		if (Revision == 0) {
+			VL_COPYSTRING(pVL_DeviceInfo->Name,
+					VL_STRING_DEVICE_INFO_NAME_TS0);
+		} else if ((Revision <= 34) && (Revision != 32)) {
+			VL_COPYSTRING(pVL_DeviceInfo->Name,
+					VL_STRING_DEVICE_INFO_NAME_TS1);
+		} else if (Revision < 39) {
+			VL_COPYSTRING(pVL_DeviceInfo->Name,
+					VL_STRING_DEVICE_INFO_NAME_TS2);
+		} else {
+			VL_COPYSTRING(pVL_DeviceInfo->Name,
+					VL_STRING_DEVICE_INFO_NAME_ES1);
+		}
+
+		VL_COPYSTRING(pVL_DeviceInfo->Type,
+				VL_STRING_DEVICE_INFO_TYPE);
+
+	}
+
+	if (Status == VL_ERROR_NONE) {
+		Status = VL_RdByte(Dev,
+			VL_REG_IDENTIFICATION_MODEL_ID,
+			&pVL_DeviceInfo->ProductType);
+	}
+
+	if (Status == VL_ERROR_NONE) {
+		Status = VL_RdByte(Dev,
+			VL_REG_IDENTIFICATION_REVISION_ID,
+				&revision_id);
+		pVL_DeviceInfo->ProductRevisionMajor = 1;
+		pVL_DeviceInfo->ProductRevisionMinor =
+					(revision_id & 0xF0) >> 4;
+	}
+
+	return Status;
+}
+
+
+int8_t VL_get_device_error_string(uint8_t ErrorCode,
+		char *pDeviceErrorString)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	switch (ErrorCode) {
+	case VL_DEVICEERROR_NONE:
+		VL_COPYSTRING(pDeviceErrorString,
+			VL_STRING_DEVICEERROR_NONE);
+	break;
+	case VL_DEVICEERROR_VCSELCONTINUITYTESTFAILURE:
+		VL_COPYSTRING(pDeviceErrorString,
+			VL_STRING_DEVICEERROR_VCSELCONTINUITYTESTFAILURE);
+	break;
+	case VL_DEVICEERROR_VCSELWATCHDOGTESTFAILURE:
+		VL_COPYSTRING(pDeviceErrorString,
+			VL_STRING_DEVICEERROR_VCSELWATCHDOGTESTFAILURE);
+	break;
+	case VL_DEVICEERROR_NOVHVVALUEFOUND:
+		VL_COPYSTRING(pDeviceErrorString,
+			VL_STRING_DEVICEERROR_NOVHVVALUEFOUND);
+	break;
+	case VL_DEVICEERROR_MSRCNOTARGET:
+		VL_COPYSTRING(pDeviceErrorString,
+			VL_STRING_DEVICEERROR_MSRCNOTARGET);
+	break;
+	case VL_DEVICEERROR_SNRCHECK:
+		VL_COPYSTRING(pDeviceErrorString,
+			VL_STRING_DEVICEERROR_SNRCHECK);
+	break;
+	case VL_DEVICEERROR_RANGEPHASECHECK:
+		VL_COPYSTRING(pDeviceErrorString,
+			VL_STRING_DEVICEERROR_RANGEPHASECHECK);
+	break;
+	case VL_DEVICEERROR_SIGMATHRESHOLDCHECK:
+		VL_COPYSTRING(pDeviceErrorString,
+			VL_STRING_DEVICEERROR_SIGMATHRESHOLDCHECK);
+	break;
+	case VL_DEVICEERROR_TCC:
+		VL_COPYSTRING(pDeviceErrorString,
+			VL_STRING_DEVICEERROR_TCC);
+	break;
+	case VL_DEVICEERROR_PHASECONSISTENCY:
+		VL_COPYSTRING(pDeviceErrorString,
+			VL_STRING_DEVICEERROR_PHASECONSISTENCY);
+	break;
+	case VL_DEVICEERROR_MINCLIP:
+		VL_COPYSTRING(pDeviceErrorString,
+			VL_STRING_DEVICEERROR_MINCLIP);
+	break;
+	case VL_DEVICEERROR_RANGECOMPLETE:
+		VL_COPYSTRING(pDeviceErrorString,
+			VL_STRING_DEVICEERROR_RANGECOMPLETE);
+	break;
+	case VL_DEVICEERROR_ALGOUNDERFLOW:
+		VL_COPYSTRING(pDeviceErrorString,
+			VL_STRING_DEVICEERROR_ALGOUNDERFLOW);
+	break;
+	case VL_DEVICEERROR_ALGOOVERFLOW:
+		VL_COPYSTRING(pDeviceErrorString,
+			VL_STRING_DEVICEERROR_ALGOOVERFLOW);
+	break;
+	case VL_DEVICEERROR_RANGEIGNORETHRESHOLD:
+		VL_COPYSTRING(pDeviceErrorString,
+			VL_STRING_DEVICEERROR_RANGEIGNORETHRESHOLD);
+	break;
+
+	default:
+		VL_COPYSTRING(pDeviceErrorString,
+			VL_STRING_UNKNOWN_ERROR_CODE);
+
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_get_range_status_string(uint8_t RangeStatus,
+		char *pRangeStatusString)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	switch (RangeStatus) {
+	case 0:
+		VL_COPYSTRING(pRangeStatusString,
+			VL_STRING_RANGESTATUS_RANGEVALID);
+	break;
+	case 1:
+		VL_COPYSTRING(pRangeStatusString,
+			VL_STRING_RANGESTATUS_SIGMA);
+	break;
+	case 2:
+		VL_COPYSTRING(pRangeStatusString,
+			VL_STRING_RANGESTATUS_SIGNAL);
+	break;
+	case 3:
+		VL_COPYSTRING(pRangeStatusString,
+			VL_STRING_RANGESTATUS_MINRANGE);
+	break;
+	case 4:
+		VL_COPYSTRING(pRangeStatusString,
+			VL_STRING_RANGESTATUS_PHASE);
+	break;
+	case 5:
+		VL_COPYSTRING(pRangeStatusString,
+			VL_STRING_RANGESTATUS_HW);
+	break;
+
+	default:
+		VL_COPYSTRING(pRangeStatusString,
+				VL_STRING_RANGESTATUS_NONE);
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_get_pal_error_string(int8_t PalErrorCode,
+		char *pPalErrorString)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	switch (PalErrorCode) {
+	case VL_ERROR_NONE:
+		VL_COPYSTRING(pPalErrorString,
+			VL_STRING_ERROR_NONE);
+	break;
+	case VL_ERROR_CALIBRATION_WARNING:
+		VL_COPYSTRING(pPalErrorString,
+			VL_STRING_ERROR_CALIBRATION_WARNING);
+	break;
+	case VL_ERROR_MIN_CLIPPED:
+		VL_COPYSTRING(pPalErrorString,
+			VL_STRING_ERROR_MIN_CLIPPED);
+	break;
+	case VL_ERROR_UNDEFINED:
+		VL_COPYSTRING(pPalErrorString,
+			VL_STRING_ERROR_UNDEFINED);
+	break;
+	case VL_ERROR_INVALID_PARAMS:
+		VL_COPYSTRING(pPalErrorString,
+			VL_STRING_ERROR_INVALID_PARAMS);
+	break;
+	case VL_ERROR_NOT_SUPPORTED:
+		VL_COPYSTRING(pPalErrorString,
+			VL_STRING_ERROR_NOT_SUPPORTED);
+	break;
+	case VL_ERROR_INTERRUPT_NOT_CLEARED:
+		VL_COPYSTRING(pPalErrorString,
+			VL_STRING_ERROR_INTERRUPT_NOT_CLEARED);
+	break;
+	case VL_ERROR_RANGE_ERROR:
+		VL_COPYSTRING(pPalErrorString,
+			VL_STRING_ERROR_RANGE_ERROR);
+	break;
+	case VL_ERROR_TIME_OUT:
+		VL_COPYSTRING(pPalErrorString,
+			VL_STRING_ERROR_TIME_OUT);
+	break;
+	case VL_ERROR_MODE_NOT_SUPPORTED:
+		VL_COPYSTRING(pPalErrorString,
+			VL_STRING_ERROR_MODE_NOT_SUPPORTED);
+	break;
+	case VL_ERROR_BUFFER_TOO_SMALL:
+		VL_COPYSTRING(pPalErrorString,
+			VL_STRING_ERROR_BUFFER_TOO_SMALL);
+	break;
+	case VL_ERROR_GPIO_NOT_EXISTING:
+		VL_COPYSTRING(pPalErrorString,
+			VL_STRING_ERROR_GPIO_NOT_EXISTING);
+	break;
+	case VL_ERROR_GPIO_FUNCTIONALITY_NOT_SUPPORTED:
+		VL_COPYSTRING(pPalErrorString,
+			VL_STRING_ERROR_GPIO_FUNCTIONALITY_NOT_SUPPORTED);
+	break;
+	case VL_ERROR_CONTROL_INTERFACE:
+		VL_COPYSTRING(pPalErrorString,
+			VL_STRING_ERROR_CONTROL_INTERFACE);
+	break;
+	case VL_ERROR_INVALID_COMMAND:
+		VL_COPYSTRING(pPalErrorString,
+			VL_STRING_ERROR_INVALID_COMMAND);
+	break;
+	case VL_ERROR_DIVISION_BY_ZERO:
+		VL_COPYSTRING(pPalErrorString,
+			VL_STRING_ERROR_DIVISION_BY_ZERO);
+	break;
+	case VL_ERROR_REF_SPAD_INIT:
+		VL_COPYSTRING(pPalErrorString,
+			VL_STRING_ERROR_REF_SPAD_INIT);
+	break;
+	case VL_ERROR_NOT_IMPLEMENTED:
+		VL_COPYSTRING(pPalErrorString,
+			VL_STRING_ERROR_NOT_IMPLEMENTED);
+	break;
+
+	default:
+		VL_COPYSTRING(pPalErrorString,
+				VL_STRING_UNKNOWN_ERROR_CODE);
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_get_pal_state_string(uint8_t PalStateCode,
+		char *pPalStateString)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	switch (PalStateCode) {
+	case VL_STATE_POWERDOWN:
+		VL_COPYSTRING(pPalStateString,
+			VL_STRING_STATE_POWERDOWN);
+	break;
+	case VL_STATE_WAIT_STATICINIT:
+		VL_COPYSTRING(pPalStateString,
+			VL_STRING_STATE_WAIT_STATICINIT);
+	break;
+	case VL_STATE_STANDBY:
+		VL_COPYSTRING(pPalStateString,
+			VL_STRING_STATE_STANDBY);
+	break;
+	case VL_STATE_IDLE:
+		VL_COPYSTRING(pPalStateString,
+			VL_STRING_STATE_IDLE);
+	break;
+	case VL_STATE_RUNNING:
+		VL_COPYSTRING(pPalStateString,
+			VL_STRING_STATE_RUNNING);
+	break;
+	case VL_STATE_UNKNOWN:
+		VL_COPYSTRING(pPalStateString,
+			VL_STRING_STATE_UNKNOWN);
+	break;
+	case VL_STATE_ERROR:
+		VL_COPYSTRING(pPalStateString,
+			VL_STRING_STATE_ERROR);
+	break;
+
+	default:
+		VL_COPYSTRING(pPalStateString,
+			VL_STRING_STATE_UNKNOWN);
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
+
+int8_t VL_get_sequence_steps_info(
+		uint8_t SequenceStepId,
+		char *pSequenceStepsString)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	switch (SequenceStepId) {
+	case VL_SEQUENCESTEP_TCC:
+		VL_COPYSTRING(pSequenceStepsString,
+			VL_STRING_SEQUENCESTEP_TCC);
+	break;
+	case VL_SEQUENCESTEP_DSS:
+		VL_COPYSTRING(pSequenceStepsString,
+			VL_STRING_SEQUENCESTEP_DSS);
+	break;
+	case VL_SEQUENCESTEP_MSRC:
+		VL_COPYSTRING(pSequenceStepsString,
+			VL_STRING_SEQUENCESTEP_MSRC);
+	break;
+	case VL_SEQUENCESTEP_PRE_RANGE:
+		VL_COPYSTRING(pSequenceStepsString,
+			VL_STRING_SEQUENCESTEP_PRE_RANGE);
+	break;
+	case VL_SEQUENCESTEP_FINAL_RANGE:
+		VL_COPYSTRING(pSequenceStepsString,
+			VL_STRING_SEQUENCESTEP_FINAL_RANGE);
+	break;
+
+	default:
+		Status = VL_ERROR_INVALID_PARAMS;
+	}
+
+	LOG_FUNCTION_END(Status);
+
+	return Status;
+}
+
+
+int8_t VL_get_limit_check_info(struct vl_data *Dev,
+	uint16_t LimitCheckId, char *pLimitCheckString)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+
+	switch (LimitCheckId) {
+	case VL_CHECKENABLE_SIGMA_FINAL_RANGE:
+		VL_COPYSTRING(pLimitCheckString,
+			VL_STRING_CHECKENABLE_SIGMA_FINAL_RANGE);
+	break;
+	case VL_CHECKENABLE_SIGNAL_RATE_FINAL_RANGE:
+		VL_COPYSTRING(pLimitCheckString,
+			VL_STRING_CHECKENABLE_SIGNAL_RATE_FINAL_RANGE);
+	break;
+	case VL_CHECKENABLE_SIGNAL_REF_CLIP:
+		VL_COPYSTRING(pLimitCheckString,
+			VL_STRING_CHECKENABLE_SIGNAL_REF_CLIP);
+	break;
+	case VL_CHECKENABLE_RANGE_IGNORE_THRESHOLD:
+		VL_COPYSTRING(pLimitCheckString,
+			VL_STRING_CHECKENABLE_RANGE_IGNORE_THRESHOLD);
+	break;
+
+	case VL_CHECKENABLE_SIGNAL_RATE_MSRC:
+		VL_COPYSTRING(pLimitCheckString,
+			VL_STRING_CHECKENABLE_SIGNAL_RATE_MSRC);
+	break;
+
+	case VL_CHECKENABLE_SIGNAL_RATE_PRE_RANGE:
+		VL_COPYSTRING(pLimitCheckString,
+			VL_STRING_CHECKENABLE_SIGNAL_RATE_PRE_RANGE);
+	break;
+
+	default:
+		VL_COPYSTRING(pLimitCheckString,
+			VL_STRING_UNKNOWN_ERROR_CODE);
+
+	}
+
+	LOG_FUNCTION_END(Status);
+	return Status;
+}
diff --git a/drivers/input/misc/vl53l0x/src/vl53l0x_i2c_platform.c b/drivers/input/misc/vl53l0x/src/vl53l0x_i2c_platform.c
new file mode 100644
index 0000000..edc4b5c
--- /dev/null
+++ b/drivers/input/misc/vl53l0x/src/vl53l0x_i2c_platform.c
@@ -0,0 +1,384 @@
+/*
+ *  vl53l0x_i2c_platform.c - Linux kernel modules for
+ *  STM VL53L0 FlightSense TOF sensor
+ *
+ *  Copyright (C) 2016 STMicroelectronics Imaging Division.
+ *  Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+/*!
+ * \file   vl53l0x_i2c_platform.c
+ * \brief  Code function definitions for the EWOK platform layer
+ *
+ */
+
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include "stmvl53l0x-i2c.h"
+#include "stmvl53l0x-cci.h"
+
+#include "vl53l0x_platform.h"
+#include "vl53l0x_i2c_platform.h"
+#include "vl53l0x_def.h"
+
+#include "vl53l0x_platform_log.h"
+
+#ifdef VL_LOG_ENABLE
+#define trace_print(level, ...) \
+	trace_print_module_function(TRACE_MODULE_PLATFORM, level,\
+	TRACE_FUNCTION_NONE, ##__VA_ARGS__)
+#define trace_i2c(...) \
+	trace_print_module_function(TRACE_MODULE_NONE, \
+	TRACE_LEVEL_NONE, TRACE_FUNCTION_I2C, ##__VA_ARGS__)
+#endif
+
+/**
+ * @def I2C_BUFFER_CONFIG
+ *
+ * @brief Configure Device register I2C access
+ *
+ * @li 0 : one GLOBAL buffer \n
+ *   Use one global buffer of VL_MAX_I2C_XFER_SIZE bytes in data space \n
+ *   This solution is neither multi-device compliant nor multi-thread safe \n
+ *   It can be the best option for small 8/16 bit MCUs with little stack and
+ *   limited ram (STM8s, 80C51 ...)
+ *
+ * @li 1 : ON_STACK/local \n
+ *   Use a local (on stack) buffer \n
+ *   This solution is multi-thread safe when combined with the i2c resource
+ *   lock or mutex, see VL_GetI2CAccess() \n
+ *
+ * @li 2 : User defined \n
+ *    Per device, potentially dynamically allocated. Requires
+ *    VL_GetLocalBuffer() to be implemented.
+ * @ingroup Configuration
+ */
+#define I2C_BUFFER_CONFIG 1
+
+#if I2C_BUFFER_CONFIG == 0
+    /* GLOBAL config buffer */
+	uint8_t i2c_global_buffer[VL_MAX_I2C_XFER_SIZE];
+
+    #define DECL_I2C_BUFFER
+    #define VL_GetLocalBuffer(Dev, n_byte)  i2c_global_buffer
+
+#elif I2C_BUFFER_CONFIG == 1
+    /* "ON_STACK"/local option: note that in this port the buffer below */
+    /* is actually file-scope, so it is still shared between callers */
+	uint8_t LocBuffer[VL_MAX_I2C_XFER_SIZE];
+    #define VL_GetLocalBuffer(Dev, n_byte)  LocBuffer
+#elif I2C_BUFFER_CONFIG == 2
+    /* user define buffer type declare DECL_I2C_BUFFER  as access  via */
+    /* VL_GetLocalBuffer */
+    #define DECL_I2C_BUFFER
+#else
+#error "invalid I2C_BUFFER_CONFIG "
+#endif
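
The default above is option 1, so VL_GetLocalBuffer() simply hands back the shared LocBuffer. If a platform instead selected I2C_BUFFER_CONFIG == 2 it would have to supply its own buffer hook; the sketch below shows one possible shape, with hypothetical names (vl_i2c_buf, vl_i2c_buf_lock, vl_get_i2c_buffer) and assuming <linux/mutex.h> is available:

/* Hedged sketch only: one shared buffer serialized by a mutex rather
 * than per-device storage. Not part of this driver as merged.
 */
static DEFINE_MUTEX(vl_i2c_buf_lock);
static uint8_t vl_i2c_buf[VL_MAX_I2C_XFER_SIZE];

static uint8_t *vl_get_i2c_buffer(struct vl_data *Dev, int32_t n_byte)
{
	/* caller is expected to hold vl_i2c_buf_lock for the whole transfer */
	return vl_i2c_buf;
}

/* and then: #define VL_GetLocalBuffer(Dev, n_byte) vl_get_i2c_buffer(Dev, n_byte) */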
+
+
+#define VL_I2C_USER_VAR         /* none but could be for a flag var to */
+	/* get/pass to mutex interruptible  return flags and try again */
+#define VL_GetI2CAccess(Dev)    /* todo mutex acquire */
+#define VL_DoneI2CAcces(Dev)    /* todo mutex release */
+
+
+char  debug_string[VL_MAX_STRING_LENGTH_PLT];
+
+
+#define MIN_COMMS_VERSION_MAJOR     1
+#define MIN_COMMS_VERSION_MINOR     8
+#define MIN_COMMS_VERSION_BUILD     1
+#define MIN_COMMS_VERSION_REVISION  0
+
+#define STATUS_OK              0x00
+#define STATUS_FAIL            0x01
+
+bool _check_min_version(void)
+{
+	bool min_version_comms_dll = false;
+
+	min_version_comms_dll = true;
+
+	return min_version_comms_dll;
+}
+
+int32_t VL_comms_initialise(uint8_t comms_type, uint16_t comms_speed_khz)
+{
+	int32_t status   = STATUS_OK;
+
+	return status;
+}
+
+int32_t VL_comms_close(void)
+{
+	int32_t status = STATUS_OK;
+
+
+	return status;
+}
+
+int32_t VL_set_page(struct vl_data *dev, uint8_t page_data)
+{
+	int32_t status = STATUS_OK;
+	uint16_t page_index = 0xFF;
+	uint8_t *buffer;
+
+	buffer =  VL_GetLocalBuffer(dev, 3);
+	buffer[0] = page_index >> 8;
+	buffer[1] = page_index & 0xff;
+	buffer[2] = page_data;
+
+	status = VL_I2CWrite(dev, buffer, (uint8_t) 3);
+	return status;
+}
+
+int32_t VL_write_multi(struct vl_data *dev, uint8_t index, uint8_t *pdata,
+			int32_t count)
+{
+	int32_t status = STATUS_OK;
+	uint8_t *buffer;
+
+#ifdef VL_LOG_ENABLE
+	int32_t i = 0;
+	char value_as_str[VL_MAX_STRING_LENGTH_PLT];
+	char *pvalue_as_str;
+
+	pvalue_as_str =  value_as_str;
+
+	for (i = 0 ; i < count ; i++) {
+		snprintf(pvalue_as_str,
+			sizeof(value_as_str) - (pvalue_as_str - value_as_str),
+			"%02X", *(pdata + i));
+
+		pvalue_as_str += 2;
+	}
+	trace_i2c("Write reg : 0x%04X, Val : 0x%s\n", index, value_as_str);
+#endif
+	if ((count + 1) > VL_MAX_I2C_XFER_SIZE)
+		return STATUS_FAIL;
+	buffer =  VL_GetLocalBuffer(dev, (count+1));
+	buffer[0] = index;
+	memcpy(&buffer[1], pdata, count);
+	status = VL_I2CWrite(dev, buffer, (count+1));
+
+	return status;
+}
+
+int32_t VL_read_multi(struct vl_data *dev, uint8_t index, uint8_t *pdata,
+			int32_t count)
+{
+	int32_t status = STATUS_OK;
+	uint8_t *buffer;
+
+#ifdef VL_LOG_ENABLE
+	int32_t      i = 0;
+	char   value_as_str[VL_MAX_STRING_LENGTH_PLT];
+	char *pvalue_as_str;
+#endif
+
+	if ((count + 1) > VL_MAX_I2C_XFER_SIZE)
+		return STATUS_FAIL;
+
+	buffer =  VL_GetLocalBuffer(dev, 1);
+	buffer[0] = index;
+	status = VL_I2CWrite(dev, (uint8_t *)buffer, (uint8_t)1);
+	if (!status) {
+		pdata[0] = index;
+		status = VL_I2CRead(dev, pdata, count);
+	}
+
+#ifdef VL_LOG_ENABLE
+	pvalue_as_str =  value_as_str;
+
+	for (i = 0 ; i < count ; i++) {
+		snprintf(pvalue_as_str,
+			sizeof(value_as_str) - (pvalue_as_str - value_as_str),
+			"%02X", *(pdata + i));
+		pvalue_as_str += 2;
+	}
+
+	trace_i2c("Read  reg : 0x%04X, Val : 0x%s\n", index, value_as_str);
+#endif
+
+	return status;
+}
+
+
+int32_t VL_write_byte(struct vl_data *dev, uint8_t index, uint8_t data)
+{
+	int32_t status = STATUS_OK;
+	const int32_t cbyte_count = 1;
+
+	status = VL_write_multi(dev, index, &data, cbyte_count);
+
+	return status;
+
+}
+
+
+int32_t VL_write_word(struct vl_data *dev, uint8_t index, uint16_t data)
+{
+	int32_t status = STATUS_OK;
+
+	uint8_t  buffer[BYTES_PER_WORD];
+
+	/* Split 16-bit word into MS and LS uint8_t */
+	buffer[0] = (uint8_t)(data >> 8);
+	buffer[1] = (uint8_t)(data &  0x00FF);
+
+	status = VL_write_multi(dev, index, buffer, BYTES_PER_WORD);
+
+	return status;
+
+}
+
+
+int32_t VL_write_dword(struct vl_data *dev, uint8_t index, uint32_t data)
+{
+	int32_t status = STATUS_OK;
+	uint8_t  buffer[BYTES_PER_DWORD];
+
+	/* Split 32-bit word into MS ... LS bytes */
+	buffer[0] = (uint8_t) (data >> 24);
+	buffer[1] = (uint8_t)((data &  0x00FF0000) >> 16);
+	buffer[2] = (uint8_t)((data &  0x0000FF00) >> 8);
+	buffer[3] = (uint8_t) (data &  0x000000FF);
+
+	status = VL_write_multi(dev, index, buffer, BYTES_PER_DWORD);
+
+	return status;
+
+}
+
+
+int32_t VL_read_byte(struct vl_data *dev, uint8_t index, uint8_t *pdata)
+{
+	int32_t status = STATUS_OK;
+	int32_t cbyte_count = 1;
+
+	status = VL_read_multi(dev, index, pdata, cbyte_count);
+
+	return status;
+
+}
+
+
+int32_t VL_read_word(struct vl_data *dev, uint8_t index, uint16_t *pdata)
+{
+	int32_t  status = STATUS_OK;
+	uint8_t  buffer[BYTES_PER_WORD];
+
+	status = VL_read_multi(dev, index, buffer, BYTES_PER_WORD);
+	*pdata = ((uint16_t)buffer[0]<<8) + (uint16_t)buffer[1];
+
+	return status;
+
+}
+
+int32_t VL_read_dword(struct vl_data *dev, uint8_t index, uint32_t *pdata)
+{
+	int32_t status = STATUS_OK;
+	uint8_t  buffer[BYTES_PER_DWORD];
+
+	status = VL_read_multi(dev, index, buffer, BYTES_PER_DWORD);
+	*pdata = ((uint32_t)buffer[0]<<24) + ((uint32_t)buffer[1]<<16) +
+			((uint32_t)buffer[2]<<8) + (uint32_t)buffer[3];
+
+	return status;
+
+}
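
The word/dword helpers above pack registers big-endian (MS byte first) and unpack them the same way. A minimal round-trip sketch, where register index 0x51 and the test value are purely illustrative:

/* Illustrative round trip: 0xA1B2 is written as {0xA1, 0xB2} and
 * reassembled the same way on read. Register 0x51 here is arbitrary.
 */
static int32_t example_word_roundtrip(struct vl_data *dev)
{
	uint16_t readback = 0;
	int32_t status;

	status = VL_write_word(dev, 0x51, 0xA1B2);
	if (status == STATUS_OK)
		status = VL_read_word(dev, 0x51, &readback);

	return status;
}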
+
+int32_t VL_platform_wait_us(int32_t wait_us)
+{
+	int32_t status = STATUS_OK;
+
+	msleep((wait_us/1000));
+
+#ifdef VL_LOG_ENABLE
+	trace_i2c("Wait us : %6d\n", wait_us);
+#endif
+
+	return status;
+
+}
+
+
+int32_t VL_wait_ms(int32_t wait_ms)
+{
+	int32_t status = STATUS_OK;
+
+	msleep(wait_ms);
+
+#ifdef VL_LOG_ENABLE
+	trace_i2c("Wait ms : %6d\n", wait_ms);
+#endif
+
+	return status;
+
+}
+
+
+int32_t VL_set_gpio(uint8_t level)
+{
+	int32_t status = STATUS_OK;
+#ifdef VL_LOG_ENABLE
+	trace_i2c("// Set GPIO = %d;\n", level);
+#endif
+
+	return status;
+
+}
+
+
+int32_t VL_get_gpio(uint8_t *plevel)
+{
+	int32_t status = STATUS_OK;
+#ifdef VL_LOG_ENABLE
+	trace_i2c("// Get GPIO = %d;\n", *plevel);
+#endif
+	return status;
+}
+
+
+int32_t VL_release_gpio(void)
+{
+	int32_t status = STATUS_OK;
+#ifdef VL_LOG_ENABLE
+	trace_i2c("// Releasing force on GPIO\n");
+#endif
+	return status;
+
+}
+
+int32_t VL_cycle_power(void)
+{
+	int32_t status = STATUS_OK;
+#ifdef VL_LOG_ENABLE
+	trace_i2c("// cycle sensor power\n");
+#endif
+
+	return status;
+}
+
+
+int32_t VL_get_timer_frequency(int32_t *ptimer_freq_hz)
+{
+	*ptimer_freq_hz = 0;
+	return STATUS_FAIL;
+}
+
+
+int32_t VL_get_timer_value(int32_t *ptimer_count)
+{
+	*ptimer_count = 0;
+	return STATUS_FAIL;
+}
diff --git a/drivers/input/misc/vl53l0x/src/vl53l0x_platform.c b/drivers/input/misc/vl53l0x/src/vl53l0x_platform.c
new file mode 100644
index 0000000..79df3db
--- /dev/null
+++ b/drivers/input/misc/vl53l0x/src/vl53l0x_platform.c
@@ -0,0 +1,232 @@
+/*
+ *  vl53l0x_platform.c - Linux kernel modules for
+ *  STM VL53L0 FlightSense TOF sensor
+ *
+ *  Copyright (C) 2016 STMicroelectronics Imaging Division.
+ *  Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+/**
+ * @file vl53l0x_platform.c
+ *
+ * Copyright (C) 2014 ST MicroElectronics
+ *
+ * Provides variable word size byte/word/dword VL53L0x register access via i2c
+ *
+ */
+#include "vl53l0x_platform.h"
+#include "vl53l0x_i2c_platform.h"
+#include "vl53l0x_api.h"
+
+#define LOG_FUNCTION_START(fmt, ...) \
+		_LOG_FUNCTION_START(TRACE_MODULE_PLATFORM, fmt, ##__VA_ARGS__)
+#define LOG_FUNCTION_END(status, ...) \
+		_LOG_FUNCTION_END(TRACE_MODULE_PLATFORM, status, ##__VA_ARGS__)
+#define LOG_FUNCTION_END_FMT(status, fmt, ...)\
+		_LOG_FUNCTION_END_FMT(TRACE_MODULE_PLATFORM, status,\
+		fmt, ##__VA_ARGS__)
+
+
+
+int8_t VL_LockSequenceAccess(struct vl_data *Dev)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	return Status;
+}
+
+int8_t VL_UnlockSequenceAccess(struct vl_data *Dev)
+{
+	int8_t Status = VL_ERROR_NONE;
+
+	return Status;
+}
+
+/* the ranging_sensor_comms.dll will take care of the page selection */
+int8_t VL_WriteMulti(struct vl_data *Dev, uint8_t index,
+				uint8_t *pdata, uint32_t count)
+{
+
+	int8_t Status = VL_ERROR_NONE;
+	int32_t status_int = 0;
+	uint8_t deviceAddress;
+
+	if (count >= VL_MAX_I2C_XFER_SIZE)
+		return VL_ERROR_INVALID_PARAMS;
+
+	deviceAddress = Dev->I2cDevAddr;
+
+	status_int = VL_write_multi(Dev, index, pdata, count);
+
+	if (status_int != 0)
+		Status = VL_ERROR_CONTROL_INTERFACE;
+
+	return Status;
+}
+
+/* the ranging_sensor_comms.dll will take care of the page selection */
+int8_t VL_ReadMulti(struct vl_data *Dev, uint8_t index,
+				uint8_t *pdata, uint32_t count)
+{
+	int8_t Status = VL_ERROR_NONE;
+	int32_t status_int;
+	uint8_t deviceAddress;
+
+	if (count >= VL_MAX_I2C_XFER_SIZE)
+		return VL_ERROR_INVALID_PARAMS;
+
+	deviceAddress = Dev->I2cDevAddr;
+
+	status_int = VL_read_multi(Dev, index, pdata, count);
+
+	if (status_int != 0)
+		Status = VL_ERROR_CONTROL_INTERFACE;
+
+	return Status;
+}
+
+
+int8_t VL_WrByte(struct vl_data *Dev, uint8_t index, uint8_t data)
+{
+	int8_t Status = VL_ERROR_NONE;
+	int32_t status_int;
+	uint8_t deviceAddress;
+
+	deviceAddress = Dev->I2cDevAddr;
+
+	status_int = VL_write_byte(Dev, index, data);
+
+	if (status_int != 0)
+		Status = VL_ERROR_CONTROL_INTERFACE;
+
+	return Status;
+}
+
+int8_t VL_WrWord(struct vl_data *Dev, uint8_t index, uint16_t data)
+{
+	int8_t Status = VL_ERROR_NONE;
+	int32_t status_int;
+	uint8_t deviceAddress;
+
+	deviceAddress = Dev->I2cDevAddr;
+
+	status_int = VL_write_word(Dev, index, data);
+
+	if (status_int != 0)
+		Status = VL_ERROR_CONTROL_INTERFACE;
+
+	return Status;
+}
+
+int8_t VL_WrDWord(struct vl_data *Dev, uint8_t index, uint32_t data)
+{
+	int8_t Status = VL_ERROR_NONE;
+	int32_t status_int;
+	uint8_t deviceAddress;
+
+	deviceAddress = Dev->I2cDevAddr;
+
+	status_int = VL_write_dword(Dev, index, data);
+
+	if (status_int != 0)
+		Status = VL_ERROR_CONTROL_INTERFACE;
+
+	return Status;
+}
+
+int8_t VL_UpdateByte(struct vl_data *Dev, uint8_t index,
+				uint8_t AndData, uint8_t OrData)
+{
+	int8_t Status = VL_ERROR_NONE;
+	int32_t status_int;
+	uint8_t deviceAddress;
+	uint8_t data;
+
+	deviceAddress = Dev->I2cDevAddr;
+
+	status_int = VL_read_byte(Dev, index, &data);
+
+	if (status_int != 0)
+		Status = VL_ERROR_CONTROL_INTERFACE;
+
+	if (Status == VL_ERROR_NONE) {
+		data = (data & AndData) | OrData;
+		status_int = VL_write_byte(Dev, index, data);
+
+		if (status_int != 0)
+			Status = VL_ERROR_CONTROL_INTERFACE;
+	}
+
+	return Status;
+}
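
VL_UpdateByte() is a read-modify-write helper: the current register value is ANDed with AndData, ORed with OrData, and written back. Two hedged one-liners built on it, with a caller-chosen register index:

/* Sketch only: set or clear bit 0 of a register without disturbing
 * the remaining bits.
 */
static int8_t example_set_bit0(struct vl_data *Dev, uint8_t index)
{
	/* new = (old & 0xFF) | 0x01 */
	return VL_UpdateByte(Dev, index, 0xFF, 0x01);
}

static int8_t example_clear_bit0(struct vl_data *Dev, uint8_t index)
{
	/* new = (old & 0xFE) | 0x00 */
	return VL_UpdateByte(Dev, index, 0xFE, 0x00);
}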
+
+int8_t VL_RdByte(struct vl_data *Dev, uint8_t index, uint8_t *data)
+{
+	int8_t Status = VL_ERROR_NONE;
+	int32_t status_int;
+	uint8_t deviceAddress;
+
+	deviceAddress = Dev->I2cDevAddr;
+
+	status_int = VL_read_byte(Dev, index, data);
+
+	if (status_int != 0)
+		Status = VL_ERROR_CONTROL_INTERFACE;
+
+	return Status;
+}
+
+int8_t VL_RdWord(struct vl_data *Dev, uint8_t index, uint16_t *data)
+{
+	int8_t Status = VL_ERROR_NONE;
+	int32_t status_int;
+	uint8_t deviceAddress;
+
+	deviceAddress = Dev->I2cDevAddr;
+
+	status_int = VL_read_word(Dev, index, data);
+
+	if (status_int != 0)
+		Status = VL_ERROR_CONTROL_INTERFACE;
+
+	return Status;
+}
+
+int8_t  VL_RdDWord(struct vl_data *Dev, uint8_t index, uint32_t *data)
+{
+	int8_t Status = VL_ERROR_NONE;
+	int32_t status_int;
+	uint8_t deviceAddress;
+
+	deviceAddress = Dev->I2cDevAddr;
+
+	status_int = VL_read_dword(Dev, index, data);
+
+	if (status_int != 0)
+		Status = VL_ERROR_CONTROL_INTERFACE;
+
+	return Status;
+}
+
+#define VL_POLLINGDELAY_LOOPNB  250
+int8_t VL_PollingDelay(struct vl_data *Dev)
+{
+	int8_t status = VL_ERROR_NONE;
+
+	LOG_FUNCTION_START("");
+	usleep_range(1000, 1001);
+	LOG_FUNCTION_END(status);
+	return status;
+}
diff --git a/drivers/input/misc/vl53l0x/src/vl53l0x_port_i2c.c b/drivers/input/misc/vl53l0x/src/vl53l0x_port_i2c.c
new file mode 100644
index 0000000..9e14f79
--- /dev/null
+++ b/drivers/input/misc/vl53l0x/src/vl53l0x_port_i2c.c
@@ -0,0 +1,168 @@
+/*
+ *  vl53l0x_port_i2c.c - Linux kernel modules for
+ *  STM VL53L0 FlightSense TOF sensor
+ *
+ *  Copyright (C) 2016 STMicroelectronics Imaging Division.
+ *  Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include "stmvl53l0x-i2c.h"
+#include "stmvl53l0x-cci.h"
+#include "vl53l0x_platform.h"
+#include "vl53l0x_i2c_platform.h"
+#include "stmvl53l0x.h"
+
+#define I2C_M_WR			0x00
+#define STATUS_OK			0x00
+#define STATUS_FAIL			(-1)
+/** int VL_I2CWrite(struct vl_data *dev, uint8_t *buff, uint8_t len);
+ * @brief       Write data buffer to VL53L0 device via i2c
+ * @param dev   The device to write to
+ * @param buff  The data buffer
+ * @param len   The length of the transaction in bytes
+ * @return      0 on success
+ */
+int VL_I2CWrite(struct vl_data *dev, uint8_t *buff, uint8_t len)
+{
+
+
+	int err = 0;
+
+	if (dev->bus_type == CCI_BUS) {
+#ifdef CAMERA_CCI
+		uint16_t index;
+		struct cci_data *cci_client_obj =
+			(struct cci_data *)dev->client_object;
+		struct msm_camera_i2c_client *client = cci_client_obj->client;
+
+		index = buff[0];
+		/*dbg("%s: index: %d len: %d\n", __func__, index, len); */
+
+		if (len == 2) {
+			uint8_t data;
+
+			data = buff[1];
+			/* for byte access */
+			err = client->i2c_func_tbl->i2c_write(client, index,
+					data, MSM_CAMERA_I2C_BYTE_DATA);
+			if (err < 0) {
+				dbg("%s:%d failed status=%d\n",
+					__func__, __LINE__, err);
+				return err;
+			}
+		} else if (len == 3) {
+			uint16_t data;
+
+			data = ((uint16_t)buff[1] << 8) | (uint16_t)buff[2];
+			err = client->i2c_func_tbl->i2c_write(client, index,
+					data, MSM_CAMERA_I2C_WORD_DATA);
+			if (err < 0) {
+				dbg("%s:%d failed status=%d\n",
+					__func__, __LINE__, err);
+				return err;
+			}
+		} else if (len >= 5) {
+			err = client->i2c_func_tbl->i2c_write_seq(client,
+					index, &buff[1], (len-1));
+			if (err < 0) {
+				dbg("%s:%d failed status=%d\n",
+					__func__, __LINE__, err);
+				return err;
+			}
+
+		}
+#endif
+	} else {
+#ifndef CAMERA_CCI
+		struct i2c_msg msg[1];
+		struct i2c_data *i2c_client_obj =
+					(struct i2c_data *)dev->client_object;
+		struct i2c_client *client =
+				(struct i2c_client *)i2c_client_obj->client;
+
+		msg[0].addr = client->addr;
+		msg[0].flags = I2C_M_WR;
+		msg[0].buf = buff;
+		msg[0].len = len;
+
+		err = i2c_transfer(client->adapter, msg, 1);
+		/* i2c_transfer returns the number of messages transferred */
+		if (err != 1) {
+			dbg("%s: i2c_transfer err:%d, addr:0x%x, reg:0x%x\n",
+				__func__, err, client->addr,
+				(buff[0] << 8 | buff[1]));
+			return STATUS_FAIL;
+		}
+#endif
+	}
+
+	return 0;
+}
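
VL_I2CWrite() expects buff[0] to carry the register index and the remaining bytes the payload; on the CCI path, 2- and 3-byte buffers map to byte and word writes respectively. A hedged sketch of a raw one-byte register write built on that convention (in practice callers go through VL_write_byte()/VL_write_multi() instead):

/* Illustrative: write a single data byte to a register through the
 * raw buffer convention used by VL_I2CWrite().
 */
static int example_raw_byte_write(struct vl_data *dev,
				  uint8_t index, uint8_t data)
{
	uint8_t buff[2];

	buff[0] = index;	/* register index */
	buff[1] = data;		/* payload */

	return VL_I2CWrite(dev, buff, (uint8_t)sizeof(buff));
}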
+
+
+/** int VL_I2CRead(struct vl_data *dev, uint8_t *buff, uint8_t len);
+ * @brief       Read data buffer from VL53L0 device via i2c
+ * @param dev   The device to read from
+ * @param buff  The data buffer to fill
+ * @param len   The length of the transaction in bytes
+ * @return      transaction status
+ */
+int VL_I2CRead(struct vl_data *dev, uint8_t *buff, uint8_t len)
+{
+
+	int err = 0;
+
+	if (dev->bus_type == CCI_BUS) {
+#ifdef CAMERA_CCI
+		uint16_t index;
+		struct cci_data *cci_client_obj =
+				(struct cci_data *)dev->client_object;
+		struct msm_camera_i2c_client *client = cci_client_obj->client;
+
+		index = buff[0];
+		/* dbg("%s: index: %d\n", __func__, index); */
+		err = client->i2c_func_tbl->i2c_read_seq(client,
+							index, buff, len);
+		if (err < 0) {
+			dbg("%s:%d failed status=%d\n",
+					__func__, __LINE__, err);
+			return err;
+		}
+#endif
+	} else {
+#ifndef CAMERA_CCI
+		struct i2c_msg msg[1];
+		struct i2c_data *i2c_client_obj =
+					(struct i2c_data *)dev->client_object;
+		struct i2c_client *client =
+				(struct i2c_client *) i2c_client_obj->client;
+
+		msg[0].addr = client->addr;
+		msg[0].flags = I2C_M_RD|client->flags;
+		msg[0].buf = buff;
+		msg[0].len = len;
+
+		err = i2c_transfer(client->adapter, &msg[0], 1);
+		/* i2c_transfer returns the number of messages transferred */
+		if (err != 1) {
+			dbg("%s: Read i2c_transfer err:%d, addr:0x%x\n",
+				__func__, err, client->addr);
+			return STATUS_FAIL;
+		}
+#endif
+	}
+
+	return 0;
+}
diff --git a/drivers/input/misc/vl53l0x/stmvl53l0x-cci.h b/drivers/input/misc/vl53l0x/stmvl53l0x-cci.h
new file mode 100644
index 0000000..5476ef9
--- /dev/null
+++ b/drivers/input/misc/vl53l0x/stmvl53l0x-cci.h
@@ -0,0 +1,57 @@
+/*
+ *  stmvl53l0x-cci.h - Linux kernel modules for
+ *  STM VL53L0 FlightSense TOF sensor
+ *
+ *  Copyright (C) 2016 STMicroelectronics Imaging Division.
+ *  Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#ifndef STMVL_CCI_H
+#define STMVL_CCI_H
+#include <linux/types.h>
+
+#ifdef CAMERA_CCI
+#include <soc/qcom/camera2.h>
+#include "msm_camera_i2c.h"
+#include "msm_camera_dt_util.h"
+#include "msm_camera_io_util.h"
+#include "msm_cci.h"
+
+#define	MSM_TOF_MAX_VREGS (10)
+
+struct msm_tof_vreg {
+	struct camera_vreg_t *cam_vreg;
+	void *data[MSM_TOF_MAX_VREGS];
+	int num_vreg;
+};
+
+struct cci_data {
+	struct msm_camera_i2c_client g_client;
+	struct msm_camera_i2c_client *client;
+	struct platform_device *pdev;
+	enum msm_camera_device_type_t device_type;
+	enum cci_i2c_master_t cci_master;
+	struct msm_tof_vreg vreg_cfg;
+	struct msm_sd_subdev msm_sd;
+	struct v4l2_subdev sdev;
+	struct v4l2_subdev_ops *subdev_ops;
+	char subdev_initialized;
+	uint32_t subdev_id;
+	uint8_t power_up;
+};
+int stmvl53l0x_init_cci(void);
+void stmvl53l0x_exit_cci(void *cci_object);
+int stmvl53l0x_power_down_cci(void *cci_object);
+int stmvl53l0x_power_up_cci(void *cci_object, unsigned int *preset_flag);
+#endif /* CAMERA_CCI */
+#endif /* STMVL_CCI_H */
diff --git a/drivers/input/misc/vl53l0x/stmvl53l0x-i2c.h b/drivers/input/misc/vl53l0x/stmvl53l0x-i2c.h
new file mode 100644
index 0000000..68f7d37
--- /dev/null
+++ b/drivers/input/misc/vl53l0x/stmvl53l0x-i2c.h
@@ -0,0 +1,35 @@
+/*
+ *  stmvl53l0x-i2c.h - Linux kernel modules for
+ *  STM VL53L0 FlightSense TOF sensor
+ *
+ *  Copyright (C) 2016 STMicroelectronics Imaging Division.
+ *  Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#ifndef STMVL_I2C_H
+#define STMVL_I2C_H
+#include <linux/types.h>
+
+#ifndef CAMERA_CCI
+struct i2c_data {
+	struct i2c_client *client;
+	struct regulator *vana;
+	uint8_t power_up;
+};
+int stmvl53l0x_init_i2c(void);
+void stmvl53l0x_exit_i2c(void *i2c_object);
+int stmvl53l0x_power_up_i2c(void *i2c_object, unsigned int *preset_flag);
+int stmvl53l0x_power_down_i2c(void *i2c_object);
+
+#endif /* NOT CAMERA_CCI */
+#endif /* STMVL_I2C_H */
diff --git a/drivers/input/misc/vl53l0x/stmvl53l0x.h b/drivers/input/misc/vl53l0x/stmvl53l0x.h
new file mode 100644
index 0000000..1be251f
--- /dev/null
+++ b/drivers/input/misc/vl53l0x/stmvl53l0x.h
@@ -0,0 +1,191 @@
+/*
+ *  stmvl53l0x.h - Linux kernel modules for
+ *  STM VL53L0 FlightSense TOF sensor
+ *
+ *  Copyright (C) 2016 STMicroelectronics Imaging Division.
+ *  Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#ifndef STMVL_H
+#define STMVL_H
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/miscdevice.h>
+#include <linux/wait.h>
+
+
+#define STMVL_DRV_NAME	"stmvl53l0"
+#define STMVL_SLAVE_ADDR	(0x52>>1)
+
+#define DRIVER_VERSION		"1.0.5"
+#define I2C_M_WR			0x00
+/* #define INT_POLLING_DELAY	20 */
+
+/* To disable dbg output, comment out the DEBUG macro below */
+#define DEBUG
+#ifdef DEBUG
+#define dbg(fmt, ...)	\
+	printk(fmt, ##__VA_ARGS__)
+#else
+#define dbg(fmt, ...)
+#endif
+
+#define err(fmt, ...) \
+	printk(fmt, ##__VA_ARGS__)
+
+#define VL_VDD_MIN      2600000
+#define VL_VDD_MAX      3000000
+
+enum init_mode_e {
+	NORMAL_MODE = 0,
+	OFFSETCALIB_MODE = 1,
+	XTALKCALIB_MODE = 2,
+	SPADCALIB_MODE = 3,
+	REFCALIB_MODE = 4,
+};
+
+enum parameter_name_e {
+	OFFSET_PAR = 0,
+	XTALKRATE_PAR = 1,
+	XTALKENABLE_PAR = 2,
+	GPIOFUNC_PAR = 3,
+	LOWTHRESH_PAR = 4,
+	HIGHTHRESH_PAR = 5,
+	DEVICEMODE_PAR = 6,
+	INTERMEASUREMENT_PAR = 7,
+	REFERENCESPADS_PAR = 8,
+	REFCALIBRATION_PAR = 9,
+};
+
+enum {
+	CCI_BUS = 0,
+	I2C_BUS = 1,
+};
+
+/*
+ *  IOCTL register data structs
+ */
+struct stmvl53l0x_register {
+	uint32_t is_read; /*1: read 0: write*/
+	uint32_t reg_index;
+	uint32_t reg_bytes;
+	uint32_t reg_data;
+	int32_t status;
+};
+
+/*
+ *  IOCTL parameter structs
+ */
+struct stmvl53l0x_parameter {
+	uint32_t is_read; /*1: Get 0: Set*/
+	enum parameter_name_e name;
+	int32_t value;
+	int32_t value2;
+	int32_t status;
+};
+
+/*
+ *  driver data structs
+ */
+struct vl_data {
+
+	struct VL_DevData_t Data; /* !<embed ST VL53L0 Dev data as "dev_data" */
+	uint8_t   I2cDevAddr;	/* !< i2c device address user specific field */
+	uint8_t   comms_type;	/* !< Type of comms : */
+				/* VL_COMMS_I2C or VL_COMMS_SPI */
+	uint16_t  comms_speed_khz;	/*!< Comms speed [kHz] : */
+					/*typically 400kHz for I2C */
+	uint8_t   bus_type;		/* CCI_BUS; I2C_BUS */
+
+	void *client_object; /* cci or i2c client */
+
+	struct mutex update_lock;
+	struct delayed_work	dwork;		/* for PS  work handler */
+	struct input_dev *input_dev_ps;
+	struct kobject *range_kobj;
+
+	const char *dev_name;
+	/* function pointer */
+
+	/* misc device */
+	struct miscdevice miscdev;
+
+	int irq;
+	unsigned int reset;
+
+	/* control flag from HAL */
+	unsigned int enable_ps_sensor;
+
+	/* PS parameters */
+	unsigned int ps_data;			/* to store PS data */
+
+	/* Calibration parameters */
+	unsigned int offsetCalDistance;
+	unsigned int xtalkCalDistance;
+
+
+	/* Range Data */
+	struct VL_RangingMeasurementData_t rangeData;
+
+	/* Device parameters */
+	uint8_t			deviceMode;
+	uint32_t				interMeasurems;
+	uint8_t gpio_function;
+	uint8_t gpio_polarity;
+	unsigned int low_threshold;
+	unsigned int high_threshold;
+
+	/* delay time in milliseconds */
+	uint8_t delay_ms;
+
+	/* Timing Budget */
+	uint32_t	timingBudget;
+	/* Use this threshold to force restart ranging */
+	uint32_t	noInterruptCount;
+	/* Use this flag to use long ranging*/
+	int			  useLongRange;
+
+	/* Polling thread */
+	struct task_struct *poll_thread;
+	/* Wait Queue on which the poll thread blocks */
+	wait_queue_head_t poll_thread_wq;
+
+	/* Recent interrupt status */
+	uint32_t		interruptStatus;
+
+	struct mutex work_mutex;
+
+	/* Debug */
+	unsigned int enableDebug;
+	uint8_t interrupt_received;
+	int32_t default_offset_calibration;
+	unsigned int default_xtalk_Compensation;
+};
+
+/*
+ *  function pointer structs
+ */
+struct stmvl53l0x_module_fn_t {
+	int (*init)(void);
+	void (*deinit)(void *);
+	int (*power_up)(void *, unsigned int *);
+	int (*power_down)(void *);
+};
+
+
+
+int stmvl53l0x_setup(struct vl_data *data);
+
+#endif /* STMVL_H */
diff --git a/drivers/input/misc/vl53l0x/stmvl53l0x_module-cci.c b/drivers/input/misc/vl53l0x/stmvl53l0x_module-cci.c
new file mode 100644
index 0000000..39b802f
--- /dev/null
+++ b/drivers/input/misc/vl53l0x/stmvl53l0x_module-cci.c
@@ -0,0 +1,411 @@
+/*
+ *  stmvl53l0x_module-cci.c - Linux kernel modules for
+ *  STM VL53L0 FlightSense TOF sensor
+ *
+ *  Copyright (C) 2016 STMicroelectronics Imaging Division.
+ *  Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/input.h>
+#include <linux/miscdevice.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/time.h>
+#include <linux/platform_device.h>
+/*
+ * power specific includes
+ */
+#include <linux/pwm.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/clk.h>
+#include <linux/of_gpio.h>
+/*
+ * API includes
+ */
+#include "vl53l0x_api.h"
+#include "vl53l0x_def.h"
+#include "vl53l0x_platform.h"
+#include "stmvl53l0x-cci.h"
+#include "stmvl53l0x-i2c.h"
+#include "stmvl53l0x.h"
+
+#ifdef CAMERA_CCI
+/*
+ * Global data
+ */
+static struct v4l2_file_operations msm_tof_v4l2_subdev_fops;
+static struct msm_camera_i2c_fn_t msm_sensor_cci_func_tbl = {
+	.i2c_read = msm_camera_cci_i2c_read,
+	.i2c_read_seq = msm_camera_cci_i2c_read_seq,
+	.i2c_write = msm_camera_cci_i2c_write,
+	.i2c_write_seq = msm_camera_cci_i2c_write_seq,
+	.i2c_write_table = msm_camera_cci_i2c_write_table,
+	.i2c_write_seq_table = msm_camera_cci_i2c_write_seq_table,
+	.i2c_write_table_w_microdelay =
+		msm_camera_cci_i2c_write_table_w_microdelay,
+	.i2c_util = msm_sensor_cci_i2c_util,
+	.i2c_poll =  msm_camera_cci_i2c_poll,
+};
+static int stmvl53l0x_get_dt_data(struct device *dev, struct cci_data *data);
+
+/*
+ * QCOM specific functions
+ */
+static int stmvl53l0x_get_dt_data(struct device *dev, struct cci_data *data)
+{
+	int rc = 0;
+
+	dbg("Enter\n");
+
+	if (dev->of_node) {
+		struct device_node *of_node = dev->of_node;
+		struct msm_tof_vreg *vreg_cfg;
+
+		if (!of_node) {
+			err("failed %d\n", __LINE__);
+			return -EINVAL;
+		}
+
+		rc = of_property_read_u32(of_node,
+				"cell-index", &data->pdev->id);
+		if (rc < 0) {
+			err("failed %d\n", __LINE__);
+			return rc;
+		}
+		dbg("cell-index: %d\n", data->pdev->id);
+		rc = of_property_read_u32(of_node, "qcom,cci-master",
+				&data->cci_master);
+		if (rc < 0) {
+			err("failed %d\n", __LINE__);
+			/* Set default master 0 */
+			data->cci_master = MASTER_0;
+			rc = 0;
+		}
+		dbg("cci_master: %d\n", data->cci_master);
+		if (of_find_property(of_node, "qcom,cam-vreg-name", NULL)) {
+			vreg_cfg = &data->vreg_cfg;
+			rc = msm_camera_get_dt_vreg_data(of_node,
+				&vreg_cfg->cam_vreg, &vreg_cfg->num_vreg);
+			if (rc < 0) {
+				err("failed %d\n", __LINE__);
+				return rc;
+			}
+			/* only valid when the vreg property was present */
+			dbg("vreg-name: %s min_volt: %d max_volt: %d",
+				vreg_cfg->cam_vreg->reg_name,
+				vreg_cfg->cam_vreg->min_voltage,
+				vreg_cfg->cam_vreg->max_voltage);
+		}
+	}
+	dbg("End rc =%d\n", rc);
+
+	return rc;
+}
+
+static int32_t stmvl53l0x_vreg_control(struct cci_data *data, int config)
+{
+	int rc = 0, i, cnt;
+	struct msm_tof_vreg *vreg_cfg;
+
+	dbg("Enter\n");
+
+	vreg_cfg = &data->vreg_cfg;
+	cnt = vreg_cfg->num_vreg;
+	dbg("num_vreg: %d\n", cnt);
+	if (!cnt) {
+		err("failed %d\n", __LINE__);
+		return 0;
+	}
+
+	if (cnt >= MSM_TOF_MAX_VREGS) {
+		err("failed %d cnt %d\n", __LINE__, cnt);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < cnt; i++) {
+		rc = msm_camera_config_single_vreg(&(data->pdev->dev),
+				&vreg_cfg->cam_vreg[i],
+				(struct regulator **)&vreg_cfg->data[i],
+				config);
+	}
+
+	dbg("EXIT rc =%d\n", rc);
+	return rc;
+}
+
+
+static int msm_tof_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+	return 0;
+}
+
+
+static const struct v4l2_subdev_internal_ops msm_tof_internal_ops = {
+	.close = msm_tof_close,
+};
+
+static long msm_tof_subdev_ioctl(struct v4l2_subdev *sd,
+			unsigned int cmd, void *arg)
+{
+	dbg("Subdev_ioctl not handled\n");
+	return 0;
+}
+
+static int32_t msm_tof_power(struct v4l2_subdev *sd, int on)
+{
+	dbg("TOF power called\n");
+	return 0;
+}
+
+static struct v4l2_subdev_core_ops msm_tof_subdev_core_ops = {
+	.ioctl = msm_tof_subdev_ioctl,
+	.s_power = msm_tof_power,
+};
+
+static struct v4l2_subdev_ops msm_tof_subdev_ops = {
+	.core = &msm_tof_subdev_core_ops,
+};
+
+static int stmvl53l0x_cci_init(struct cci_data *data)
+{
+	int rc = 0;
+	struct msm_camera_cci_client *cci_client = data->client->cci_client;
+
+	if (data->subdev_initialized == FALSE) {
+		data->client->i2c_func_tbl = &msm_sensor_cci_func_tbl;
+		data->client->cci_client =
+			kzalloc(sizeof(struct msm_camera_cci_client),
+			GFP_KERNEL);
+		if (!data->client->cci_client) {
+			err("%d, failed no memory\n", __LINE__);
+			return -ENOMEM;
+		}
+		cci_client = data->client->cci_client;
+		cci_client->cci_subdev = msm_cci_get_subdev();
+		cci_client->cci_i2c_master = data->cci_master;
+		v4l2_subdev_init(&data->msm_sd.sd, data->subdev_ops);
+		v4l2_set_subdevdata(&data->msm_sd.sd, data);
+		data->msm_sd.sd.internal_ops = &msm_tof_internal_ops;
+		data->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+		snprintf(data->msm_sd.sd.name, ARRAY_SIZE(data->msm_sd.sd.name),
+			"msm_tof");
+		media_entity_init(&data->msm_sd.sd.entity, 0, NULL, 0);
+		data->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+		data->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_TOF;
+		data->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x2;
+		msm_sd_register(&data->msm_sd);
+		msm_tof_v4l2_subdev_fops = v4l2_subdev_fops;
+		data->msm_sd.sd.devnode->fops = &msm_tof_v4l2_subdev_fops;
+		data->subdev_initialized = TRUE;
+	}
+
+	cci_client->sid = 0x29;
+	cci_client->retries = 3;
+	cci_client->id_map = 0;
+	cci_client->cci_i2c_master = data->cci_master;
+	rc = data->client->i2c_func_tbl->i2c_util(data->client, MSM_CCI_INIT);
+	if (rc < 0) {
+		err("%d: CCI Init failed\n", __LINE__);
+		return rc;
+	}
+	dbg("CCI Init Succeeded\n");
+
+	data->client->addr_type = MSM_CAMERA_I2C_BYTE_ADDR;
+
+	return 0;
+}
+
+static int32_t stmvl53l0x_platform_probe(struct platform_device *pdev)
+{
+	struct vl_data *vl53l0x_data = NULL;
+	struct cci_data *cci_object = NULL;
+	int32_t rc = 0;
+
+	dbg("Enter\n");
+
+	if (!pdev->dev.of_node) {
+		err("of_node NULL\n");
+		return -EINVAL;
+	}
+
+	vl53l0x_data = kzalloc(sizeof(struct vl_data), GFP_KERNEL);
+	if (!vl53l0x_data)
+		return -ENOMEM;
+
+	vl53l0x_data->client_object =
+		kzalloc(sizeof(struct cci_data), GFP_KERNEL);
+	if (!vl53l0x_data->client_object) {
+		kfree(vl53l0x_data);
+		return -ENOMEM;
+	}
+	cci_object = (struct cci_data *)vl53l0x_data->client_object;
+	cci_object->client =
+		(struct msm_camera_i2c_client *)&cci_object->g_client;
+
+	/* setup bus type */
+	vl53l0x_data->bus_type = CCI_BUS;
+
+	/* Set platform device handle */
+	cci_object->subdev_ops = &msm_tof_subdev_ops;
+	cci_object->pdev = pdev;
+	rc = stmvl53l0x_get_dt_data(&pdev->dev, cci_object);
+	if (rc < 0) {
+		err("%d, failed rc %d\n", __LINE__, rc);
+		return rc;
+	}
+	cci_object->subdev_id = pdev->id;
+
+	/* Set device type as platform device */
+	cci_object->device_type = MSM_CAMERA_PLATFORM_DEVICE;
+	cci_object->subdev_initialized = FALSE;
+
+	/* setup device name */
+	vl53l0x_data->dev_name = dev_name(&pdev->dev);
+
+	/* setup device data */
+	dev_set_drvdata(&pdev->dev, vl53l0x_data);
+
+	/* setup other stuff */
+	rc = stmvl53l0x_setup(vl53l0x_data);
+
+	/* init default value */
+	cci_object->power_up = 0;
+
+	dbg("End\n");
+
+	return rc;
+
+}
+
+static int32_t stmvl53l0x_platform_remove(struct platform_device *pdev)
+{
+	struct vl_data *vl53l0x_data = platform_get_drvdata(pdev);
+
+	platform_set_drvdata(pdev, NULL);
+
+	kfree(vl53l0x_data->client_object);
+	kfree(vl53l0x_data);
+
+	return 0;
+}
+
+static const struct of_device_id st_stmvl53l0x_dt_match[] = {
+	{ .compatible = "st,stmvl53l0", },
+	{ },
+};
+
+static struct platform_driver stmvl53l0x_platform_driver = {
+	.probe = stmvl53l0x_platform_probe,
+	.remove = stmvl53l0x_platform_remove,
+	.driver = {
+		.name = STMVL_DRV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = st_stmvl53l0x_dt_match,
+	},
+};
+
+int stmvl53l0x_power_up_cci(void *cci_object, unsigned int *preset_flag)
+{
+	int ret = 0;
+	struct cci_data *data = (struct cci_data *)cci_object;
+
+	dbg("Enter");
+
+	/* need to init cci first */
+	ret = stmvl53l0x_cci_init(data);
+	if (ret) {
+		err("stmvl53l0x_cci_init failed %d\n", __LINE__);
+		return ret;
+	}
+	/* actual power up */
+	if (data && data->device_type == MSM_CAMERA_PLATFORM_DEVICE) {
+		ret = stmvl53l0x_vreg_control(data, 1);
+		if (ret < 0) {
+			err("stmvl53l0x_vreg_control failed %d\n",
+				__LINE__);
+			return ret;
+		}
+	}
+	data->power_up = 1;
+	*preset_flag = 1;
+	dbg("End\n");
+
+	return ret;
+}
+
+int stmvl53l0x_power_down_cci(void *cci_object)
+{
+	int ret = 0;
+	struct cci_data *data = (struct cci_data *)cci_object;
+
+	dbg("Enter\n");
+	if (data->power_up) {
+		/* need to release cci first */
+		ret = data->client->i2c_func_tbl->i2c_util(data->client,
+				MSM_CCI_RELEASE);
+		if (ret < 0)
+			err("CCI Release failed rc %d\n", ret);
+
+		/* actual power down */
+		if (data->device_type == MSM_CAMERA_PLATFORM_DEVICE) {
+			ret = stmvl53l0x_vreg_control(data, 0);
+			if (ret < 0) {
+				err(
+					"stmvl53l0x_vreg_control failed %d\n",
+					__LINE__);
+				return ret;
+			}
+		}
+	}
+	data->power_up = 0;
+	dbg("End\n");
+	return ret;
+}
+
+int stmvl53l0x_init_cci(void)
+{
+	int ret = 0;
+
+	dbg("Enter\n");
+
+	/* register as a platform device */
+	ret = platform_driver_register(&stmvl53l0x_platform_driver);
+	if (ret)
+		err("%d, error ret:%d\n", __LINE__, ret);
+
+	dbg("End\n");
+
+	return ret;
+}
+
+void stmvl53l0x_exit_cci(void *cci_object)
+{
+	struct cci_data *data = (struct cci_data *)cci_object;
+
+	dbg("Enter\n");
+
+	if (data && data->client->cci_client)
+		kfree(data->client->cci_client);
+
+	dbg("End\n");
+}
+#endif /* end of CAMERA_CCI */
diff --git a/drivers/input/misc/vl53l0x/stmvl53l0x_module-i2c.c b/drivers/input/misc/vl53l0x/stmvl53l0x_module-i2c.c
new file mode 100644
index 0000000..7e0cc15
--- /dev/null
+++ b/drivers/input/misc/vl53l0x/stmvl53l0x_module-i2c.c
@@ -0,0 +1,246 @@
+/*
+ *  stmvl53l0x_module-i2c.c - Linux kernel modules for
+ *  STM VL53L0 FlightSense TOF sensor
+ *
+ *  Copyright (C) 2016 STMicroelectronics Imaging Division.
+ *  Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/input.h>
+#include <linux/miscdevice.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/time.h>
+#include <linux/platform_device.h>
+/*
+ * power specific includes
+ */
+#include <linux/pwm.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/clk.h>
+#include <linux/of_gpio.h>
+/*
+ * API includes
+ */
+#include "vl53l0x_api.h"
+#include "vl53l0x_def.h"
+#include "vl53l0x_platform.h"
+#include "stmvl53l0x-i2c.h"
+#include "stmvl53l0x-cci.h"
+#include "stmvl53l0x.h"
+#ifndef CAMERA_CCI
+
+/*
+ * Global data
+ */
+static int stmvl53l0x_parse_vdd(struct device *dev, struct i2c_data *data);
+
+/*
+ * QCOM specific functions
+ */
+static int stmvl53l0x_parse_vdd(struct device *dev, struct i2c_data *data)
+{
+	int ret = 0;
+
+	dbg("Enter\n");
+
+	if (dev->of_node) {
+		data->vana = regulator_get(dev, "vdd");
+		if (IS_ERR(data->vana)) {
+			err("vdd supply is not provided\n");
+			ret = -1;
+		}
+	}
+	dbg("End\n");
+
+	return ret;
+}
+
+static int stmvl53l0x_probe(struct i2c_client *client,
+				   const struct i2c_device_id *id)
+{
+	int rc = 0;
+	struct vl_data *vl53l0x_data = NULL;
+	struct i2c_data *i2c_object = NULL;
+
+	dbg("Enter\n");
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
+		rc = -EIO;
+		return rc;
+	}
+
+	vl53l0x_data = kzalloc(sizeof(struct vl_data), GFP_KERNEL);
+	if (!vl53l0x_data)
+		return -ENOMEM;
+
+	vl53l0x_data->client_object =
+		kzalloc(sizeof(struct i2c_data), GFP_KERNEL);
+	if (!vl53l0x_data->client_object) {
+		kfree(vl53l0x_data);
+		return -ENOMEM;
+	}
+	i2c_object = (struct i2c_data *)vl53l0x_data->client_object;
+	i2c_object->client = client;
+
+	/* setup bus type */
+	vl53l0x_data->bus_type = I2C_BUS;
+
+	/* setup regulator */
+	stmvl53l0x_parse_vdd(&i2c_object->client->dev, i2c_object);
+
+	/* setup device name */
+	vl53l0x_data->dev_name = dev_name(&client->dev);
+
+	/* setup device data */
+	dev_set_drvdata(&client->dev, vl53l0x_data);
+
+	/* setup client data */
+	i2c_set_clientdata(client, vl53l0x_data);
+
+	/* setup other stuff */
+	rc = stmvl53l0x_setup(vl53l0x_data);
+
+	/* init default value */
+	i2c_object->power_up = 0;
+
+	dbg("End\n");
+	return rc;
+}
+
+static int stmvl53l0x_remove(struct i2c_client *client)
+{
+	struct vl_data *data = i2c_get_clientdata(client);
+
+	dbg("Enter\n");
+
+	/* Power down the device */
+	stmvl53l0x_power_down_i2c(data->client_object);
+	kfree(data->client_object);
+	kfree(data);
+	dbg("End\n");
+	return 0;
+}
+
+static const struct i2c_device_id stmvl53l0x_id[] = {
+	{ STMVL_DRV_NAME, 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(i2c, stmvl53l0x_id);
+
+static const struct of_device_id st_stmvl53l0x_dt_match[] = {
+	{ .compatible = "st,stmvl53l0", },
+	{ },
+};
+
+static struct i2c_driver stmvl53l0x_driver = {
+	.driver = {
+		.name	= STMVL_DRV_NAME,
+		.owner	= THIS_MODULE,
+		.of_match_table = st_stmvl53l0x_dt_match,
+	},
+	.probe	= stmvl53l0x_probe,
+	.remove	= stmvl53l0x_remove,
+	.id_table = stmvl53l0x_id,
+
+};
+
+int stmvl53l0x_power_up_i2c(void *i2c_object, unsigned int *preset_flag)
+{
+	int ret = 0;
+#ifndef STM_TEST
+	struct i2c_data *data = (struct i2c_data *)i2c_object;
+#endif
+
+	dbg("Enter\n");
+
+	/* actual power on */
+#ifndef STM_TEST
+	ret = regulator_set_voltage(data->vana,
+		VL_VDD_MIN, VL_VDD_MAX);
+	if (ret < 0) {
+		err("set_vol(%p) fail %d\n", data->vana, ret);
+		return ret;
+	}
+	ret = regulator_enable(data->vana);
+	usleep_range(3000, 3001);
+	if (ret < 0) {
+		err("reg enable(%p) failed.rc=%d\n",
+			data->vana, ret);
+		return ret;
+	}
+	data->power_up = 1;
+	*preset_flag = 1;
+#endif
+
+	dbg("End\n");
+	return ret;
+}
+
+int stmvl53l0x_power_down_i2c(void *i2c_object)
+{
+	int ret = 0;
+#ifndef STM_TEST
+	struct i2c_data *data = (struct i2c_data *)i2c_object;
+#endif
+
+	dbg("Enter\n");
+#ifndef STM_TEST
+	msleep(30);
+	ret = regulator_disable(data->vana);
+	if (ret < 0)
+		err("reg disable(%p) failed.rc=%d\n",
+			data->vana, ret);
+
+	data->power_up = 0;
+#endif
+
+	dbg("End\n");
+	return ret;
+}
+
+int stmvl53l0x_init_i2c(void)
+{
+	int ret = 0;
+
+	dbg("Enter\n");
+
+	/* register as an I2C client device */
+	ret = i2c_add_driver(&stmvl53l0x_driver);
+	if (ret)
+		err("%d error ret:%d\n", __LINE__, ret);
+
+	dbg("End with rc:%d\n", ret);
+
+	return ret;
+}
+
+void stmvl53l0x_exit_i2c(void *i2c_object)
+{
+	dbg("Enter\n");
+	i2c_del_driver(&stmvl53l0x_driver);
+
+	dbg("End\n");
+}
+
+#endif /* end of NOT CAMERA_CCI */
diff --git a/drivers/input/misc/vl53l0x/stmvl53l0x_module.c b/drivers/input/misc/vl53l0x/stmvl53l0x_module.c
new file mode 100644
index 0000000..99cbc11
--- /dev/null
+++ b/drivers/input/misc/vl53l0x/stmvl53l0x_module.c
@@ -0,0 +1,2023 @@
+/*
+ *  stmvl53l0x_module.c - Linux kernel modules for
+ *  STM VL53L0 FlightSense TOF sensor
+ *
+ *  Copyright (C) 2016 STMicroelectronics Imaging Division.
+ *  Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/input.h>
+#include <linux/miscdevice.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/time.h>
+#include <linux/platform_device.h>
+#include <linux/kobject.h>
+#include <linux/kthread.h>
+/*
+ * API includes
+ */
+#include "vl53l0x_api.h"
+
+#define IRQ_NUM	   59
+#ifdef DEBUG_TIME_LOG
+struct timeval start_tv, stop_tv;
+#endif
+
+/*
+ * Global data
+ */
+
+#ifdef CAMERA_CCI
+static struct stmvl53l0x_module_fn_t stmvl53l0x_module_func_tbl = {
+	.init = stmvl53l0x_init_cci,
+	.deinit = stmvl53l0x_exit_cci,
+	.power_up = stmvl53l0x_power_up_cci,
+	.power_down = stmvl53l0x_power_down_cci,
+};
+#else
+static struct stmvl53l0x_module_fn_t stmvl53l0x_module_func_tbl = {
+	.init = stmvl53l0x_init_i2c,
+	.deinit = stmvl53l0x_exit_i2c,
+	.power_up = stmvl53l0x_power_up_i2c,
+	.power_down = stmvl53l0x_power_down_i2c,
+};
+#endif
+struct stmvl53l0x_module_fn_t *pmodule_func_tbl;
+struct stmvl53l0x_api_fn_t {
+	int8_t (*GetVersion)(struct VL_Version_t *pVersion);
+	int8_t (*GetPalSpecVersion)(struct VL_Version_t *pPalSpecVersion);
+
+	int8_t (*GetProductRevision)(struct vl_data *Dev,
+					uint8_t *pProductRevisionMajor,
+					uint8_t *pProductRevisionMinor);
+	int8_t (*GetDeviceInfo)(struct vl_data *Dev,
+				struct VL_DeviceInfo_t *pVL_DeviceInfo);
+	int8_t (*GetDeviceErrorStatus)(struct vl_data *Dev,
+				uint8_t *pDeviceErrorStatus);
+	int8_t (*GetRangeStatusString)(uint8_t RangeStatus,
+				char *pRangeStatusString);
+	int8_t (*GetDeviceErrorString)(uint8_t ErrorCode,
+				char *pDeviceErrorString);
+	int8_t (*GetPalErrorString)(int8_t PalErrorCode,
+				char *pPalErrorString);
+	int8_t (*GetPalStateString)(uint8_t PalStateCode,
+				char *pPalStateString);
+	int8_t (*GetPalState)(struct vl_data *Dev, uint8_t *pPalState);
+	int8_t (*SetPowerMode)(struct vl_data *Dev,
+				uint8_t PowerMode);
+	int8_t (*GetPowerMode)(struct vl_data *Dev,
+				uint8_t *pPowerMode);
+	int8_t (*SetOffsetCalibrationDataMicroMeter)(struct vl_data *Dev,
+				int32_t OffsetCalibrationDataMicroMeter);
+	int8_t (*GetOffsetCalibrationDataMicroMeter)(struct vl_data *Dev,
+				int32_t *pOffsetCalibrationDataMicroMeter);
+	int8_t (*SetLinearityCorrectiveGain)(struct vl_data *Dev,
+				int16_t LinearityCorrectiveGain);
+	int8_t (*GetLinearityCorrectiveGain)(struct vl_data *Dev,
+				uint16_t *pLinearityCorrectiveGain);
+	int8_t (*SetGroupParamHold)(struct vl_data *Dev,
+				uint8_t GroupParamHold);
+	int8_t (*GetUpperLimitMilliMeter)(struct vl_data *Dev,
+				uint16_t *pUpperLimitMilliMeter);
+	int8_t (*SetDeviceAddress)(struct vl_data *Dev,
+				uint8_t DeviceAddress);
+	int8_t (*DataInit)(struct vl_data *Dev);
+	int8_t (*SetTuningSettingBuffer)(struct vl_data *Dev,
+				uint8_t *pTuningSettingBuffer,
+				uint8_t UseInternalTuningSettings);
+	int8_t (*GetTuningSettingBuffer)(struct vl_data *Dev,
+				uint8_t **pTuningSettingBuffer,
+				uint8_t *pUseInternalTuningSettings);
+	int8_t (*StaticInit)(struct vl_data *Dev);
+	int8_t (*WaitDeviceBooted)(struct vl_data *Dev);
+	int8_t (*ResetDevice)(struct vl_data *Dev);
+	int8_t (*SetDeviceParameters)(struct vl_data *Dev,
+			const struct VL_DeviceParameters_t *pDeviceParameters);
+	int8_t (*GetDeviceParameters)(struct vl_data *Dev,
+			struct VL_DeviceParameters_t *pDeviceParameters);
+	int8_t (*SetDeviceMode)(struct vl_data *Dev,
+			uint8_t DeviceMode);
+	int8_t (*GetDeviceMode)(struct vl_data *Dev,
+			uint8_t *pDeviceMode);
+	int8_t (*SetHistogramMode)(struct vl_data *Dev,
+			uint8_t HistogramMode);
+	int8_t (*GetHistogramMode)(struct vl_data *Dev,
+			uint8_t *pHistogramMode);
+	int8_t (*SetMeasurementTimingBudgetMicroSeconds)(struct vl_data *Dev,
+			uint32_t  MeasurementTimingBudgetMicroSeconds);
+	int8_t (*GetMeasurementTimingBudgetMicroSeconds)(
+			struct vl_data *Dev,
+			uint32_t *pMeasurementTimingBudgetMicroSeconds);
+	int8_t (*GetVcselPulsePeriod)(struct vl_data *Dev,
+			uint8_t VcselPeriodType,
+			uint8_t	*pVCSELPulsePeriod);
+	int8_t (*SetVcselPulsePeriod)(struct vl_data *Dev,
+			uint8_t VcselPeriodType,
+			uint8_t VCSELPulsePeriod);
+	int8_t (*SetSequenceStepEnable)(struct vl_data *Dev,
+			uint8_t SequenceStepId,
+			uint8_t SequenceStepEnabled);
+	int8_t (*GetSequenceStepEnable)(struct vl_data *Dev,
+			uint8_t SequenceStepId,
+			uint8_t *pSequenceStepEnabled);
+	int8_t (*GetSequenceStepEnables)(struct vl_data *Dev,
+		struct VL_SchedulerSequenceSteps_t *pSchedulerSequenceSteps);
+	int8_t (*SetSequenceStepTimeout)(struct vl_data *Dev,
+			uint8_t SequenceStepId,
+			unsigned int TimeOutMilliSecs);
+	int8_t (*GetSequenceStepTimeout)(struct vl_data *Dev,
+			uint8_t SequenceStepId,
+			unsigned int *pTimeOutMilliSecs);
+	int8_t (*GetNumberOfSequenceSteps)(struct vl_data *Dev,
+			uint8_t *pNumberOfSequenceSteps);
+	int8_t (*GetSequenceStepsInfo)(
+			uint8_t SequenceStepId,
+			char *pSequenceStepsString);
+	int8_t (*SetInterMeasurementPeriodMilliSeconds)(
+			struct vl_data *Dev,
+			uint32_t InterMeasurementPeriodMilliSeconds);
+	int8_t (*GetInterMeasurementPeriodMilliSeconds)(
+			struct vl_data *Dev,
+			uint32_t *pInterMeasurementPeriodMilliSeconds);
+	int8_t (*SetXTalkCompensationEnable)(struct vl_data *Dev,
+			uint8_t XTalkCompensationEnable);
+	int8_t (*GetXTalkCompensationEnable)(struct vl_data *Dev,
+			uint8_t *pXTalkCompensationEnable);
+	int8_t (*SetXTalkCompensationRateMegaCps)(
+			struct vl_data *Dev,
+			unsigned int XTalkCompensationRateMegaCps);
+	int8_t (*GetXTalkCompensationRateMegaCps)(
+			struct vl_data *Dev,
+			unsigned int *pXTalkCompensationRateMegaCps);
+	int8_t (*GetNumberOfLimitCheck)(
+			uint16_t *pNumberOfLimitCheck);
+	int8_t (*GetLimitCheckInfo)(struct vl_data *Dev,
+			uint16_t LimitCheckId, char *pLimitCheckString);
+	int8_t (*SetLimitCheckEnable)(struct vl_data *Dev,
+			uint16_t LimitCheckId,
+			uint8_t LimitCheckEnable);
+	int8_t (*GetLimitCheckEnable)(struct vl_data *Dev,
+			uint16_t LimitCheckId, uint8_t *pLimitCheckEnable);
+	int8_t (*SetLimitCheckValue)(struct vl_data *Dev,
+			uint16_t LimitCheckId,
+			unsigned int LimitCheckValue);
+	int8_t (*GetLimitCheckValue)(struct vl_data *Dev,
+			uint16_t LimitCheckId,
+			unsigned int *pLimitCheckValue);
+	int8_t (*GetLimitCheckCurrent)(struct vl_data *Dev,
+		uint16_t LimitCheckId, unsigned int *pLimitCheckCurrent);
+	int8_t (*SetWrapAroundCheckEnable)(struct vl_data *Dev,
+			uint8_t WrapAroundCheckEnable);
+	int8_t (*GetWrapAroundCheckEnable)(struct vl_data *Dev,
+			uint8_t *pWrapAroundCheckEnable);
+	int8_t (*PerformSingleMeasurement)(struct vl_data *Dev);
+	int8_t (*PerformRefCalibration)(struct vl_data *Dev,
+			uint8_t *pVhvSettings, uint8_t *pPhaseCal);
+	int8_t (*SetRefCalibration)(struct vl_data *Dev,
+			uint8_t VhvSettings, uint8_t PhaseCal);
+	int8_t (*GetRefCalibration)(struct vl_data *Dev,
+			uint8_t *pVhvSettings, uint8_t *pPhaseCal);
+	int8_t (*PerformXTalkCalibration)(struct vl_data *Dev,
+			unsigned int XTalkCalDistance,
+			unsigned int *pXTalkCompensationRateMegaCps);
+	int8_t (*PerformOffsetCalibration)(struct vl_data *Dev,
+			unsigned int CalDistanceMilliMeter,
+			int32_t *pOffsetMicroMeter);
+	int8_t (*StartMeasurement)(struct vl_data *Dev);
+	int8_t (*StopMeasurement)(struct vl_data *Dev);
+	int8_t (*GetMeasurementDataReady)(struct vl_data *Dev,
+			uint8_t *pMeasurementDataReady);
+	int8_t (*WaitDeviceReadyForNewMeasurement)(struct vl_data *Dev,
+			uint32_t MaxLoop);
+	int8_t (*GetRangingMeasurementData)(struct vl_data *Dev,
+	struct VL_RangingMeasurementData_t *pRangingMeasurementData);
+	int8_t (*GetHistogramMeasurementData)(struct vl_data *Dev,
+	struct VL_HistogramMeasurementData_t *pHistogramMeasurementData);
+	int8_t (*PerformSingleRangingMeasurement)(struct vl_data *Dev,
+	struct VL_RangingMeasurementData_t *pRangingMeasurementData);
+	int8_t (*PerformSingleHistogramMeasurement)(struct vl_data *Dev,
+	struct VL_HistogramMeasurementData_t *pHistogramMeasurementData);
+	int8_t (*SetNumberOfROIZones)(struct vl_data *Dev,
+			uint8_t NumberOfROIZones);
+	int8_t (*GetNumberOfROIZones)(struct vl_data *Dev,
+			uint8_t *pNumberOfROIZones);
+	int8_t (*GetMaxNumberOfROIZones)(struct vl_data *Dev,
+			uint8_t *pMaxNumberOfROIZones);
+	int8_t (*SetGpioConfig)(struct vl_data *Dev,
+			uint8_t Pin,
+			uint8_t DeviceMode,
+			uint8_t Functionality,
+			uint8_t Polarity);
+	int8_t (*GetGpioConfig)(struct vl_data *Dev,
+			uint8_t Pin,
+			uint8_t *pDeviceMode,
+			uint8_t *pFunctionality,
+			uint8_t *pPolarity);
+	int8_t (*SetInterruptThresholds)(struct vl_data *Dev,
+			uint8_t DeviceMode,
+			unsigned int ThresholdLow,
+			unsigned int ThresholdHigh);
+	int8_t (*GetInterruptThresholds)(struct vl_data *Dev,
+			uint8_t DeviceMode,
+			unsigned int *pThresholdLow,
+			unsigned int *pThresholdHigh);
+	int8_t (*ClearInterruptMask)(struct vl_data *Dev,
+			uint32_t InterruptMask);
+	int8_t (*GetInterruptMaskStatus)(struct vl_data *Dev,
+			uint32_t *pInterruptMaskStatus);
+	int8_t (*EnableInterruptMask)(struct vl_data *Dev,
+		uint32_t InterruptMask);
+	int8_t (*SetSpadAmbientDamperThreshold)(struct vl_data *Dev,
+			uint16_t SpadAmbientDamperThreshold);
+	int8_t (*GetSpadAmbientDamperThreshold)(struct vl_data *Dev,
+			uint16_t *pSpadAmbientDamperThreshold);
+	int8_t (*SetSpadAmbientDamperFactor)(struct vl_data *Dev,
+			uint16_t SpadAmbientDamperFactor);
+	int8_t (*GetSpadAmbientDamperFactor)(struct vl_data *Dev,
+			uint16_t *pSpadAmbientDamperFactor);
+	int8_t (*PerformRefSpadManagement)(struct vl_data *Dev,
+			uint32_t *refSpadCount, uint8_t *isApertureSpads);
+	int8_t (*SetReferenceSpads)(struct vl_data *Dev,
+			uint32_t count, uint8_t isApertureSpads);
+	int8_t (*GetReferenceSpads)(struct vl_data *Dev,
+			uint32_t *pSpadCount, uint8_t *pIsApertureSpads);
+};
+
+static struct stmvl53l0x_api_fn_t stmvl53l0x_api_func_tbl = {
+	.GetVersion = VL_GetVersion,
+	.GetPalSpecVersion = VL_GetPalSpecVersion,
+	.GetProductRevision = VL_GetProductRevision,
+	.GetDeviceInfo = VL_GetDeviceInfo,
+	.GetDeviceErrorStatus = VL_GetDeviceErrorStatus,
+	.GetRangeStatusString = VL_GetRangeStatusString,
+	.GetDeviceErrorString = VL_GetDeviceErrorString,
+	.GetPalErrorString = VL_GetPalErrorString,
+	.GetPalState = VL_GetPalState,
+	.SetPowerMode = VL_SetPowerMode,
+	.GetPowerMode = VL_GetPowerMode,
+	.SetOffsetCalibrationDataMicroMeter =
+		VL_SetOffsetCalibrationDataMicroMeter,
+	.SetLinearityCorrectiveGain =
+		VL_SetLinearityCorrectiveGain,
+	.GetLinearityCorrectiveGain =
+		VL_GetLinearityCorrectiveGain,
+	.GetOffsetCalibrationDataMicroMeter =
+		VL_GetOffsetCalibrationDataMicroMeter,
+	.SetGroupParamHold = VL_SetGroupParamHold,
+	.GetUpperLimitMilliMeter = VL_GetUpperLimitMilliMeter,
+	.SetDeviceAddress = VL_SetDeviceAddress,
+	.DataInit = VL_DataInit,
+	.SetTuningSettingBuffer = VL_SetTuningSettingBuffer,
+	.GetTuningSettingBuffer = VL_GetTuningSettingBuffer,
+	.StaticInit = VL_StaticInit,
+	.WaitDeviceBooted = VL_WaitDeviceBooted,
+	.ResetDevice = VL_ResetDevice,
+	.SetDeviceParameters = VL_SetDeviceParameters,
+	.SetDeviceMode = VL_SetDeviceMode,
+	.GetDeviceMode = VL_GetDeviceMode,
+	.SetHistogramMode = VL_SetHistogramMode,
+	.GetHistogramMode = VL_GetHistogramMode,
+	.SetMeasurementTimingBudgetMicroSeconds =
+		VL_SetMeasurementTimingBudgetMicroSeconds,
+	.GetMeasurementTimingBudgetMicroSeconds =
+		VL_GetMeasurementTimingBudgetMicroSeconds,
+	.GetVcselPulsePeriod = VL_GetVcselPulsePeriod,
+	.SetVcselPulsePeriod = VL_SetVcselPulsePeriod,
+	.SetSequenceStepEnable = VL_SetSequenceStepEnable,
+	.GetSequenceStepEnable = VL_GetSequenceStepEnable,
+	.GetSequenceStepEnables = VL_GetSequenceStepEnables,
+	.SetSequenceStepTimeout = VL_SetSequenceStepTimeout,
+	.GetSequenceStepTimeout = VL_GetSequenceStepTimeout,
+	.GetNumberOfSequenceSteps = VL_GetNumberOfSequenceSteps,
+	.GetSequenceStepsInfo = VL_GetSequenceStepsInfo,
+	.SetInterMeasurementPeriodMilliSeconds =
+		VL_SetInterMeasurementPeriodMilliSeconds,
+	.GetInterMeasurementPeriodMilliSeconds =
+		VL_GetInterMeasurementPeriodMilliSeconds,
+	.SetXTalkCompensationEnable = VL_SetXTalkCompensationEnable,
+	.GetXTalkCompensationEnable = VL_GetXTalkCompensationEnable,
+	.SetXTalkCompensationRateMegaCps =
+		VL_SetXTalkCompensationRateMegaCps,
+	.GetXTalkCompensationRateMegaCps =
+		VL_GetXTalkCompensationRateMegaCps,
+	.GetNumberOfLimitCheck = VL_GetNumberOfLimitCheck,
+	.GetLimitCheckInfo = VL_GetLimitCheckInfo,
+	.SetLimitCheckEnable = VL_SetLimitCheckEnable,
+	.GetLimitCheckEnable = VL_GetLimitCheckEnable,
+	.SetLimitCheckValue = VL_SetLimitCheckValue,
+	.GetLimitCheckValue = VL_GetLimitCheckValue,
+	.GetLimitCheckCurrent = VL_GetLimitCheckCurrent,
+	.SetWrapAroundCheckEnable = VL_SetWrapAroundCheckEnable,
+	.GetWrapAroundCheckEnable = VL_GetWrapAroundCheckEnable,
+	.PerformSingleMeasurement = VL_PerformSingleMeasurement,
+	.PerformRefCalibration = VL_PerformRefCalibration,
+	.SetRefCalibration = VL_SetRefCalibration,
+	.GetRefCalibration = VL_GetRefCalibration,
+	.PerformXTalkCalibration = VL_PerformXTalkCalibration,
+	.PerformOffsetCalibration = VL_PerformOffsetCalibration,
+	.StartMeasurement = VL_StartMeasurement,
+	.StopMeasurement = VL_StopMeasurement,
+	.GetMeasurementDataReady = VL_GetMeasurementDataReady,
+	.WaitDeviceReadyForNewMeasurement =
+		VL_WaitDeviceReadyForNewMeasurement,
+	.GetRangingMeasurementData = VL_GetRangingMeasurementData,
+	.GetHistogramMeasurementData = VL_GetHistogramMeasurementData,
+	.PerformSingleRangingMeasurement =
+		VL_PerformSingleRangingMeasurement,
+	.PerformSingleHistogramMeasurement =
+		VL_PerformSingleHistogramMeasurement,
+	.SetNumberOfROIZones = VL_SetNumberOfROIZones,
+	.GetNumberOfROIZones = VL_GetNumberOfROIZones,
+	.GetMaxNumberOfROIZones = VL_GetMaxNumberOfROIZones,
+	.SetGpioConfig = VL_SetGpioConfig,
+	.GetGpioConfig = VL_GetGpioConfig,
+	.SetInterruptThresholds = VL_SetInterruptThresholds,
+	.GetInterruptThresholds = VL_GetInterruptThresholds,
+	.ClearInterruptMask = VL_ClearInterruptMask,
+	.GetInterruptMaskStatus = VL_GetInterruptMaskStatus,
+	.EnableInterruptMask = VL_EnableInterruptMask,
+	.SetSpadAmbientDamperThreshold = VL_SetSpadAmbientDamperThreshold,
+	.GetSpadAmbientDamperThreshold = VL_GetSpadAmbientDamperThreshold,
+	.SetSpadAmbientDamperFactor = VL_SetSpadAmbientDamperFactor,
+	.GetSpadAmbientDamperFactor = VL_GetSpadAmbientDamperFactor,
+	.PerformRefSpadManagement = VL_PerformRefSpadManagement,
+	.SetReferenceSpads = VL_SetReferenceSpads,
+	.GetReferenceSpads = VL_GetReferenceSpads,
+
+};
+struct stmvl53l0x_api_fn_t *papi_func_tbl;
+
+/*
+ * IOCTL definitions
+ */
+#define VL_IOCTL_INIT			_IO('p', 0x01)
+#define VL_IOCTL_XTALKCALB		_IOW('p', 0x02, unsigned int)
+#define VL_IOCTL_OFFCALB		_IOW('p', 0x03, unsigned int)
+#define VL_IOCTL_STOP			_IO('p', 0x05)
+#define VL_IOCTL_SETXTALK		_IOW('p', 0x06, unsigned int)
+#define VL_IOCTL_SETOFFSET		_IOW('p', 0x07, int8_t)
+#define VL_IOCTL_GETDATAS \
+			_IOR('p', 0x0b, struct VL_RangingMeasurementData_t)
+#define VL_IOCTL_REGISTER \
+			_IOWR('p', 0x0c, struct stmvl53l0x_register)
+#define VL_IOCTL_PARAMETER \
+			_IOWR('p', 0x0d, struct stmvl53l0x_parameter)
+
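[Editor's note] The ioctl set above is the driver's userspace control surface. A minimal, hypothetical usage sketch follows; it is not part of the patch. The device node name and the availability of the UAPI definitions (VL_IOCTL_* and struct VL_RangingMeasurementData_t) in a userspace header are assumptions, not taken from this change.

    /*
     * Illustrative userspace sketch (assumed device node and UAPI header).
     * Starts ranging, reads one measurement, then stops the sensor.
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    /* #include "stmvl53l0x_uapi.h"  -- assumed header exposing the ioctls */

    int main(void)
    {
    	struct VL_RangingMeasurementData_t range;
    	int fd = open("/dev/stmvl53l0x_ranging", O_RDWR); /* assumed node */

    	if (fd < 0)
    		return 1;
    	ioctl(fd, VL_IOCTL_INIT);		/* power up and start ranging */
    	sleep(1);				/* allow a measurement to complete */
    	ioctl(fd, VL_IOCTL_GETDATAS, &range);	/* fetch the latest range data */
    	printf("range: %d mm\n", range.RangeMilliMeter);
    	ioctl(fd, VL_IOCTL_STOP);		/* stop ranging and power down */
    	close(fd);
    	return 0;
    }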
+static long stmvl53l0x_ioctl(struct file *file,
+				unsigned int cmd, unsigned long arg);
+/*static int stmvl53l0x_flush(struct file *file, fl_owner_t id);*/
+static int stmvl53l0x_open(struct inode *inode, struct file *file);
+static int stmvl53l0x_init_client(struct vl_data *data);
+static int stmvl53l0x_start(struct vl_data *data, uint8_t scaling,
+			enum init_mode_e mode);
+static int stmvl53l0x_stop(struct vl_data *data);
+
+#ifdef DEBUG_TIME_LOG
+static void stmvl53l0x_DebugTimeGet(struct timeval *ptv)
+{
+	do_gettimeofday(ptv);
+}
+
+static void stmvl53l0x_DebugTimeDuration(struct timeval *pstart_tv,
+			struct timeval *pstop_tv)
+{
+	long total_sec, total_msec;
+
+	total_sec = pstop_tv->tv_sec - pstart_tv->tv_sec;
+	total_msec = (pstop_tv->tv_usec - pstart_tv->tv_usec)/1000;
+	total_msec += total_sec * 1000;
+	dbg("elapsedTime:%ld\n", total_msec);
+}
+#endif
+
+static void stmvl53l0x_setupAPIFunctions(void)
+{
+
+	/*cut 1.1*/
+	err("Setting up API functions for cut 1.1\n");
+	papi_func_tbl->GetVersion = VL_GetVersion;
+	papi_func_tbl->GetPalSpecVersion = VL_GetPalSpecVersion;
+	papi_func_tbl->GetProductRevision = VL_GetProductRevision;
+	papi_func_tbl->GetDeviceInfo = VL_GetDeviceInfo;
+	papi_func_tbl->GetDeviceErrorStatus = VL_GetDeviceErrorStatus;
+	papi_func_tbl->GetRangeStatusString = VL_GetRangeStatusString;
+	papi_func_tbl->GetDeviceErrorString = VL_GetDeviceErrorString;
+	papi_func_tbl->GetPalErrorString = VL_GetPalErrorString;
+	papi_func_tbl->GetPalState = VL_GetPalState;
+	papi_func_tbl->SetPowerMode = VL_SetPowerMode;
+	papi_func_tbl->GetPowerMode = VL_GetPowerMode;
+	papi_func_tbl->SetOffsetCalibrationDataMicroMeter =
+		VL_SetOffsetCalibrationDataMicroMeter;
+	papi_func_tbl->GetOffsetCalibrationDataMicroMeter =
+		VL_GetOffsetCalibrationDataMicroMeter;
+	papi_func_tbl->SetLinearityCorrectiveGain =
+		VL_SetLinearityCorrectiveGain;
+	papi_func_tbl->GetLinearityCorrectiveGain =
+		VL_GetLinearityCorrectiveGain;
+	papi_func_tbl->SetGroupParamHold = VL_SetGroupParamHold;
+	papi_func_tbl->GetUpperLimitMilliMeter =
+		VL_GetUpperLimitMilliMeter;
+	papi_func_tbl->SetDeviceAddress = VL_SetDeviceAddress;
+	papi_func_tbl->DataInit = VL_DataInit;
+	papi_func_tbl->SetTuningSettingBuffer = VL_SetTuningSettingBuffer;
+	papi_func_tbl->GetTuningSettingBuffer = VL_GetTuningSettingBuffer;
+	papi_func_tbl->StaticInit = VL_StaticInit;
+	papi_func_tbl->WaitDeviceBooted = VL_WaitDeviceBooted;
+	papi_func_tbl->ResetDevice = VL_ResetDevice;
+	papi_func_tbl->SetDeviceParameters = VL_SetDeviceParameters;
+	papi_func_tbl->SetDeviceMode = VL_SetDeviceMode;
+	papi_func_tbl->GetDeviceMode = VL_GetDeviceMode;
+	papi_func_tbl->SetHistogramMode = VL_SetHistogramMode;
+	papi_func_tbl->GetHistogramMode = VL_GetHistogramMode;
+	papi_func_tbl->SetMeasurementTimingBudgetMicroSeconds =
+		VL_SetMeasurementTimingBudgetMicroSeconds;
+	papi_func_tbl->GetMeasurementTimingBudgetMicroSeconds =
+		VL_GetMeasurementTimingBudgetMicroSeconds;
+	papi_func_tbl->GetVcselPulsePeriod = VL_GetVcselPulsePeriod;
+	papi_func_tbl->SetVcselPulsePeriod = VL_SetVcselPulsePeriod;
+	papi_func_tbl->SetSequenceStepEnable = VL_SetSequenceStepEnable;
+	papi_func_tbl->GetSequenceStepEnable = VL_GetSequenceStepEnable;
+	papi_func_tbl->GetSequenceStepEnables = VL_GetSequenceStepEnables;
+	papi_func_tbl->SetSequenceStepTimeout = VL_SetSequenceStepTimeout;
+	papi_func_tbl->GetSequenceStepTimeout = VL_GetSequenceStepTimeout;
+	papi_func_tbl->GetNumberOfSequenceSteps =
+		VL_GetNumberOfSequenceSteps;
+	papi_func_tbl->GetSequenceStepsInfo = VL_GetSequenceStepsInfo;
+	papi_func_tbl->SetInterMeasurementPeriodMilliSeconds =
+		VL_SetInterMeasurementPeriodMilliSeconds;
+	papi_func_tbl->GetInterMeasurementPeriodMilliSeconds =
+		VL_GetInterMeasurementPeriodMilliSeconds;
+	papi_func_tbl->SetXTalkCompensationEnable =
+		VL_SetXTalkCompensationEnable;
+	papi_func_tbl->GetXTalkCompensationEnable =
+		VL_GetXTalkCompensationEnable;
+	papi_func_tbl->SetXTalkCompensationRateMegaCps =
+		VL_SetXTalkCompensationRateMegaCps;
+	papi_func_tbl->GetXTalkCompensationRateMegaCps =
+		VL_GetXTalkCompensationRateMegaCps;
+	papi_func_tbl->GetNumberOfLimitCheck = VL_GetNumberOfLimitCheck;
+	papi_func_tbl->GetLimitCheckInfo = VL_GetLimitCheckInfo;
+	papi_func_tbl->SetLimitCheckEnable = VL_SetLimitCheckEnable;
+	papi_func_tbl->GetLimitCheckEnable = VL_GetLimitCheckEnable;
+	papi_func_tbl->SetLimitCheckValue = VL_SetLimitCheckValue;
+	papi_func_tbl->GetLimitCheckValue = VL_GetLimitCheckValue;
+	papi_func_tbl->GetLimitCheckCurrent = VL_GetLimitCheckCurrent;
+	papi_func_tbl->SetWrapAroundCheckEnable =
+		VL_SetWrapAroundCheckEnable;
+	papi_func_tbl->GetWrapAroundCheckEnable =
+		VL_GetWrapAroundCheckEnable;
+	papi_func_tbl->PerformSingleMeasurement =
+		VL_PerformSingleMeasurement;
+	papi_func_tbl->PerformRefCalibration = VL_PerformRefCalibration;
+	papi_func_tbl->SetRefCalibration = VL_SetRefCalibration;
+	papi_func_tbl->GetRefCalibration = VL_GetRefCalibration;
+	papi_func_tbl->PerformXTalkCalibration =
+		VL_PerformXTalkCalibration;
+	papi_func_tbl->PerformOffsetCalibration =
+		VL_PerformOffsetCalibration;
+	papi_func_tbl->StartMeasurement = VL_StartMeasurement;
+	papi_func_tbl->StopMeasurement = VL_StopMeasurement;
+	papi_func_tbl->GetMeasurementDataReady =
+		VL_GetMeasurementDataReady;
+	papi_func_tbl->WaitDeviceReadyForNewMeasurement =
+		VL_WaitDeviceReadyForNewMeasurement;
+	papi_func_tbl->GetRangingMeasurementData =
+		VL_GetRangingMeasurementData;
+	papi_func_tbl->GetHistogramMeasurementData =
+		VL_GetHistogramMeasurementData;
+	papi_func_tbl->PerformSingleRangingMeasurement =
+		VL_PerformSingleRangingMeasurement;
+	papi_func_tbl->PerformSingleHistogramMeasurement =
+		VL_PerformSingleHistogramMeasurement;
+	papi_func_tbl->SetNumberOfROIZones = VL_SetNumberOfROIZones;
+	papi_func_tbl->GetNumberOfROIZones = VL_GetNumberOfROIZones;
+	papi_func_tbl->GetMaxNumberOfROIZones = VL_GetMaxNumberOfROIZones;
+	papi_func_tbl->SetGpioConfig = VL_SetGpioConfig;
+	papi_func_tbl->GetGpioConfig = VL_GetGpioConfig;
+	papi_func_tbl->SetInterruptThresholds = VL_SetInterruptThresholds;
+	papi_func_tbl->GetInterruptThresholds = VL_GetInterruptThresholds;
+	papi_func_tbl->ClearInterruptMask = VL_ClearInterruptMask;
+	papi_func_tbl->GetInterruptMaskStatus = VL_GetInterruptMaskStatus;
+	papi_func_tbl->EnableInterruptMask = VL_EnableInterruptMask;
+	papi_func_tbl->SetSpadAmbientDamperThreshold =
+		VL_SetSpadAmbientDamperThreshold;
+	papi_func_tbl->GetSpadAmbientDamperThreshold =
+		VL_GetSpadAmbientDamperThreshold;
+	papi_func_tbl->SetSpadAmbientDamperFactor =
+		VL_SetSpadAmbientDamperFactor;
+	papi_func_tbl->GetSpadAmbientDamperFactor =
+		VL_GetSpadAmbientDamperFactor;
+	papi_func_tbl->PerformRefSpadManagement =
+		VL_PerformRefSpadManagement;
+	papi_func_tbl->SetReferenceSpads = VL_SetReferenceSpads;
+	papi_func_tbl->GetReferenceSpads = VL_GetReferenceSpads;
+
+}
+
+static void stmvl53l0x_ps_read_measurement(struct vl_data *data)
+{
+	struct timeval tv;
+	struct vl_data *vl53l0x_dev = data;
+	int8_t Status = VL_ERROR_NONE;
+	unsigned int LimitCheckCurrent = 0;
+
+	do_gettimeofday(&tv);
+
+	data->ps_data = data->rangeData.RangeMilliMeter;
+	input_report_abs(data->input_dev_ps, ABS_DISTANCE,
+		(int)(data->ps_data + 5) / 10);
+	input_report_abs(data->input_dev_ps, ABS_HAT0X, tv.tv_sec);
+	input_report_abs(data->input_dev_ps, ABS_HAT0Y, tv.tv_usec);
+	input_report_abs(data->input_dev_ps, ABS_HAT1X,
+		data->rangeData.RangeMilliMeter);
+	input_report_abs(data->input_dev_ps, ABS_HAT1Y,
+		data->rangeData.RangeStatus);
+	input_report_abs(data->input_dev_ps, ABS_HAT2X,
+		data->rangeData.SignalRateRtnMegaCps);
+	input_report_abs(data->input_dev_ps, ABS_HAT2Y,
+		data->rangeData.AmbientRateRtnMegaCps);
+	input_report_abs(data->input_dev_ps, ABS_HAT3X,
+		data->rangeData.MeasurementTimeUsec);
+	input_report_abs(data->input_dev_ps, ABS_HAT3Y,
+		data->rangeData.RangeDMaxMilliMeter);
+	Status = papi_func_tbl->GetLimitCheckCurrent(vl53l0x_dev,
+		VL_CHECKENABLE_SIGMA_FINAL_RANGE,
+		&LimitCheckCurrent);
+	if (Status == VL_ERROR_NONE) {
+		input_report_abs(data->input_dev_ps, ABS_WHEEL,
+				LimitCheckCurrent);
+	}
+	input_report_abs(data->input_dev_ps, ABS_PRESSURE,
+		data->rangeData.EffectiveSpadRtnCount);
+	input_sync(data->input_dev_ps);
+
+	if (data->enableDebug) {
+		err("range:%d, RtnRateMcps:%d,err:0x%x\n",
+			data->rangeData.RangeMilliMeter,
+			data->rangeData.SignalRateRtnMegaCps,
+			data->rangeData.RangeStatus);
+		err("Dmax:%d,rtnambr:%d,time:%d,Spad:%d,SigmaLimit:%d\n",
+			data->rangeData.RangeDMaxMilliMeter,
+			data->rangeData.AmbientRateRtnMegaCps,
+			data->rangeData.MeasurementTimeUsec,
+			data->rangeData.EffectiveSpadRtnCount,
+			LimitCheckCurrent);
+	}
+}
+
+static void stmvl53l0x_cancel_handler(struct vl_data *data)
+{
+	unsigned long flags;
+	bool ret;
+
+	spin_lock_irqsave(&data->update_lock.wait_lock, flags);
+	/*
+	 * If work is already scheduled, subsequent schedules will not
+	 * change the scheduled time; that's why we cancel it first.
+	 */
+	ret = cancel_delayed_work(&data->dwork);
+	if (ret == 0)
+		err("cancel_delayed_work returned FALSE\n");
+
+	spin_unlock_irqrestore(&data->update_lock.wait_lock, flags);
+
+}
+
+void stmvl53l0x_schedule_handler(struct vl_data *data)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&data->update_lock.wait_lock, flags);
+	/*
+	 * If work is already scheduled, subsequent schedules will not
+	 * change the scheduled time; that's why we cancel it first.
+	 */
+	cancel_delayed_work(&data->dwork);
+	schedule_delayed_work(&data->dwork, 0);
+	spin_unlock_irqrestore(&data->update_lock.wait_lock, flags);
+
+}
+
+/* Flag used to exit the thread when kthread_stop() is invoked */
+static int poll_thread_exit;
+int stmvl53l0x_poll_thread(void *data)
+{
+	struct vl_data *vl53l0x_dev = data;
+	int8_t Status = VL_ERROR_NONE;
+	uint32_t sleep_time = 0;
+	uint32_t interruptStatus = 0;
+
+	dbg("Starting Polling thread\n");
+
+	while (!kthread_should_stop()) {
+		/* Block until enable_ps_sensor is set or an */
+		/* exit request is made */
+		wait_event(vl53l0x_dev->poll_thread_wq,
+			(vl53l0x_dev->enable_ps_sensor || poll_thread_exit));
+		if (poll_thread_exit) {
+			dbg("Exiting the poll thread\n");
+			break;
+		}
+
+		mutex_lock(&vl53l0x_dev->work_mutex);
+
+		sleep_time = vl53l0x_dev->delay_ms;
+		Status = VL_GetInterruptMaskStatus(vl53l0x_dev,
+			&interruptStatus);
+		if (Status == VL_ERROR_NONE &&
+			interruptStatus &&
+			interruptStatus != vl53l0x_dev->interruptStatus) {
+			vl53l0x_dev->interruptStatus = interruptStatus;
+			vl53l0x_dev->noInterruptCount = 0;
+			stmvl53l0x_schedule_handler(vl53l0x_dev);
+
+		} else {
+			vl53l0x_dev->noInterruptCount++;
+		}
+
+		/* Force Clear interrupt mask and restart if no interrupt */
+		/* after twice the timingBudget */
+		if ((vl53l0x_dev->noInterruptCount * vl53l0x_dev->delay_ms) >
+			(vl53l0x_dev->timingBudget * 2)) {
+			dbg("No interrupt after (%u) msec(TimingBudget = %u)\n",
+				(vl53l0x_dev->noInterruptCount *
+					vl53l0x_dev->delay_ms),
+				vl53l0x_dev->timingBudget);
+			Status = papi_func_tbl->ClearInterruptMask(
+				vl53l0x_dev, 0);
+			if (vl53l0x_dev->deviceMode ==
+				VL_DEVICEMODE_SINGLE_RANGING) {
+				Status = papi_func_tbl->StartMeasurement(
+					vl53l0x_dev);
+				if (Status != VL_ERROR_NONE)
+					dbg("Status = %d\n", Status);
+			}
+		}
+
+		mutex_unlock(&vl53l0x_dev->work_mutex);
+
+		msleep(sleep_time);
+	}
+
+	return 0;
+}
+
+/* work handler */
+static void stmvl53l0x_work_handler(struct work_struct *work)
+{
+	struct vl_data *data = container_of(work,
+	struct vl_data,	dwork.work);
+
+	struct vl_data *vl53l0x_dev = data;
+
+	int8_t Status = VL_ERROR_NONE;
+
+	mutex_lock(&data->work_mutex);
+	/* dbg("Enter\n"); */
+
+
+	if (vl53l0x_dev->enable_ps_sensor == 1) {
+#ifdef DEBUG_TIME_LOG
+		stmvl53l0x_DebugTimeGet(&stop_tv);
+		stmvl53l0x_DebugTimeDuration(&start_tv, &stop_tv);
+#endif
+		/* ISR has scheduled this function */
+		if (vl53l0x_dev->interrupt_received == 1) {
+			Status = papi_func_tbl->GetInterruptMaskStatus(
+			vl53l0x_dev, &vl53l0x_dev->interruptStatus);
+			if (Status != VL_ERROR_NONE) {
+				dbg("%s(%d) : Status = %d\n",
+				__func__, __LINE__, Status);
+			}
+			vl53l0x_dev->interrupt_received = 0;
+		}
+		if (data->enableDebug)
+			dbg("interruptStatus:0x%x, interrupt_received:%d\n",
+			vl53l0x_dev->interruptStatus,
+				vl53l0x_dev->interrupt_received);
+
+		if (vl53l0x_dev->interruptStatus ==
+			vl53l0x_dev->gpio_function) {
+			Status = papi_func_tbl->ClearInterruptMask(vl53l0x_dev,
+				0);
+			if (Status != VL_ERROR_NONE) {
+				dbg("%s(%d) : Status = %d\n",
+					__func__, __LINE__, Status);
+			} else {
+				Status =
+				papi_func_tbl->GetRangingMeasurementData(
+					vl53l0x_dev, &(data->rangeData));
+				/* to push the measurement */
+				if (Status == VL_ERROR_NONE)
+					stmvl53l0x_ps_read_measurement(data);
+				else
+					dbg("%s(%d) : Status = %d\n",
+						__func__, __LINE__, Status);
+
+				dbg("Measured range:%d\n",
+				data->rangeData.RangeMilliMeter);
+
+				if (data->enableDebug)
+					dbg("Measured range:%d\n",
+					data->rangeData.RangeMilliMeter);
+
+				if (data->deviceMode ==
+					VL_DEVICEMODE_SINGLE_RANGING)
+					Status =
+					papi_func_tbl->StartMeasurement(
+					vl53l0x_dev);
+			}
+		}
+#ifdef DEBUG_TIME_LOG
+		stmvl53l0x_DebugTimeGet(&start_tv);
+#endif
+
+		}
+
+
+	vl53l0x_dev->interruptStatus = 0;
+
+	mutex_unlock(&data->work_mutex);
+
+}
+
+
+/*
+ * SysFS support
+ */
+static ssize_t stmvl53l0x_show_enable_ps_sensor(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct vl_data *data = dev_get_drvdata(dev);
+
+	return snprintf(buf, 5, "%d\n", data->enable_ps_sensor);
+}
+
+static ssize_t stmvl53l0x_store_enable_ps_sensor(struct device *dev,
+				struct device_attribute *attr, const char *buf,
+				size_t count)
+{
+	struct vl_data *data = dev_get_drvdata(dev);
+	int ret;
+	unsigned int val;
+
+	ret = kstrtouint(buf, 10, &val);
+	if (ret || ((val != 0) && (val != 1))) {
+		err("store invalid value=%u\n", val);
+		return count;
+	}
+	mutex_lock(&data->work_mutex);
+	dbg("Enter, enable_ps_sensor flag:%d\n",
+		data->enable_ps_sensor);
+	dbg("enable ps sensor (%d)\n", val);
+
+	if (val == 1) {
+		/* turn on tof sensor */
+		if (data->enable_ps_sensor == 0) {
+			/* to start */
+			stmvl53l0x_start(data, 3, NORMAL_MODE);
+		} else {
+			err("Already enabled. Skip!\n");
+		}
+	} else {
+		/* turn off tof sensor */
+		if (data->enable_ps_sensor == 1) {
+			data->enable_ps_sensor = 0;
+			/* to stop */
+			stmvl53l0x_stop(data);
+		}
+	}
+	dbg("End\n");
+	mutex_unlock(&data->work_mutex);
+
+	return count;
+}
+
+static DEVICE_ATTR(enable_ps_sensor, 0664/*S_IWUGO | S_IRUGO*/,
+				   stmvl53l0x_show_enable_ps_sensor,
+					stmvl53l0x_store_enable_ps_sensor);
+
+static ssize_t stmvl53l0x_show_enable_debug(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct vl_data *data = dev_get_drvdata(dev);
+
+	return snprintf(buf, 5, "%d\n", data->enableDebug);
+}
+
+/* for debug */
+static ssize_t stmvl53l0x_store_enable_debug(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	struct vl_data *data = dev_get_drvdata(dev);
+	int on;
+	int ret;
+
+	ret = kstrtoint(buf, 10, &on);
+	if (ret || ((on != 0) && (on != 1))) {
+		err("set debug=%d\n", on);
+		return count;
+	}
+	data->enableDebug = on;
+
+	return count;
+}
+
+/* DEVICE_ATTR(name,mode,show,store) */
+static DEVICE_ATTR(enable_debug, 0660/*S_IWUSR | S_IRUGO*/,
+				   stmvl53l0x_show_enable_debug,
+					stmvl53l0x_store_enable_debug);
+
+static ssize_t stmvl53l0x_show_set_delay_ms(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct vl_data *data = dev_get_drvdata(dev);
+
+	return snprintf(buf, 5, "%d\n", data->delay_ms);
+}
+
+/* for work handler scheduler time */
+static ssize_t stmvl53l0x_store_set_delay_ms(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	struct vl_data *data = dev_get_drvdata(dev);
+	int delay_ms;
+	int ret;
+
+	ret = kstrtoint(buf, 10, &delay_ms);
+	if (ret || delay_ms <= 0) {
+		err("set delay_ms=%d\n", delay_ms);
+		return count;
+	}
+	mutex_lock(&data->work_mutex);
+	data->delay_ms = delay_ms;
+	mutex_unlock(&data->work_mutex);
+
+	return count;
+}
+
+/* DEVICE_ATTR(name,mode,show,store) */
+static DEVICE_ATTR(set_delay_ms, 0660/*S_IWUGO | S_IRUGO*/,
+				   stmvl53l0x_show_set_delay_ms,
+					stmvl53l0x_store_set_delay_ms);
+
+/* Timing Budget */
+static ssize_t stmvl53l0x_show_timing_budget(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct vl_data *data = dev_get_drvdata(dev);
+
+	return snprintf(buf, 10, "%d\n", data->timingBudget);
+}
+
+static ssize_t stmvl53l0x_store_set_timing_budget(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	struct vl_data *data = dev_get_drvdata(dev);
+	int timingBudget;
+	int ret;
+
+	ret = kstrtoint(buf, 10, &timingBudget);
+	if (ret || timingBudget <= 0) {
+		err("set timingBudget=%d\n", timingBudget);
+		return count;
+	}
+	mutex_lock(&data->work_mutex);
+	data->timingBudget = timingBudget;
+	mutex_unlock(&data->work_mutex);
+
+	return count;
+}
+
+/* DEVICE_ATTR(name,mode,show,store) */
+static DEVICE_ATTR(set_timing_budget, 0660/*S_IWUGO | S_IRUGO*/,
+				   stmvl53l0x_show_timing_budget,
+					stmvl53l0x_store_set_timing_budget);
+
+
+/* Long Range  */
+static ssize_t stmvl53l0x_show_long_range(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct vl_data *data = dev_get_drvdata(dev);
+
+	return snprintf(buf, 5, "%d\n", data->useLongRange);
+}
+
+static ssize_t stmvl53l0x_store_set_long_range(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	struct vl_data *data = dev_get_drvdata(dev);
+	int useLongRange;
+	int ret;
+
+	ret = kstrtoint(buf, 10, &useLongRange);
+	if (ret || ((useLongRange != 0) && (useLongRange != 1))) {
+		err("set useLongRange=%d\n", useLongRange);
+		return count;
+	}
+
+	mutex_lock(&data->work_mutex);
+	data->useLongRange = useLongRange;
+	if (useLongRange)
+		data->timingBudget = 26000;
+	else
+		data->timingBudget = 200000;
+
+	mutex_unlock(&data->work_mutex);
+
+	return count;
+}
+
+/* DEVICE_ATTR(name,mode,show,store) */
+static DEVICE_ATTR(set_long_range, 0660/*S_IWUGO | S_IRUGO*/,
+				   stmvl53l0x_show_long_range,
+					stmvl53l0x_store_set_long_range);
+
+static ssize_t stmvl53l0x_show_meter(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct vl_data *data = dev_get_drvdata(dev);
+	struct VL_RangingMeasurementData_t Measure;
+
+	papi_func_tbl->PerformSingleRangingMeasurement(data, &Measure);
+	return snprintf(buf, 8, "%d\n", Measure.RangeMilliMeter);
+}
+
+/* DEVICE_ATTR(name,mode,show,store) */
+static DEVICE_ATTR(show_meter, 0660/*S_IWUGO | S_IRUGO*/,
+				   stmvl53l0x_show_meter,
+				   NULL);
+
+static ssize_t stmvl53l0x_show_xtalk(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	return 0;
+}
+
+static ssize_t stmvl53l0x_set_xtalk(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct vl_data *data = dev_get_drvdata(dev);
+	unsigned int targetDistance;
+	int ret;
+
+	ret = kstrtouint(buf, 10, &targetDistance);
+	if (ret)
+		return ret;
+	data->xtalkCalDistance = targetDistance;
+	stmvl53l0x_start(data, 3, XTALKCALIB_MODE);
+	return count;
+}
+
+/* DEVICE_ATTR(name,mode,show,store) */
+static DEVICE_ATTR(xtalk_cal, 0660/*S_IWUGO | S_IRUGO*/,
+				   stmvl53l0x_show_xtalk,
+				   stmvl53l0x_set_xtalk);
+
+static ssize_t stmvl53l0x_show_offset(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct vl_data *data = dev_get_drvdata(dev);
+	struct VL_RangingMeasurementData_t Measure;
+
+	papi_func_tbl->PerformSingleRangingMeasurement(data, &Measure);
+	return snprintf(buf, 8, "%d\n", Measure.RangeMilliMeter);
+}
+
+static ssize_t stmvl53l0x_set_offset(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct vl_data *data = dev_get_drvdata(dev);
+	unsigned int targetDistance;
+	int ret;
+
+	ret = kstrtouint(buf, 10, &targetDistance);
+	if (ret)
+		return ret;
+	data->offsetCalDistance = targetDistance;
+	stmvl53l0x_start(data, 3, OFFSETCALIB_MODE);
+	return count;
+}
+
+/* DEVICE_ATTR(name,mode,show,store) */
+static DEVICE_ATTR(offset_cal, 0660/*S_IWUGO | S_IRUGO*/,
+				   stmvl53l0x_show_offset,
+				   stmvl53l0x_set_offset);
+
+static ssize_t stmvl53l0x_set_spad(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+
+{
+	struct vl_data *data = dev_get_drvdata(dev);
+
+	stmvl53l0x_start(data, 3, SPADCALIB_MODE);
+	return count;
+}
+
+/* DEVICE_ATTR(name,mode,show,store) */
+static DEVICE_ATTR(spad_cal, 0660/*S_IWUGO | S_IRUGO*/,
+					NULL,
+					stmvl53l0x_set_spad);
+
+static ssize_t stmvl53l0x_set_ref(struct device *dev,
+						struct device_attribute *attr,
+						const char *buf, size_t count)
+{
+	struct vl_data *data = dev_get_drvdata(dev);
+
+	stmvl53l0x_start(data, 3, REFCALIB_MODE);
+	return count;
+}
+
+/* DEVICE_ATTR(name,mode,show,store) */
+static DEVICE_ATTR(ref_cal, 0660/*S_IWUGO | S_IRUGO*/,
+					NULL,
+					stmvl53l0x_set_ref);
+
+
+static ssize_t stmvl53l0x_show_OffsetCalibrationData(struct device *dev,
+						struct device_attribute *attr,
+						char *buf)
+{
+	struct vl_data *data = dev_get_drvdata(dev);
+	int32_t offset_calibration_data;
+
+	papi_func_tbl->GetOffsetCalibrationDataMicroMeter(data,
+					&offset_calibration_data);
+	dbg("GetOffsetCalibrationDataMicroMeter = %d\n",
+					offset_calibration_data);
+	return snprintf(buf, 16, "%d\n",
+			offset_calibration_data);
+}
+
+static ssize_t stmvl53l0x_set_OffsetCalibrationData(struct device *dev,
+						struct device_attribute *attr,
+						const char *buf, size_t count)
+{
+	struct vl_data *data = dev_get_drvdata(dev);
+	int32_t offset_calibration_data;
+	int ret;
+
+	ret = kstrtoint(buf, 10, &offset_calibration_data);
+	if (ret)
+		return ret;
+	papi_func_tbl->SetOffsetCalibrationDataMicroMeter(data,
+			offset_calibration_data);
+	return count;
+}
+
+/* DEVICE_ATTR(name,mode,show,store) */
+static DEVICE_ATTR(set_offsetdata, 0660/*S_IWUGO | S_IRUGO*/,
+					stmvl53l0x_show_OffsetCalibrationData,
+					stmvl53l0x_set_OffsetCalibrationData);
+
+static ssize_t stmvl53l0x_show_XTalkCompensationRateMegaCps(struct device *dev,
+						struct device_attribute *attr,
+						char *buf)
+{
+	struct vl_data *data = dev_get_drvdata(dev);
+	int32_t xtalk_compensation_rate;
+
+	papi_func_tbl->GetXTalkCompensationRateMegaCps(data,
+					&xtalk_compensation_rate);
+	dbg("xtalk_compensation_rate = %d\n",
+					xtalk_compensation_rate);
+	return snprintf(buf, 16, "%d\n", xtalk_compensation_rate);
+
+}
+
+static ssize_t stmvl53l0x_set_XTalkCompensationRateMegaCps(struct device *dev,
+						struct device_attribute *attr,
+						const char *buf, size_t count)
+{
+	struct vl_data *data = dev_get_drvdata(dev);
+	unsigned int xtalk_compensation_rate;
+	int ret;
+
+	ret = kstrtouint(buf, 10, &xtalk_compensation_rate);
+	if (ret)
+		return ret;
+	papi_func_tbl->SetXTalkCompensationRateMegaCps(data,
+						xtalk_compensation_rate);
+	return count;
+
+}
+
+/* DEVICE_ATTR(name,mode,show,store) */
+static DEVICE_ATTR(set_xtalkdata, 0660/*S_IWUGO | S_IRUGO*/,
+				stmvl53l0x_show_XTalkCompensationRateMegaCps,
+				stmvl53l0x_set_XTalkCompensationRateMegaCps);
+
+
+
+static struct attribute *stmvl53l0x_attributes[] = {
+	&dev_attr_enable_ps_sensor.attr,
+	&dev_attr_enable_debug.attr,
+	&dev_attr_set_delay_ms.attr,
+	&dev_attr_set_timing_budget.attr,
+	&dev_attr_set_long_range.attr,
+	&dev_attr_show_meter.attr,
+	&dev_attr_xtalk_cal.attr,
+	&dev_attr_offset_cal.attr,
+	&dev_attr_spad_cal.attr,
+	&dev_attr_ref_cal.attr,
+	&dev_attr_set_offsetdata.attr,
+	&dev_attr_set_xtalkdata.attr,
+	NULL,
+};
+
+
+static const struct attribute_group stmvl53l0x_attr_group = {
+	.attrs = stmvl53l0x_attributes,
+};
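[Editor's note] The attribute group above is also reachable from userspace through sysfs. A hedged C sketch follows; the sysfs directory depends on where the input device is registered, so the path argument below is an assumption and the helper name is hypothetical.

    /*
     * Illustrative sketch (not part of the patch): enable the sensor through
     * the enable_ps_sensor attribute created by sysfs_create_group().
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    static int stmvl53l0x_enable_via_sysfs(const char *attr_dir)
    {
    	char path[256];
    	int fd, n;

    	n = snprintf(path, sizeof(path), "%s/enable_ps_sensor", attr_dir);
    	if (n < 0 || n >= (int)sizeof(path))
    		return -1;
    	fd = open(path, O_WRONLY);
    	if (fd < 0)
    		return -1;
    	/* "1" starts ranging, "0" stops it (see the store handler above) */
    	if (write(fd, "1", 1) != 1) {
    		close(fd);
    		return -1;
    	}
    	close(fd);
    	return 0;
    }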
+
+/*
+ * misc device file operation functions
+ */
+static int stmvl53l0x_ioctl_handler(struct file *file,
+			unsigned int cmd, unsigned long arg,
+			void __user *p)
+{
+	int rc = 0;
+	unsigned int xtalkint = 0;
+	unsigned int targetDistance = 0;
+	int8_t offsetint = 0;
+	struct vl_data *data =
+			container_of(file->private_data,
+				struct vl_data, miscdev);
+	struct stmvl53l0x_register reg;
+	struct stmvl53l0x_parameter parameter;
+	struct vl_data *vl53l0x_dev = data;
+	uint8_t deviceMode;
+	uint8_t page_num = 0;
+
+	if (!data)
+		return -EINVAL;
+
+	dbg("Enter enable_ps_sensor:%d\n", data->enable_ps_sensor);
+	switch (cmd) {
+	/* enable */
+	case VL_IOCTL_INIT:
+		dbg("VL_IOCTL_INIT\n");
+		/* turn on tof sensor only if it's not enabled by other */
+		/* client */
+		if (data->enable_ps_sensor == 0) {
+			/* to start */
+			stmvl53l0x_start(data, 3, NORMAL_MODE);
+		} else
+			rc = -EINVAL;
+		break;
+	/* crosstalk calibration */
+	case VL_IOCTL_XTALKCALB:
+		dbg("VL_IOCTL_XTALKCALB\n");
+		data->xtalkCalDistance = 100;
+		if (copy_from_user(&targetDistance, (unsigned int *)p,
+			sizeof(unsigned int))) {
+			err("%d, fail\n", __LINE__);
+			return -EFAULT;
+		}
+		data->xtalkCalDistance = targetDistance;
+
+		/* turn on tof sensor only if it's not enabled by other */
+		/* client */
+		if (data->enable_ps_sensor == 0) {
+			/* to start */
+			stmvl53l0x_start(data, 3, XTALKCALIB_MODE);
+		} else
+			rc = -EINVAL;
+		break;
+	/* set up Xtalk value */
+	case VL_IOCTL_SETXTALK:
+		dbg("VL_IOCTL_SETXTALK\n");
+		if (copy_from_user(&xtalkint, (unsigned int *)p,
+			sizeof(unsigned int))) {
+			err("%d, fail\n", __LINE__);
+			return -EFAULT;
+		}
+		dbg("SETXTALK as 0x%x\n", xtalkint);
+/* later
+ *	SetXTalkCompensationRate(vl53l0x_dev, xtalkint);
+ */
+		break;
+	/* offset calibration */
+	case VL_IOCTL_OFFCALB:
+		dbg("VL_IOCTL_OFFCALB\n");
+		data->offsetCalDistance = 50;
+		if (copy_from_user(&targetDistance, (unsigned int *)p,
+			sizeof(unsigned int))) {
+			err("%d, fail\n", __LINE__);
+			return -EFAULT;
+		}
+		data->offsetCalDistance = targetDistance;
+		if (data->enable_ps_sensor == 0) {
+			/* to start */
+			stmvl53l0x_start(data, 3, OFFSETCALIB_MODE);
+		} else
+			rc = -EINVAL;
+		break;
+	/* set up offset value */
+	case VL_IOCTL_SETOFFSET:
+		dbg("VL_IOCTL_SETOFFSET\n");
+		if (copy_from_user(&offsetint, (int8_t *)p, sizeof(int8_t))) {
+			err("%d, fail\n", __LINE__);
+			return -EFAULT;
+		}
+		dbg("SETOFFSET as %d\n", offsetint);
+	/* later SetOffsetCalibrationData(vl53l0x_dev, offsetint); */
+		break;
+	/* disable */
+	case VL_IOCTL_STOP:
+		dbg("VL_IOCTL_STOP\n");
+		/* turn off tof sensor only if it's enabled by other client */
+		if (data->enable_ps_sensor == 1) {
+			data->enable_ps_sensor = 0;
+			/* to stop */
+			stmvl53l0x_stop(data);
+		}
+		break;
+	/* Get all range data */
+	case VL_IOCTL_GETDATAS:
+		dbg("VL_IOCTL_GETDATAS\n");
+		if (copy_to_user((struct VL_RangingMeasurementData_t *)p,
+			&(data->rangeData),
+			sizeof(struct VL_RangingMeasurementData_t))) {
+			err("%d, fail\n", __LINE__);
+			return -EFAULT;
+		}
+		break;
+	/* Register tool */
+	case VL_IOCTL_REGISTER:
+		dbg("VL_IOCTL_REGISTER\n");
+		if (copy_from_user(&reg, (struct stmvl53l0x_register *)p,
+			sizeof(struct stmvl53l0x_register))) {
+			err("%d, fail\n", __LINE__);
+			return -EFAULT;
+		}
+		reg.status = 0;
+		page_num = (uint8_t)((reg.reg_index & 0x0000ff00) >> 8);
+		dbg("VL_IOCTL_REGISTER, page number:%d\n", page_num);
+		if (page_num != 0)
+			reg.status = VL_WrByte(vl53l0x_dev,
+				0xFF, page_num);
+
+		switch (reg.reg_bytes) {
+		case(4):
+			if (reg.is_read)
+				reg.status = VL_RdDWord(vl53l0x_dev,
+					(uint8_t)reg.reg_index,
+					&reg.reg_data);
+			else
+				reg.status = VL_WrDWord(vl53l0x_dev,
+					(uint8_t)reg.reg_index,
+					reg.reg_data);
+			break;
+		case(2):
+			if (reg.is_read)
+				reg.status = VL_RdWord(vl53l0x_dev,
+					(uint8_t)reg.reg_index,
+					(uint16_t *)&reg.reg_data);
+			else
+				reg.status = VL_WrWord(vl53l0x_dev,
+					(uint8_t)reg.reg_index,
+					(uint16_t)reg.reg_data);
+			break;
+		case(1):
+			if (reg.is_read)
+				reg.status = VL_RdByte(vl53l0x_dev,
+					(uint8_t)reg.reg_index,
+					(uint8_t *)&reg.reg_data);
+			else
+				reg.status = VL_WrByte(vl53l0x_dev,
+					(uint8_t)reg.reg_index,
+					(uint8_t)reg.reg_data);
+			break;
+		default:
+			reg.status = -1;
+
+		}
+		if (page_num != 0)
+			reg.status = VL_WrByte(vl53l0x_dev, 0xFF, 0);
+
+
+		if (copy_to_user((struct stmvl53l0x_register *)p, &reg,
+				sizeof(struct stmvl53l0x_register))) {
+			err("%d, fail\n", __LINE__);
+			return -EFAULT;
+		}
+		break;
+	/* parameter access */
+	case VL_IOCTL_PARAMETER:
+		dbg("VL_IOCTL_PARAMETER\n");
+		if (copy_from_user(&parameter, (struct stmvl53l0x_parameter *)p,
+				sizeof(struct stmvl53l0x_parameter))) {
+			err("%d, fail\n", __LINE__);
+			return -EFAULT;
+		}
+		parameter.status = 0;
+		if (data->enableDebug)
+			dbg("VL_IOCTL_PARAMETER Name = %d\n",
+				parameter.name);
+		switch (parameter.name) {
+		case (OFFSET_PAR):
+			if (parameter.is_read)
+				parameter.status =
+			papi_func_tbl->GetOffsetCalibrationDataMicroMeter(
+						vl53l0x_dev, &parameter.value);
+			else
+				parameter.status =
+			papi_func_tbl->SetOffsetCalibrationDataMicroMeter(
+				vl53l0x_dev, parameter.value);
+			dbg("get parameter value as %d\n",
+				parameter.value);
+			break;
+
+		case (REFERENCESPADS_PAR):
+			if (parameter.is_read) {
+				parameter.status =
+				papi_func_tbl->GetReferenceSpads(vl53l0x_dev,
+					(uint32_t *)&(parameter.value),
+					(uint8_t *)&(parameter.value2));
+				if (data->enableDebug)
+					dbg("Get RefSpad: Count:%u, Type:%u\n",
+				parameter.value, (uint8_t)parameter.value2);
+			} else {
+				if (data->enableDebug)
+					dbg("Set RefSpad: Count:%u, Type:%u\n",
+				parameter.value, (uint8_t)parameter.value2);
+
+				parameter.status =
+					papi_func_tbl->SetReferenceSpads(
+					vl53l0x_dev,
+					(uint32_t)(parameter.value),
+					(uint8_t)(parameter.value2));
+			}
+			break;
+
+		case (REFCALIBRATION_PAR):
+			if (parameter.is_read) {
+				parameter.status =
+				papi_func_tbl->GetRefCalibration(vl53l0x_dev,
+				(uint8_t *)&(parameter.value),
+				(uint8_t *)&(parameter.value2));
+				if (data->enableDebug)
+					dbg("Get Ref: Vhv:%u, PhaseCal:%u\n",
+					(uint8_t)parameter.value,
+					(uint8_t)parameter.value2);
+			} else {
+				if (data->enableDebug)
+					dbg("Set Ref: Vhv:%u, PhaseCal:%u\n",
+					(uint8_t)parameter.value,
+					(uint8_t)parameter.value2);
+				parameter.status =
+					papi_func_tbl->SetRefCalibration(
+					vl53l0x_dev, (uint8_t)(parameter.value),
+					(uint8_t)(parameter.value2));
+			}
+			break;
+		case (XTALKRATE_PAR):
+			if (parameter.is_read)
+				parameter.status =
+				papi_func_tbl->GetXTalkCompensationRateMegaCps(
+						vl53l0x_dev, (unsigned int *)
+						&parameter.value);
+			else
+				parameter.status =
+				papi_func_tbl->SetXTalkCompensationRateMegaCps(
+						vl53l0x_dev,
+						(unsigned int)
+							parameter.value);
+
+			break;
+		case (XTALKENABLE_PAR):
+			if (parameter.is_read)
+				parameter.status =
+				papi_func_tbl->GetXTalkCompensationEnable(
+						vl53l0x_dev,
+						(uint8_t *) &parameter.value);
+			else
+				parameter.status =
+				papi_func_tbl->SetXTalkCompensationEnable(
+						vl53l0x_dev,
+						(uint8_t) parameter.value);
+			break;
+		case (GPIOFUNC_PAR):
+			if (parameter.is_read) {
+				parameter.status =
+				papi_func_tbl->GetGpioConfig(vl53l0x_dev, 0,
+						&deviceMode,
+						&data->gpio_function,
+						&data->gpio_polarity);
+				parameter.value = data->gpio_function;
+			} else {
+				data->gpio_function = parameter.value;
+				parameter.status =
+				papi_func_tbl->SetGpioConfig(vl53l0x_dev, 0, 0,
+						data->gpio_function,
+						data->gpio_polarity);
+			}
+			break;
+		case (LOWTHRESH_PAR):
+			if (parameter.is_read) {
+				parameter.status =
+				papi_func_tbl->GetInterruptThresholds(
+				vl53l0x_dev, 0, &(data->low_threshold),
+				&(data->high_threshold));
+				parameter.value = data->low_threshold >> 16;
+			} else {
+				data->low_threshold = parameter.value << 16;
+				parameter.status =
+				papi_func_tbl->SetInterruptThresholds(
+				vl53l0x_dev, 0, data->low_threshold,
+				data->high_threshold);
+			}
+			break;
+		case (HIGHTHRESH_PAR):
+			if (parameter.is_read) {
+				parameter.status =
+				papi_func_tbl->GetInterruptThresholds(
+				vl53l0x_dev, 0, &(data->low_threshold),
+				&(data->high_threshold));
+				parameter.value = data->high_threshold >> 16;
+			} else {
+				data->high_threshold = parameter.value << 16;
+				parameter.status =
+				papi_func_tbl->SetInterruptThresholds(
+				vl53l0x_dev, 0, data->low_threshold,
+				data->high_threshold);
+			}
+			break;
+		case (DEVICEMODE_PAR):
+			if (parameter.is_read) {
+				parameter.status =
+					papi_func_tbl->GetDeviceMode(
+					vl53l0x_dev,
+					(uint8_t *)&
+					(parameter.value));
+			} else {
+				parameter.status =
+					papi_func_tbl->SetDeviceMode(
+					vl53l0x_dev,
+					(uint8_t)(parameter.value));
+				data->deviceMode =
+					(uint8_t)(parameter.value);
+			}
+			break;
+
+
+
+		case (INTERMEASUREMENT_PAR):
+			if (parameter.is_read) {
+				parameter.status =
+			papi_func_tbl->GetInterMeasurementPeriodMilliSeconds(
+				vl53l0x_dev, (uint32_t *)&(parameter.value));
+			} else {
+				parameter.status =
+			papi_func_tbl->SetInterMeasurementPeriodMilliSeconds(
+				vl53l0x_dev, (uint32_t)(parameter.value));
+				data->interMeasurems = parameter.value;
+			}
+			break;
+
+		}
+
+		if (copy_to_user((struct stmvl53l0x_parameter *)p, &parameter,
+				sizeof(struct stmvl53l0x_parameter))) {
+			err("%d, fail\n", __LINE__);
+			return -EFAULT;
+		}
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static int stmvl53l0x_open(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static int stmvl53l0x_flush(struct file *file, fl_owner_t id)
+{
+	struct vl_data *data = container_of(file->private_data,
+					struct vl_data, miscdev);
+	(void) file;
+	(void) id;
+
+	if (data) {
+		if (data->enable_ps_sensor == 1) {
+			/* turn off tof sensor if it's enabled */
+			data->enable_ps_sensor = 0;
+			/* to stop */
+			stmvl53l0x_stop(data);
+		}
+	}
+	return 0;
+}
+
+static long stmvl53l0x_ioctl(struct file *file,
+				unsigned int cmd, unsigned long arg)
+{
+	long ret;
+	struct vl_data *data =
+			container_of(file->private_data,
+					struct vl_data, miscdev);
+	mutex_lock(&data->work_mutex);
+	ret = stmvl53l0x_ioctl_handler(file, cmd, arg, (void __user *)arg);
+	mutex_unlock(&data->work_mutex);
+
+	return ret;
+}
+
+/*
+ * Initialization function
+ */
+static int stmvl53l0x_init_client(struct vl_data *data)
+{
+
+	int8_t Status = VL_ERROR_NONE;
+	struct VL_DeviceInfo_t DeviceInfo;
+	struct vl_data *vl53l0x_dev = data;
+
+	uint32_t refSpadCount;
+	uint8_t isApertureSpads;
+	uint8_t VhvSettings;
+	uint8_t PhaseCal;
+
+	dbg("Enter\n");
+
+	vl53l0x_dev->I2cDevAddr      = 0x52;
+	vl53l0x_dev->comms_type      =  1;
+	vl53l0x_dev->comms_speed_khz =  400;
+
+	/* Setup API functions based on revision */
+	stmvl53l0x_setupAPIFunctions();
+
+	if (Status == VL_ERROR_NONE && data->reset) {
+		dbg("Call of VL_DataInit\n");
+		/* Data initialization */
+		Status = papi_func_tbl->DataInit(vl53l0x_dev);
+	}
+
+	if (Status == VL_ERROR_NONE) {
+		dbg("VL_GetDeviceInfo:\n");
+		Status = papi_func_tbl->GetDeviceInfo(vl53l0x_dev, &DeviceInfo);
+		if (Status == VL_ERROR_NONE) {
+			dbg("Device Name : %s\n", DeviceInfo.Name);
+			dbg("Device Type : %s\n", DeviceInfo.Type);
+			dbg("Device ID : %s\n", DeviceInfo.ProductId);
+			dbg("Product type: %d\n", DeviceInfo.ProductType);
+			dbg("ProductRevisionMajor : %d\n",
+				DeviceInfo.ProductRevisionMajor);
+			dbg("ProductRevisionMinor : %d\n",
+				DeviceInfo.ProductRevisionMinor);
+		}
+	}
+
+	if (Status == VL_ERROR_NONE) {
+		dbg("Call of VL_StaticInit\n");
+		Status = papi_func_tbl->StaticInit(vl53l0x_dev);
+		/* Device Initialization */
+	}
+
+	if (Status == VL_ERROR_NONE && data->reset) {
+		if (papi_func_tbl->PerformRefCalibration != NULL) {
+			dbg("Call of VL_PerformRefCalibration\n");
+			Status = papi_func_tbl->PerformRefCalibration(
+				vl53l0x_dev, &VhvSettings, &PhaseCal);
+				/* Ref calibration */
+		}
+	}
+
+	if (Status == VL_ERROR_NONE && data->reset) {
+		if (papi_func_tbl->PerformRefSpadManagement != NULL) {
+			dbg("Call of VL_PerformRefSpadManagement\n");
+			Status = papi_func_tbl->PerformRefSpadManagement(
+				vl53l0x_dev, &refSpadCount, &isApertureSpads);
+				/* Ref Spad Management */
+		}
+		data->reset = 0;
+		/* needed even if the function is NULL */
+	}
+
+	if (Status == VL_ERROR_NONE) {
+
+		dbg("Call of VL_SetDeviceMode\n");
+		Status = papi_func_tbl->SetDeviceMode(vl53l0x_dev,
+					VL_DEVICEMODE_SINGLE_RANGING);
+		/* Setup in	single ranging mode */
+	}
+	if (Status == VL_ERROR_NONE)
+		Status = papi_func_tbl->SetWrapAroundCheckEnable(
+			vl53l0x_dev, 1);
+
+	dbg("End\n");
+
+	return Status;
+}
+
+static int stmvl53l0x_start(struct vl_data *data, uint8_t scaling,
+	enum init_mode_e mode)
+{
+	int rc = 0;
+	struct vl_data *vl53l0x_dev = data;
+	int8_t Status = VL_ERROR_NONE;
+
+	dbg("Enter\n");
+
+	/* Power up */
+	rc = pmodule_func_tbl->power_up(data->client_object, &data->reset);
+	if (rc) {
+		err("%d,error rc %d\n", __LINE__, rc);
+		return rc;
+	}
+	/* init */
+	rc = stmvl53l0x_init_client(data);
+	if (rc) {
+		err("%d, error rc %d\n", __LINE__, rc);
+		pmodule_func_tbl->power_down(data->client_object);
+		return -EINVAL;
+	}
+
+	/* check mode */
+	if (mode != NORMAL_MODE)
+		papi_func_tbl->SetXTalkCompensationEnable(vl53l0x_dev, 1);
+
+	if (mode == OFFSETCALIB_MODE) {
+		/*VL_SetOffsetCalibrationDataMicroMeter(vl53l0x_dev, 0);*/
+		unsigned int OffsetMicroMeter;
+
+		papi_func_tbl->PerformOffsetCalibration(vl53l0x_dev,
+			(data->offsetCalDistance<<16),
+			&OffsetMicroMeter);
+		dbg("Offset calibration:%u\n", OffsetMicroMeter);
+		return rc;
+	} else if (mode == XTALKCALIB_MODE) {
+		unsigned int xtalk_compensation_rate_mega_cps;
+		/* convert the calibration target distance */
+		/* to 16.16 fixed-point format */
+		papi_func_tbl->PerformXTalkCalibration(vl53l0x_dev,
+			(data->xtalkCalDistance<<16),
+			&xtalk_compensation_rate_mega_cps);
+		dbg("Xtalk calibration:%u\n", xtalk_compensation_rate_mega_cps);
+		return rc;
+	} else if (mode == SPADCALIB_MODE) {
+		uint32_t ref_spad_count;
+		uint8_t is_aperture_spads;
+
+		papi_func_tbl->PerformRefSpadManagement(
+			vl53l0x_dev,
+			&ref_spad_count,
+			&is_aperture_spads);
+		dbg("SPAD calibration:%d,%d\n", ref_spad_count,
+					(unsigned int)is_aperture_spads);
+		return rc;
+	} else if (mode == REFCALIB_MODE) {
+		uint8_t vhv_settings;
+		uint8_t phase_cal;
+
+		papi_func_tbl->PerformRefCalibration(
+			vl53l0x_dev,
+			&vhv_settings,
+			&phase_cal);
+		dbg("Ref calibration:%u,%u\n",
+		(unsigned int)vhv_settings,
+		(unsigned int)phase_cal);
+		return rc;
+	}
+	/* set up device parameters */
+	data->gpio_polarity = VL_INTERRUPTPOLARITY_LOW;
+
+	/* Following two calls are made from IOCTL as well */
+	papi_func_tbl->SetGpioConfig(vl53l0x_dev, 0, 0,
+		data->gpio_function,
+		VL_INTERRUPTPOLARITY_LOW);
+
+	papi_func_tbl->SetInterruptThresholds(vl53l0x_dev, 0,
+		data->low_threshold, data->high_threshold);
+
+	if (data->deviceMode == VL_DEVICEMODE_CONTINUOUS_TIMED_RANGING) {
+		papi_func_tbl->SetInterMeasurementPeriodMilliSeconds(
+			vl53l0x_dev, data->interMeasurems);
+	}
+
+	dbg("DeviceMode:0x%x, interMeasurems:%d==\n", data->deviceMode,
+			data->interMeasurems);
+	papi_func_tbl->SetDeviceMode(vl53l0x_dev,
+			data->deviceMode);
+	papi_func_tbl->ClearInterruptMask(vl53l0x_dev,
+							0);
+
+	if (vl53l0x_dev->useLongRange) {
+		dbg("Configure Long Ranging\n");
+		Status = papi_func_tbl->SetLimitCheckValue(vl53l0x_dev,
+			VL_CHECKENABLE_SIGNAL_RATE_FINAL_RANGE,
+			(unsigned int)(65536/10)); /* 0.1 * 65536 */
+		if (Status == VL_ERROR_NONE) {
+			Status = papi_func_tbl->SetLimitCheckValue(vl53l0x_dev,
+				VL_CHECKENABLE_SIGMA_FINAL_RANGE,
+				(unsigned int)(60*65536));
+		} else {
+			dbg("SIGNAL_RATE_FINAL_RANGE failed err = %d\n",
+				Status);
+		}
+
+		if (Status == VL_ERROR_NONE) {
+			dbg("Set Timing Budget = %u\n",
+				vl53l0x_dev->timingBudget);
+			Status =
+			papi_func_tbl->SetMeasurementTimingBudgetMicroSeconds(
+				vl53l0x_dev, vl53l0x_dev->timingBudget);
+		} else {
+			dbg("SetLimitCheckValue failed err =%d\n",
+				Status);
+		}
+
+		if (Status == VL_ERROR_NONE) {
+			Status = papi_func_tbl->SetVcselPulsePeriod(vl53l0x_dev,
+				VL_VCSEL_PERIOD_PRE_RANGE, 18);
+		} else {
+			dbg("SetMeasurementTimingBudget failed err = %d\n",
+				Status);
+		}
+
+		if (Status == VL_ERROR_NONE) {
+			Status = papi_func_tbl->SetVcselPulsePeriod(vl53l0x_dev,
+				VL_VCSEL_PERIOD_FINAL_RANGE, 14);
+		} else {
+			dbg("SetVcselPulsePeriod(PRE, 18) failed err = %d\n",
+				Status);
+		}
+
+		if (Status != VL_ERROR_NONE) {
+			dbg("SetVcselPulsePeriod(FINAL, 14) failed err = %d\n",
+				Status);
+		}
+	} else {
+		dbg("Configure High Accuracy\n");
+		Status = papi_func_tbl->SetLimitCheckValue(vl53l0x_dev,
+			VL_CHECKENABLE_SIGNAL_RATE_FINAL_RANGE,
+			(unsigned int)(25 * 65536 / 100)); /* 0.25 * 65536 */
+		if (Status == VL_ERROR_NONE) {
+			Status = papi_func_tbl->SetLimitCheckValue(vl53l0x_dev,
+				VL_CHECKENABLE_SIGMA_FINAL_RANGE,
+				(unsigned int)(18*65536));
+		} else {
+			dbg("SIGNAL_RATE_FINAL_RANGE failed err = %d\n",
+				Status);
+		}
+
+		if (Status == VL_ERROR_NONE) {
+			dbg("Set Timing Budget = %u\n",
+				vl53l0x_dev->timingBudget);
+			Status =
+			papi_func_tbl->SetMeasurementTimingBudgetMicroSeconds(
+				vl53l0x_dev, vl53l0x_dev->timingBudget);
+		} else {
+			dbg("SetLimitCheckValue failed err = %d\n",
+				Status);
+		}
+
+		if (Status == VL_ERROR_NONE) {
+			Status = papi_func_tbl->SetVcselPulsePeriod(vl53l0x_dev,
+				VL_VCSEL_PERIOD_PRE_RANGE, 14);
+		} else {
+			dbg("SetMeasurementTimingBudget failed err = %d\n",
+				Status);
+		}
+
+		if (Status == VL_ERROR_NONE) {
+			Status = papi_func_tbl->SetVcselPulsePeriod(vl53l0x_dev,
+				VL_VCSEL_PERIOD_FINAL_RANGE, 10);
+		} else {
+			dbg("SetVcselPulsePeriod failed err = %d\n",
+				Status);
+		}
+
+		if (Status != VL_ERROR_NONE) {
+			dbg("SetVcselPulsePeriod failed err = %d\n",
+				Status);
+		}
+
+	}
+
+	/* start the ranging */
+	papi_func_tbl->StartMeasurement(vl53l0x_dev);
+	data->enable_ps_sensor = 1;
+
+
+	dbg("End\n");
+
+	return rc;
+}
+
+static int stmvl53l0x_stop(struct vl_data *data)
+{
+	int rc = 0;
+	struct vl_data *vl53l0x_dev = data;
+
+	dbg("Enter\n");
+
+	/* stop - if continuous mode */
+	if (data->deviceMode == VL_DEVICEMODE_CONTINUOUS_RANGING ||
+		data->deviceMode == VL_DEVICEMODE_CONTINUOUS_TIMED_RANGING)
+		papi_func_tbl->StopMeasurement(vl53l0x_dev);
+
+	/* clean interrupt */
+	papi_func_tbl->ClearInterruptMask(vl53l0x_dev, 0);
+
+	/* cancel work handler */
+	stmvl53l0x_cancel_handler(data);
+	/* power down */
+	rc = pmodule_func_tbl->power_down(data->client_object);
+	if (rc) {
+		err("%d, error rc %d\n", __LINE__, rc);
+		return rc;
+	}
+	dbg("End\n");
+
+	return rc;
+}
+
+/*
+ * I2C init/probing/exit functions
+ */
+static const struct file_operations stmvl53l0x_ranging_fops = {
+	.owner =			THIS_MODULE,
+	.unlocked_ioctl =	stmvl53l0x_ioctl,
+	.open =				stmvl53l0x_open,
+	.flush =			stmvl53l0x_flush,
+};
+
+int stmvl53l0x_setup(struct vl_data *data)
+{
+	int rc = 0;
+
+	dbg("Enter\n");
+
+	/* init mutex */
+	mutex_init(&data->update_lock);
+	mutex_init(&data->work_mutex);
+
+	init_waitqueue_head(&data->poll_thread_wq);
+
+	data->poll_thread = kthread_run(&stmvl53l0x_poll_thread,
+				(void *)data, "STM-VL53L0");
+	if (IS_ERR(data->poll_thread)) {
+		rc = PTR_ERR(data->poll_thread);
+		dbg("%s(%d) - Failed to create Polling thread\n",
+			__func__, __LINE__);
+		goto exit_free_irq;
+	}
+
+	/* init work handler */
+	INIT_DELAYED_WORK(&data->dwork, stmvl53l0x_work_handler);
+
+	/* Register to Input Device */
+	data->input_dev_ps = input_allocate_device();
+	if (!data->input_dev_ps) {
+		rc = -ENOMEM;
+		err("%d error:%d\n", __LINE__, rc);
+
+		goto exit_free_irq;
+	}
+	set_bit(EV_ABS, data->input_dev_ps->evbit);
+	/* range in cm*/
+	input_set_abs_params(data->input_dev_ps, ABS_DISTANCE, 0, 76, 0, 0);
+	/* tv_sec */
+	input_set_abs_params(data->input_dev_ps, ABS_HAT0X, 0, 0xffffffff,
+		0, 0);
+	/* tv_usec */
+	input_set_abs_params(data->input_dev_ps, ABS_HAT0Y, 0, 0xffffffff,
+		0, 0);
+	/* range in_mm */
+	input_set_abs_params(data->input_dev_ps, ABS_HAT1X, 0, 765, 0, 0);
+	/* error code change maximum to 0xff for more flexibility */
+	input_set_abs_params(data->input_dev_ps, ABS_HAT1Y, 0, 0xff, 0, 0);
+	/* rtnRate */
+	input_set_abs_params(data->input_dev_ps, ABS_HAT2X, 0, 0xffffffff,
+		0, 0);
+	/* rtn_amb_rate */
+	input_set_abs_params(data->input_dev_ps, ABS_HAT2Y, 0, 0xffffffff,
+		0, 0);
+	/* rtn_conv_time */
+	input_set_abs_params(data->input_dev_ps, ABS_HAT3X, 0, 0xffffffff,
+		0, 0);
+	/* dmax */
+	input_set_abs_params(data->input_dev_ps, ABS_HAT3Y, 0, 0xffffffff,
+		0, 0);
+
+	input_set_abs_params(data->input_dev_ps, ABS_PRESSURE, 0, 0xffffffff,
+		0, 0);
+
+	input_set_abs_params(data->input_dev_ps, ABS_WHEEL, 0, 0xffffffff,
+		0, 0);
+
+	data->input_dev_ps->name = "STM VL53L0 proximity sensor";
+
+	rc = input_register_device(data->input_dev_ps);
+	if (rc) {
+		err("%d error:%d\n", __LINE__, rc);
+		goto exit_free_dev_ps;
+	}
+	/* setup drv data */
+	input_set_drvdata(data->input_dev_ps, data);
+
+	/* Register sysfs hooks */
+	data->range_kobj = kobject_create_and_add("range", kernel_kobj);
+	if (!data->range_kobj) {
+		rc = -ENOMEM;
+		err("%d error:%d\n", __LINE__, rc);
+		goto exit_unregister_dev_ps;
+	}
+	rc = sysfs_create_group(&data->input_dev_ps->dev.kobj,
+			&stmvl53l0x_attr_group);
+	if (rc) {
+		err("%d error:%d\n", __LINE__, rc);
+		goto exit_unregister_dev_ps_1;
+	}
+	/* init default device parameter value */
+	data->enable_ps_sensor = 0;
+	data->reset = 1;
+	data->delay_ms = 30;	/* delay time to 30ms */
+	data->enableDebug = 0;
+	data->gpio_polarity = VL_INTERRUPTPOLARITY_LOW;
+	data->gpio_function = VL_GPIOFUNCTIONALITY_NEW_MEASURE_READY;
+	data->low_threshold = 60;
+	data->high_threshold = 200;
+	data->deviceMode = VL_DEVICEMODE_SINGLE_RANGING;
+	data->interMeasurems = 30;
+	data->timingBudget = 26000;
+	data->useLongRange = 1;
+
+	dbg("support ver. %s enabled\n", DRIVER_VERSION);
+	dbg("End");
+
+	return 0;
+exit_unregister_dev_ps_1:
+	kobject_put(data->range_kobj);
+exit_unregister_dev_ps:
+	input_unregister_device(data->input_dev_ps);
+exit_free_dev_ps:
+	input_free_device(data->input_dev_ps);
+exit_free_irq:
+	kfree(data);
+	return rc;
+}
+
+static int __init stmvl53l0x_init(void)
+{
+	int ret = -1;
+
+	dbg("Enter\n");
+
+	/* assign function table */
+	pmodule_func_tbl = &stmvl53l0x_module_func_tbl;
+	papi_func_tbl = &stmvl53l0x_api_func_tbl;
+
+	/* client specific init function */
+	ret = pmodule_func_tbl->init();
+
+	if (ret)
+		err("%d failed with %d\n", __LINE__, ret);
+
+	dbg("End\n");
+
+	return ret;
+}
+
+static void __exit stmvl53l0x_exit(void)
+{
+	dbg("Enter\n");
+
+	dbg("End\n");
+}
+
+MODULE_DESCRIPTION("ST FlightSense Time-of-Flight sensor driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(stmvl53l0x_init);
+module_exit(stmvl53l0x_exit);
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index af83d2e..a8a96def 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -2538,13 +2538,31 @@
 }
 
 static int alps_update_dual_info_ss4_v2(unsigned char otp[][4],
-				       struct alps_data *priv)
+					struct alps_data *priv,
+					struct psmouse *psmouse)
 {
 	bool is_dual = false;
+	int reg_val = 0;
+	struct ps2dev *ps2dev = &psmouse->ps2dev;
 
-	if (IS_SS4PLUS_DEV(priv->dev_id))
+	if (IS_SS4PLUS_DEV(priv->dev_id)) {
 		is_dual = (otp[0][0] >> 4) & 0x01;
 
+		if (!is_dual) {
+			/* To support the TrackStick on ThinkPad L/E series */
+			if (alps_exit_command_mode(psmouse) == 0 &&
+				alps_enter_command_mode(psmouse) == 0) {
+				reg_val = alps_command_mode_read_reg(psmouse,
+									0xD7);
+			}
+			alps_exit_command_mode(psmouse);
+			ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE);
+
+			if (reg_val == 0x0C || reg_val == 0x1D)
+				is_dual = true;
+		}
+	}
+
 	if (is_dual)
 		priv->flags |= ALPS_DUALPOINT |
 					ALPS_DUALPOINT_WITH_PRESSURE;
@@ -2567,7 +2585,7 @@
 
 	alps_update_btn_info_ss4_v2(otp, priv);
 
-	alps_update_dual_info_ss4_v2(otp, priv);
+	alps_update_dual_info_ss4_v2(otp, priv, psmouse);
 
 	return 0;
 }
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index c9d491b..3851d57 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1082,6 +1082,13 @@
 		return error;
 	}
 
+	/* Make sure there is something at this address */
+	error = i2c_smbus_read_byte(client);
+	if (error < 0) {
+		dev_dbg(&client->dev, "nothing at this address: %d\n", error);
+		return -ENXIO;
+	}
+
 	/* Initialize the touchpad. */
 	error = elan_initialize(data);
 	if (error)
diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c
index a679e56..765879d 100644
--- a/drivers/input/mouse/elan_i2c_i2c.c
+++ b/drivers/input/mouse/elan_i2c_i2c.c
@@ -557,7 +557,14 @@
 	long ret;
 	int error;
 	int len;
-	u8 buffer[ETP_I2C_INF_LENGTH];
+	u8 buffer[ETP_I2C_REPORT_LEN];
+
+	len = i2c_master_recv(client, buffer, ETP_I2C_REPORT_LEN);
+	if (len != ETP_I2C_REPORT_LEN) {
+		error = len < 0 ? len : -EIO;
+		dev_warn(dev, "failed to read I2C data after FW WDT reset: %d (%d)\n",
+			error, len);
+	}
 
 	reinit_completion(completion);
 	enable_irq(client->irq);
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 59603a5..c519c0b 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1711,6 +1711,17 @@
 			     etd->samples[0], etd->samples[1], etd->samples[2]);
 	}
 
+	if (etd->samples[1] == 0x74 && etd->hw_version == 0x03) {
+		/*
+		 * This module has a bug which makes absolute mode
+		 * unusable, so let's abort so we'll be using standard
+		 * PS/2 protocol.
+		 */
+		psmouse_info(psmouse,
+			     "absolute mode broken, forcing standard PS/2 protocol\n");
+		goto init_fail;
+	}
+
 	if (elantech_set_absolute_mode(psmouse)) {
 		psmouse_err(psmouse,
 			    "failed to put touchpad into absolute mode.\n");
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index bee2674..5cbf17a 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -937,6 +937,21 @@
 	psmouse->pt_deactivate = NULL;
 }
 
+static bool psmouse_do_detect(int (*detect)(struct psmouse *, bool),
+			      struct psmouse *psmouse, bool allow_passthrough,
+			      bool set_properties)
+{
+	if (psmouse->ps2dev.serio->id.type == SERIO_PS_PSTHRU &&
+	    !allow_passthrough) {
+		return false;
+	}
+
+	if (set_properties)
+		psmouse_apply_defaults(psmouse);
+
+	return detect(psmouse, set_properties) == 0;
+}
+
 static bool psmouse_try_protocol(struct psmouse *psmouse,
 				 enum psmouse_type type,
 				 unsigned int *max_proto,
@@ -948,15 +963,8 @@
 	if (!proto)
 		return false;
 
-	if (psmouse->ps2dev.serio->id.type == SERIO_PS_PSTHRU &&
-	    !proto->try_passthru) {
-		return false;
-	}
-
-	if (set_properties)
-		psmouse_apply_defaults(psmouse);
-
-	if (proto->detect(psmouse, set_properties) != 0)
+	if (!psmouse_do_detect(proto->detect, psmouse, proto->try_passthru,
+			       set_properties))
 		return false;
 
 	if (set_properties && proto->init && init_allowed) {
@@ -988,8 +996,8 @@
 	 * Always check for focaltech, this is safe as it uses pnp-id
 	 * matching.
 	 */
-	if (psmouse_try_protocol(psmouse, PSMOUSE_FOCALTECH,
-				 &max_proto, set_properties, false)) {
+	if (psmouse_do_detect(focaltech_detect,
+			      psmouse, false, set_properties)) {
 		if (max_proto > PSMOUSE_IMEX &&
 		    IS_ENABLED(CONFIG_MOUSE_PS2_FOCALTECH) &&
 		    (!set_properties || focaltech_init(psmouse) == 0)) {
@@ -1035,8 +1043,8 @@
 	 * probing for IntelliMouse.
 	 */
 	if (max_proto > PSMOUSE_PS2 &&
-	    psmouse_try_protocol(psmouse, PSMOUSE_SYNAPTICS, &max_proto,
-				 set_properties, false)) {
+	    psmouse_do_detect(synaptics_detect,
+			      psmouse, false, set_properties)) {
 		synaptics_hardware = true;
 
 		if (max_proto > PSMOUSE_IMEX) {
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index b604564..30328e5 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -15,6 +15,7 @@
 #define MOUSEDEV_MINORS		31
 #define MOUSEDEV_MIX		63
 
+#include <linux/bitops.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/poll.h>
@@ -103,7 +104,7 @@
 	spinlock_t packet_lock;
 	int pos_x, pos_y;
 
-	signed char ps2[6];
+	u8 ps2[6];
 	unsigned char ready, buffer, bufsiz;
 	unsigned char imexseq, impsseq;
 	enum mousedev_emul mode;
@@ -291,11 +292,10 @@
 		}
 
 		client->pos_x += packet->dx;
-		client->pos_x = client->pos_x < 0 ?
-			0 : (client->pos_x >= xres ? xres : client->pos_x);
+		client->pos_x = clamp_val(client->pos_x, 0, xres);
+
 		client->pos_y += packet->dy;
-		client->pos_y = client->pos_y < 0 ?
-			0 : (client->pos_y >= yres ? yres : client->pos_y);
+		client->pos_y = clamp_val(client->pos_y, 0, yres);
 
 		p->dx += packet->dx;
 		p->dy += packet->dy;
@@ -571,44 +571,50 @@
 	return error;
 }
 
-static inline int mousedev_limit_delta(int delta, int limit)
-{
-	return delta > limit ? limit : (delta < -limit ? -limit : delta);
-}
-
-static void mousedev_packet(struct mousedev_client *client,
-			    signed char *ps2_data)
+static void mousedev_packet(struct mousedev_client *client, u8 *ps2_data)
 {
 	struct mousedev_motion *p = &client->packets[client->tail];
+	s8 dx, dy, dz;
 
-	ps2_data[0] = 0x08 |
-		((p->dx < 0) << 4) | ((p->dy < 0) << 5) | (p->buttons & 0x07);
-	ps2_data[1] = mousedev_limit_delta(p->dx, 127);
-	ps2_data[2] = mousedev_limit_delta(p->dy, 127);
-	p->dx -= ps2_data[1];
-	p->dy -= ps2_data[2];
+	dx = clamp_val(p->dx, -127, 127);
+	p->dx -= dx;
+
+	dy = clamp_val(p->dy, -127, 127);
+	p->dy -= dy;
+
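+	/*
+	 * Byte 0 of the PS/2 packet: bit 3 is always set, bits 4 and 5
+	 * carry the X/Y sign bits (bit 7 of dx/dy shifted down), and
+	 * bits 0-2 hold the left/right/middle button state.
+	 */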
+	ps2_data[0] = BIT(3);
+	ps2_data[0] |= ((dx & BIT(7)) >> 3) | ((dy & BIT(7)) >> 2);
+	ps2_data[0] |= p->buttons & 0x07;
+	ps2_data[1] = dx;
+	ps2_data[2] = dy;
 
 	switch (client->mode) {
 	case MOUSEDEV_EMUL_EXPS:
-		ps2_data[3] = mousedev_limit_delta(p->dz, 7);
-		p->dz -= ps2_data[3];
-		ps2_data[3] = (ps2_data[3] & 0x0f) | ((p->buttons & 0x18) << 1);
+		dz = clamp_val(p->dz, -7, 7);
+		p->dz -= dz;
+
+		ps2_data[3] = (dz & 0x0f) | ((p->buttons & 0x18) << 1);
 		client->bufsiz = 4;
 		break;
 
 	case MOUSEDEV_EMUL_IMPS:
-		ps2_data[0] |=
-			((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1);
-		ps2_data[3] = mousedev_limit_delta(p->dz, 127);
-		p->dz -= ps2_data[3];
+		dz = clamp_val(p->dz, -127, 127);
+		p->dz -= dz;
+
+		ps2_data[0] |= ((p->buttons & 0x10) >> 3) |
+			       ((p->buttons & 0x08) >> 1);
+		ps2_data[3] = dz;
+
 		client->bufsiz = 4;
 		break;
 
 	case MOUSEDEV_EMUL_PS2:
 	default:
-		ps2_data[0] |=
-			((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1);
 		p->dz = 0;
+
+		ps2_data[0] |= ((p->buttons & 0x10) >> 3) |
+			       ((p->buttons & 0x08) >> 1);
+
 		client->bufsiz = 3;
 		break;
 	}
@@ -714,7 +720,7 @@
 {
 	struct mousedev_client *client = file->private_data;
 	struct mousedev *mousedev = client->mousedev;
-	signed char data[sizeof(client->ps2)];
+	u8 data[sizeof(client->ps2)];
 	int retval = 0;
 
 	if (!client->ready && !client->buffer && mousedev->exist &&
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index d1051e3..e484ea2 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -530,6 +530,20 @@
 	{ }
 };
 
+static const struct dmi_system_id i8042_dmi_forcemux_table[] __initconst = {
+	{
+		/*
+		 * Sony Vaio VGN-CS series require MUX or the touch sensor
+		 * buttons will disturb touchpad operation
+		 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-CS"),
+		},
+	},
+	{ }
+};
+
 /*
  * On some Asus laptops, just running self tests cause problems.
  */
@@ -693,6 +707,13 @@
 		},
 	},
 	{
+		/* Lenovo ThinkPad L460 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L460"),
+		},
+	},
+	{
 		/* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
@@ -1223,6 +1244,9 @@
 	if (dmi_check_system(i8042_dmi_nomux_table))
 		i8042_nomux = true;
 
+	if (dmi_check_system(i8042_dmi_forcemux_table))
+		i8042_nomux = false;
+
 	if (dmi_check_system(i8042_dmi_notimeout_table))
 		i8042_notimeout = true;
 
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 25791c2..6c4156a 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -1279,4 +1279,16 @@
 
 	  If unsure, say N.
 
+config TOUCHSCREEN_HIMAX_CHIPSET
+	bool "Himax touchpanel CHIPSET"
+	depends on I2C
+	help
+	  Say Y here if you have a Himax CHIPSET touchscreen.
+	  HIMAX controllers are multi-touch controllers which can
+	  report 10 touches at a time.
+
+	  If unsure, say N.
+
+source "drivers/input/touchscreen/hxchipset/Kconfig"
+
 endif
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 34e8823..a5952ca 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -104,3 +104,4 @@
 obj-$(CONFIG_TOUCHSCREEN_COLIBRI_VF50)	+= colibri-vf50-ts.o
 obj-$(CONFIG_TOUCHSCREEN_ROHM_BU21023)	+= rohm_bu21023.o
 obj-$(CONFIG_TOUCHSCREEN_FTS)		+= focaltech_touch/
+obj-$(CONFIG_TOUCHSCREEN_HIMAX_CHIPSET)	+= hxchipset/
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index e5d185f..2613240 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -3028,6 +3028,15 @@
 		.driver_data = samus_platform_data,
 	},
 	{
+		/* Samsung Chromebook Pro */
+		.ident = "Samsung Chromebook Pro",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Caroline"),
+		},
+		.driver_data = samus_platform_data,
+	},
+	{
 		/* Other Google Chromebooks */
 		.ident = "Chromebook",
 		.matches = {
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index 240b16f..5907fdd 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -778,8 +778,10 @@
 	int error;
 
 	/* We need gpio pins to suspend/resume */
-	if (!ts->gpiod_int || !ts->gpiod_rst)
+	if (!ts->gpiod_int || !ts->gpiod_rst) {
+		disable_irq(client->irq);
 		return 0;
+	}
 
 	wait_for_completion(&ts->firmware_loading_complete);
 
@@ -819,8 +821,10 @@
 	struct goodix_ts_data *ts = i2c_get_clientdata(client);
 	int error;
 
-	if (!ts->gpiod_int || !ts->gpiod_rst)
+	if (!ts->gpiod_int || !ts->gpiod_rst) {
+		enable_irq(client->irq);
 		return 0;
+	}
 
 	/*
 	 * Exit sleep mode by outputting HIGH level to INT pin
diff --git a/drivers/input/touchscreen/hxchipset/HX83100_Amber_0901_030B.i b/drivers/input/touchscreen/hxchipset/HX83100_Amber_0901_030B.i
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/drivers/input/touchscreen/hxchipset/HX83100_Amber_0901_030B.i
diff --git a/drivers/input/touchscreen/hxchipset/HX_CRC_124.i b/drivers/input/touchscreen/hxchipset/HX_CRC_124.i
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/drivers/input/touchscreen/hxchipset/HX_CRC_124.i
diff --git a/drivers/input/touchscreen/hxchipset/HX_CRC_128.i b/drivers/input/touchscreen/hxchipset/HX_CRC_128.i
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/drivers/input/touchscreen/hxchipset/HX_CRC_128.i
diff --git a/drivers/input/touchscreen/hxchipset/HX_CRC_60.i b/drivers/input/touchscreen/hxchipset/HX_CRC_60.i
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/drivers/input/touchscreen/hxchipset/HX_CRC_60.i
diff --git a/drivers/input/touchscreen/hxchipset/HX_CRC_64.i b/drivers/input/touchscreen/hxchipset/HX_CRC_64.i
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/drivers/input/touchscreen/hxchipset/HX_CRC_64.i
diff --git a/drivers/input/touchscreen/hxchipset/Kconfig b/drivers/input/touchscreen/hxchipset/Kconfig
new file mode 100644
index 0000000..ebf3aa4
--- /dev/null
+++ b/drivers/input/touchscreen/hxchipset/Kconfig
@@ -0,0 +1,21 @@
+#
+# Himax Touchscreen driver configuration
+#
+
+config TOUCHSCREEN_HIMAX_I2C
+	tristate "HIMAX chipset I2C touchscreen"
+	depends on TOUCHSCREEN_HIMAX_CHIPSET
+	help
+	  This enables support for HIMAX chipset I2C-based touchscreens.
+
+config TOUCHSCREEN_HIMAX_DEBUG
+	tristate "HIMAX debug function"
+	depends on TOUCHSCREEN_HIMAX_I2C
+	help
+	  This enables support for the HIMAX debug function.
+
+config HMX_DB
+	tristate "HIMAX driver test over Dragon Board"
+	depends on TOUCHSCREEN_HIMAX_I2C
+	help
+	  This enables support for HIMAX driver test over Dragon Board.
diff --git a/drivers/input/touchscreen/hxchipset/Makefile b/drivers/input/touchscreen/hxchipset/Makefile
new file mode 100644
index 0000000..509d491
--- /dev/null
+++ b/drivers/input/touchscreen/hxchipset/Makefile
@@ -0,0 +1,3 @@
+# Makefile for the Himax touchscreen drivers.
+
+obj-$(CONFIG_TOUCHSCREEN_HIMAX_I2C)   	+= himax_platform.o himax_ic.o himax_common.o himax_debug.o
diff --git a/drivers/input/touchscreen/hxchipset/himax_common.c b/drivers/input/touchscreen/hxchipset/himax_common.c
new file mode 100644
index 0000000..417b0c0
--- /dev/null
+++ b/drivers/input/touchscreen/hxchipset/himax_common.c
@@ -0,0 +1,1936 @@
+/* Himax Android Driver Sample Code for Himax chipset
+*
+* Copyright (C) 2015 Himax Corporation.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+*/
+
+#include "himax_common.h"
+#include "himax_ic.h"
+
+#define SUPPORT_FINGER_DATA_CHECKSUM 0x0F
+#define TS_WAKE_LOCK_TIMEOUT		(2 * HZ)
+#define FRAME_COUNT 5
+
+#if defined(HX_AUTO_UPDATE_FW)
+	static unsigned char i_CTPM_FW[]=
+	{
+		#include "HX83100_Amber_0901_030B.i"
+	};
+#endif
+
+#ifdef HX_ESD_WORKAROUND
+	extern void HX_report_ESD_event(void);
+	unsigned char ESD_00_counter = 0;
+	unsigned char ESD_00_Flag = 0;
+#endif
+
+//static int		tpd_keys_local[HX_KEY_MAX_COUNT] = HX_KEY_ARRAY; // for Virtual key array
+
+struct himax_ts_data *private_ts;
+struct himax_ic_data* ic_data;
+
+static int		HX_TOUCH_INFO_POINT_CNT;
+
+#ifdef HX_AUTO_UPDATE_FW
+extern unsigned long	FW_VER_MAJ_FLASH_ADDR;
+extern unsigned long 	FW_VER_MIN_FLASH_ADDR;
+extern unsigned long 	CFG_VER_MAJ_FLASH_ADDR;
+extern unsigned long 	CFG_VER_MIN_FLASH_ADDR;
+#endif
+extern unsigned long 	FW_VER_MAJ_FLASH_LENG;
+extern unsigned long 	FW_VER_MIN_FLASH_LENG;
+extern unsigned long 	CFG_VER_MAJ_FLASH_LENG;
+extern unsigned long 	CFG_VER_MIN_FLASH_LENG;
+extern unsigned char	IC_TYPE;
+extern unsigned char	IC_CHECKSUM;
+
+#if defined(CONFIG_TOUCHSCREEN_HIMAX_DEBUG)
+extern int himax_touch_proc_init(void);
+extern void himax_touch_proc_deinit(void);
+//PROC-START
+#ifdef HX_TP_PROC_FLASH_DUMP
+extern void	himax_ts_flash_func(void);
+extern void setFlashBuffer(void);
+extern bool getFlashDumpGoing(void);
+extern uint8_t getSysOperation(void);
+extern void setSysOperation(uint8_t operation);
+#endif
+
+#ifdef HX_TP_PROC_HITOUCH
+extern bool hitouch_is_connect;
+#endif
+
+#ifdef HX_TP_PROC_DIAG
+	extern int touch_monitor_stop_flag;
+	extern int touch_monitor_stop_limit;
+	extern void	himax_ts_diag_func(void);
+	extern int16_t *getMutualBuffer(void);
+	extern int16_t *getMutualNewBuffer(void);
+	extern int16_t *getMutualOldBuffer(void);
+	extern int16_t *getSelfBuffer(void);
+	extern uint8_t getXChannel(void);
+	extern uint8_t getYChannel(void);
+	extern uint8_t getDiagCommand(void);
+	extern void setXChannel(uint8_t x);
+	extern void setYChannel(uint8_t y);
+	extern void setMutualBuffer(void);
+	extern void setMutualNewBuffer(void);
+	extern void setMutualOldBuffer(void);
+	extern uint8_t	coordinate_dump_enable;
+	extern struct file	*coordinate_fn;
+	extern uint8_t diag_coor[128];
+#ifdef HX_TP_PROC_2T2R
+	extern int16_t *getMutualBuffer_2(void);
+	extern uint8_t getXChannel_2(void);
+	extern uint8_t getYChannel_2(void);
+	extern void setXChannel_2(uint8_t x);
+	extern void setYChannel_2(uint8_t y);
+	extern void setMutualBuffer_2(void);
+#endif
+#endif
+//PROC-END
+#endif
+
+extern int himax_parse_dt(struct himax_ts_data *ts,
+				struct himax_i2c_platform_data *pdata);
+extern int himax_ts_pinctrl_init(struct himax_ts_data *ts);
+
+static uint8_t 	vk_press;
+static uint8_t 	AA_press;
+static uint8_t 	EN_NoiseFilter;
+static uint8_t	Last_EN_NoiseFilter;
+static int	hx_point_num;																	// for himax_ts_work_func use
+static int	p_point_num	= 0xFFFF;
+static int	tpd_key;
+static int	tpd_key_old;
+static int	probe_fail_flag;
+static bool	config_load;
+static struct himax_config *config_selected;
+
+//static int iref_number = 11;
+//static bool iref_found = false;    
+
+#ifdef HX_USB_DETECT2
+extern bool USB_Flag;
+#endif
+
+#if defined(CONFIG_FB)
+int fb_notifier_callback(struct notifier_block *self,
+				 unsigned long event, void *data);
+#elif defined(CONFIG_HAS_EARLYSUSPEND)
+static void himax_ts_early_suspend(struct early_suspend *h);
+static void himax_ts_late_resume(struct early_suspend *h);
+#endif
+
+int himax_input_register(struct himax_ts_data *ts)
+{
+	int ret;
+	ts->input_dev = input_allocate_device();
+	if (ts->input_dev == NULL) {
+		ret = -ENOMEM;
+		E("%s: Failed to allocate input device\n", __func__);
+		return ret;
+	}
+	ts->input_dev->name = "himax-touchscreen";
+
+	set_bit(EV_SYN, ts->input_dev->evbit);
+	set_bit(EV_ABS, ts->input_dev->evbit);
+	set_bit(EV_KEY, ts->input_dev->evbit);
+
+	set_bit(KEY_BACK, ts->input_dev->keybit);
+	set_bit(KEY_HOME, ts->input_dev->keybit);
+	set_bit(KEY_MENU, ts->input_dev->keybit);
+	set_bit(KEY_SEARCH, ts->input_dev->keybit);
+#if defined(HX_SMART_WAKEUP)
+	set_bit(KEY_POWER, ts->input_dev->keybit);
+	set_bit(KEY_CUST_01, ts->input_dev->keybit);
+	set_bit(KEY_CUST_02, ts->input_dev->keybit);
+	set_bit(KEY_CUST_03, ts->input_dev->keybit);
+	set_bit(KEY_CUST_04, ts->input_dev->keybit);
+	set_bit(KEY_CUST_05, ts->input_dev->keybit);
+	set_bit(KEY_CUST_06, ts->input_dev->keybit);
+	set_bit(KEY_CUST_07, ts->input_dev->keybit);
+	set_bit(KEY_CUST_08, ts->input_dev->keybit);
+	set_bit(KEY_CUST_09, ts->input_dev->keybit);
+	set_bit(KEY_CUST_10, ts->input_dev->keybit);
+	set_bit(KEY_CUST_11, ts->input_dev->keybit);
+	set_bit(KEY_CUST_12, ts->input_dev->keybit);
+	set_bit(KEY_CUST_13, ts->input_dev->keybit);
+	set_bit(KEY_CUST_14, ts->input_dev->keybit);
+	set_bit(KEY_CUST_15, ts->input_dev->keybit);
+#endif
+	set_bit(BTN_TOUCH, ts->input_dev->keybit);
+
+	set_bit(KEY_F10, ts->input_dev->keybit);
+
+	set_bit(INPUT_PROP_DIRECT, ts->input_dev->propbit);
+
+	if (ts->protocol_type == PROTOCOL_TYPE_A) {
+		//ts->input_dev->mtsize = ts->nFinger_support;
+		input_set_abs_params(ts->input_dev, ABS_MT_TRACKING_ID,
+		0, 3, 0, 0);
+	} else {/* PROTOCOL_TYPE_B */
+		set_bit(MT_TOOL_FINGER, ts->input_dev->keybit);
+		input_mt_init_slots(ts->input_dev, ts->nFinger_support,0);
+	}
+
+	I("input_set_abs_params: min_x %d, max_x %d, min_y %d, max_y %d\n",
+		ts->pdata->abs_x_min, ts->pdata->abs_x_max, ts->pdata->abs_y_min, ts->pdata->abs_y_max);
+
+	input_set_abs_params(ts->input_dev, ABS_MT_POSITION_X,ts->pdata->abs_x_min, ts->pdata->abs_x_max, ts->pdata->abs_x_fuzz, 0);
+	input_set_abs_params(ts->input_dev, ABS_MT_POSITION_Y,ts->pdata->abs_y_min, ts->pdata->abs_y_max, ts->pdata->abs_y_fuzz, 0);
+	input_set_abs_params(ts->input_dev, ABS_MT_TOUCH_MAJOR,ts->pdata->abs_pressure_min, ts->pdata->abs_pressure_max, ts->pdata->abs_pressure_fuzz, 0);
+	input_set_abs_params(ts->input_dev, ABS_MT_PRESSURE,ts->pdata->abs_pressure_min, ts->pdata->abs_pressure_max, ts->pdata->abs_pressure_fuzz, 0);
+	input_set_abs_params(ts->input_dev, ABS_MT_WIDTH_MAJOR,ts->pdata->abs_width_min, ts->pdata->abs_width_max, ts->pdata->abs_pressure_fuzz, 0);
+
+//	input_set_abs_params(ts->input_dev, ABS_MT_AMPLITUDE, 0, ((ts->pdata->abs_pressure_max << 16) | ts->pdata->abs_width_max), 0, 0);
+//	input_set_abs_params(ts->input_dev, ABS_MT_POSITION, 0, (BIT(31) | (ts->pdata->abs_x_max << 16) | ts->pdata->abs_y_max), 0, 0);
+
+	return input_register_device(ts->input_dev);
+}
+
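+/*
+ * Per-packet data sizing: each finger uses 4 coordinate bytes plus one
+ * area/width byte, padded to a 4-byte multiple; whatever is left of the
+ * 128-byte event packet after 4 + 4 + 1 bytes of fixed overhead is available
+ * for raw data. For example, finger_num = 10 gives coord_data_size = 40,
+ * area_data_size = 12 and raw_data_frame_size = 128 - 40 - 12 - 9 = 67.
+ */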
+static void calcDataSize(uint8_t finger_num)
+{
+	struct himax_ts_data *ts_data = private_ts;
+	ts_data->coord_data_size = 4 * finger_num;
+	ts_data->area_data_size = ((finger_num / 4) + (finger_num % 4 ? 1 : 0)) * 4;
+	ts_data->raw_data_frame_size = 128 - ts_data->coord_data_size - ts_data->area_data_size - 4 - 4 - 1;
+	ts_data->raw_data_nframes = ((uint32_t)ts_data->x_channel * ts_data->y_channel +
+			ts_data->x_channel + ts_data->y_channel) / ts_data->raw_data_frame_size +
+			((((uint32_t)ts_data->x_channel * ts_data->y_channel +
+			ts_data->x_channel + ts_data->y_channel) % ts_data->raw_data_frame_size) ? 1 : 0);
+	I("%s: coord_data_size: %d, area_data_size:%d, raw_data_frame_size:%d, raw_data_nframes:%d", __func__, ts_data->coord_data_size, ts_data->area_data_size, ts_data->raw_data_frame_size, ts_data->raw_data_nframes);
+}
+
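+/*
+ * Size of the touch-info block: 4 coordinate bytes per supported point plus
+ * one extra byte per point rounded up to a multiple of 4, e.g. for
+ * HX_MAX_PT = 10 this gives 40 + 12 = 52 bytes.
+ */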
+static void calculate_point_number(void)
+{
+	HX_TOUCH_INFO_POINT_CNT = ic_data->HX_MAX_PT * 4 ;
+
+	if ( (ic_data->HX_MAX_PT % 4) == 0)
+		HX_TOUCH_INFO_POINT_CNT += (ic_data->HX_MAX_PT / 4) * 4 ;
+	else
+		HX_TOUCH_INFO_POINT_CNT += ((ic_data->HX_MAX_PT / 4) +1) * 4 ;
+}
+
+#if 0
+static int himax_read_Sensor_ID(struct i2c_client *client)
+{	
+	uint8_t val_high[1], val_low[1], ID0=0, ID1=0;
+	char data[3];
+	const int normalRetry = 10;
+	int sensor_id;
+	
+	data[0] = 0x56; data[1] = 0x02; data[2] = 0x02;/*ID pin PULL High*/
+	i2c_himax_master_write(client, &data[0],3,normalRetry);
+	usleep_range(1000, 2000);
+
+	//read id pin high
+	i2c_himax_read(client, 0x57, val_high, 1, normalRetry);
+
+	data[0] = 0x56; data[1] = 0x01; data[2] = 0x01;/*ID pin PULL Low*/
+	i2c_himax_master_write(client, &data[0],3,normalRetry);
+	usleep_range(1000, 2000);
+
+	//read id pin low
+	i2c_himax_read(client, 0x57, val_low, 1, normalRetry);
+
+	if((val_high[0] & 0x01) ==0)
+		ID0=0x02;/*GND*/
+	else if((val_low[0] & 0x01) ==0)
+		ID0=0x01;/*Floating*/
+	else
+		ID0=0x04;/*VCC*/
+	
+	if((val_high[0] & 0x02) ==0)
+		ID1=0x02;/*GND*/
+	else if((val_low[0] & 0x02) ==0)
+		ID1=0x01;/*Floating*/
+	else
+		ID1=0x04;/*VCC*/
+	if((ID0==0x04)&&(ID1!=0x04))
+		{
+			data[0] = 0x56; data[1] = 0x02; data[2] = 0x01;/*ID pin PULL High,Low*/
+			i2c_himax_master_write(client, &data[0],3,normalRetry);
+			usleep_range(1000, 2000);
+
+		}
+	else if((ID0!=0x04)&&(ID1==0x04))
+		{
+			data[0] = 0x56; data[1] = 0x01; data[2] = 0x02;/*ID pin PULL Low,High*/
+			i2c_himax_master_write(client, &data[0],3,normalRetry);
+			usleep_range(1000, 2000);
+
+		}
+	else if((ID0==0x04)&&(ID1==0x04))
+		{
+			data[0] = 0x56; data[1] = 0x02; data[2] = 0x02;/*ID pin PULL High,High*/
+			i2c_himax_master_write(client, &data[0],3,normalRetry);
+			usleep_range(1000, 2000);
+
+		}
+	sensor_id=(ID1<<4)|ID0;
+
+	data[0] = 0xE4; data[1] = sensor_id;
+	i2c_himax_master_write(client, &data[0],2,normalRetry);/*Write to MCU*/
+	usleep_range(1000, 2000);
+
+	return sensor_id;
+
+}
+#endif
+static void himax_power_on_initCMD(struct i2c_client *client)
+{
+	I("%s:\n", __func__);
+
+	himax_touch_information(client);
+
+    //himax_sense_on(private_ts->client, 0x01);//1=Flash, 0=SRAM
+}
+
+#ifdef HX_AUTO_UPDATE_FW
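+/*
+ * Compare the built-in firmware image against what is on the IC: flash it
+ * when the CRC check fails or when the embedded FW/config version is newer,
+ * picking the size-specific upgrade routine (60k/64k/124k/128k) and retrying
+ * up to three times before giving up.
+ */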
+static int i_update_FW(void)
+{
+	int upgrade_times = 0;
+	unsigned char* ImageBuffer = i_CTPM_FW;
+	int fullFileLength = sizeof(i_CTPM_FW);
+	int i_FW_VER = 0, i_CFG_VER = 0;
+	uint8_t ret = -1, result = 0;
+//	uint8_t tmp_addr[4];
+//	uint8_t tmp_data[4];
+	int CRC_from_FW = 0;
+	int CRC_Check_result = 0;
+
+	i_FW_VER = i_CTPM_FW[FW_VER_MAJ_FLASH_ADDR]<<8 |i_CTPM_FW[FW_VER_MIN_FLASH_ADDR];
+	i_CFG_VER = i_CTPM_FW[CFG_VER_MAJ_FLASH_ADDR]<<8 |i_CTPM_FW[CFG_VER_MIN_FLASH_ADDR];
+
+	I("%s: i_fullFileLength = %d\n", __func__,fullFileLength);
+
+	himax_sense_off(private_ts->client);
+	msleep(500);
+
+	CRC_from_FW = himax_check_CRC(private_ts->client,fw_image_64k);
+	CRC_Check_result = Calculate_CRC_with_AP(ImageBuffer, CRC_from_FW,fw_image_64k);
+	I("%s: Check sum result = %d\n", __func__,CRC_Check_result);
+	//I("%s: ic_data->vendor_fw_ver = %X, i_FW_VER = %X,\n", __func__,ic_data->vendor_fw_ver, i_FW_VER);
+	//I("%s: ic_data->vendor_config_ver = %X, i_CFG_VER = %X,\n", __func__,ic_data->vendor_config_ver, i_CFG_VER);
+	
+	if ((CRC_Check_result == 0)|| ( ic_data->vendor_fw_ver < i_FW_VER ) || ( ic_data->vendor_config_ver < i_CFG_VER ))
+		{
+			himax_int_enable(private_ts->client->irq,0);
+update_retry:
+			if(fullFileLength == FW_SIZE_60k){
+				ret = fts_ctpm_fw_upgrade_with_sys_fs_60k(private_ts->client,ImageBuffer,fullFileLength,false);
+			}else if (fullFileLength == FW_SIZE_64k){
+				ret = fts_ctpm_fw_upgrade_with_sys_fs_64k(private_ts->client,ImageBuffer,fullFileLength,false);
+			}else if (fullFileLength == FW_SIZE_124k){
+				ret = fts_ctpm_fw_upgrade_with_sys_fs_124k(private_ts->client,ImageBuffer,fullFileLength,false);
+			}else if (fullFileLength == FW_SIZE_128k){
+				ret = fts_ctpm_fw_upgrade_with_sys_fs_128k(private_ts->client,ImageBuffer,fullFileLength,false);
+			}
+			if(ret == 0){
+				upgrade_times++;
+				E("%s: TP upgrade error, upgrade_times = %d\n", __func__, upgrade_times);
+				if(upgrade_times < 3)
+					goto update_retry;
+				else
+				{
+					himax_sense_on(private_ts->client, 0x01);
+					msleep(120);
+#ifdef HX_ESD_WORKAROUND
+					HX_ESD_RESET_ACTIVATE = 1;
+#endif
+					result = -1;//upgrade fail
+				}
+			}
+			else if(ret == 1){
+/*
+				// 1. Set DDREG_Req = 1 (0x9000_0020 = 0x0000_0001) (Lock register R/W from driver)
+				tmp_addr[3] = 0x90; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x20;
+				tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x01;
+				himax_register_write(private_ts->client, tmp_addr, 1, tmp_data);
+
+				// 2. Write driver initial code condition
+				//	  write value from AHB I2C : 0x8001_C603 = 0x000000FF
+				tmp_addr[3] = 0x80; tmp_addr[2] = 0x01; tmp_addr[1] = 0xC6; tmp_addr[0] = 0x03;
+				tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0xFF;
+				himax_register_write(private_ts->client, tmp_addr, 1, tmp_data);
+
+				// 1. Set DDREG_Req = 0 (0x9000_0020 = 0x0000_0001) (Lock register R/W from driver)
+				tmp_addr[3] = 0x90; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x20;
+				tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x00;
+				himax_register_write(private_ts->client, tmp_addr, 1, tmp_data);
+*/
+				himax_sense_on(private_ts->client, 0x01);
+				msleep(120);
+#ifdef HX_ESD_WORKAROUND
+				HX_ESD_RESET_ACTIVATE = 1;
+#endif
+
+				ic_data->vendor_fw_ver = i_FW_VER;
+				ic_data->vendor_config_ver = i_CFG_VER;
+				result = 1;//upgrade success
+				I("%s: TP upgrade OK\n", __func__);
+			}
+
+				himax_int_enable(private_ts->client->irq,1);
+			return result;
+		}
+	else
+		{
+			himax_sense_on(private_ts->client, 0x01);
+			return 0;//NO upgrade
+		}
+}
+#endif 
+
+#ifdef HX_RST_PIN_FUNC
+void himax_HW_reset(uint8_t loadconfig,uint8_t int_off)
+{
+	struct himax_ts_data *ts = private_ts;
+	int ret = 0;
+
+	return;
+	if (ts->rst_gpio) {
+		if(int_off)
+			{
+				if (ts->use_irq)
+					himax_int_enable(private_ts->client->irq,0);
+				else {
+					hrtimer_cancel(&ts->timer);
+					ret = cancel_work_sync(&ts->work);
+				}
+			}
+
+		I("%s: Now reset the Touch chip.\n", __func__);
+
+		himax_rst_gpio_set(ts->rst_gpio, 0);
+		msleep(20);
+		himax_rst_gpio_set(ts->rst_gpio, 1);
+		msleep(20);
+
+		if(loadconfig)
+			himax_loadSensorConfig(private_ts->client,private_ts->pdata);
+
+		if(int_off)
+			{
+				if (ts->use_irq)
+					himax_int_enable(private_ts->client->irq,1);
+				else
+					hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
+			}
+	}
+}
+#endif
+
+int himax_loadSensorConfig(struct i2c_client *client, struct himax_i2c_platform_data *pdata)
+{
+
+	if (!client) {
+		E("%s: Necessary parameter client is null!\n", __func__);
+		return -EINVAL;
+	}
+
+	if(config_load == false)
+		{
+			config_selected = kzalloc(sizeof(*config_selected), GFP_KERNEL);
+			if (config_selected == NULL) {
+				E("%s: alloc config_selected fail!\n", __func__);
+				return -ENOMEM;
+			}
+		}
+
+	himax_power_on_initCMD(client);
+
+	himax_int_enable(client->irq,0);
+	himax_read_FW_ver(client);
+#ifdef HX_RST_PIN_FUNC
+	himax_HW_reset(true,false);
+#endif
+	himax_int_enable(client->irq,1);
+	I("FW_VER : %X \n",ic_data->vendor_fw_ver);
+
+	ic_data->vendor_sensor_id=0x2602;
+	I("sensor_id=%x.\n",ic_data->vendor_sensor_id);
+
+	himax_sense_on(private_ts->client, 0x01);//1=Flash, 0=SRAM
+	msleep(120);
+#ifdef HX_ESD_WORKAROUND
+	HX_ESD_RESET_ACTIVATE = 1;
+#endif
+	I("%s: initialization complete\n", __func__);
+
+	return 1;
+}
+
+#ifdef HX_ESD_WORKAROUND
+void ESD_HW_REST(void)
+{
+	I("START_Himax TP: ESD - Reset\n");	
+	
+	HX_report_ESD_event();
+	ESD_00_counter = 0;
+	ESD_00_Flag = 0;
+	/*************************************/
+	if (private_ts->protocol_type == PROTOCOL_TYPE_A)
+		input_mt_sync(private_ts->input_dev);
+	input_report_key(private_ts->input_dev, BTN_TOUCH, 0);
+	input_sync(private_ts->input_dev);
+	/*************************************/
+
+	I("END_Himax TP: ESD - Reset\n");
+}
+#endif
+#ifdef HX_HIGH_SENSE
+void himax_set_HSEN_func(struct i2c_client *client,uint8_t HSEN_enable)
+{
+	uint8_t tmp_data[4];
+
+	if(HSEN_enable)
+	{
+		I(" %s in", __func__);
+		HSEN_bit_retry:
+		himax_set_HSEN_enable(client,HSEN_enable);
+		msleep(20);
+		himax_get_HSEN_enable(client,tmp_data);
+		I("%s: Read HSEN bit data[0]=%x data[1]=%x data[2]=%x data[3]=%x\n", __func__
+			,tmp_data[0],tmp_data[1],tmp_data[2],tmp_data[3]);
+		if(tmp_data[0]!= 0x01)
+			{
+				I("%s: retry HSEN bit write data[0]=%x  \n",__func__,tmp_data[0]);
+				goto HSEN_bit_retry;
+			}
+	}
+}
+
+static void himax_HSEN_func(struct work_struct *work)
+{
+	struct himax_ts_data *ts = container_of(work, struct himax_ts_data,
+							hsen_work.work);
+
+	himax_set_HSEN_func(ts->client, ts->HSEN_enable);
+}
+
+#endif
+
+#ifdef HX_SMART_WAKEUP
+#ifdef HX_GESTURE_TRACK
+static void gest_pt_log_coordinate(int rx, int tx)
+{
+	//driver reports x/y in the range 0-255; scale it up to panel pixels
+	gest_pt_x[gest_pt_cnt] = rx*(ic_data->HX_X_RES)/255;
+	gest_pt_y[gest_pt_cnt] = tx*(ic_data->HX_Y_RES)/255;
+}
+#endif
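+/*
+ * Wake event layout as parsed below: the gesture ID is repeated for the
+ * first GEST_PTLG_ID_LEN bytes, followed by the GEST_PTLG_HDR_ID1/ID2
+ * header; the ID and header bytes must sum to zero (mod 256). ID 0x80 is
+ * the double-click (power) gesture, other IDs map to the custom gesture
+ * keys (KEY_CUST_xx).
+ */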
+static int himax_parse_wake_event(struct himax_ts_data *ts)
+{
+	uint8_t buf[64];
+	unsigned char check_sum_cal = 0;
+#ifdef HX_GESTURE_TRACK
+	int tmp_max_x=0x00,tmp_min_x=0xFFFF,tmp_max_y=0x00,tmp_min_y=0xFFFF;
+	int gest_len;
+#endif
+	int i=0, check_FC = 0, gesture_flag = 0;
+
+	himax_burst_enable(ts->client, 0);
+	himax_read_event_stack(ts->client,buf,56);
+
+	for(i=0;i<GEST_PTLG_ID_LEN;i++)
+	{
+		if (check_FC==0)
+		{
+			if((buf[0]!=0x00)&&((buf[0]<=0x0F)||(buf[0]==0x80)))
+			{
+				check_FC = 1;
+				gesture_flag = buf[i];
+			}
+			else
+			{
+				check_FC = 0;
+				I("ID START at %x , value = %x skip the event\n", i, buf[i]);
+				break;
+			}
+		}
+		else
+		{
+			if(buf[i]!=gesture_flag)
+			{
+				check_FC = 0;
+				I("ID NOT the same %x != %x So STOP parse event\n", buf[i], gesture_flag);
+				break;
+			}
+		}
+
+		I("0x%2.2X ", buf[i]);
+		if (i % 8 == 7)
+				I("\n");
+	}
+	I("Himax gesture_flag= %x\n",gesture_flag );
+	I("Himax check_FC is %d\n", check_FC);
+
+	if (check_FC == 0)
+		return 0;
+	if(buf[GEST_PTLG_ID_LEN] != GEST_PTLG_HDR_ID1 ||
+			buf[GEST_PTLG_ID_LEN+1] != GEST_PTLG_HDR_ID2)
+		return 0;
+	for(i=0;i<(GEST_PTLG_ID_LEN+GEST_PTLG_HDR_LEN);i++)
+	{
+		I("P[%x]=0x%2.2X \n", i, buf[i]);
+		I("checksum=0x%2.2X \n", check_sum_cal);
+		check_sum_cal += buf[i];
+	}
+	if ((check_sum_cal != 0x00) )
+	{
+		I(" %s : check_sum_cal: 0x%02X\n",__func__ ,check_sum_cal);
+		return 0;
+	}
+#ifdef HX_GESTURE_TRACK
+	if(buf[GEST_PTLG_ID_LEN] == GEST_PTLG_HDR_ID1 &&
+			buf[GEST_PTLG_ID_LEN+1] == GEST_PTLG_HDR_ID2)
+	{
+		gest_len = buf[GEST_PTLG_ID_LEN+2];
+
+		I("gest_len = %d ",gest_len);
+
+		i = 0;
+		gest_pt_cnt = 0;
+		I("gest coordinate start \n %s",__func__);
+		while(i<(gest_len+1)/2)
+		{
+			gest_pt_log_coordinate(buf[GEST_PTLG_ID_LEN+4+i*2],buf[GEST_PTLG_ID_LEN+4+i*2+1]);
+			i++;
+
+			I("gest_pt_x[%d]=%d \n",gest_pt_cnt,gest_pt_x[gest_pt_cnt]);
+			I("gest_pt_y[%d]=%d \n",gest_pt_cnt,gest_pt_y[gest_pt_cnt]);
+
+			gest_pt_cnt +=1;
+		}
+		if(gest_pt_cnt)
+			{
+				for(i=0; i<gest_pt_cnt; i++)
+					{
+						if(tmp_max_x<gest_pt_x[i])
+							tmp_max_x=gest_pt_x[i];
+						if(tmp_min_x>gest_pt_x[i])
+							tmp_min_x=gest_pt_x[i];
+						if(tmp_max_y<gest_pt_y[i])
+							tmp_max_y=gest_pt_y[i];
+						if(tmp_min_y>gest_pt_y[i])
+							tmp_min_y=gest_pt_y[i];
+					}
+				I("gest_point x_min= %d, x_max= %d, y_min= %d, y_max= %d\n",tmp_min_x,tmp_max_x,tmp_min_y,tmp_max_y);
+				gest_start_x=gest_pt_x[0];
+				gn_gesture_coor[0] = gest_start_x;
+				gest_start_y=gest_pt_y[0];
+				gn_gesture_coor[1] = gest_start_y;
+				gest_end_x=gest_pt_x[gest_pt_cnt-1];
+				gn_gesture_coor[2] = gest_end_x;
+				gest_end_y=gest_pt_y[gest_pt_cnt-1];
+				gn_gesture_coor[3] = gest_end_y;
+				gest_width = tmp_max_x - tmp_min_x;
+				gn_gesture_coor[4] = gest_width;
+				gest_height = tmp_max_y - tmp_min_y;
+				gn_gesture_coor[5] = gest_height;
+				gest_mid_x = (tmp_max_x + tmp_min_x)/2;
+				gn_gesture_coor[6] = gest_mid_x;
+				gest_mid_y = (tmp_max_y + tmp_min_y)/2;
+				gn_gesture_coor[7] = gest_mid_y;
+				gn_gesture_coor[8] = gest_mid_x;//gest_up_x
+				gn_gesture_coor[9] = gest_mid_y-gest_height/2;//gest_up_y
+				gn_gesture_coor[10] = gest_mid_x;//gest_down_x
+				gn_gesture_coor[11] = gest_mid_y+gest_height/2;	//gest_down_y
+				gn_gesture_coor[12] = gest_mid_x-gest_width/2;	//gest_left_x
+				gn_gesture_coor[13] = gest_mid_y;	//gest_left_y
+				gn_gesture_coor[14] = gest_mid_x+gest_width/2;	//gest_right_x
+				gn_gesture_coor[15] = gest_mid_y; //gest_right_y
+
+			}
+
+	}
+#endif
+	if(gesture_flag != 0x80)
+	{
+		if(!ts->gesture_cust_en[gesture_flag])
+			{
+				I("%s NOT report custom key \n ",__func__);
+				return 0;//NOT report custom key
+			}
+	}
+	else
+	{
+		if(!ts->gesture_cust_en[0])
+			{
+				I("%s NOT report double click \n",__func__);
+				return 0;//NOT report power key
+			}
+	}
+
+	if(gesture_flag == 0x80)
+		return EV_GESTURE_PWR;
+	else
+		return gesture_flag;
+}
+
+void himax_wake_check_func(void)
+{
+	int ret_event = 0, KEY_EVENT = 0;
+
+	ret_event = himax_parse_wake_event(private_ts);
+	switch (ret_event) {
+		case EV_GESTURE_PWR:
+			KEY_EVENT = KEY_POWER;
+		break;
+		case EV_GESTURE_01:
+			KEY_EVENT = KEY_CUST_01;
+		break;
+		case EV_GESTURE_02:
+			KEY_EVENT = KEY_CUST_02;
+		break;
+		case EV_GESTURE_03:
+			KEY_EVENT = KEY_CUST_03;
+		break;
+		case EV_GESTURE_04:
+			KEY_EVENT = KEY_CUST_04;
+		break;
+		case EV_GESTURE_05:
+			KEY_EVENT = KEY_CUST_05;
+		break;
+		case EV_GESTURE_06:
+			KEY_EVENT = KEY_CUST_06;
+		break;
+		case EV_GESTURE_07:
+			KEY_EVENT = KEY_CUST_07;
+		break;
+		case EV_GESTURE_08:
+			KEY_EVENT = KEY_CUST_08;
+		break;
+		case EV_GESTURE_09:
+			KEY_EVENT = KEY_CUST_09;
+		break;
+		case EV_GESTURE_10:
+			KEY_EVENT = KEY_CUST_10;
+		break;
+		case EV_GESTURE_11:
+			KEY_EVENT = KEY_CUST_11;
+		break;
+		case EV_GESTURE_12:
+			KEY_EVENT = KEY_CUST_12;
+		break;
+		case EV_GESTURE_13:
+			KEY_EVENT = KEY_CUST_13;
+		break;
+		case EV_GESTURE_14:
+			KEY_EVENT = KEY_CUST_14;
+		break;
+		case EV_GESTURE_15:
+			KEY_EVENT = KEY_CUST_15;
+		break;
+	}
+	if(ret_event)
+		{
+			I(" %s SMART WAKEUP KEY event %x press\n",__func__,KEY_EVENT);
+			input_report_key(private_ts->input_dev, KEY_EVENT, 1);
+			input_sync(private_ts->input_dev);
+			//msleep(100);
+			I(" %s SMART WAKEUP KEY event %x release\n",__func__,KEY_EVENT);
+			input_report_key(private_ts->input_dev, KEY_EVENT, 0);
+			input_sync(private_ts->input_dev);
+			FAKE_POWER_KEY_SEND=true;
+#ifdef HX_GESTURE_TRACK
+			I("gest_start_x= %d, gest_start_y= %d, gest_end_x= %d, gest_end_y= %d\n",gest_start_x,gest_start_y,
+			gest_end_x,gest_end_y);
+			I("gest_width= %d, gest_height= %d, gest_mid_x= %d, gest_mid_y= %d\n",gest_width,gest_height,
+			gest_mid_x,gest_mid_y);
+			I("gest_up_x= %d, gest_up_y= %d, gest_down_x= %d, gest_down_y= %d\n",gn_gesture_coor[8],gn_gesture_coor[9],
+			gn_gesture_coor[10],gn_gesture_coor[11]);
+			I("gest_left_x= %d, gest_left_y= %d, gest_right_x= %d, gest_right_y= %d\n",gn_gesture_coor[12],gn_gesture_coor[13],
+			gn_gesture_coor[14],gn_gesture_coor[15]);
+#endif
+		}
+}
+
+#endif
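+/*
+ * Virtual-key dispatch: tp_key_index 0x01 is back, 0x02 is home, 0x04 is
+ * the app-switch key (reported as KEY_F10), and 0x00 releases whichever
+ * key was pressed; with pdata->virtual_key set the keys are instead
+ * reported as touch points at the centre of their configured button areas.
+ */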
+static void himax_ts_button_func(int tp_key_index,struct himax_ts_data *ts)
+{
+	uint16_t x_position = 0, y_position = 0;
+	if (tp_key_index != 0x00)
+	{
+		I("virtual key index =%x\n",tp_key_index);
+		if ( tp_key_index == 0x01) {
+			vk_press = 1;
+			I("back key pressed\n");
+				if (ts->pdata->virtual_key)
+				{
+					if (ts->button[0].index) {
+						x_position = (ts->button[0].x_range_min + ts->button[0].x_range_max) / 2;
+						y_position = (ts->button[0].y_range_min + ts->button[0].y_range_max) / 2;
+					}
+					if (ts->protocol_type == PROTOCOL_TYPE_A) {
+						input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, 0);
+						input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR,
+							100);
+						input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR,
+							100);
+						input_report_abs(ts->input_dev, ABS_MT_PRESSURE,
+							100);
+						input_report_abs(ts->input_dev, ABS_MT_POSITION_X,
+							x_position);
+						input_report_abs(ts->input_dev, ABS_MT_POSITION_Y,
+							y_position);
+						input_mt_sync(ts->input_dev);
+					} else if (ts->protocol_type == PROTOCOL_TYPE_B) {
+						input_mt_slot(ts->input_dev, 0);
+						input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER,
+						1);
+						input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR,
+							100);
+						input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR,
+							100);
+						input_report_abs(ts->input_dev, ABS_MT_PRESSURE,
+							100);
+						input_report_abs(ts->input_dev, ABS_MT_POSITION_X,
+							x_position);
+						input_report_abs(ts->input_dev, ABS_MT_POSITION_Y,
+							y_position);
+					}
+				}
+				else
+					input_report_key(ts->input_dev, KEY_BACK, 1);
+		}
+		else if ( tp_key_index == 0x02) {
+			vk_press = 1;
+			I("home key pressed\n");
+				if (ts->pdata->virtual_key)
+				{
+					if (ts->button[1].index) {
+						x_position = (ts->button[1].x_range_min + ts->button[1].x_range_max) / 2;
+						y_position = (ts->button[1].y_range_min + ts->button[1].y_range_max) / 2;
+					}
+						if (ts->protocol_type == PROTOCOL_TYPE_A) {
+						input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, 0);
+						input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR,
+							100);
+						input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR,
+							100);
+						input_report_abs(ts->input_dev, ABS_MT_PRESSURE,
+							100);
+						input_report_abs(ts->input_dev, ABS_MT_POSITION_X,
+							x_position);
+						input_report_abs(ts->input_dev, ABS_MT_POSITION_Y,
+							y_position);
+						input_mt_sync(ts->input_dev);
+					} else if (ts->protocol_type == PROTOCOL_TYPE_B) {
+						input_mt_slot(ts->input_dev, 0);
+						input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER,
+						1);
+						input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR,
+							100);
+						input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR,
+							100);
+						input_report_abs(ts->input_dev, ABS_MT_PRESSURE,
+							100);
+						input_report_abs(ts->input_dev, ABS_MT_POSITION_X,
+							x_position);
+						input_report_abs(ts->input_dev, ABS_MT_POSITION_Y,
+							y_position);
+					}
+				}
+				else
+					input_report_key(ts->input_dev, KEY_HOME, 1);
+		}
+		else if ( tp_key_index == 0x04) {
+			vk_press = 1;
+			I("APP_switch key pressed\n");
+				if (ts->pdata->virtual_key)
+				{
+					if (ts->button[2].index) {
+						x_position = (ts->button[2].x_range_min + ts->button[2].x_range_max) / 2;
+						y_position = (ts->button[2].y_range_min + ts->button[2].y_range_max) / 2;
+					}
+						if (ts->protocol_type == PROTOCOL_TYPE_A) {
+						input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, 0);
+						input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR,
+							100);
+						input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR,
+							100);
+						input_report_abs(ts->input_dev, ABS_MT_PRESSURE,
+							100);
+						input_report_abs(ts->input_dev, ABS_MT_POSITION_X,
+							x_position);
+						input_report_abs(ts->input_dev, ABS_MT_POSITION_Y,
+							y_position);
+						input_mt_sync(ts->input_dev);
+					} else if (ts->protocol_type == PROTOCOL_TYPE_B) {
+						input_mt_slot(ts->input_dev, 0);
+						input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER,
+						1);
+						input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR,
+							100);
+						input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR,
+							100);
+						input_report_abs(ts->input_dev, ABS_MT_PRESSURE,
+							100);
+						input_report_abs(ts->input_dev, ABS_MT_POSITION_X,
+							x_position);
+						input_report_abs(ts->input_dev, ABS_MT_POSITION_Y,
+							y_position);
+					}
+				}
+				else
+					input_report_key(ts->input_dev, KEY_F10, 1);	
+		}
+		input_sync(ts->input_dev);
+	}
+	else /*tp_key_index = 0x00*/
+	{
+		I("virtual key released\n");
+		vk_press = 0;
+		if (ts->protocol_type == PROTOCOL_TYPE_A) {
+			input_mt_sync(ts->input_dev);
+		}
+		else if (ts->protocol_type == PROTOCOL_TYPE_B) {
+			input_mt_slot(ts->input_dev, 0);
+			input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, 0);
+		}
+		input_report_key(ts->input_dev, KEY_BACK, 0);
+		input_report_key(ts->input_dev, KEY_HOME, 0);
+		input_report_key(ts->input_dev, KEY_F10, 0);
+		input_sync(ts->input_dev);
+	}
+}
+
+void himax_ts_work(struct himax_ts_data *ts)
+{
+	int ret = 0;
+	uint8_t  finger_num, hw_reset_check[2];
+	uint8_t buf[128];
+	uint8_t finger_on = 0;
+	int32_t loop_i;
+	uint16_t check_sum_cal = 0;
+	int raw_cnt_max ;
+	int raw_cnt_rmd ;
+	int hx_touch_info_size;
+	uint8_t coordInfoSize = ts->coord_data_size + ts->area_data_size + 4;
+
+#ifdef HX_TP_PROC_DIAG
+	int16_t *mutual_data;
+	int16_t *self_data;
+	uint8_t diag_cmd;
+	int  	i;
+	int 	mul_num;
+	int 	self_num;
+	int RawDataLen = 0;
+	//coordinate dump start
+	char coordinate_char[15+(ic_data->HX_MAX_PT+5)*2*5+2];
+	//coordinate dump end
+#endif
+
+	memset(buf, 0x00, sizeof(buf));
+	memset(hw_reset_check, 0x00, sizeof(hw_reset_check));
+
+	raw_cnt_max = ic_data->HX_MAX_PT/4;
+	raw_cnt_rmd = ic_data->HX_MAX_PT%4;
+	
+#if defined(HX_USB_DETECT2)
+	himax_cable_detect_func();
+#endif
+
+	if (raw_cnt_rmd != 0x00) //more than 4 fingers
+	{
+		RawDataLen = cal_data_len(raw_cnt_rmd, ic_data->HX_MAX_PT, raw_cnt_max);
+		hx_touch_info_size = (ic_data->HX_MAX_PT+raw_cnt_max+2)*4;
+	}
+	else //less than 4 fingers
+	{
+		RawDataLen = cal_data_len(raw_cnt_rmd, ic_data->HX_MAX_PT, raw_cnt_max);
+		hx_touch_info_size = (ic_data->HX_MAX_PT+raw_cnt_max+1)*4;
+	}
+
+#ifdef HX_TP_PROC_DIAG
+	diag_cmd = getDiagCommand();
+	if( diag_cmd ){
+		ret = read_event_stack(ts->client, buf, 128);
+	}
+	else{
+		if(touch_monitor_stop_flag != 0){
+			ret = read_event_stack(ts->client, buf, 128);
+			touch_monitor_stop_flag-- ;
+		}
+		else{
+			ret = read_event_stack(ts->client, buf, hx_touch_info_size);
+		}
+	}
+
+	if (!ret)
+#else
+	if(!read_event_stack(ts->client, buf, hx_touch_info_size))
+#endif		
+	{
+		E("%s: can't read data from chip!\n", __func__);
+		goto err_workqueue_out;
+	}
+	post_read_event_stack(ts->client);
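+	/*
+	 * ESD heuristic: a packet full of 0xED bytes triggers an immediate
+	 * recovery, while all-zero packets only trigger one after being seen
+	 * on more than one consecutive read; both paths are skipped for the
+	 * first interrupt after a chip reset (HX_ESD_RESET_ACTIVATE).
+	 */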
+#ifdef HX_ESD_WORKAROUND
+	for(i = 0; i < hx_touch_info_size; i++)
+	{
+		if(buf[i] == 0xED)/*case 1 ESD recovery flow*/
+		{
+			check_sum_cal = 1;
+
+		}else if(buf[i] == 0x00)
+		{
+			ESD_00_Flag = 1;
+		}
+		else
+		{
+			check_sum_cal = 0;
+			ESD_00_counter = 0;
+	    ESD_00_Flag = 0;
+			i = hx_touch_info_size;
+			break;
+		}		
+	}
+	if (ESD_00_Flag == 1){
+		ESD_00_counter ++;
+	}
+	if (ESD_00_counter > 1){
+		check_sum_cal = 2;
+	}
+	
+	 if (check_sum_cal == 2 && HX_ESD_RESET_ACTIVATE == 0)
+	 {
+	  I("[HIMAX TP MSG]: ESD event checked - ALL Zero.\n");
+	  ESD_HW_REST();
+	  return;
+	 }
+	 
+	if (check_sum_cal == 1 && HX_ESD_RESET_ACTIVATE == 0)
+	{
+		I("[HIMAX TP MSG]: ESD event checked - ALL 0xED.\n");
+		ESD_HW_REST();
+		return;
+	}
+	else if (HX_ESD_RESET_ACTIVATE)
+	{
+#ifdef HX_SMART_WAKEUP
+		queue_delayed_work(ts->himax_smwp_wq, &ts->smwp_work, msecs_to_jiffies(50));
+#endif
+#ifdef HX_HIGH_SENSE
+		queue_delayed_work(ts->himax_hsen_wq, &ts->hsen_work, msecs_to_jiffies(50));
+#endif
+		HX_ESD_RESET_ACTIVATE = 0;/*drop 1st interrupts after chip reset*/
+		I("[HIMAX TP MSG]:%s: Back from reset, ready to serve.\n", __func__);
+	}
+#endif
+	for (loop_i = 0, check_sum_cal = 0; loop_i < hx_touch_info_size; loop_i++)
+		check_sum_cal += buf[loop_i];
+	
+	if ((check_sum_cal % 0x100 != 0) )
+	{
+		I("[HIMAX TP MSG] checksum fail : check_sum_cal: 0x%02X\n", check_sum_cal);
+		return;
+	}
+
+	if (ts->debug_log_level & BIT(0)) {
+		I("%s: raw data:\n", __func__);
+		for (loop_i = 0; loop_i < hx_touch_info_size; loop_i++) {
+			I("P %d = 0x%2.2X ", loop_i, buf[loop_i]);
+			if (loop_i % 8 == 7)
+				I("\n");
+		}
+	}
+
+	//touch monitor raw data fetch
+#ifdef HX_TP_PROC_DIAG
+	diag_cmd = getDiagCommand();
+	if (diag_cmd >= 1 && diag_cmd <= 6)
+	{
+		//Check 124th byte CRC
+		if(!diag_check_sum(hx_touch_info_size, buf))
+		{
+			goto bypass_checksum_failed_packet;
+		}
+#ifdef HX_TP_PROC_2T2R
+		if(Is_2T2R && diag_cmd == 4)
+		{
+			mutual_data = getMutualBuffer_2();
+			self_data 	= getSelfBuffer();
+
+			// initialize the block number of mutual and self
+			mul_num = getXChannel_2() * getYChannel_2();
+
+#ifdef HX_EN_SEL_BUTTON
+			self_num = getXChannel_2() + getYChannel_2() + ic_data->HX_BT_NUM;
+#else
+			self_num = getXChannel_2() + getYChannel_2();
+#endif
+		}
+		else
+#endif			
+		{
+			mutual_data = getMutualBuffer();
+			self_data 	= getSelfBuffer();
+
+			// initialize the block number of mutual and self
+			mul_num = getXChannel() * getYChannel();
+
+#ifdef HX_EN_SEL_BUTTON
+			self_num = getXChannel() + getYChannel() + ic_data->HX_BT_NUM;
+#else
+			self_num = getXChannel() + getYChannel();
+#endif
+		}
+
+		diag_parse_raw_data(hx_touch_info_size, RawDataLen, mul_num, self_num, buf, diag_cmd, mutual_data,	self_data);
+
+	}
+	else if (diag_cmd == 7)
+	{
+		memcpy(&(diag_coor[0]), &buf[0], 128);
+	}
+	//coordinate dump start
+	if (coordinate_dump_enable == 1)
+	{
+		for(i=0; i<(15 + (ic_data->HX_MAX_PT+5)*2*5); i++)
+		{
+			coordinate_char[i] = 0x20;
+		}
+		coordinate_char[15 + (ic_data->HX_MAX_PT+5)*2*5] = 0xD;
+		coordinate_char[15 + (ic_data->HX_MAX_PT+5)*2*5 + 1] = 0xA;
+	}
+	//coordinate dump end
+bypass_checksum_failed_packet:
+#endif
+		EN_NoiseFilter = (buf[HX_TOUCH_INFO_POINT_CNT+2]>>3);
+		//I("EN_NoiseFilter=%d\n",EN_NoiseFilter);
+		EN_NoiseFilter = EN_NoiseFilter & 0x01;
+		//I("EN_NoiseFilter2=%d\n",EN_NoiseFilter);
+
+#if defined(HX_EN_SEL_BUTTON) || defined(HX_EN_MUT_BUTTON)
+		tpd_key = (buf[HX_TOUCH_INFO_POINT_CNT+2]>>4);
+		if (tpd_key == 0x0F)/*All (VK+AA)leave*/
+		{
+			tpd_key = 0x00;
+		}
+		//I("[DEBUG] tpd_key:  %x\r\n", tpd_key);
+#else
+		tpd_key = 0x00;
+#endif
+
+		p_point_num = hx_point_num;
+
+		if (buf[HX_TOUCH_INFO_POINT_CNT] == 0xff)
+			hx_point_num = 0;
+		else
+			hx_point_num= buf[HX_TOUCH_INFO_POINT_CNT] & 0x0f;
+
+		// Touch Point information
+		if (hx_point_num != 0 ) {
+			if(vk_press == 0x00)
+				{
+					uint16_t old_finger = ts->pre_finger_mask;
+					ts->pre_finger_mask = 0;
+					finger_num = buf[coordInfoSize - 4] & 0x0F;
+					finger_on = 1;
+					AA_press = 1;
+					for (loop_i = 0; loop_i < ts->nFinger_support; loop_i++) {
+							int base = loop_i * 4;
+							int x = buf[base] << 8 | buf[base + 1];
+							int y = (buf[base + 2] << 8 | buf[base + 3]);
+							int w = buf[(ts->nFinger_support * 4) + loop_i];
+						if(x >= 0 && x <= ts->pdata->abs_x_max && y >= 0 && y <= ts->pdata->abs_y_max){
+							finger_num--;
+
+							if ((ts->debug_log_level & BIT(3)) > 0)
+							{
+								if (old_finger >> loop_i == 0)
+								{
+									if (ts->useScreenRes)
+									{
+										I("status: Screen:F:%02d Down, X:%d, Y:%d, W:%d, N:%d\n",
+										loop_i+1, x * ts->widthFactor >> SHIFTBITS,
+										y * ts->heightFactor >> SHIFTBITS, w, EN_NoiseFilter);
+									}
+									else
+									{
+										I("status: Raw:F:%02d Down, X:%d, Y:%d, W:%d, N:%d\n",
+										loop_i+1, x, y, w, EN_NoiseFilter);
+									}
+								}
+							}
+
+							if (ts->protocol_type == PROTOCOL_TYPE_B)
+							{
+								input_mt_slot(ts->input_dev, loop_i);
+							}
+
+							input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, w);
+							input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, w);
+							input_report_abs(ts->input_dev, ABS_MT_PRESSURE, w);
+							input_report_abs(ts->input_dev, ABS_MT_POSITION_X, x);
+							input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, y);
+							
+							if (ts->protocol_type == PROTOCOL_TYPE_A)
+							{
+								input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, loop_i);
+								input_mt_sync(ts->input_dev);
+							}
+							else
+							{
+								ts->last_slot = loop_i;
+								input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, 1);
+							}
+
+							if (!ts->first_pressed)
+							{
+								ts->first_pressed = 1;
+								I("S1@%d, %d\n", x, y);
+							}
+
+							ts->pre_finger_data[loop_i][0] = x;
+							ts->pre_finger_data[loop_i][1] = y;
+
+
+							if (ts->debug_log_level & BIT(1))
+								I("Finger %d=> X:%d, Y:%d W:%d, Z:%d, F:%d, N:%d\n",
+									loop_i + 1, x, y, w, w, loop_i + 1, EN_NoiseFilter);
+										
+							ts->pre_finger_mask = ts->pre_finger_mask + (1 << loop_i);
+											
+						} else {
+							if (ts->protocol_type == PROTOCOL_TYPE_B)
+							{
+								input_mt_slot(ts->input_dev, loop_i);
+								input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, 0);
+							}
+
+							if (loop_i == 0 && ts->first_pressed == 1)
+							{
+								ts->first_pressed = 2;
+								I("E1@%d, %d\n",
+								ts->pre_finger_data[0][0] , ts->pre_finger_data[0][1]);
+							}
+							if ((ts->debug_log_level & BIT(3)) > 0)
+							{
+								if (old_finger >> loop_i == 1)
+								{
+									if (ts->useScreenRes)
+									{
+										I("status: Screen:F:%02d Up, X:%d, Y:%d, N:%d\n",
+										loop_i+1, ts->pre_finger_data[loop_i][0] * ts->widthFactor >> SHIFTBITS,
+										ts->pre_finger_data[loop_i][1] * ts->heightFactor >> SHIFTBITS, Last_EN_NoiseFilter);
+									}
+									else
+									{
+										I("status: Raw:F:%02d Up, X:%d, Y:%d, N:%d\n",
+										loop_i+1, ts->pre_finger_data[loop_i][0],
+										ts->pre_finger_data[loop_i][1], Last_EN_NoiseFilter);
+									}
+								}
+							}
+						}
+					}
+
+				}else if ((tpd_key_old != 0x00)&&(tpd_key == 0x00)) {
+					//temp_x[0] = 0xFFFF;
+					//temp_y[0] = 0xFFFF;
+					//temp_x[1] = 0xFFFF;
+					//temp_y[1] = 0xFFFF;
+					himax_ts_button_func(tpd_key,ts);
+					finger_on = 0;
+				}
+			input_report_key(ts->input_dev, BTN_TOUCH, finger_on);
+			input_sync(ts->input_dev);
+		} else if (hx_point_num == 0){
+			if(AA_press)
+				{
+				// leave event
+				finger_on = 0;
+				AA_press = 0;
+				if (ts->protocol_type == PROTOCOL_TYPE_A)
+					input_mt_sync(ts->input_dev);
+
+				for (loop_i = 0; loop_i < ts->nFinger_support; loop_i++) {
+						if (((ts->pre_finger_mask >> loop_i) & 1) == 1) {
+							if (ts->protocol_type == PROTOCOL_TYPE_B) {
+								input_mt_slot(ts->input_dev, loop_i);
+								input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, 0);
+							}
+						}
+					}
+				if (ts->pre_finger_mask > 0) {
+					for (loop_i = 0; loop_i < ts->nFinger_support && (ts->debug_log_level & BIT(3)) > 0; loop_i++) {
+						if (((ts->pre_finger_mask >> loop_i) & 1) == 1) {
+							if (ts->useScreenRes) {
+								I("status:%X, Screen:F:%02d Up, X:%d, Y:%d, N:%d\n", 0, loop_i+1, ts->pre_finger_data[loop_i][0] * ts->widthFactor >> SHIFTBITS,
+									ts->pre_finger_data[loop_i][1] * ts->heightFactor >> SHIFTBITS, Last_EN_NoiseFilter);
+							} else {
+								I("status:%X, Raw:F:%02d Up, X:%d, Y:%d, N:%d\n",0, loop_i+1, ts->pre_finger_data[loop_i][0],ts->pre_finger_data[loop_i][1], Last_EN_NoiseFilter);
+							}
+						}
+					}
+					ts->pre_finger_mask = 0;
+				}
+
+				if (ts->first_pressed == 1) {
+					ts->first_pressed = 2;
+					I("E1@%d, %d\n",ts->pre_finger_data[0][0] , ts->pre_finger_data[0][1]);
+				}
+
+				if (ts->debug_log_level & BIT(1))
+					I("All Finger leave\n");
+
+			}
+			else if (tpd_key != 0x00) {
+				himax_ts_button_func(tpd_key,ts);
+				finger_on = 1;
+			}
+			else if ((tpd_key_old != 0x00)&&(tpd_key == 0x00)) {
+				himax_ts_button_func(tpd_key,ts);
+				finger_on = 0;
+			}
+			input_report_key(ts->input_dev, BTN_TOUCH, finger_on);
+			input_sync(ts->input_dev);
+		}
+		tpd_key_old = tpd_key;
+		Last_EN_NoiseFilter = EN_NoiseFilter;
+
+workqueue_out:
+	return;
+
+err_workqueue_out:
+	I("%s: Now reset the Touch chip.\n", __func__);
+
+#ifdef HX_RST_PIN_FUNC
+	himax_HW_reset(true,false);
+#endif
+
+	goto workqueue_out;
+}
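+
+/*
+ * Polling fallback when no IRQ is used: the hrtimer queues the work item
+ * and re-arms itself every 12.5 ms (ktime_set(0, 12500000)).
+ */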
+enum hrtimer_restart himax_ts_timer_func(struct hrtimer *timer)
+{
+	struct himax_ts_data *ts;
+
+	ts = container_of(timer, struct himax_ts_data, timer);
+	queue_work(ts->himax_wq, &ts->work);
+	hrtimer_start(&ts->timer, ktime_set(0, 12500000), HRTIMER_MODE_REL);
+	return HRTIMER_NORESTART;
+}
+
+#if defined(HX_USB_DETECT)
+static void himax_cable_tp_status_handler_func(int connect_status)
+{
+	struct himax_ts_data *ts;
+	I("Touch: cable change to %d\n", connect_status);
+	ts = private_ts;
+	if (ts->cable_config) {
+		if (!atomic_read(&ts->suspend_mode)) {
+			if ((!!connect_status) != ts->usb_connected) {
+				if (!!connect_status) {
+					ts->cable_config[1] = 0x01;
+					ts->usb_connected = 0x01;
+				} else {
+					ts->cable_config[1] = 0x00;
+					ts->usb_connected = 0x00;
+				}
+
+				i2c_himax_master_write(ts->client, ts->cable_config,
+					sizeof(ts->cable_config), HIMAX_I2C_RETRY_TIMES);
+
+				I("%s: Cable status change: 0x%2.2X\n", __func__, ts->cable_config[1]);
+			} else
+				I("%s: Cable status is the same as previous one, ignore.\n", __func__);
+		} else {
+			if (connect_status)
+				ts->usb_connected = 0x01;
+			else
+				ts->usb_connected = 0x00;
+			I("%s: Cable status remembered: 0x%2.2X\n", __func__, ts->usb_connected);
+		}
+	}
+}
+
+static struct t_cable_status_notifier himax_cable_status_handler = {
+	.name = "usb_tp_connected",
+	.func = himax_cable_tp_status_handler_func,
+};
+
+#endif
+
+#if defined(HX_USB_DETECT2)
+void himax_cable_detect_func(void)
+{
+	uint8_t tmp_addr[4];
+	uint8_t tmp_data[128];
+	struct himax_ts_data *ts;
+	u32 connect_status = 0;
+
+	connect_status = USB_Flag;//upmu_is_chr_det();
+	ts = private_ts;
+	//I("Touch: cable status=%d, cable_config=%p, usb_connected=%d \n", connect_status,ts->cable_config, ts->usb_connected);
+	if (ts->cable_config) {
+		if ((!!connect_status) != ts->usb_connected) {
+			//notify USB plug/unplug
+			// 0x9008_8060 ==> 0x0000_0000/0001
+			tmp_addr[3] = 0x90; tmp_addr[2] = 0x08; tmp_addr[1] = 0x80; tmp_addr[0] = 0x60;
+			tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00;
+			
+			if (!!connect_status) {
+				tmp_data[0] = 0x01;
+				ts->usb_connected = 0x01;
+			} else {
+				tmp_data[0] = 0x00;
+				ts->usb_connected = 0x00;
+			}
+
+			himax_flash_write_burst(ts->client, tmp_addr, tmp_data);
+
+			I("%s: Cable status change: 0x%2.2X\n", __func__, ts->usb_connected);
+			}
+		//else
+			//I("%s: Cable status is the same as previous one, ignore.\n", __func__);
+	}
+}
+#endif
+
+#ifdef CONFIG_FB
+int himax_fb_register(struct himax_ts_data *ts)
+{
+	int ret = 0;
+
+	I(" %s in", __func__);
+	ts->fb_notif.notifier_call = fb_notifier_callback;
+	ret = fb_register_client(&ts->fb_notif);
+	if (ret)
+		E(" Unable to register fb_notifier: %d\n", ret);
+
+	return ret;
+}
+#endif
+
+#ifdef HX_SMART_WAKEUP
+void himax_set_SMWP_func(struct i2c_client *client,uint8_t SMWP_enable)
+{
+	uint8_t tmp_data[4];
+
+	if(SMWP_enable)
+	{
+		SMWP_bit_retry:
+		himax_set_SMWP_enable(client, SMWP_enable);
+		msleep(20);
+		himax_get_SMWP_enable(client,tmp_data);
+		I("%s: Read SMWP bit data[0]=%x data[1]=%x data[2]=%x data[3]=%x\n", __func__
+			,tmp_data[0],tmp_data[1],tmp_data[2],tmp_data[3]);
+		if(tmp_data[0]!= 0x01)
+			{
+				I("%s: retry SMWP bit write data[0]=%x  \n",__func__,tmp_data[0]);
+				goto SMWP_bit_retry;
+			}
+	}
+}
+
+static void himax_SMWP_work(struct work_struct *work)
+{
+	struct himax_ts_data *ts = container_of(work, struct himax_ts_data,
+							smwp_work.work);
+	I(" %s in", __func__);
+
+	himax_set_SMWP_func(ts->client,ts->SMWP_enable);
+
+}
+#endif
+
+#ifdef  HX_TP_PROC_FLASH_DUMP
+static void himax_ts_flash_work_func(struct work_struct *work)
+{
+	himax_ts_flash_func();
+}
+#endif
+
+#ifdef  HX_TP_PROC_DIAG
+static void himax_ts_diag_work_func(struct work_struct *work)
+{
+	himax_ts_diag_func();
+}
+#endif
+
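+/*
+ * Deferred initialization path: verifies the Himax chip is present, runs the
+ * optional auto FW update, loads the sensor configuration, sets up the
+ * diag/flash workqueues and buffers, registers the input device and finally
+ * hooks up the interrupt. The probe path defers this until the FB unblank
+ * event (see the comment in himax_chip_common_probe below).
+ */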
+void himax_ts_init(struct himax_ts_data *ts)
+{
+	int ret = 0, err = 0;
+	struct himax_i2c_platform_data *pdata;
+	struct i2c_client *client;
+
+	client = ts->client;
+	pdata = ts->pdata;
+
+	I("%s: Start.\n", __func__);
+
+	/* Set pinctrl in active state */
+	if (ts->ts_pinctrl) {
+		ret = pinctrl_select_state(ts->ts_pinctrl,
+					ts->pinctrl_state_active);
+		if (ret < 0) {
+			E("Failed to set pin in active state %d",ret);
+		}
+	}
+
+	himax_burst_enable(client, 0);
+
+	//Get Himax IC Type / FW information / Calculate the point number
+	if (himax_check_chip_version(ts->client) == false) {
+		E("Himax chip does NOT EXIST\n");
+		goto err_ic_package_failed;
+	}
+	if (himax_ic_package_check(ts->client) == false) {
+		E("Himax chip does NOT EXIST\n");
+		goto err_ic_package_failed;
+	}
+
+	if (pdata->virtual_key)
+		ts->button = pdata->virtual_key;
+#ifdef  HX_TP_PROC_FLASH_DUMP
+	ts->flash_wq = create_singlethread_workqueue("himax_flash_wq");
+	if (!ts->flash_wq)
+	{
+		E("%s: create flash workqueue failed\n", __func__);
+		err = -ENOMEM;
+		goto err_create_wq_failed;
+	}
+
+	INIT_WORK(&ts->flash_work, himax_ts_flash_work_func);
+
+	setSysOperation(0);
+	setFlashBuffer();
+#endif
+
+#ifdef  HX_TP_PROC_DIAG
+	ts->himax_diag_wq = create_singlethread_workqueue("himax_diag");
+	if (!ts->himax_diag_wq)
+	{
+		E("%s: create diag workqueue failed\n", __func__);
+		err = -ENOMEM;
+		goto err_create_wq_failed;
+	}
+	INIT_DELAYED_WORK(&ts->himax_diag_delay_wrok, himax_ts_diag_work_func);
+#endif
+
+	himax_read_FW_ver(client);
+
+#ifdef HX_AUTO_UPDATE_FW
+	I(" %s in", __func__);
+	if(i_update_FW() == false)
+		I("No new FW, no update performed\n");
+	else
+		I("New FW found, update performed\n");
+#endif
+
+	//Himax Power On and Load Config
+	if (himax_loadSensorConfig(client, pdata) < 0) {
+		E("%s: Load Sensor configuration failed, unload driver.\n", __func__);
+		goto err_detect_failed;
+	}
+
+	calculate_point_number();
+#ifdef HX_TP_PROC_DIAG
+	setXChannel(ic_data->HX_RX_NUM); // X channel
+	setYChannel(ic_data->HX_TX_NUM); // Y channel
+
+	setMutualBuffer();
+	setMutualNewBuffer();
+	setMutualOldBuffer();
+	if (getMutualBuffer() == NULL) {
+		E("%s: mutual buffer allocation failed\n", __func__);
+		return;
+	}
+#ifdef HX_TP_PROC_2T2R
+	if(Is_2T2R){
+		setXChannel_2(ic_data->HX_RX_NUM_2); // X channel
+		setYChannel_2(ic_data->HX_TX_NUM_2); // Y channel
+
+		setMutualBuffer_2();
+
+		if (getMutualBuffer_2() == NULL) {
+			E("%s: mutual buffer 2 allocation failed\n", __func__);
+			return;
+		}
+	}
+#endif
+#endif
+#ifdef CONFIG_OF
+	ts->power = pdata->power;
+#endif
+	ts->pdata = pdata;
+
+	ts->x_channel = ic_data->HX_RX_NUM;
+	ts->y_channel = ic_data->HX_TX_NUM;
+	ts->nFinger_support = ic_data->HX_MAX_PT;
+	//calculate the i2c data size
+	calcDataSize(ts->nFinger_support);
+	I("%s: calcDataSize complete\n", __func__);
+#ifdef CONFIG_OF
+	ts->pdata->abs_pressure_min = 0;
+	ts->pdata->abs_pressure_max = 200;
+	ts->pdata->abs_width_min = 0;
+	ts->pdata->abs_width_max = 200;
+	pdata->cable_config[0] = 0x90;
+	pdata->cable_config[1] = 0x00;
+#endif
+	ts->suspended = false;
+#if defined(HX_USB_DETECT)||defined(HX_USB_DETECT2)
+	ts->usb_connected = 0x00;
+	ts->cable_config = pdata->cable_config;
+#endif
+	ts->protocol_type = pdata->protocol_type;
+	I("%s: Use Protocol Type %c\n", __func__,
+	ts->protocol_type == PROTOCOL_TYPE_A ? 'A' : 'B');
+
+	ret = himax_input_register(ts);
+	if (ret) {
+		E("%s: Unable to register %s input device\n",
+			__func__, ts->input_dev->name);
+		goto err_input_register_device_failed;
+	}
+#ifdef HX_SMART_WAKEUP
+	ts->SMWP_enable=0;
+	wake_lock_init(&ts->ts_SMWP_wake_lock, WAKE_LOCK_SUSPEND, HIMAX_common_NAME);
+
+	ts->himax_smwp_wq = create_singlethread_workqueue("HMX_SMWP_WORK");
+	if (!ts->himax_smwp_wq) {
+		E(" allocate himax_smwp_wq failed\n");
+		err = -ENOMEM;
+		goto err_smwp_wq_failed;
+	}
+	INIT_DELAYED_WORK(&ts->smwp_work, himax_SMWP_work);
+#endif
+#ifdef HX_HIGH_SENSE
+	ts->HSEN_enable=0;
+	ts->himax_hsen_wq = create_singlethread_workqueue("HMX_HSEN_WORK");
+	if (!ts->himax_hsen_wq) {
+		E(" allocate himax_hsen_wq failed\n");
+		err = -ENOMEM;
+		goto err_hsen_wq_failed;
+	}
+	INIT_DELAYED_WORK(&ts->hsen_work, himax_HSEN_func);
+#endif
+
+#if defined(CONFIG_TOUCHSCREEN_HIMAX_DEBUG)
+	himax_touch_proc_init();
+#endif
+
+#if defined(HX_USB_DETECT)
+	if (ts->cable_config)
+		cable_detect_register_notifier(&himax_cable_status_handler);
+#endif
+
+	err = himax_ts_register_interrupt(ts->client);
+	if (err)
+		goto err_register_interrupt_failed;
+	return;
+
+err_register_interrupt_failed:
+#ifdef HX_HIGH_SENSE
+err_hsen_wq_failed:
+#endif
+#ifdef HX_SMART_WAKEUP
+err_smwp_wq_failed:
+	wake_lock_destroy(&ts->ts_SMWP_wake_lock);
+#endif
+err_input_register_device_failed:
+	input_free_device(ts->input_dev);
+err_detect_failed:
+#ifdef  HX_TP_PROC_FLASH_DUMP
+err_create_wq_failed:
+#endif
+err_ic_package_failed:
+
+	return;
+}
+
+int himax_chip_common_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+	int err = 0;
+	struct himax_ts_data *ts;
+	struct himax_i2c_platform_data *pdata;
+
+	//Check I2C functionality
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		E("%s: i2c check functionality error\n", __func__);
+		err = -ENODEV;
+		goto err_check_functionality_failed;
+	}
+
+	ts = kzalloc(sizeof(struct himax_ts_data), GFP_KERNEL);
+	if (ts == NULL) {
+		E("%s: allocate himax_ts_data failed\n", __func__);
+		err = -ENOMEM;
+		goto err_alloc_data_failed;
+	}
+
+	i2c_set_clientdata(client, ts);
+	ts->client = client;
+	ts->dev = &client->dev;
+
+	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+	if (pdata == NULL) { /*Allocate Platform data space*/
+		err = -ENOMEM;
+		goto err_dt_platform_data_fail;
+	}
+
+	ic_data = kzalloc(sizeof(*ic_data), GFP_KERNEL);
+	if (ic_data == NULL) { /*Allocate IC data space*/
+		err = -ENOMEM;
+		goto err_dt_ic_data_fail;
+	}
+
+#ifdef CONFIG_OF
+	if (client->dev.of_node) { /*DeviceTree Init Platform_data*/
+		err = himax_parse_dt(ts, pdata);
+		if (err < 0) {
+			I(" pdata is NULL for DT\n");
+			goto err_alloc_dt_pdata_failed;
+		}
+	}
+#endif
+
+#ifdef HX_RST_PIN_FUNC
+	ts->rst_gpio = pdata->gpio_reset;
+#endif
+
+	himax_gpio_power_config(ts->client, pdata);
+
+	err = himax_ts_pinctrl_init(ts);
+	if (err || ts->ts_pinctrl == NULL) {
+		E(" Pinctrl init failed\n");
+	}
+
+#ifndef CONFIG_OF
+	if (pdata->power) {
+		err = pdata->power(1);
+		if (err < 0) {
+			E("%s: power on failed\n", __func__);
+			goto err_power_failed;
+		}
+	}
+#endif
+	ts->pdata = pdata;
+	private_ts = ts;
+
+	mutex_init(&ts->fb_mutex);
+	/* ts initialization is deferred till the FB_BLANK_UNBLANK event;
+	 * probe is considered pending till then. */
+	ts->probe_done = false;
+#ifdef CONFIG_FB
+	err = himax_fb_register(ts);
+	if (err) {
+		E("Failed to register fb notifier\n");
+		err = -ENOMEM;
+		goto err_fb_notif_wq_create;
+	}
+#endif
+
+	return 0;
+
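+	/* Error unwind: release resources in the reverse order of allocation */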
+#ifdef CONFIG_FB
+err_fb_notif_wq_create:
+#endif
+#ifdef CONFIG_OF
+err_alloc_dt_pdata_failed:
+#else
+err_power_failed:
+err_get_platform_data_fail:
+#endif
+	if (ts->ts_pinctrl) {
+		if (IS_ERR_OR_NULL(ts->pinctrl_state_release)) {
+			devm_pinctrl_put(ts->ts_pinctrl);
+			ts->ts_pinctrl = NULL;
+		} else {
+			err = pinctrl_select_state(ts->ts_pinctrl,
+					ts->pinctrl_state_release);
+			if (err)
+				E("failed to select release pinctrl state %d\n",
+					err);
+		}
+	}
+	kfree(ic_data);
+
+err_dt_ic_data_fail:
+	kfree(pdata);
+
+err_dt_platform_data_fail:
+	kfree(ts);
+
+err_alloc_data_failed:
+
+err_check_functionality_failed:
+	probe_fail_flag = 1;
+	return err;
+
+}
+
+int himax_chip_common_remove(struct i2c_client *client)
+{
+	struct himax_ts_data *ts = i2c_get_clientdata(client);
+	int ret;
+#if defined(CONFIG_TOUCHSCREEN_HIMAX_DEBUG)
+	himax_touch_proc_deinit();
+#endif
+#ifdef CONFIG_FB
+	if (fb_unregister_client(&ts->fb_notif))
+		dev_err(&client->dev, "Error occurred while unregistering fb_notifier.\n");
+#endif
+
+	if (!ts->use_irq)
+		hrtimer_cancel(&ts->timer);
+
+	destroy_workqueue(ts->himax_wq);
+
+	if (ts->protocol_type == PROTOCOL_TYPE_B)
+		input_mt_destroy_slots(ts->input_dev);
+
+	input_unregister_device(ts->input_dev);
+
+	if (ts->ts_pinctrl) {
+		if (IS_ERR_OR_NULL(ts->pinctrl_state_release)) {
+			devm_pinctrl_put(ts->ts_pinctrl);
+			ts->ts_pinctrl = NULL;
+		} else {
+			ret = pinctrl_select_state(ts->ts_pinctrl,
+					ts->pinctrl_state_release);
+			if (ret)
+				E("failed to select release pinctrl state %d\n",
+					ret);
+		}
+	}
+#ifdef HX_SMART_WAKEUP
+		wake_lock_destroy(&ts->ts_SMWP_wake_lock);
+#endif
+	kfree(ts);
+
+	return 0;
+
+}
+
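+/* Suspend is refused while a flash dump is in progress, while Hitouch is
+ * connected, or when smart wakeup (SMWP) is enabled; otherwise any pending
+ * touch work is cancelled, the suspend pinctrl state is selected and, if
+ * supported, the 3V3 rail is switched off.
+ */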
+int himax_chip_common_suspend(struct himax_ts_data *ts)
+{
+	int ret;
+
+	if(ts->suspended)
+	{
+		I("%s: Already suspended, skipping\n", __func__);
+		return 0;
+	}
+	else
+	{
+		ts->suspended = true;
+		I("%s: enter \n", __func__);
+	}
+
+#ifdef HX_TP_PROC_FLASH_DUMP
+	if (getFlashDumpGoing())
+	{
+		I("[himax] %s: Flash dump is going, reject suspend\n",__func__);
+		return 0;
+	}
+#endif
+#ifdef HX_TP_PROC_HITOUCH
+	if(hitouch_is_connect)
+	{
+		I("[himax] %s: Hitouch connect, reject suspend\n",__func__);
+		return 0;
+	}
+#endif
+#ifdef HX_SMART_WAKEUP
+	if(ts->SMWP_enable)
+	{
+		atomic_set(&ts->suspend_mode, 1);
+		ts->pre_finger_mask = 0;
+		FAKE_POWER_KEY_SEND=false;
+		I("[himax] %s: SMART_WAKEUP enable, reject suspend\n",__func__);
+		return 0;
+	}
+#endif
+#ifdef HX_ESD_WORKAROUND
+	ESD_00_counter = 0;
+	ESD_00_Flag = 0;
+#endif
+	if (!ts->use_irq) {
+		ret = cancel_work_sync(&ts->work);
+		if (ret)
+			himax_int_enable(ts->client->irq,1);
+	}
+
+	//ts->first_pressed = 0;
+	atomic_set(&ts->suspend_mode, 1);
+	ts->pre_finger_mask = 0;
+
+	if (ts->ts_pinctrl) {
+		ret = pinctrl_select_state(ts->ts_pinctrl,
+				ts->pinctrl_state_suspend);
+		if (ret < 0) {
+			E("Failed to get idle pinctrl state %d\n", ret);
+		}
+	}
+
+	if (ts->pdata->powerOff3V3 && ts->pdata->power)
+		ts->pdata->power(0);
+
+	return 0;
+}
+
+int himax_chip_common_resume(struct himax_ts_data *ts)
+{
+	int retval;
+
+	I("%s: enter \n", __func__);
+
+	if (ts->pdata->powerOff3V3 && ts->pdata->power)
+		ts->pdata->power(1);
+
+	/*************************************/
+	if (ts->protocol_type == PROTOCOL_TYPE_A)
+		input_mt_sync(ts->input_dev);
+	input_report_key(ts->input_dev, BTN_TOUCH, 0);
+	input_sync(ts->input_dev);
+	/*************************************/
+
+	if (ts->ts_pinctrl) {
+		retval = pinctrl_select_state(ts->ts_pinctrl,
+				ts->pinctrl_state_active);
+		if (retval < 0) {
+			E("Cannot get default pinctrl state %d\n", retval);
+			goto err_pinctrl_select_resume;
+		}
+	}
+
+	atomic_set(&ts->suspend_mode, 0);
+
+	himax_int_enable(ts->client->irq,1);
+
+	ts->suspended = false;
+#if defined(HX_USB_DETECT2)
+	ts->usb_connected = 0x00;
+	himax_cable_detect_func();
+#endif
+#ifdef HX_SMART_WAKEUP
+	queue_delayed_work(ts->himax_smwp_wq, &ts->smwp_work, msecs_to_jiffies(1000));
+#endif
+#ifdef HX_HIGH_SENSE
+	queue_delayed_work(ts->himax_hsen_wq, &ts->hsen_work, msecs_to_jiffies(1000));
+#endif
+	return 0;
+err_pinctrl_select_resume:
+	if (ts->pdata->powerOff3V3 && ts->pdata->power)
+		ts->pdata->power(0);
+	return retval;
+}
+
diff --git a/drivers/input/touchscreen/hxchipset/himax_common.h b/drivers/input/touchscreen/hxchipset/himax_common.h
new file mode 100644
index 0000000..27ce9aa
--- /dev/null
+++ b/drivers/input/touchscreen/hxchipset/himax_common.h
@@ -0,0 +1,395 @@
+/* Himax Android Driver Sample Code for Himax chipset
+*
+* Copyright (C) 2015 Himax Corporation.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+*/
+
+#ifndef HIMAX_COMMON_H
+#define HIMAX_COMMON_H
+
+#include <asm/segment.h>
+#include <asm/uaccess.h>
+#include <asm/atomic.h>
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/async.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/input/mt.h>
+#include <linux/firmware.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include "himax_platform.h"
+
+#if defined(CONFIG_FB)
+#include <linux/notifier.h>
+#include <linux/fb.h>
+#elif defined(CONFIG_HAS_EARLYSUSPEND)
+#include <linux/earlysuspend.h>
+#endif
+
+#ifdef CONFIG_OF
+#include <linux/of_gpio.h>
+#endif
+#define HIMAX_DRIVER_VER "0.2.4.0"
+
+#define FLASH_DUMP_FILE "/data/user/Flash_Dump.bin"
+#define DIAG_COORDINATE_FILE "/sdcard/Coordinate_Dump.csv"
+
+#if defined(CONFIG_TOUCHSCREEN_HIMAX_DEBUG)
+
+#define HX_TP_PROC_DIAG
+#define HX_TP_PROC_REGISTER
+#define HX_TP_PROC_DEBUG
+#define HX_TP_PROC_FLASH_DUMP
+#define HX_TP_PROC_SELF_TEST
+#define HX_TP_PROC_RESET
+#define HX_TP_PROC_SENSE_ON_OFF
+//#define HX_TP_PROC_2T2R
+
+int himax_touch_proc_init(void);
+void himax_touch_proc_deinit(void);
+#endif
+
+//===========Himax Option function=============
+//#define HX_RST_PIN_FUNC
+//#define HX_AUTO_UPDATE_FW
+//#define HX_HIGH_SENSE
+//#define HX_SMART_WAKEUP
+//#define HX_USB_DETECT
+//#define HX_ESD_WORKAROUND
+//#define HX_USB_DETECT2
+
+//#define HX_EN_SEL_BUTTON		// Support self virtual key, disabled by default
+#define HX_EN_MUT_BUTTON		// Support mutual virtual key, disabled by default
+
+#define HX_KEY_MAX_COUNT             4			
+#define DEFAULT_RETRY_CNT            3
+
+#define HX_VKEY_0   KEY_BACK
+#define HX_VKEY_1   KEY_HOME
+#define HX_VKEY_2   KEY_RESERVED
+#define HX_VKEY_3   KEY_RESERVED
+#define HX_KEY_ARRAY    {HX_VKEY_0, HX_VKEY_1, HX_VKEY_2, HX_VKEY_3}
+
+#define SHIFTBITS 5
+//#define FLASH_SIZE 131072
+#define  FW_SIZE_60k 	61440
+#define  FW_SIZE_64k 	65536
+#define  FW_SIZE_124k 	126976
+#define  FW_SIZE_128k 	131072
+
+struct himax_ic_data {
+	int vendor_fw_ver;
+	int vendor_config_ver;
+	int vendor_sensor_id;
+	int		HX_RX_NUM;
+	int		HX_TX_NUM;
+	int		HX_BT_NUM;
+	int		HX_X_RES;
+	int		HX_Y_RES;
+	int		HX_MAX_PT;
+	bool	HX_XY_REVERSE;
+	bool	HX_INT_IS_EDGE;
+#ifdef HX_TP_PROC_2T2R
+	int HX_RX_NUM_2;
+	int HX_TX_NUM_2;
+#endif
+};
+
+struct himax_virtual_key {
+	int index;
+	int keycode;
+	int x_range_min;
+	int x_range_max;
+	int y_range_min;
+	int y_range_max;
+};
+
+struct himax_config {
+	uint8_t  default_cfg;
+	uint8_t  sensor_id;
+	uint8_t  fw_ver;
+	uint16_t length;
+	uint32_t tw_x_min;
+	uint32_t tw_x_max;
+	uint32_t tw_y_min;
+	uint32_t tw_y_max;
+	uint32_t pl_x_min;
+	uint32_t pl_x_max;
+	uint32_t pl_y_min;
+	uint32_t pl_y_max;
+	uint8_t c1[11];
+	uint8_t c2[11];
+	uint8_t c3[11];
+	uint8_t c4[11];
+	uint8_t c5[11];
+	uint8_t c6[11];
+	uint8_t c7[11];
+	uint8_t c8[11];
+	uint8_t c9[11];
+	uint8_t c10[11];
+	uint8_t c11[11];
+	uint8_t c12[11];
+	uint8_t c13[11];
+	uint8_t c14[11];
+	uint8_t c15[11];
+	uint8_t c16[11];
+	uint8_t c17[11];
+	uint8_t c18[17];
+	uint8_t c19[15];
+	uint8_t c20[5];
+	uint8_t c21[11];
+	uint8_t c22[4];
+	uint8_t c23[3];
+	uint8_t c24[3];
+	uint8_t c25[4];
+	uint8_t c26[2];
+	uint8_t c27[2];
+	uint8_t c28[2];
+	uint8_t c29[2];
+	uint8_t c30[2];
+	uint8_t c31[2];
+	uint8_t c32[2];
+	uint8_t c33[2];
+	uint8_t c34[2];
+	uint8_t c35[3];
+	uint8_t c36[5];
+	uint8_t c37[5];
+	uint8_t c38[9];
+	uint8_t c39[14];
+	uint8_t c40[159];
+	uint8_t c41[99];
+};
+
+struct himax_ts_data {
+	bool suspended;
+	bool probe_done;
+	struct mutex fb_mutex;
+	atomic_t suspend_mode;
+	uint8_t x_channel;
+	uint8_t y_channel;
+	uint8_t useScreenRes;
+	uint8_t diag_command;
+	
+	uint8_t protocol_type;
+	uint8_t first_pressed;
+	uint8_t coord_data_size;
+	uint8_t area_data_size;
+	uint8_t raw_data_frame_size;
+	uint8_t raw_data_nframes;
+	uint8_t nFinger_support;
+	uint8_t irq_enabled;
+	uint8_t diag_self[50];
+	
+	uint16_t finger_pressed;
+	uint16_t last_slot;
+	uint16_t pre_finger_mask;
+
+	uint32_t debug_log_level;
+	uint32_t widthFactor;
+	uint32_t heightFactor;
+	uint32_t tw_x_min;
+	uint32_t tw_x_max;
+	uint32_t tw_y_min;
+	uint32_t tw_y_max;
+	uint32_t pl_x_min;
+	uint32_t pl_x_max;
+	uint32_t pl_y_min;
+	uint32_t pl_y_max;
+	
+	int use_irq;
+	int (*power)(int on);
+	int pre_finger_data[10][2];
+	
+	struct device *dev;
+	struct workqueue_struct *himax_wq;
+	struct work_struct work;
+	struct input_dev *input_dev;
+	struct hrtimer timer;
+	struct i2c_client *client;
+	struct himax_i2c_platform_data *pdata;	
+	struct himax_virtual_key *button;
+	
+#if defined(CONFIG_FB)
+	struct notifier_block fb_notif;
+#elif defined(CONFIG_HAS_EARLYSUSPEND)
+	struct early_suspend early_suspend;
+#endif
+
+#ifdef HX_TP_PROC_FLASH_DUMP
+	struct workqueue_struct *flash_wq;
+	struct work_struct flash_work;
+#endif
+
+#ifdef HX_RST_PIN_FUNC
+	int rst_gpio;
+#endif
+
+#ifdef HX_TP_PROC_DIAG
+	struct workqueue_struct *himax_diag_wq;
+	struct delayed_work himax_diag_delay_wrok;
+#endif
+#ifdef HX_SMART_WAKEUP
+	uint8_t SMWP_enable;
+	uint8_t gesture_cust_en[16];
+	struct wake_lock ts_SMWP_wake_lock;
+	struct workqueue_struct *himax_smwp_wq;
+	struct delayed_work smwp_work;
+#endif
+
+#ifdef HX_HIGH_SENSE
+	uint8_t HSEN_enable;
+	struct workqueue_struct *himax_hsen_wq;
+	struct delayed_work hsen_work;
+#endif
+
+#if defined(HX_USB_DETECT)||defined(HX_USB_DETECT2)
+	uint8_t usb_connected;
+	uint8_t *cable_config;
+#endif
+
+	/* pinctrl data */
+	struct pinctrl *ts_pinctrl;
+	struct pinctrl_state *pinctrl_state_active;
+	struct pinctrl_state *pinctrl_state_suspend;
+	struct pinctrl_state *pinctrl_state_release;
+};
+
+#define HX_CMD_NOP					 0x00	
+#define HX_CMD_SETMICROOFF			 0x35	
+#define HX_CMD_SETROMRDY			 0x36	
+#define HX_CMD_TSSLPIN				 0x80	
+#define HX_CMD_TSSLPOUT 			 0x81	
+#define HX_CMD_TSSOFF				 0x82	
+#define HX_CMD_TSSON				 0x83	
+#define HX_CMD_ROE					 0x85	
+#define HX_CMD_RAE					 0x86	
+#define HX_CMD_RLE					 0x87	
+#define HX_CMD_CLRES				 0x88	
+#define HX_CMD_TSSWRESET			 0x9E	
+#define HX_CMD_SETDEEPSTB			 0xD7	
+#define HX_CMD_SET_CACHE_FUN		 0xDD	
+#define HX_CMD_SETIDLE				 0xF2	
+#define HX_CMD_SETIDLEDELAY 		 0xF3	
+#define HX_CMD_SELFTEST_BUFFER		 0x8D	
+#define HX_CMD_MANUALMODE			 0x42
+#define HX_CMD_FLASH_ENABLE 		 0x43
+#define HX_CMD_FLASH_SET_ADDRESS	 0x44
+#define HX_CMD_FLASH_WRITE_REGISTER  0x45
+#define HX_CMD_FLASH_SET_COMMAND	 0x47
+#define HX_CMD_FLASH_WRITE_BUFFER	 0x48
+#define HX_CMD_FLASH_PAGE_ERASE 	 0x4D
+#define HX_CMD_FLASH_SECTOR_ERASE	 0x4E
+#define HX_CMD_CB					 0xCB
+#define HX_CMD_EA					 0xEA
+#define HX_CMD_4A					 0x4A
+#define HX_CMD_4F					 0x4F
+#define HX_CMD_B9					 0xB9
+#define HX_CMD_76					 0x76
+
+enum input_protocol_type {
+	PROTOCOL_TYPE_A	= 0x00,
+	PROTOCOL_TYPE_B	= 0x01,
+};
+
+#ifdef HX_HIGH_SENSE
+void himax_set_HSEN_func(struct i2c_client *client,uint8_t HSEN_enable);
+#endif
+
+#ifdef HX_SMART_WAKEUP
+#define GEST_PTLG_ID_LEN	(4)
+#define GEST_PTLG_HDR_LEN	(4)
+#define GEST_PTLG_HDR_ID1	(0xCC)
+#define GEST_PTLG_HDR_ID2	(0x44)
+#define GEST_PT_MAX_NUM 	(128)
+
+#ifdef HX_GESTURE_TRACK
+static int gest_pt_cnt;
+static int gest_pt_x[GEST_PT_MAX_NUM];
+static int gest_pt_y[GEST_PT_MAX_NUM];
+static int gest_start_x,gest_start_y,gest_end_x,gest_end_y;
+static int gest_width,gest_height,gest_mid_x,gest_mid_y;
+static int gn_gesture_coor[16];
+#endif
+
+void himax_set_SMWP_func(struct i2c_client *client,uint8_t SMWP_enable);
+extern bool FAKE_POWER_KEY_SEND;
+
+	enum gesture_event_type {
+		EV_GESTURE_01 = 0x01,
+		EV_GESTURE_02,
+		EV_GESTURE_03,
+		EV_GESTURE_04,
+		EV_GESTURE_05,
+		EV_GESTURE_06,
+		EV_GESTURE_07,
+		EV_GESTURE_08,
+		EV_GESTURE_09,
+		EV_GESTURE_10,
+		EV_GESTURE_11,
+		EV_GESTURE_12,
+		EV_GESTURE_13,
+		EV_GESTURE_14,
+		EV_GESTURE_15,
+		EV_GESTURE_PWR = 0x80,
+	};
+
+#define KEY_CUST_01 251
+#define KEY_CUST_02 252
+#define KEY_CUST_03 253
+#define KEY_CUST_04 254
+#define KEY_CUST_05 255
+#define KEY_CUST_06 256
+#define KEY_CUST_07 257
+#define KEY_CUST_08 258
+#define KEY_CUST_09 259
+#define KEY_CUST_10 260
+#define KEY_CUST_11 261
+#define KEY_CUST_12 262
+#define KEY_CUST_13 263
+#define KEY_CUST_14 264
+#define KEY_CUST_15 265
+#endif
+
+#ifdef HX_ESD_WORKAROUND
+	extern	u8 HX_ESD_RESET_ACTIVATE;
+#endif
+
+extern int irq_enable_count;
+
+#ifdef QCT
+irqreturn_t himax_ts_thread(int irq, void *ptr);
+int himax_input_register(struct himax_ts_data *ts);
+#endif
+
+extern int himax_chip_common_probe(struct i2c_client *client, const struct i2c_device_id *id);
+extern int himax_chip_common_remove(struct i2c_client *client);
+extern int himax_chip_common_suspend(struct himax_ts_data *ts);
+extern int himax_chip_common_resume(struct himax_ts_data *ts);
+int himax_loadSensorConfig(struct i2c_client *client, struct himax_i2c_platform_data *pdata);
+
+#ifdef HX_USB_DETECT2
+//extern kal_bool upmu_is_chr_det(void);
+void himax_cable_detect_func(void);
+#endif
+
+#endif
+
diff --git a/drivers/input/touchscreen/hxchipset/himax_debug.c b/drivers/input/touchscreen/hxchipset/himax_debug.c
new file mode 100644
index 0000000..f8bee11
--- /dev/null
+++ b/drivers/input/touchscreen/hxchipset/himax_debug.c
@@ -0,0 +1,2329 @@
+/* Himax Android Driver Sample Code for Himax chipset
+*
+* Copyright (C) 2015 Himax Corporation.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+*/
+
+#include "himax_debug.h"
+#include "himax_ic.h"
+
+//struct himax_debug_data* debug_data;
+
+extern struct himax_ic_data* ic_data;
+extern struct himax_ts_data *private_ts;
+extern unsigned char	IC_TYPE;
+extern unsigned char	IC_CHECKSUM;
+extern int himax_input_register(struct himax_ts_data *ts);
+#ifdef QCT
+extern irqreturn_t himax_ts_thread(int irq, void *ptr);
+#endif
+#ifdef MTK
+#ifdef CONFIG_OF_TOUCH
+extern irqreturn_t tpd_eint_interrupt_handler(int irq, void *desc);
+#else
+extern void tpd_eint_interrupt_handler(void);
+#endif
+#endif
+
+#ifdef HX_TP_PROC_DIAG
+#ifdef HX_TP_PROC_2T2R
+int	HX_RX_NUM_2			= 0;
+int	HX_TX_NUM_2			= 0;
+#endif
+int	touch_monitor_stop_flag		= 0;
+int	touch_monitor_stop_limit	= 5;
+uint8_t	g_diag_arr_num			= 0;
+#endif
+
+#ifdef HX_ESD_WORKAROUND
+u8 HX_ESD_RESET_ACTIVATE;
+#endif
+
+#ifdef HX_SMART_WAKEUP
+bool FAKE_POWER_KEY_SEND;
+#endif
+
+//=============================================================================================================
+//
+//	Segment : Himax PROC Debug Function
+//
+//=============================================================================================================
+#if defined(CONFIG_TOUCHSCREEN_HIMAX_DEBUG)
+
+static ssize_t himax_vendor_read(struct file *file, char *buf,
+	size_t len, loff_t *pos)
+{
+	ssize_t ret = 0;
+	char *temp_buf;
+
+	if(!HX_PROC_SEND_FLAG)
+	{
+		temp_buf = kzalloc(len, GFP_KERNEL);
+		if (!temp_buf) {
+			HX_PROC_SEND_FLAG=0;
+			return ret;
+		}
+
+		ret += snprintf(temp_buf, len, "%s_FW:%#x_CFG:%#x_SensorId:%#x\n", HIMAX_common_NAME,
+		ic_data->vendor_fw_ver, ic_data->vendor_config_ver, ic_data->vendor_sensor_id);
+		HX_PROC_SEND_FLAG=1;
+
+		if (copy_to_user(buf, temp_buf, len))
+		{
+			I("%s,here:%d\n", __func__, __LINE__);
+		}
+
+		kfree(temp_buf);
+	}
+	else
+		HX_PROC_SEND_FLAG=0;
+
+	return ret;
+}
+
+static const struct file_operations himax_proc_vendor_ops =
+{
+	.owner = THIS_MODULE,
+	.read = himax_vendor_read,
+};
+
+static ssize_t himax_attn_read(struct file *file, char *buf,
+	size_t len, loff_t *pos)
+{
+	ssize_t ret = 0;
+	struct himax_ts_data *ts_data;
+	char *temp_buf;
+
+	ts_data = private_ts;
+
+	if (!HX_PROC_SEND_FLAG) {
+		temp_buf = kzalloc(len, GFP_KERNEL);
+		if (!temp_buf) {
+			HX_PROC_SEND_FLAG=0;
+			return ret;
+		}
+		ret += snprintf(temp_buf, len, "attn = %x\n", himax_int_gpio_read(ts_data->pdata->gpio_irq));
+
+		if (copy_to_user(buf, temp_buf, len))
+		{
+			I("%s,here:%d\n", __func__, __LINE__);
+		}
+
+		kfree(temp_buf);
+		HX_PROC_SEND_FLAG = 1;
+	}
+	else
+		HX_PROC_SEND_FLAG=0;
+
+	return ret;
+}
+
+
+static const struct file_operations himax_proc_attn_ops =
+{
+	.owner = THIS_MODULE,
+	.read = himax_attn_read,
+};
+
+static ssize_t himax_int_en_read(struct file *file, char *buf,
+	size_t len, loff_t *pos)
+{
+	struct himax_ts_data *ts = private_ts;
+	size_t ret = 0;
+	char *temp_buf;
+
+	if (!HX_PROC_SEND_FLAG) {
+		temp_buf = kzalloc(len, GFP_KERNEL);
+		if (!temp_buf) {
+			HX_PROC_SEND_FLAG=0;
+			return ret;
+		}
+		ret += snprintf(temp_buf, len, "%d ", ts->irq_enabled);
+		ret += snprintf(temp_buf+ret, len-ret, "\n");
+
+		if (copy_to_user(buf, temp_buf, len))
+		{
+			I("%s,here:%d\n", __func__, __LINE__);
+		}
+
+		kfree(temp_buf);
+		HX_PROC_SEND_FLAG = 1;
+	}
+	else
+		HX_PROC_SEND_FLAG=0;
+	return ret;
+}
+
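+/* Writing '1' (re)requests the touch IRQ, edge- or level-triggered depending
+ * on ic_data->HX_INT_IS_EDGE; writing '0' disables and frees it.
+ */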
+static ssize_t himax_int_en_write(struct file *file, const char *buff,
+	size_t len, loff_t *pos)
+{
+	struct himax_ts_data *ts = private_ts;
+	char buf_tmp[12]= {0};
+	int value, ret=0;
+
+	if (len >= 12)
+	{
+		I("%s: command length must be under 12 chars\n", __func__);
+		return -EFAULT;
+	}
+	if (copy_from_user(buf_tmp, buff, len))
+	{
+		return -EFAULT;
+	}
+
+	if (buf_tmp[0] == '0')
+		value = false;
+	else if (buf_tmp[0] == '1')
+		value = true;
+	else
+		return -EINVAL;
+
+	if (value) {
+		if(ic_data->HX_INT_IS_EDGE)
+		{
+#ifdef MTK
+#ifdef CONFIG_OF_TOUCH
+			himax_int_enable(ts->client->irq,1);
+#else
+			//mt_eint_set_sens(CUST_EINT_TOUCH_PANEL_NUM, CUST_EINT_TOUCH_PANEL_TYPE);
+			//mt_eint_set_hw_debounce(CUST_EINT_TOUCH_PANEL_NUM, CUST_EINT_TOUCH_PANEL_DEBOUNCE_CN);
+			mt_eint_registration(ts->client->irq, EINTF_TRIGGER_FALLING, tpd_eint_interrupt_handler, 1);
+#endif
+#endif
+#ifdef QCT
+			ret = request_threaded_irq(ts->client->irq, NULL, himax_ts_thread,
+					IRQF_TRIGGER_FALLING | IRQF_ONESHOT, ts->client->name, ts);
+#endif
+		}
+		else
+		{
+#ifdef MTK
+#ifdef CONFIG_OF_TOUCH
+			himax_int_enable(ts->client->irq,1);
+#else
+			//mt_eint_set_sens(CUST_EINT_TOUCH_PANEL_NUM, CUST_EINT_TOUCH_PANEL_TYPE);
+			//mt_eint_set_hw_debounce(CUST_EINT_TOUCH_PANEL_NUM, CUST_EINT_TOUCH_PANEL_DEBOUNCE_CN);
+			mt_eint_registration(ts->client->irq, EINTF_TRIGGER_LOW, tpd_eint_interrupt_handler, 1);
+#endif
+#endif
+#ifdef QCT
+			ret = request_threaded_irq(ts->client->irq, NULL, himax_ts_thread,
+					IRQF_TRIGGER_LOW | IRQF_ONESHOT, ts->client->name, ts);
+#endif
+		}
+		if (ret == 0) {
+			ts->irq_enabled = 1;
+			irq_enable_count = 1;
+		}
+	} else {
+		himax_int_enable(ts->client->irq,0);
+		free_irq(ts->client->irq, ts);
+		ts->irq_enabled = 0;
+	}
+
+	return len;
+}
+
+static const struct file_operations himax_proc_int_en_ops =
+{
+	.owner = THIS_MODULE,
+	.read = himax_int_en_read,
+	.write = himax_int_en_write,
+};
+
+static ssize_t himax_layout_read(struct file *file, char *buf,
+	size_t len, loff_t *pos)
+{
+	struct himax_ts_data *ts = private_ts;
+	size_t ret = 0;
+	char *temp_buf;
+
+	if (!HX_PROC_SEND_FLAG) {
+		temp_buf = kzalloc(len, GFP_KERNEL);
+		if (!temp_buf) {
+			HX_PROC_SEND_FLAG=0;
+			return ret;
+		}
+		ret += snprintf(temp_buf, len,  "%d ", ts->pdata->abs_x_min);
+		ret += snprintf(temp_buf+ret, len-ret, "%d ", ts->pdata->abs_x_max);
+		ret += snprintf(temp_buf+ret, len-ret, "%d ", ts->pdata->abs_y_min);
+		ret += snprintf(temp_buf+ret, len-ret, "%d ", ts->pdata->abs_y_max);
+		ret += snprintf(temp_buf+ret, len-ret, "\n");
+
+		if (copy_to_user(buf, temp_buf, len))
+		{
+			I("%s,here:%d\n", __func__, __LINE__);
+		}
+
+		kfree(temp_buf);
+		HX_PROC_SEND_FLAG = 1;
+	}
+	else
+		HX_PROC_SEND_FLAG=0;
+
+	return ret;
+}
+
+static ssize_t himax_layout_write(struct file *file, const char *buff,
+	size_t len, loff_t *pos)
+{
+	struct himax_ts_data *ts = private_ts;
+	char buf_tmp[5];
+	int i = 0, j = 0, k = 0, ret;
+	unsigned long value;
+	int layout[4] = {0};
+	char buf[80] = {0};
+
+	if (len >= 80)
+	{
+		I("%s: command length must be under 80 chars\n", __func__);
+		return -EFAULT;
+	}
+	if (copy_from_user(buf, buff, len))
+	{
+		return -EFAULT;
+	}
+
+	for (i = 0; i < 20; i++) {
+		if (buf[i] == ',' || buf[i] == '\n') {
+			memset(buf_tmp, 0x0, sizeof(buf_tmp));
+			if (i - j <= 5)
+				memcpy(buf_tmp, buf + j, i - j);
+			else {
+				I("buffer size is over 5 char\n");
+				return len;
+			}
+			j = i + 1;
+			if (k < 4) {
+				ret = kstrtoul(buf_tmp, 10, &value);
+				layout[k++] = value;
+			}
+		}
+	}
+	if (k == 4) {
+		ts->pdata->abs_x_min=layout[0];
+		ts->pdata->abs_x_max=layout[1];
+		ts->pdata->abs_y_min=layout[2];
+		ts->pdata->abs_y_max=layout[3];
+		I("%d, %d, %d, %d\n",ts->pdata->abs_x_min, ts->pdata->abs_x_max, ts->pdata->abs_y_min, ts->pdata->abs_y_max);
+		input_unregister_device(ts->input_dev);
+		himax_input_register(ts);
+	} else
+		I("ERR@%d, %d, %d, %d\n",ts->pdata->abs_x_min, ts->pdata->abs_x_max, ts->pdata->abs_y_min, ts->pdata->abs_y_max);
+	return len;
+}
+
+static const struct file_operations himax_proc_layout_ops =
+{
+	.owner = THIS_MODULE,
+	.read = himax_layout_read,
+	.write = himax_layout_write,
+};
+
+static ssize_t himax_debug_level_read(struct file *file, char *buf,
+	size_t len, loff_t *pos)
+{
+	struct himax_ts_data *ts_data;
+	size_t ret = 0;
+	char *temp_buf;
+	ts_data = private_ts;
+
+	if (!HX_PROC_SEND_FLAG) {
+		temp_buf = kzalloc(len, GFP_KERNEL);
+		if (!temp_buf) {
+			HX_PROC_SEND_FLAG=0;
+			return ret;
+		}
+		ret += snprintf(temp_buf, len, "%d\n", ts_data->debug_log_level);
+
+		if (copy_to_user(buf, temp_buf, len))
+		{
+			I("%s,here:%d\n", __func__, __LINE__);
+		}
+
+		kfree(temp_buf);
+		HX_PROC_SEND_FLAG = 1;
+	}
+	else
+		HX_PROC_SEND_FLAG=0;
+
+	return ret;
+}
+
+static ssize_t himax_debug_level_write(struct file *file, const char *buff,
+	size_t len, loff_t *pos)
+{
+	struct himax_ts_data *ts;
+	char buf_tmp[11];
+	int i;
+	ts = private_ts;
+
+	if (len >= 12)
+	{
+		I("%s: command length must be under 12 chars\n", __func__);
+		return -EFAULT;
+	}
+	if (copy_from_user(buf_tmp, buff, len))
+	{
+		return -EFAULT;
+	}
+
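+	/* Parse the written string as a hex value into debug_log_level */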
+	ts->debug_log_level = 0;
+	for(i=0; i<len-1; i++)
+	{
+		if( buf_tmp[i]>='0' && buf_tmp[i]<='9' )
+			ts->debug_log_level |= (buf_tmp[i]-'0');
+		else if( buf_tmp[i]>='A' && buf_tmp[i]<='F' )
+			ts->debug_log_level |= (buf_tmp[i]-'A'+10);
+		else if( buf_tmp[i]>='a' && buf_tmp[i]<='f' )
+			ts->debug_log_level |= (buf_tmp[i]-'a'+10);
+
+		if(i!=len-2)
+			ts->debug_log_level <<= 4;
+	}
+
+	if (ts->debug_log_level & BIT(3)) {
+		if (ts->pdata->screenWidth > 0 && ts->pdata->screenHeight > 0 &&
+		 (ts->pdata->abs_x_max - ts->pdata->abs_x_min) > 0 &&
+		 (ts->pdata->abs_y_max - ts->pdata->abs_y_min) > 0) {
+			ts->widthFactor = (ts->pdata->screenWidth << SHIFTBITS)/(ts->pdata->abs_x_max - ts->pdata->abs_x_min);
+			ts->heightFactor = (ts->pdata->screenHeight << SHIFTBITS)/(ts->pdata->abs_y_max - ts->pdata->abs_y_min);
+			if (ts->widthFactor > 0 && ts->heightFactor > 0)
+				ts->useScreenRes = 1;
+			else {
+				ts->heightFactor = 0;
+				ts->widthFactor = 0;
+				ts->useScreenRes = 0;
+			}
+		} else
+			I("Enable finger debug with raw position mode!\n");
+	} else {
+		ts->useScreenRes = 0;
+		ts->widthFactor = 0;
+		ts->heightFactor = 0;
+	}
+
+	return len;
+}
+
+static const struct file_operations himax_proc_debug_level_ops =
+{
+	.owner = THIS_MODULE,
+	.read = himax_debug_level_read,
+	.write = himax_debug_level_write,
+};
+
+#ifdef HX_TP_PROC_REGISTER
+static ssize_t himax_proc_register_read(struct file *file, char *buf,
+	size_t len, loff_t *pos)
+{
+	int ret = 0;
+	uint16_t loop_i;
+	uint8_t data[128];
+	char *temp_buf;
+
+	memset(data, 0x00, sizeof(data));
+
+	I("himax_register_show: %x,%x,%x,%x\n", register_command[0],register_command[1],register_command[2],register_command[3]);
+	if(!HX_PROC_SEND_FLAG)
+	{
+		temp_buf = kzalloc(len, GFP_KERNEL);
+		if (!temp_buf) {
+			HX_PROC_SEND_FLAG=0;
+			return ret;
+		}
+		himax_register_read(private_ts->client, register_command, 1, data);
+
+		ret += snprintf(temp_buf, len, "command:  %x,%x,%x,%x\n", register_command[0],register_command[1],register_command[2],register_command[3]);
+
+		for (loop_i = 0; loop_i < 128; loop_i++) {
+			ret += snprintf(temp_buf+ret, len-ret, "0x%2.2X ", data[loop_i]);
+			if ((loop_i % 16) == 15)
+				ret += snprintf(temp_buf+ret, len-ret, "\n");
+		}
+		ret += snprintf(temp_buf+ret, len-ret, "\n");
+		HX_PROC_SEND_FLAG=1;
+
+		if (copy_to_user(buf, temp_buf, len))
+		{
+			I("%s,here:%d\n", __func__, __LINE__);
+		}
+
+		kfree(temp_buf);
+	}
+	else
+		HX_PROC_SEND_FLAG=0;
+	return ret;
+}
+
+static ssize_t himax_proc_register_write(struct file *file, const char *buff,
+	size_t len, loff_t *pos)
+{
+	char buf_tmp[16], length = 0;
+	unsigned long result    = 0;
+	uint8_t loop_i          = 0;
+	uint16_t base           = 5;
+	uint8_t write_da[128];
+	char buf[80] = {0};
+
+	if (len >= 80)
+	{
+		I("%s: command length must be under 80 chars\n", __func__);
+		return -EFAULT;
+	}
+	if (copy_from_user(buf, buff, len))
+	{
+		return -EFAULT;
+	}
+
+	memset(buf_tmp, 0x0, sizeof(buf_tmp));
+	memset(write_da, 0x0, sizeof(write_da));
+
+	I("himax %s \n",buf);
+
+	if ((buf[0] == 'r' || buf[0] == 'w') && buf[1] == ':') {
+
+		if (buf[2] == 'x') {
+			memcpy(buf_tmp, buf + 3, 8);
+			if (!kstrtoul(buf_tmp, 16, &result))
+				{
+					register_command[0] = (uint8_t)result;
+					register_command[1] = (uint8_t)(result >> 8);
+					register_command[2] = (uint8_t)(result >> 16);
+					register_command[3] = (uint8_t)(result >> 24);
+				}
+			base = 11;
+			I("CMD: %x,%x,%x,%x\n", register_command[0],register_command[1],register_command[2],register_command[3]);
+
+			for (loop_i = 0; loop_i < 128 && (base+10)<80; loop_i++) {
+				if (buf[base] == '\n') {
+					if (buf[0] == 'w') {
+						himax_register_write(private_ts->client, register_command, 1, write_da);
+						I("CMD: %x, %x, %x, %x, len=%d\n", write_da[0], write_da[1],write_da[2],write_da[3],length);
+					}
+					I("\n");
+					return len;
+				}
+				if (buf[base + 1] == 'x') {
+					buf_tmp[10] = '\n';
+					buf_tmp[11] = '\0';
+					memcpy(buf_tmp, buf + base + 2, 8);
+					if (!kstrtoul(buf_tmp, 16, &result)) {
+						write_da[loop_i] = (uint8_t)result;
+						write_da[loop_i+1] = (uint8_t)(result >> 8);
+						write_da[loop_i+2] = (uint8_t)(result >> 16);
+						write_da[loop_i+3] = (uint8_t)(result >> 24);
+					}
+					length+=4;
+				}
+				base += 10;
+			}
+		}
+	}
+	return len;
+}
+
+static const struct file_operations himax_proc_register_ops =
+{
+	.owner = THIS_MODULE,
+	.read = himax_proc_register_read,
+	.write = himax_proc_register_write,
+};
+#endif
+
+#ifdef HX_TP_PROC_DIAG
+int16_t *getMutualBuffer(void)
+{
+	return diag_mutual;
+}
+int16_t *getMutualNewBuffer(void)
+{
+	return diag_mutual_new;
+}
+int16_t *getMutualOldBuffer(void)
+{
+	return diag_mutual_old;
+}
+int16_t *getSelfBuffer(void)
+{
+	return &diag_self[0];
+}
+uint8_t getXChannel(void)
+{
+	return x_channel;
+}
+uint8_t getYChannel(void)
+{
+	return y_channel;
+}
+uint8_t getDiagCommand(void)
+{
+	return diag_command;
+}
+void setXChannel(uint8_t x)
+{
+	x_channel = x;
+}
+void setYChannel(uint8_t y)
+{
+	y_channel = y;
+}
+void setMutualBuffer(void)
+{
+	diag_mutual = kzalloc(x_channel * y_channel * sizeof(int16_t), GFP_KERNEL);
+}
+void setMutualNewBuffer(void)
+{
+	diag_mutual_new = kzalloc(x_channel * y_channel * sizeof(int16_t), GFP_KERNEL);
+}
+void setMutualOldBuffer(void)
+{
+	diag_mutual_old = kzalloc(x_channel * y_channel * sizeof(int16_t), GFP_KERNEL);
+}
+
+#ifdef HX_TP_PROC_2T2R
+int16_t *getMutualBuffer_2(void)
+{
+	return diag_mutual_2;
+}
+uint8_t getXChannel_2(void)
+{
+	return x_channel_2;
+}
+uint8_t getYChannel_2(void)
+{
+	return y_channel_2;
+}
+void setXChannel_2(uint8_t x)
+{
+	x_channel_2 = x;
+}
+void setYChannel_2(uint8_t y)
+{
+	y_channel_2 = y;
+}
+void setMutualBuffer_2(void)
+{
+	diag_mutual_2 = kzalloc(x_channel_2 * y_channel_2 * sizeof(int16_t), GFP_KERNEL);
+}
+#endif
+
+static ssize_t himax_diag_arrange_write(struct file *file, const char *buff,
+	size_t len, loff_t *pos)
+{
+	//struct himax_ts_data *ts = private_ts;
+	char buf[80] = {0};
+
+	if (len >= 80)
+	{
+		I("%s: command length must be under 80 chars\n", __func__);
+		return -EFAULT;
+	}
+	if (copy_from_user(buf, buff, len))
+	{
+		return -EFAULT;
+	}
+
+	g_diag_arr_num = buf[0] - '0';
+	I("%s: g_diag_arr_num = %d \n", __func__,g_diag_arr_num);
+
+	return len;
+}
+
+static const struct file_operations himax_proc_diag_arrange_ops =
+{
+	.owner = THIS_MODULE,
+	.write = himax_diag_arrange_write,
+};
+
+static void himax_diag_arrange_print(struct seq_file *s, int i, int j, int transpose)
+{
+	if(transpose)
+		seq_printf(s, "%6d", diag_mutual[ j + i*x_channel]);
+	else
+		seq_printf(s, "%6d", diag_mutual[ i + j*x_channel]);
+}
+
+static void himax_diag_arrange_inloop(struct seq_file *s, int in_init,bool transpose, int j)
+{
+	int i;
+	int in_max = 0;
+
+	if(transpose)
+		in_max = y_channel;
+	else
+		in_max = x_channel;
+
+	if (in_init > 0)
+	{
+		for(i = in_init-1;i >= 0;i--)
+		{
+			himax_diag_arrange_print(s, i, j, transpose);
+		}
+	}
+	else
+	{
+		for (i = 0; i < in_max; i++)
+		{
+			himax_diag_arrange_print(s, i, j, transpose);
+		}
+	}
+}
+
+static void himax_diag_arrange_outloop(struct seq_file *s, int transpose, int out_init, int in_init)
+{
+	int j;
+	int out_max = 0;
+
+	if(transpose)
+		out_max = x_channel;
+	else
+		out_max = y_channel;
+
+	if(out_init > 0)
+	{
+		for(j = out_init-1;j >= 0;j--)
+		{
+			himax_diag_arrange_inloop(s, in_init, transpose, j);
+			seq_printf(s, " %5d\n", diag_self[j]);
+		}
+	}
+	else
+	{
+		for(j = 0;j < out_max;j++)
+		{
+			himax_diag_arrange_inloop(s, in_init, transpose, j);
+			seq_printf(s, " %5d\n", diag_self[j]);
+		}
+	}
+}
+
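+/* g_diag_arr_num selects the print orientation of the mutual matrix: bit2
+ * transposes it, while bit1/bit0 reverse the outer/inner traversal order.
+ */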
+static void himax_diag_arrange(struct seq_file *s)
+{
+	int bit2,bit1,bit0;
+	int i;
+
+	bit2 = g_diag_arr_num >> 2;
+	bit1 = g_diag_arr_num >> 1 & 0x1;
+	bit0 = g_diag_arr_num & 0x1;
+
+	if (g_diag_arr_num < 4)
+	{
+		himax_diag_arrange_outloop(s, bit2, bit1 * y_channel, bit0 * x_channel);
+		for (i = y_channel; i < x_channel + y_channel; i++) {
+			seq_printf(s, "%6d", diag_self[i]);
+		}
+	}
+	else
+	{
+		himax_diag_arrange_outloop(s, bit2, bit1 * x_channel, bit0 * y_channel);
+		for (i = x_channel; i < x_channel + y_channel; i++) {
+			seq_printf(s, "%6d", diag_self[i]);
+		}
+	}
+}
+
+static void *himax_diag_seq_start(struct seq_file *s, loff_t *pos)
+{
+	if (*pos>=1) return NULL;
+	return (void *)((unsigned long) *pos+1);
+}
+
+static void *himax_diag_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	return NULL;
+}
+static void himax_diag_seq_stop(struct seq_file *s, void *v)
+{
+}
+static int himax_diag_seq_read(struct seq_file *s, void *v)
+{
+	size_t count = 0;
+	int32_t loop_i;//,loop_j
+	uint16_t mutual_num, self_num, width;
+
+#ifdef HX_TP_PROC_2T2R
+	if(Is_2T2R && diag_command == 4)
+	{
+		mutual_num	= x_channel_2 * y_channel_2;
+		self_num	= x_channel_2 + y_channel_2; //don't add KEY_COUNT
+		width		= x_channel_2;
+		seq_printf(s, "ChannelStart: %4d, %4d\n\n", x_channel_2, y_channel_2);
+	}
+	else
+#endif
+	{
+		mutual_num	= x_channel * y_channel;
+		self_num	= x_channel + y_channel; //don't add KEY_COUNT
+		width		= x_channel;
+		seq_printf(s, "ChannelStart: %4d, %4d\n\n", x_channel, y_channel);
+	}
+
+	// start to show out the raw data in adb shell
+	if (diag_command >= 1 && diag_command <= 6) {
+		if (diag_command <= 3) {
+			himax_diag_arrange(s);
+			seq_printf(s, "\n\n");
+#ifdef HX_EN_SEL_BUTTON
+			seq_printf(s, "\n");
+			for (loop_i = 0; loop_i < HX_BT_NUM; loop_i++)
+					seq_printf(s, "%6d", diag_self[HX_RX_NUM + HX_TX_NUM + loop_i]);
+#endif
+#ifdef HX_TP_PROC_2T2R
+		}else if(Is_2T2R && diag_command == 4 ) {
+			for (loop_i = 0; loop_i < mutual_num; loop_i++) {
+				seq_printf(s, "%4d", diag_mutual_2[loop_i]);
+				if ((loop_i % width) == (width - 1))
+					seq_printf(s, " %6d\n", diag_self[width + loop_i/width]);
+			}
+			seq_printf(s, "\n");
+			for (loop_i = 0; loop_i < width; loop_i++) {
+				seq_printf(s, "%6d", diag_self[loop_i]);
+				if (((loop_i) % width) == (width - 1))
+					seq_printf(s, "\n");
+			}
+#ifdef HX_EN_SEL_BUTTON
+			seq_printf(s, "\n");
+			for (loop_i = 0; loop_i < HX_BT_NUM; loop_i++)
+				seq_printf(s, "%4d", diag_self[HX_RX_NUM_2 + HX_TX_NUM_2 + loop_i]);
+#endif
+#endif
+		} else if (diag_command > 4) {
+			for (loop_i = 0; loop_i < self_num; loop_i++) {
+				seq_printf(s, "%4d", diag_self[loop_i]);
+				if (((loop_i - mutual_num) % width) == (width - 1))
+					seq_printf(s, "\n");
+			}
+		} else {
+			for (loop_i = 0; loop_i < mutual_num; loop_i++) {
+				seq_printf(s, "%4d", diag_mutual[loop_i]);
+				if ((loop_i % width) == (width - 1))
+					seq_printf(s, "\n");
+			}
+		}
+		seq_printf(s, "ChannelEnd");
+		seq_printf(s, "\n");
+	} else if (diag_command == 7) {
+		for (loop_i = 0; loop_i < 128; loop_i++) {
+			if ((loop_i % 16) == 0)
+				seq_printf(s, "LineStart:");
+			seq_printf(s, "%4d", diag_coor[loop_i]);
+			if ((loop_i % 16) == 15)
+				seq_printf(s, "\n");
+		}
+	} else if (diag_command == 9 || diag_command == 91 || diag_command == 92){
+		himax_diag_arrange(s);
+		seq_printf(s, "\n");
+	}
+
+	return count;
+}
+static const struct seq_operations himax_diag_seq_ops =
+{
+	.start	= himax_diag_seq_start,
+	.next	= himax_diag_seq_next,
+	.stop	= himax_diag_seq_stop,
+	.show	= himax_diag_seq_read,
+};
+static int himax_diag_proc_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &himax_diag_seq_ops);
+};
+bool DSRAM_Flag;
+
+//DSRAM thread
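+// Reads one TX*RX frame from DSRAM: command 9 stores it, 91 keeps the
+// per-cell maximum for up to 100 frames before re-enabling the ISR, and 92
+// reports the delta between consecutive frames. Reschedules itself on the
+// diag workqueue while polling continues.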
+void himax_ts_diag_func(void)
+{
+	int i=0, j=0;
+	unsigned int index = 0;
+	int total_size = ic_data->HX_TX_NUM * ic_data->HX_RX_NUM * 2;
+	uint8_t  info_data[total_size];
+	int16_t *mutual_data     = NULL;
+	int16_t *mutual_data_new = NULL;
+	int16_t *mutual_data_old = NULL;
+	int16_t new_data;
+
+	himax_burst_enable(private_ts->client, 1);
+	if(diag_command == 9 || diag_command == 91)
+	{
+		mutual_data = getMutualBuffer();
+	}else if(diag_command == 92){
+		mutual_data = getMutualBuffer();
+		mutual_data_new = getMutualNewBuffer();
+		mutual_data_old = getMutualOldBuffer();
+	}
+	himax_get_DSRAM_data(private_ts->client, info_data);
+
+	index = 0;
+	for (i = 0; i < ic_data->HX_TX_NUM; i++)
+	{
+		for (j = 0; j < ic_data->HX_RX_NUM; j++)
+		{
+			new_data = (short)(info_data[index + 1] << 8 | info_data[index]);
+			if(diag_command == 9){
+				mutual_data[i*ic_data->HX_RX_NUM+j] = new_data;
+			}else if(diag_command == 91){ //Keep max data for 100 frame
+				if(mutual_data[i * ic_data->HX_RX_NUM + j] < new_data)
+					mutual_data[i * ic_data->HX_RX_NUM + j] = new_data;
+			}else if(diag_command == 92){ //Cal data for [N]-[N-1] frame
+				mutual_data_new[i * ic_data->HX_RX_NUM + j] = new_data;
+				mutual_data[i * ic_data->HX_RX_NUM + j] = mutual_data_new[i * ic_data->HX_RX_NUM + j] - mutual_data_old[i * ic_data->HX_RX_NUM + j];
+			}
+			index += 2;
+		}
+	}
+	if(diag_command == 92){
+		memcpy(mutual_data_old,mutual_data_new,x_channel * y_channel * sizeof(int16_t)); //copy N data to N-1 array
+	}
+	diag_max_cnt++;
+	if(diag_command == 9 || diag_command == 92){
+		queue_delayed_work(private_ts->himax_diag_wq, &private_ts->himax_diag_delay_wrok, HZ/10);
+	}else if(diag_command == 91){
+		if(diag_max_cnt > 100) //count for 100 frame
+		{
+			//Clear DSRAM flag
+			DSRAM_Flag = false;
+
+			//Enable ISR
+			himax_int_enable(private_ts->client->irq,1);
+
+			//=====================================
+			// test result command : 0x8002_0324 ==> 0x00
+			//=====================================
+			himax_diag_register_set(private_ts->client, 0x00);
+		}else{
+			queue_delayed_work(private_ts->himax_diag_wq, &private_ts->himax_diag_delay_wrok, HZ/10);
+		}
+	}
+}
+
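+/* Proc "diag" write: values 0-3 select the IC raw-data register mode (and
+ * stop any running DSRAM polling), 9/91/92 disable the ISR and start the
+ * DSRAM polling work above, 8 (coordinate dump) only logs an error in this
+ * build, and anything else resets the diag register.
+ */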
+static ssize_t himax_diag_write(struct file *filp, const char __user *buff, size_t len, loff_t *data)
+{
+	char messages[80] = {0};
+
+	uint8_t command[2] = {0x00, 0x00};
+	uint8_t receive[1];
+
+	memset(receive, 0x00, sizeof(receive));
+
+	if (len >= 80)
+	{
+		I("%s: command length must be under 80 chars\n", __func__);
+		return -EFAULT;
+	}
+	if (copy_from_user(messages, buff, len))
+	{
+		return -EFAULT;
+	}
+	if (messages[1] == 0x0A) {
+		diag_command = messages[0] - '0';
+	} else {
+		diag_command = (messages[0] - '0') * 10 + (messages[1] - '0');
+	}
+
+	I("[Himax]diag_command=0x%x\n",diag_command);
+	if (diag_command < 0x04){
+		if(DSRAM_Flag)
+		{
+			//1. Clear DSRAM flag
+			DSRAM_Flag = false;
+
+			//2. Stop DSRAM thread
+			cancel_delayed_work_sync(&private_ts->himax_diag_delay_wrok);
+
+			//3. Enable ISR
+			himax_int_enable(private_ts->client->irq,1);
+		}
+		command[0] = diag_command;
+		himax_diag_register_set(private_ts->client, command[0]);
+	}
+	//coordinate dump start
+	else if (diag_command == 0x08)	{
+		E("%s: coordinate_dump_file_create error\n", __func__);
+	}
+	else if (diag_command == 0x09 || diag_command == 91 || diag_command == 92){
+		diag_max_cnt = 0;
+		memset(diag_mutual, 0x00, x_channel * y_channel * sizeof(int16_t)); //Set data 0 everytime
+
+		//1. Disable ISR
+		himax_int_enable(private_ts->client->irq,0);
+
+		//2. Start DSRAM thread
+		//himax_diag_register_set(private_ts->client, 0x0A);
+
+		queue_delayed_work(private_ts->himax_diag_wq, &private_ts->himax_diag_delay_wrok, 2*HZ/100);
+
+		I("%s: Start get raw data in DSRAM\n", __func__);
+
+		//3. Set DSRAM flag
+		DSRAM_Flag = true;
+	}else{
+		command[0] = 0x00;
+		himax_diag_register_set(private_ts->client, command[0]);
+		E("[Himax]Diag command error!diag_command=0x%x\n",diag_command);
+	}
+	return len;
+}
+
+static const struct file_operations himax_proc_diag_ops =
+{
+	.owner = THIS_MODULE,
+	.open = himax_diag_proc_open,
+	.read = seq_read,
+	.write = himax_diag_write,
+};
+#endif
+
+#ifdef HX_TP_PROC_RESET
+static ssize_t himax_reset_write(struct file *file, const char *buff,
+	size_t len, loff_t *pos)
+{
+	char buf_tmp[12];
+
+	if (len >= 12)
+	{
+		I("%s: command length must be under 12 chars\n", __func__);
+		return -EFAULT;
+	}
+	if (copy_from_user(buf_tmp, buff, len))
+	{
+		return -EFAULT;
+	}
+	//if (buf_tmp[0] == '1')
+	//	ESD_HW_REST();
+
+	return len;
+}
+
+static const struct file_operations himax_proc_reset_ops =
+{
+	.owner = THIS_MODULE,
+	.write = himax_reset_write,
+};
+#endif
+
+#ifdef HX_TP_PROC_DEBUG
+static ssize_t himax_debug_read(struct file *file, char *buf,
+	size_t len, loff_t *pos)
+{
+	size_t count = 0;
+	char *temp_buf;
+
+	if(!HX_PROC_SEND_FLAG)
+	{
+		temp_buf = kzalloc(len, GFP_KERNEL);
+		if (!temp_buf){
+			HX_PROC_SEND_FLAG=0;
+			return count;
+		}
+
+		if (debug_level_cmd == 't')
+		{
+			if (fw_update_complete)
+				count += snprintf(temp_buf+count, len-count, "FW Update Complete ");
+			else
+			{
+				count += snprintf(temp_buf+count, len-count, "FW Update Fail ");
+			}
+		}
+		else if (debug_level_cmd == 'h')
+		{
+			if (handshaking_result == 0)
+			{
+				count += snprintf(temp_buf+count, len-count, "Handshaking Result = %d (MCU Running)\n", handshaking_result);
+			}
+			else if (handshaking_result == 1)
+			{
+				count += snprintf(temp_buf+count, len-count, "Handshaking Result = %d (MCU Stop)\n", handshaking_result);
+			}
+			else if (handshaking_result == 2)
+			{
+				count += snprintf(temp_buf+count, len-count, "Handshaking Result = %d (I2C Error)\n", handshaking_result);
+			}
+			else
+			{
+				count += snprintf(temp_buf+count, len-count, "Handshaking Result = error\n");
+			}
+		}
+		else if (debug_level_cmd == 'v')
+		{
+			count += snprintf(temp_buf+count, len-count, "FW_VER = ");
+			count += snprintf(temp_buf+count, len-count, "0x%2.2X\n", ic_data->vendor_fw_ver);
+			count += snprintf(temp_buf+count, len-count, "CONFIG_VER = ");
+			count += snprintf(temp_buf+count, len-count, "0x%2.2X\n", ic_data->vendor_config_ver);
+			count += snprintf(temp_buf+count, len-count, "\n");
+		}
+		else if (debug_level_cmd == 'd')
+		{
+			count += snprintf(temp_buf+count, len-count, "Himax Touch IC Information :\n");
+			if (IC_TYPE == HX_85XX_D_SERIES_PWON)
+			{
+				count += snprintf(temp_buf+count, len-count, "IC Type : D\n");
+			}
+			else if (IC_TYPE == HX_85XX_E_SERIES_PWON)
+			{
+				count += snprintf(temp_buf+count, len-count, "IC Type : E\n");
+			}
+			else if (IC_TYPE == HX_85XX_ES_SERIES_PWON)
+			{
+				count += snprintf(temp_buf+count, len-count, "IC Type : ES\n");
+			}
+			else if (IC_TYPE == HX_85XX_F_SERIES_PWON)
+			{
+				count += snprintf(temp_buf+count, len-count, "IC Type : F\n");
+			}
+			else
+			{
+				count += snprintf(temp_buf+count, len-count, "IC Type error.\n");
+			}
+
+			if (IC_CHECKSUM == HX_TP_BIN_CHECKSUM_SW)
+			{
+				count += snprintf(temp_buf+count, len-count, "IC Checksum : SW\n");
+			}
+			else if (IC_CHECKSUM == HX_TP_BIN_CHECKSUM_HW)
+			{
+				count += snprintf(temp_buf+count, len-count, "IC Checksum : HW\n");
+			}
+			else if (IC_CHECKSUM == HX_TP_BIN_CHECKSUM_CRC)
+			{
+				count += snprintf(temp_buf+count, len-count, "IC Checksum : CRC\n");
+			}
+			else
+			{
+				count += snprintf(temp_buf+count, len-count, "IC Checksum error.\n");
+			}
+
+			if (ic_data->HX_INT_IS_EDGE)
+			{
+				count += snprintf(temp_buf+count, len-count, "Interrupt : EDGE TRIGGER\n");
+			}
+			else
+			{
+				count += snprintf(temp_buf+count, len-count, "Interrupt : LEVEL TRIGGER\n");
+			}
+
+			count += snprintf(temp_buf+count, len-count, "RX Num : %d\n", ic_data->HX_RX_NUM);
+			count += snprintf(temp_buf+count, len-count, "TX Num : %d\n", ic_data->HX_TX_NUM);
+			count += snprintf(temp_buf+count, len-count, "BT Num : %d\n", ic_data->HX_BT_NUM);
+			count += snprintf(temp_buf+count, len-count, "X Resolution : %d\n", ic_data->HX_X_RES);
+			count += snprintf(temp_buf+count, len-count, "Y Resolution : %d\n", ic_data->HX_Y_RES);
+			count += snprintf(temp_buf+count, len-count, "Max Point : %d\n", ic_data->HX_MAX_PT);
+			count += snprintf(temp_buf+count, len-count, "XY reverse : %d\n", ic_data->HX_XY_REVERSE);
+	#ifdef HX_TP_PROC_2T2R
+			if(Is_2T2R)
+			{
+				count += snprintf(temp_buf+count, len-count, "2T2R panel\n");
+				count += snprintf(temp_buf+count, len-count, "RX Num_2 : %d\n", HX_RX_NUM_2);
+				count += snprintf(temp_buf+count, len-count, "TX Num_2 : %d\n", HX_TX_NUM_2);
+			}
+	#endif
+		}
+		else if (debug_level_cmd == 'i')
+		{
+			count += snprintf(temp_buf+count, len-count, "Himax Touch Driver Version:\n");
+			count += snprintf(temp_buf+count, len-count, "%s\n", HIMAX_DRIVER_VER);
+		}
+		if (copy_to_user(buf, temp_buf, len))
+		{
+			I("%s,here:%d\n", __func__, __LINE__);
+		}
+
+		kfree(temp_buf);
+		HX_PROC_SEND_FLAG=1;
+	}
+	else
+		HX_PROC_SEND_FLAG=0;
+	return count;
+}
+
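+/* Debug commands: 'h' runs the IC handshake test, 'v' re-reads the FW/config
+ * version, 'd' reports IC information, 'i' reports the driver version, and
+ * 't' flashes a firmware image via request_firmware(), with buf[1..2]
+ * selecting the 60k/64k/124k/128k image size.
+ */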
+static ssize_t himax_debug_write(struct file *file, const char *buff,
+	size_t len, loff_t *pos)
+{
+	const struct firmware *fw = NULL;
+	unsigned char *fw_data = NULL;
+	char fileName[128];
+	char buf[80] = {0};
+	int result;
+
+	if (len >= 80)
+	{
+		I("%s: command length must be under 80 chars\n", __func__);
+		return -EFAULT;
+	}
+	if (copy_from_user(buf, buff, len))
+	{
+		return -EFAULT;
+	}
+
+	if ( buf[0] == 'h') //handshaking
+	{
+		debug_level_cmd = buf[0];
+
+		himax_int_enable(private_ts->client->irq,0);
+
+		handshaking_result = himax_hand_shaking(private_ts->client); //0:Running, 1:Stop, 2:I2C Fail
+
+		himax_int_enable(private_ts->client->irq,1);
+
+		return len;
+	}
+
+	else if ( buf[0] == 'v') //firmware version
+	{
+		debug_level_cmd = buf[0];
+		himax_int_enable(private_ts->client->irq,0);
+#ifdef HX_RST_PIN_FUNC
+		himax_HW_reset(false,false);
+#endif
+		himax_read_FW_ver(private_ts->client);
+		//himax_check_chip_version();
+#ifdef HX_RST_PIN_FUNC
+		himax_HW_reset(true,false);
+#endif
+		himax_int_enable(private_ts->client->irq,1);
+		return len;
+	}
+
+	else if ( buf[0] == 'd') //ic information
+	{
+		debug_level_cmd = buf[0];
+		return len;
+	}
+
+	else if ( buf[0] == 'i') //driver version
+	{
+		debug_level_cmd = buf[0];
+		return len;
+	}
+
+	else if (buf[0] == 't')
+	{
+
+		himax_int_enable(private_ts->client->irq,0);
+
+		debug_level_cmd 		= buf[0];
+		fw_update_complete		= false;
+
+		memset(fileName, 0, 128);
+		// parse the file name
+		snprintf(fileName, len-4, "%s", &buf[4]);
+		I("%s: upgrade from file(%s) start!\n", __func__, fileName);
+		// open file
+		result = request_firmware(&fw, fileName, private_ts->dev);
+		if (result) {
+			E("%s: open firmware file failed\n", __func__);
+			goto firmware_upgrade_done;
+			//return len;
+		}
+
+		I("%s: FW len %zu\n", __func__, fw->size);
+		fw_data = (unsigned char *)fw->data;
+
+		I("%s: FW image,len %d: %02X, %02X, %02X, %02X\n", __func__, result, upgrade_fw[0], upgrade_fw[1], upgrade_fw[2], upgrade_fw[3]);
+
+		if (fw_data != NULL)
+		{
+			// start to upgrade
+			himax_int_enable(private_ts->client->irq,0);
+
+			if ((buf[1] == '6') && (buf[2] == '0'))
+			{
+				if (fts_ctpm_fw_upgrade_with_sys_fs_60k(private_ts->client,upgrade_fw, result, false) == 0)
+				{
+					E("%s: TP upgrade error, line: %d\n", __func__, __LINE__);
+					fw_update_complete = false;
+				}
+				else
+				{
+					I("%s: TP upgrade OK, line: %d\n", __func__, __LINE__);
+					fw_update_complete = true;
+				}
+			}
+			else if ((buf[1] == '6') && (buf[2] == '4'))
+			{
+				if (fts_ctpm_fw_upgrade_with_sys_fs_64k(private_ts->client,upgrade_fw, result, false) == 0)
+				{
+					E("%s: TP upgrade error, line: %d\n", __func__, __LINE__);
+					fw_update_complete = false;
+				}
+				else
+				{
+					I("%s: TP upgrade OK, line: %d\n", __func__, __LINE__);
+					fw_update_complete = true;
+				}
+			}
+			else if ((buf[1] == '2') && (buf[2] == '4'))
+			{
+				if (fts_ctpm_fw_upgrade_with_sys_fs_124k(private_ts->client,upgrade_fw, result, false) == 0)
+				{
+					E("%s: TP upgrade error, line: %d\n", __func__, __LINE__);
+					fw_update_complete = false;
+				}
+				else
+				{
+					I("%s: TP upgrade OK, line: %d\n", __func__, __LINE__);
+					fw_update_complete = true;
+				}
+			}
+			else if ((buf[1] == '2') && (buf[2] == '8'))
+			{
+				if (fts_ctpm_fw_upgrade_with_sys_fs_128k(private_ts->client,upgrade_fw, result, false) == 0)
+				{
+					E("%s: TP upgrade error, line: %d\n", __func__, __LINE__);
+					fw_update_complete = false;
+				}
+				else
+				{
+					I("%s: TP upgrade OK, line: %d\n", __func__, __LINE__);
+					fw_update_complete = true;
+				}
+			}
+			else
+			{
+				E("%s: Flash command fail: %d\n", __func__, __LINE__);
+				fw_update_complete = false;
+			}
+			release_firmware(fw);
+			goto firmware_upgrade_done;
+			//return count;
+		}
+	}
+
+firmware_upgrade_done:
+
+#ifdef HX_RST_PIN_FUNC
+	himax_HW_reset(true,false);
+#endif
+
+	himax_sense_on(private_ts->client, 0x01);
+	msleep(120);
+#ifdef HX_ESD_WORKAROUND
+	HX_ESD_RESET_ACTIVATE = 1;
+#endif
+	himax_int_enable(private_ts->client->irq,1);
+
+	//todo himax_chip->tp_firmware_upgrade_proceed = 0;
+	//todo himax_chip->suspend_state = 0;
+	//todo enable_irq(himax_chip->irq);
+	return len;
+}
+
+static const struct file_operations himax_proc_debug_ops =
+{
+	.owner = THIS_MODULE,
+	.read = himax_debug_read,
+	.write = himax_debug_write,
+};
+
+#endif
+
+#ifdef HX_TP_PROC_FLASH_DUMP
+
+static uint8_t getFlashCommand(void)
+{
+	return flash_command;
+}
+
+static uint8_t getFlashDumpProgress(void)
+{
+	return flash_progress;
+}
+
+static uint8_t getFlashDumpComplete(void)
+{
+	return flash_dump_complete;
+}
+
+static uint8_t getFlashDumpFail(void)
+{
+	return flash_dump_fail;
+}
+
+uint8_t getSysOperation(void)
+{
+	return sys_operation;
+}
+
+static uint8_t getFlashReadStep(void)
+{
+	return flash_read_step;
+}
+/*
+static uint8_t getFlashDumpSector(void)
+{
+	return flash_dump_sector;
+}
+
+static uint8_t getFlashDumpPage(void)
+{
+	return flash_dump_page;
+}
+*/
+bool getFlashDumpGoing(void)
+{
+	return flash_dump_going;
+}
+
+void setFlashBuffer(void)
+{
+	flash_buffer = kzalloc(Flash_Size * sizeof(uint8_t), GFP_KERNEL);
+	if (flash_buffer)
+		memset(flash_buffer,0x00,Flash_Size);
+}
+
+void setSysOperation(uint8_t operation)
+{
+	sys_operation = operation;
+}
+
+static void setFlashDumpProgress(uint8_t progress)
+{
+	flash_progress = progress;
+	//I("setFlashDumpProgress : progress = %d ,flash_progress = %d \n",progress,flash_progress);
+}
+
+static void setFlashDumpComplete(uint8_t status)
+{
+	flash_dump_complete = status;
+}
+
+static void setFlashDumpFail(uint8_t fail)
+{
+	flash_dump_fail = fail;
+}
+
+static void setFlashCommand(uint8_t command)
+{
+	flash_command = command;
+}
+
+static void setFlashReadStep(uint8_t step)
+{
+	flash_read_step = step;
+}
+
+static void setFlashDumpSector(uint8_t sector)
+{
+	flash_dump_sector = sector;
+}
+
+static void setFlashDumpPage(uint8_t page)
+{
+	flash_dump_page = page;
+}
+
+static void setFlashDumpGoing(bool going)
+{
+	flash_dump_going = going;
+}
+
+static ssize_t himax_proc_flash_read(struct file *file, char *buf,
+	size_t len, loff_t *pos)
+{
+	int ret = 0;
+	int loop_i;
+	uint8_t local_flash_read_step=0;
+	uint8_t local_flash_complete = 0;
+	uint8_t local_flash_progress = 0;
+	uint8_t local_flash_command = 0;
+	uint8_t local_flash_fail = 0;
+	char *temp_buf;
+	local_flash_complete = getFlashDumpComplete();
+	local_flash_progress = getFlashDumpProgress();
+	local_flash_command = getFlashCommand();
+	local_flash_fail = getFlashDumpFail();
+
+	I("flash_progress = %d \n",local_flash_progress);
+	if(!HX_PROC_SEND_FLAG)
+	{
+		temp_buf = kzalloc(len, GFP_KERNEL);
+		if (!temp_buf) {
+			HX_PROC_SEND_FLAG=0;
+			return ret;
+		}
+
+		if (local_flash_fail)
+		{
+			ret += snprintf(temp_buf+ret, len-ret, "FlashStart:Fail \n");
+			ret += snprintf(temp_buf+ret, len-ret, "FlashEnd");
+			ret += snprintf(temp_buf+ret, len-ret, "\n");
+
+			if (copy_to_user(buf, temp_buf, len))
+			{
+				I("%s,here:%d\n", __func__, __LINE__);
+			}
+
+			kfree(temp_buf);
+			HX_PROC_SEND_FLAG = 1;
+			return ret;
+		}
+
+		if (!local_flash_complete)
+		{
+			ret += snprintf(temp_buf+ret, len-ret, "FlashStart:Ongoing:0x%2.2x \n",flash_progress);
+			ret += snprintf(temp_buf+ret, len-ret, "FlashEnd");
+			ret += snprintf(temp_buf+ret, len-ret, "\n");
+
+			if (copy_to_user(buf, temp_buf, len))
+			{
+				I("%s,here:%d\n", __func__, __LINE__);
+			}
+
+			kfree(temp_buf);
+			HX_PROC_SEND_FLAG = 1;
+			return ret;
+		}
+
+		if (local_flash_command == 1 && local_flash_complete)
+		{
+			ret += snprintf(temp_buf+ret, len-ret, "FlashStart:Complete \n");
+			ret += snprintf(temp_buf+ret, len-ret, "FlashEnd");
+			ret += snprintf(temp_buf+ret, len-ret, "\n");
+
+			if (copy_to_user(buf, temp_buf, len))
+			{
+				I("%s,here:%d\n", __func__, __LINE__);
+			}
+
+			kfree(temp_buf);
+			HX_PROC_SEND_FLAG = 1;
+			return ret;
+		}
+
+		if (local_flash_command == 3 && local_flash_complete)
+		{
+			ret += snprintf(temp_buf+ret, len-ret, "FlashStart: \n");
+			for(loop_i = 0; loop_i < 128; loop_i++)
+			{
+				ret += snprintf(temp_buf+ret, len-ret, "x%2.2x", flash_buffer[loop_i]);
+				if ((loop_i % 16) == 15)
+				{
+					ret += snprintf(temp_buf+ret, len-ret, "\n");
+				}
+			}
+			ret += snprintf(temp_buf+ret, len-ret, "FlashEnd");
+			ret += snprintf(temp_buf+ret, len-ret, "\n");
+
+			if (copy_to_user(buf, temp_buf, len))
+			{
+				I("%s,here:%d\n", __func__, __LINE__);
+			}
+
+			kfree(temp_buf);
+			HX_PROC_SEND_FLAG = 1;
+			return ret;
+		}
+
+		//flash command == 0 , report the data
+		local_flash_read_step = getFlashReadStep();
+
+		ret += snprintf(temp_buf+ret, len-ret, "FlashStart:%2.2x \n",local_flash_read_step);
+
+		for (loop_i = 0; loop_i < 1024; loop_i++)
+		{
+			ret += snprintf(temp_buf+ret, len-ret, "x%2.2X", flash_buffer[local_flash_read_step*1024 + loop_i]);
+
+			if ((loop_i % 16) == 15)
+			{
+				ret += snprintf(temp_buf+ret, len-ret, "\n");
+			}
+		}
+
+		ret += snprintf(temp_buf+ret, len-ret, "FlashEnd");
+		ret += snprintf(temp_buf+ret, len-ret, "\n");
+		if (copy_to_user(buf, temp_buf, len))
+		{
+			I("%s,here:%d\n", __func__, __LINE__);
+		}
+
+		kfree(temp_buf);
+		HX_PROC_SEND_FLAG = 1;
+	}
+	else
+		HX_PROC_SEND_FLAG=0;
+	return ret;
+}
+
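+/* Flash commands: '0:xNN' selects the 1KB read step returned by the read
+ * handler, '1_xx'/'2_xx' dump the whole flash for a 60k/64k/124k/128k part,
+ * '3' reads one sector/page, and '4' writes a page from the inline hex bytes;
+ * the actual work runs on the flash workqueue.
+ */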
+static ssize_t himax_proc_flash_write(struct file *file, const char *buff,
+	size_t len, loff_t *pos)
+{
+	char buf_tmp[6];
+	unsigned long result = 0;
+	uint8_t loop_i = 0;
+	int base = 0;
+	char buf[80] = {0};
+
+	if (len >= 80)
+	{
+		I("%s: command length must be under 80 chars\n", __func__);
+		return -EFAULT;
+	}
+	if (copy_from_user(buf, buff, len))
+	{
+		return -EFAULT;
+	}
+	memset(buf_tmp, 0x0, sizeof(buf_tmp));
+
+	I("%s: buf[0] = %s\n", __func__, buf);
+
+	if (getSysOperation() == 1)
+	{
+		E("%s: PROC is busy , return!\n", __func__);
+		return len;
+	}
+
+	if (buf[0] == '0')
+	{
+		setFlashCommand(0);
+		if (buf[1] == ':' && buf[2] == 'x')
+		{
+			memcpy(buf_tmp, buf + 3, 2);
+			I("%s: read_Step = %s\n", __func__, buf_tmp);
+			if (!kstrtoul(buf_tmp, 16, &result))
+			{
+				I("%s: read_Step = %lu \n", __func__, result);
+				setFlashReadStep(result);
+			}
+		}
+	}
+	else if (buf[0] == '1')// 1_60,1_64,1_24,1_28 for flash size 60k,64k,124k,128k
+	{
+		setSysOperation(1);
+		setFlashCommand(1);
+		setFlashDumpProgress(0);
+		setFlashDumpComplete(0);
+		setFlashDumpFail(0);
+		if ((buf[1] == '_' ) && (buf[2] == '6' )){
+			if (buf[3] == '0'){
+				Flash_Size = FW_SIZE_60k;
+			}else if (buf[3] == '4'){
+				Flash_Size = FW_SIZE_64k;
+			}
+		}else if ((buf[1] == '_' ) && (buf[2] == '2' )){
+			if (buf[3] == '4'){
+				Flash_Size = FW_SIZE_124k;
+			}else if (buf[3] == '8'){
+				Flash_Size = FW_SIZE_128k;
+			}
+		}
+		queue_work(private_ts->flash_wq, &private_ts->flash_work);
+	}
+	else if (buf[0] == '2') // 2_60,2_64,2_24,2_28 for flash size 60k,64k,124k,128k
+	{
+		setSysOperation(1);
+		setFlashCommand(2);
+		setFlashDumpProgress(0);
+		setFlashDumpComplete(0);
+		setFlashDumpFail(0);
+		if ((buf[1] == '_' ) && (buf[2] == '6' )){
+			if (buf[3] == '0'){
+				Flash_Size = FW_SIZE_60k;
+			}else if (buf[3] == '4'){
+				Flash_Size = FW_SIZE_64k;
+			}
+		}else if ((buf[1] == '_' ) && (buf[2] == '2' )){
+			if (buf[3] == '4'){
+				Flash_Size = FW_SIZE_124k;
+			}else if (buf[3] == '8'){
+				Flash_Size = FW_SIZE_128k;
+			}
+		}
+		queue_work(private_ts->flash_wq, &private_ts->flash_work);
+	}
+	else if (buf[0] == '3')
+	{
+		setSysOperation(1);
+		setFlashCommand(3);
+		setFlashDumpProgress(0);
+		setFlashDumpComplete(0);
+		setFlashDumpFail(0);
+
+		memcpy(buf_tmp, buf + 3, 2);
+		if (!kstrtoul(buf_tmp, 16, &result))
+		{
+			setFlashDumpSector(result);
+		}
+
+		memcpy(buf_tmp, buf + 7, 2);
+		if (!kstrtoul(buf_tmp, 16, &result))
+		{
+			setFlashDumpPage(result);
+		}
+
+		queue_work(private_ts->flash_wq, &private_ts->flash_work);
+	}
+	else if (buf[0] == '4')
+	{
+		I("%s: command 4 enter.\n", __func__);
+		setSysOperation(1);
+		setFlashCommand(4);
+		setFlashDumpProgress(0);
+		setFlashDumpComplete(0);
+		setFlashDumpFail(0);
+
+		memcpy(buf_tmp, buf + 3, 2);
+		if (!kstrtoul(buf_tmp, 16, &result))
+		{
+			setFlashDumpSector(result);
+		}
+		else
+		{
+			E("%s: command 4 , sector error.\n", __func__);
+			return len;
+		}
+
+		memcpy(buf_tmp, buf + 7, 2);
+		if (!kstrtoul(buf_tmp, 16, &result))
+		{
+			setFlashDumpPage(result);
+		}
+		else
+		{
+			E("%s: command 4 , page error.\n", __func__);
+			return len;
+		}
+
+		base = 11;
+
+		I("=========Himax flash page buffer start=========\n");
+		for(loop_i=0;loop_i<128 && base<80;loop_i++)
+		{
+			memcpy(buf_tmp, buf + base, 2);
+			if (!kstrtoul(buf_tmp, 16, &result))
+			{
+				flash_buffer[loop_i] = result;
+				I("%d ",flash_buffer[loop_i]);
+				if (loop_i % 16 == 15)
+				{
+					I("\n");
+				}
+			}
+			base += 3;
+		}
+		I("=========Himax flash page buffer end=========\n");
+
+		queue_work(private_ts->flash_wq, &private_ts->flash_work);
+	}
+	return len;
+}
+
+static const struct file_operations himax_proc_flash_ops =
+{
+	.owner = THIS_MODULE,
+	.read = himax_proc_flash_read,
+	.write = himax_proc_flash_write,
+};
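+
+/*
+ * Usage sketch for the node registered above, assuming it is created as
+ * /proc/android_touch/flash_dump (per HIMAX_PROC_TOUCH_FOLDER and
+ * HIMAX_PROC_FLASH_DUMP_FILE in himax_debug.h):
+ *
+ *   echo 1_28 > /proc/android_touch/flash_dump   # dump a 128k flash into flash_buffer
+ *   cat /proc/android_touch/flash_dump           # status: Ongoing / Complete / Fail
+ *   echo 0:x01 > /proc/android_touch/flash_dump  # select read step 1
+ *   cat /proc/android_touch/flash_dump           # hex dump of the selected 1k chunk
+ *
+ * Command '3' additionally takes sector and page hex bytes at offsets 3 and 7
+ * (e.g. "3_xSS_xPP"), and command '4' parses 128 page bytes from the hex
+ * fields starting at offset 11 (stepping by 3) into flash_buffer.
+ */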
+
+void himax_ts_flash_func(void)
+{
+	uint8_t local_flash_command = 0;
+
+	himax_int_enable(private_ts->client->irq,0);
+	setFlashDumpGoing(true);
+
+	//sector = getFlashDumpSector();
+	//page = getFlashDumpPage();
+
+	local_flash_command = getFlashCommand();
+
+	msleep(100);
+
+	I("%s: local_flash_command = %d enter.\n", __func__,local_flash_command);
+
+	if (local_flash_command == 1 || local_flash_command == 2 || local_flash_command == 0x0F)
+	{
+		himax_flash_dump_func(private_ts->client, local_flash_command,Flash_Size, flash_buffer);
+	}
+
+	I("Complete~~~~~~~~~~~~~~~~~~~~~~~\n");
+
+	if (local_flash_command == 2)
+	{
+		E("Flash dump failed\n");
+	}
+
+	himax_int_enable(private_ts->client->irq,1);
+	setFlashDumpGoing(false);
+
+	setFlashDumpComplete(1);
+	setSysOperation(0);
+	return;
+
+/*	Flash_Dump_i2c_transfer_error:
+
+	himax_int_enable(private_ts->client->irq,1);
+	setFlashDumpGoing(false);
+	setFlashDumpComplete(0);
+	setFlashDumpFail(1);
+	setSysOperation(0);
+	return;
+*/
+}
+
+#endif
+
+#ifdef HX_TP_PROC_SELF_TEST
+static ssize_t himax_self_test_read(struct file *file, char *buf,
+	size_t len, loff_t *pos)
+{
+	int val=0x00;
+	int ret = 0;
+	char *temp_buf;
+
+	I("%s: enter, %d \n", __func__, __LINE__);
+	if(!HX_PROC_SEND_FLAG)
+	{
+		temp_buf = kzalloc(len, GFP_KERNEL);
+		if (!temp_buf) {
+			HX_PROC_SEND_FLAG=0;
+			return ret;
+		}
+		himax_int_enable(private_ts->client->irq,0);//disable irq
+		val = himax_chip_self_test(private_ts->client);
+#ifdef HX_ESD_WORKAROUND
+		HX_ESD_RESET_ACTIVATE = 1;
+#endif
+		himax_int_enable(private_ts->client->irq,1);//enable irq
+
+		if (val == 0x01) {
+			ret += snprintf(temp_buf+ret, len-ret, "Self_Test Pass\n");
+		} else {
+			ret += snprintf(temp_buf+ret, len-ret, "Self_Test Fail\n");
+		}
+
+		if (copy_to_user(buf, temp_buf, len))
+		{
+			I("%s,here:%d\n", __func__, __LINE__);
+		}
+
+		kfree(temp_buf);
+		HX_PROC_SEND_FLAG = 1;
+	}
+	else
+		HX_PROC_SEND_FLAG=0;
+	return ret;
+}
+
+/*
+static ssize_t himax_chip_self_test_store(struct device *dev,struct device_attribute *attr, const char *buf, size_t count)
+{
+	char buf_tmp[2];
+	unsigned long result = 0;
+
+	memset(buf_tmp, 0x0, sizeof(buf_tmp));
+	memcpy(buf_tmp, buf, 2);
+	if(!kstrtoul(buf_tmp, 16, &result))
+		{
+			sel_type = (uint8_t)result;
+		}
+	I("sel_type = %x \r\n", sel_type);
+	return count;
+}
+*/
+
+static const struct file_operations himax_proc_self_test_ops =
+{
+	.owner = THIS_MODULE,
+	.read = himax_self_test_read,
+};
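+
+/*
+ * Reading this node runs the self test synchronously: the IRQ is disabled,
+ * himax_chip_self_test() is called, and a single result line is returned.
+ * A minimal sketch, assuming the node lands under the android_touch proc
+ * directory created in himax_touch_proc_init():
+ *
+ *   cat /proc/android_touch/self_test   # prints "Self_Test Pass" or "Self_Test Fail"
+ */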
+#endif
+
+#ifdef HX_TP_PROC_SENSE_ON_OFF
+static ssize_t himax_sense_on_off_write(struct file *file, const char *buff,
+	size_t len, loff_t *pos)
+{
+	char buf[80] = {0};
+
+	if (len >= 80)
+	{
+		I("%s: command must be shorter than 80 chars.\n", __func__);
+		return -EFAULT;
+	}
+	if (copy_from_user(buf, buff, len))
+	{
+		return -EFAULT;
+	}
+
+	if(buf[0] == '0')
+	{
+		himax_sense_off(private_ts->client);
+		I("Sense off \n");
+	}
+	else if(buf[0] == '1')
+	{
+		if(buf[1] == '1'){
+			himax_sense_on(private_ts->client, 0x01);
+			I("Sense on re-map off, run flash \n");
+		}else if(buf[1] == '0'){
+			himax_sense_on(private_ts->client, 0x00);
+			I("Sense on re-map on, run sram \n");
+		}else{
+			I("Do nothing \n");
+		}
+	}
+	else
+	{
+		I("Do nothing \n");
+	}
+	return len;
+}
+
+static const struct file_operations himax_proc_sense_on_off_ops =
+{
+	.owner = THIS_MODULE,
+	.write = himax_sense_on_off_write,
+};
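+
+/*
+ * Write-only control mapping (from the handler above), e.g. via
+ * /proc/android_touch/SenseOnOff:
+ *
+ *   echo 0  > SenseOnOff   # himax_sense_off()
+ *   echo 10 > SenseOnOff   # sense on, re-map on, run from SRAM
+ *   echo 11 > SenseOnOff   # sense on, re-map off, run from flash
+ */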
+#endif
+
+#ifdef HX_HIGH_SENSE
+static ssize_t himax_HSEN_read(struct file *file, char *buf,
+	size_t len, loff_t *pos)
+{
+	struct himax_ts_data *ts = private_ts;
+	size_t count = 0;
+	char *temp_buf;
+
+	if(!HX_PROC_SEND_FLAG)
+	{
+		temp_buf = kzalloc(len, GFP_KERNEL);
+		if (!temp_buf) {
+			HX_PROC_SEND_FLAG=0;
+			return count;
+		}
+		count = snprintf(temp_buf, len, "%d\n", ts->HSEN_enable);
+		HX_PROC_SEND_FLAG=1;
+
+		if (copy_to_user(buf, temp_buf, len))
+		{
+			I("%s,here:%d\n", __func__, __LINE__);
+		}
+
+		kfree(temp_buf);
+	}
+	else
+		HX_PROC_SEND_FLAG=0;
+	return count;
+}
+
+static ssize_t himax_HSEN_write(struct file *file, const char *buff,
+	size_t len, loff_t *pos)
+{
+	struct himax_ts_data *ts = private_ts;
+	char buf[80] = {0};
+
+
+	if (len >= 80)
+	{
+		I("%s: command must be shorter than 80 chars.\n", __func__);
+		return -EFAULT;
+	}
+	if (copy_from_user(buf, buff, len))
+	{
+		return -EFAULT;
+	}
+
+	if (buf[0] == '0'){
+		ts->HSEN_enable = 0;
+	}
+	else if (buf[0] == '1'){
+		ts->HSEN_enable = 1;
+	}
+	else
+		return -EINVAL;
+
+	himax_set_HSEN_func(ts->client, ts->HSEN_enable);
+
+	I("%s: HSEN_enable = %d.\n", __func__, ts->HSEN_enable);
+
+	return len;
+}
+
+static const struct file_operations himax_proc_HSEN_ops =
+{
+	.owner = THIS_MODULE,
+	.read = himax_HSEN_read,
+	.write = himax_HSEN_write,
+};
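+
+/*
+ * HSEN (high sensitivity) node sketch, assuming the default proc path:
+ *
+ *   echo 1 > /proc/android_touch/HSEN   # enable, pushed to the IC via himax_set_HSEN_func()
+ *   echo 0 > /proc/android_touch/HSEN   # disable
+ *   cat /proc/android_touch/HSEN        # prints the cached HSEN_enable value
+ */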
+#endif
+
+#ifdef HX_SMART_WAKEUP
+static ssize_t himax_SMWP_read(struct file *file, char *buf,
+	size_t len, loff_t *pos)
+{
+	size_t count = 0;
+	struct himax_ts_data *ts = private_ts;
+	char *temp_buf;
+
+	if(!HX_PROC_SEND_FLAG)
+	{
+		temp_buf = kzalloc(len, GFP_KERNEL);
+		if (!temp_buf) {
+			HX_PROC_SEND_FLAG=0;
+			return count;
+		}
+		count = snprintf(temp_buf, len, "%d\n", ts->SMWP_enable);
+
+		if (copy_to_user(buf, temp_buf, len))
+		{
+			I("%s,here:%d\n", __func__, __LINE__);
+		}
+
+		kfree(temp_buf);
+		HX_PROC_SEND_FLAG=1;
+	}
+	else
+		HX_PROC_SEND_FLAG=0;
+
+	return count;
+}
+
+static ssize_t himax_SMWP_write(struct file *file, const char *buff,
+	size_t len, loff_t *pos)
+{
+	struct himax_ts_data *ts = private_ts;
+	char buf[80] = {0};
+
+	if (len >= 80)
+	{
+		I("%s: command must be shorter than 80 chars.\n", __func__);
+		return -EFAULT;
+	}
+	if (copy_from_user(buf, buff, len))
+	{
+		return -EFAULT;
+	}
+
+
+	if (buf[0] == '0')
+	{
+		ts->SMWP_enable = 0;
+	}
+	else if (buf[0] == '1')
+	{
+		ts->SMWP_enable = 1;
+	}
+	else
+		return -EINVAL;
+
+	himax_set_SMWP_func(ts->client, ts->SMWP_enable);
+	HX_SMWP_EN = ts->SMWP_enable;
+	I("%s: SMART_WAKEUP_enable = %d.\n", __func__, HX_SMWP_EN);
+
+	return len;
+}
+
+static const struct file_operations himax_proc_SMWP_ops =
+{
+	.owner = THIS_MODULE,
+	.read = himax_SMWP_read,
+	.write = himax_SMWP_write,
+};
+
+static ssize_t himax_GESTURE_read(struct file *file, char *buf,
+	size_t len, loff_t *pos)
+{
+	struct himax_ts_data *ts = private_ts;
+	int i =0;
+	int ret = 0;
+	char *temp_buf;
+
+	if(!HX_PROC_SEND_FLAG)
+	{
+		temp_buf = kzalloc(len, GFP_KERNEL);
+		if (!temp_buf) {
+			HX_PROC_SEND_FLAG=0;
+			return ret;
+		}
+		for(i=0;i<16;i++)
+			ret += snprintf(temp_buf+ret, len-ret, "ges_en[%d]=%d\n", i, ts->gesture_cust_en[i]);
+		HX_PROC_SEND_FLAG = 1;
+		if (copy_to_user(buf, temp_buf, len))
+		{
+			I("%s,here:%d\n", __func__, __LINE__);
+		}
+
+		kfree(temp_buf);
+		HX_PROC_SEND_FLAG = 1;
+	}
+	else
+	{
+		HX_PROC_SEND_FLAG = 0;
+		ret = 0;
+	}
+	return ret;
+}
+
+static ssize_t himax_GESTURE_write(struct file *file, const char *buff,
+	size_t len, loff_t *pos)
+{
+	struct himax_ts_data *ts = private_ts;
+	int i =0;
+	char buf[80] = {0};
+
+	if (len >= 80)
+	{
+		I("%s: command must be shorter than 80 chars.\n", __func__);
+		return -EFAULT;
+	}
+	if (copy_from_user(buf, buff, len))
+	{
+		return -EFAULT;
+	}
+
+	I("himax_GESTURE_store= %s \n",buf);
+	for (i=0;i<16;i++)
+	{
+		if (buf[i] == '0')
+			ts->gesture_cust_en[i]= 0;
+		else if (buf[i] == '1')
+			ts->gesture_cust_en[i]= 1;
+		else
+			ts->gesture_cust_en[i]= 0;
+		I("gesture en[%d]=%d \n", i, ts->gesture_cust_en[i]);
+	}
+	return len;
+}
+
+static const struct file_operations himax_proc_Gesture_ops =
+{
+	.owner = THIS_MODULE,
+	.read = himax_GESTURE_read,
+	.write = himax_GESTURE_write,
+};
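+
+/*
+ * Smart-wakeup control sketch, assuming the default proc paths:
+ *
+ *   echo 1 > /proc/android_touch/SMWP    # enable, pushed via himax_set_SMWP_func()
+ *   cat /proc/android_touch/SMWP         # prints the cached SMWP_enable value
+ *   echo 1100000000000000 > /proc/android_touch/GESTURE
+ *       # each of the first 16 characters ('0'/'1') sets gesture_cust_en[0..15]
+ */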
+#endif
+
+int himax_touch_proc_init(void)
+{
+	himax_touch_proc_dir = proc_mkdir( HIMAX_PROC_TOUCH_FOLDER, NULL);
+	if (himax_touch_proc_dir == NULL)
+	{
+		E(" %s: himax_touch_proc_dir file create failed!\n", __func__);
+		return -ENOMEM;
+	}
+
+	himax_proc_debug_level_file = proc_create(HIMAX_PROC_DEBUG_LEVEL_FILE, (S_IWUSR|S_IRUGO), himax_touch_proc_dir, &himax_proc_debug_level_ops);
+	if (himax_proc_debug_level_file == NULL)
+	{
+		E(" %s: proc debug_level file create failed!\n", __func__);
+		goto fail_1;
+	}
+
+	himax_proc_vendor_file = proc_create(HIMAX_PROC_VENDOR_FILE, (S_IRUGO),himax_touch_proc_dir, &himax_proc_vendor_ops);
+	if(himax_proc_vendor_file == NULL)
+	{
+		E(" %s: proc vendor file create failed!\n", __func__);
+		goto fail_2;
+	}
+
+	himax_proc_attn_file = proc_create(HIMAX_PROC_ATTN_FILE, (S_IRUGO),himax_touch_proc_dir, &himax_proc_attn_ops);
+	if(himax_proc_attn_file == NULL)
+	{
+		E(" %s: proc attn file create failed!\n", __func__);
+		goto fail_3;
+	}
+
+	himax_proc_int_en_file = proc_create(HIMAX_PROC_INT_EN_FILE, (S_IWUSR|S_IRUGO), himax_touch_proc_dir, &himax_proc_int_en_ops);
+	if(himax_proc_int_en_file == NULL)
+	{
+		E(" %s: proc int en file create failed!\n", __func__);
+		goto fail_4;
+	}
+
+	himax_proc_layout_file = proc_create(HIMAX_PROC_LAYOUT_FILE, (S_IWUSR|S_IRUGO), himax_touch_proc_dir, &himax_proc_layout_ops);
+	if(himax_proc_layout_file == NULL)
+	{
+		E(" %s: proc layout file create failed!\n", __func__);
+		goto fail_5;
+	}
+
+#ifdef HX_TP_PROC_RESET
+	himax_proc_reset_file = proc_create(HIMAX_PROC_RESET_FILE, (S_IWUSR), himax_touch_proc_dir, &himax_proc_reset_ops);
+	if(himax_proc_reset_file == NULL)
+	{
+		E(" %s: proc reset file create failed!\n", __func__);
+		goto fail_6;
+	}
+#endif
+
+#ifdef HX_TP_PROC_DIAG
+	himax_proc_diag_file = proc_create(HIMAX_PROC_DIAG_FILE, (S_IWUSR|S_IRUGO), himax_touch_proc_dir, &himax_proc_diag_ops);
+	if(himax_proc_diag_file == NULL)
+	{
+		E(" %s: proc diag file create failed!\n", __func__);
+		goto fail_7;
+	}
+	himax_proc_diag_arrange_file = proc_create(HIMAX_PROC_DIAG_ARR_FILE, (S_IWUSR|S_IRUGO), himax_touch_proc_dir, &himax_proc_diag_arrange_ops);
+	if(himax_proc_diag_arrange_file == NULL)
+	{
+		E(" %s: proc diag file create failed!\n", __func__);
+		goto fail_7_1;
+	}
+#endif
+
+#ifdef HX_TP_PROC_REGISTER
+	himax_proc_register_file = proc_create(HIMAX_PROC_REGISTER_FILE, (S_IWUSR|S_IRUGO), himax_touch_proc_dir, &himax_proc_register_ops);
+	if(himax_proc_register_file == NULL)
+	{
+		E(" %s: proc register file create failed!\n", __func__);
+		goto fail_8;
+	}
+#endif
+
+#ifdef HX_TP_PROC_DEBUG
+	himax_proc_debug_file = proc_create(HIMAX_PROC_DEBUG_FILE, (S_IWUSR|S_IRUGO), himax_touch_proc_dir, &himax_proc_debug_ops);
+	if(himax_proc_debug_file == NULL)
+	{
+		E(" %s: proc debug file create failed!\n", __func__);
+		goto fail_9;
+	}
+#endif
+
+#ifdef HX_TP_PROC_FLASH_DUMP
+	himax_proc_flash_dump_file = proc_create(HIMAX_PROC_FLASH_DUMP_FILE, (S_IWUSR|S_IRUGO), himax_touch_proc_dir, &himax_proc_flash_ops);
+	if(himax_proc_flash_dump_file == NULL)
+	{
+		E(" %s: proc flash dump file create failed!\n", __func__);
+		goto fail_10;
+	}
+#endif
+
+#ifdef HX_TP_PROC_SELF_TEST
+	himax_proc_self_test_file = proc_create(HIMAX_PROC_SELF_TEST_FILE, (S_IRUGO), himax_touch_proc_dir, &himax_proc_self_test_ops);
+	if(himax_proc_self_test_file == NULL)
+	{
+		E(" %s: proc self_test file create failed!\n", __func__);
+		goto fail_11;
+	}
+#endif
+
+#ifdef HX_HIGH_SENSE
+	himax_proc_HSEN_file = proc_create(HIMAX_PROC_HSEN_FILE, (S_IWUSR|S_IRUGO|S_IWUGO), himax_touch_proc_dir, &himax_proc_HSEN_ops);
+	if(himax_proc_HSEN_file == NULL)
+	{
+		E(" %s: proc HSEN file create failed!\n", __func__);
+		goto fail_12;
+	}
+#endif
+
+#ifdef HX_SMART_WAKEUP
+	himax_proc_SMWP_file = proc_create(HIMAX_PROC_SMWP_FILE, (S_IWUSR|S_IRUGO|S_IWUGO), himax_touch_proc_dir, &himax_proc_SMWP_ops);
+	if(himax_proc_SMWP_file == NULL)
+	{
+		E(" %s: proc SMWP file create failed!\n", __func__);
+		goto fail_13;
+	}
+	himax_proc_GESTURE_file = proc_create(HIMAX_PROC_GESTURE_FILE, (S_IWUSR|S_IRUGO|S_IWUGO), himax_touch_proc_dir, &himax_proc_Gesture_ops);
+	if(himax_proc_GESTURE_file == NULL)
+	{
+		E(" %s: proc GESTURE file create failed!\n", __func__);
+		goto fail_14;
+	}
+#endif
+
+#ifdef HX_TP_PROC_SENSE_ON_OFF
+	himax_proc_SENSE_ON_OFF_file = proc_create(HIMAX_PROC_SENSE_ON_OFF_FILE, (S_IWUSR|S_IRUGO|S_IWUGO), himax_touch_proc_dir, &himax_proc_sense_on_off_ops);
+	if(himax_proc_SENSE_ON_OFF_file == NULL)
+	{
+		E(" %s: proc SENSE_ON_OFF file create failed!\n", __func__);
+		goto fail_15;
+	}
+#endif
+
+	return 0 ;
+
+#ifdef HX_TP_PROC_SENSE_ON_OFF
+	fail_15:
+#endif
+#ifdef HX_SMART_WAKEUP
+	remove_proc_entry( HIMAX_PROC_GESTURE_FILE, himax_touch_proc_dir );
+	fail_14:
+	remove_proc_entry( HIMAX_PROC_SMWP_FILE, himax_touch_proc_dir );
+	fail_13:
+#endif
+#ifdef HX_HIGH_SENSE
+	remove_proc_entry( HIMAX_PROC_HSEN_FILE, himax_touch_proc_dir );
+	fail_12:
+#endif
+#ifdef HX_TP_PROC_SELF_TEST
+	remove_proc_entry( HIMAX_PROC_SELF_TEST_FILE, himax_touch_proc_dir );
+	fail_11:
+#endif
+#ifdef HX_TP_PROC_FLASH_DUMP
+	remove_proc_entry( HIMAX_PROC_FLASH_DUMP_FILE, himax_touch_proc_dir );
+	fail_10:
+#endif
+#ifdef HX_TP_PROC_DEBUG
+	remove_proc_entry( HIMAX_PROC_DEBUG_FILE, himax_touch_proc_dir );
+	fail_9:
+#endif
+#ifdef HX_TP_PROC_REGISTER
+	remove_proc_entry( HIMAX_PROC_REGISTER_FILE, himax_touch_proc_dir );
+	fail_8:
+#endif
+#ifdef HX_TP_PROC_DIAG
+	remove_proc_entry( HIMAX_PROC_DIAG_ARR_FILE, himax_touch_proc_dir );
+	fail_7_1:
+	remove_proc_entry( HIMAX_PROC_DIAG_FILE, himax_touch_proc_dir );
+	fail_7:
+#endif
+#ifdef HX_TP_PROC_RESET
+	remove_proc_entry( HIMAX_PROC_RESET_FILE, himax_touch_proc_dir );
+	fail_6:
+#endif
+	remove_proc_entry( HIMAX_PROC_LAYOUT_FILE, himax_touch_proc_dir );
+	fail_5: remove_proc_entry( HIMAX_PROC_INT_EN_FILE, himax_touch_proc_dir );
+	fail_4: remove_proc_entry( HIMAX_PROC_ATTN_FILE, himax_touch_proc_dir );
+	fail_3: remove_proc_entry( HIMAX_PROC_VENDOR_FILE, himax_touch_proc_dir );
+	fail_2: remove_proc_entry( HIMAX_PROC_DEBUG_LEVEL_FILE, himax_touch_proc_dir );
+	fail_1: remove_proc_entry( HIMAX_PROC_TOUCH_FOLDER, NULL );
+	return -ENOMEM;
+}
+
+void himax_touch_proc_deinit(void)
+{
+#ifdef HX_TP_PROC_SENSE_ON_OFF
+	remove_proc_entry( HIMAX_PROC_SENSE_ON_OFF_FILE, himax_touch_proc_dir );
+#endif
+#ifdef HX_SMART_WAKEUP
+	remove_proc_entry( HIMAX_PROC_GESTURE_FILE, himax_touch_proc_dir );
+	remove_proc_entry( HIMAX_PROC_SMWP_FILE, himax_touch_proc_dir );
+#endif
+#ifdef HX_HIGH_SENSE
+	remove_proc_entry( HIMAX_PROC_HSEN_FILE, himax_touch_proc_dir );
+#endif
+#ifdef HX_TP_PROC_SELF_TEST
+	remove_proc_entry(HIMAX_PROC_SELF_TEST_FILE, himax_touch_proc_dir);
+#endif
+#ifdef HX_TP_PROC_FLASH_DUMP
+	remove_proc_entry(HIMAX_PROC_FLASH_DUMP_FILE, himax_touch_proc_dir);
+#endif
+#ifdef HX_TP_PROC_DEBUG
+	remove_proc_entry( HIMAX_PROC_DEBUG_FILE, himax_touch_proc_dir );
+#endif
+#ifdef HX_TP_PROC_REGISTER
+	remove_proc_entry(HIMAX_PROC_REGISTER_FILE, himax_touch_proc_dir);
+#endif
+#ifdef HX_TP_PROC_DIAG
+	remove_proc_entry(HIMAX_PROC_DIAG_ARR_FILE, himax_touch_proc_dir);
+	remove_proc_entry(HIMAX_PROC_DIAG_FILE, himax_touch_proc_dir);
+#endif
+#ifdef HX_TP_PROC_RESET
+	remove_proc_entry( HIMAX_PROC_RESET_FILE, himax_touch_proc_dir );
+#endif
+	remove_proc_entry( HIMAX_PROC_LAYOUT_FILE, himax_touch_proc_dir );
+	remove_proc_entry( HIMAX_PROC_INT_EN_FILE, himax_touch_proc_dir );
+	remove_proc_entry( HIMAX_PROC_ATTN_FILE, himax_touch_proc_dir );
+	remove_proc_entry( HIMAX_PROC_VENDOR_FILE, himax_touch_proc_dir );
+	remove_proc_entry( HIMAX_PROC_DEBUG_LEVEL_FILE, himax_touch_proc_dir );
+	remove_proc_entry( HIMAX_PROC_TOUCH_FOLDER, NULL );
+}
+#endif
diff --git a/drivers/input/touchscreen/hxchipset/himax_debug.h b/drivers/input/touchscreen/hxchipset/himax_debug.h
new file mode 100644
index 0000000..91a7ae2
--- /dev/null
+++ b/drivers/input/touchscreen/hxchipset/himax_debug.h
@@ -0,0 +1,181 @@
+/* Himax Android Driver Sample Code for Himax chipset
+*
+* Copyright (C) 2015 Himax Corporation.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+*/
+
+#include "himax_platform.h"
+#include "himax_common.h"
+
+#if defined(CONFIG_TOUCHSCREEN_HIMAX_DEBUG)
+	#define HIMAX_PROC_TOUCH_FOLDER 	"android_touch"
+	#define HIMAX_PROC_DEBUG_LEVEL_FILE	"debug_level"
+	#define HIMAX_PROC_VENDOR_FILE		"vendor"
+	#define HIMAX_PROC_ATTN_FILE		"attn"
+	#define HIMAX_PROC_INT_EN_FILE		"int_en"
+	#define HIMAX_PROC_LAYOUT_FILE		"layout"
+
+	static struct proc_dir_entry *himax_touch_proc_dir;
+	static struct proc_dir_entry *himax_proc_debug_level_file;
+	static struct proc_dir_entry *himax_proc_vendor_file;
+	static struct proc_dir_entry *himax_proc_attn_file;
+	static struct proc_dir_entry *himax_proc_int_en_file;
+	static struct proc_dir_entry *himax_proc_layout_file;
+
+	uint8_t HX_PROC_SEND_FLAG;
+
+extern int himax_touch_proc_init(void);
+extern void himax_touch_proc_deinit(void);
+bool getFlashDumpGoing(void);
+
+#ifdef HX_TP_PROC_REGISTER
+	#define HIMAX_PROC_REGISTER_FILE	"register"
+	struct proc_dir_entry *himax_proc_register_file;
+	uint8_t register_command[4];
+#endif
+
+#ifdef HX_TP_PROC_DIAG
+	#define HIMAX_PROC_DIAG_FILE	"diag"
+	struct proc_dir_entry *himax_proc_diag_file;
+	#define HIMAX_PROC_DIAG_ARR_FILE	"diag_arr"
+	struct proc_dir_entry *himax_proc_diag_arrange_file;
+
+#ifdef HX_TP_PROC_2T2R
+	static bool Is_2T2R;
+	static uint8_t x_channel_2;
+	static uint8_t y_channel_2;
+	static uint8_t *diag_mutual_2;
+	
+	int16_t *getMutualBuffer_2(void);
+	uint8_t 	getXChannel_2(void);
+	uint8_t 	getYChannel_2(void);
+	
+	void 	setMutualBuffer_2(void);
+	void 	setXChannel_2(uint8_t x);
+	void 	setYChannel_2(uint8_t y);
+#endif
+	uint8_t x_channel;
+	uint8_t y_channel;
+	int16_t *diag_mutual;
+	int16_t *diag_mutual_new;
+	int16_t *diag_mutual_old;
+	uint8_t diag_max_cnt;
+
+	int diag_command;
+	uint8_t diag_coor[128];// = {0xFF};
+	int16_t diag_self[100] = {0};
+
+	int16_t *getMutualBuffer(void);
+	int16_t *getMutualNewBuffer(void);
+	int16_t *getMutualOldBuffer(void);
+	int16_t *getSelfBuffer(void);
+	uint8_t 	getDiagCommand(void);
+	uint8_t 	getXChannel(void);
+	uint8_t 	getYChannel(void);
+	
+	void 	setMutualBuffer(void);
+	void 	setMutualNewBuffer(void);
+	void 	setMutualOldBuffer(void);
+	void 	setXChannel(uint8_t x);
+	void 	setYChannel(uint8_t y);
+	uint8_t	coordinate_dump_enable = 0;
+	struct file	*coordinate_fn;
+#endif
+
+#ifdef HX_TP_PROC_DEBUG
+	#define HIMAX_PROC_DEBUG_FILE	"debug"
+	struct proc_dir_entry *himax_proc_debug_file = NULL;
+
+	bool	fw_update_complete = false;
+	int handshaking_result = 0;
+	unsigned char debug_level_cmd = 0;
+	unsigned char upgrade_fw[128*1024];
+#endif
+
+#ifdef HX_TP_PROC_FLASH_DUMP
+	#define HIMAX_PROC_FLASH_DUMP_FILE	"flash_dump"
+	struct proc_dir_entry *himax_proc_flash_dump_file = NULL;
+
+	static int Flash_Size = 131072;
+	static uint8_t *flash_buffer 				= NULL;
+	static uint8_t flash_command 				= 0;
+	static uint8_t flash_read_step 			= 0;
+	static uint8_t flash_progress 			= 0;
+	static uint8_t flash_dump_complete	= 0;
+	static uint8_t flash_dump_fail 			= 0;
+	static uint8_t sys_operation				= 0;
+	static uint8_t flash_dump_sector	 	= 0;
+	static uint8_t flash_dump_page 			= 0;
+	static bool    flash_dump_going			= false;
+
+	static uint8_t getFlashCommand(void);
+	static uint8_t getFlashDumpComplete(void);
+	static uint8_t getFlashDumpFail(void);
+	static uint8_t getFlashDumpProgress(void);
+	static uint8_t getFlashReadStep(void);
+	//static uint8_t getFlashDumpSector(void);
+	//static uint8_t getFlashDumpPage(void);
+
+	void setFlashBuffer(void);
+	uint8_t getSysOperation(void);
+
+	static void setFlashCommand(uint8_t command);
+	static void setFlashReadStep(uint8_t step);
+	static void setFlashDumpComplete(uint8_t complete);
+	static void setFlashDumpFail(uint8_t fail);
+	static void setFlashDumpProgress(uint8_t progress);
+	void setSysOperation(uint8_t operation);
+	static void setFlashDumpSector(uint8_t sector);
+	static void setFlashDumpPage(uint8_t page);
+	static void setFlashDumpGoing(bool going);
+
+#endif
+
+#ifdef HX_TP_PROC_SELF_TEST
+	#define HIMAX_PROC_SELF_TEST_FILE	"self_test"
+	struct proc_dir_entry *himax_proc_self_test_file = NULL;
+	uint32_t **raw_data_array;
+	uint8_t X_NUM = 0, Y_NUM = 0;
+	uint8_t sel_type = 0x0D;
+#endif
+
+#ifdef HX_TP_PROC_RESET
+#define HIMAX_PROC_RESET_FILE		"reset"
+extern void himax_HW_reset(uint8_t loadconfig,uint8_t int_off);
+struct proc_dir_entry *himax_proc_reset_file 		= NULL;
+#endif
+
+#ifdef HX_HIGH_SENSE
+	#define HIMAX_PROC_HSEN_FILE "HSEN"
+	struct proc_dir_entry *himax_proc_HSEN_file = NULL;
+#endif
+
+#ifdef HX_TP_PROC_SENSE_ON_OFF
+	#define HIMAX_PROC_SENSE_ON_OFF_FILE "SenseOnOff"
+	struct proc_dir_entry *himax_proc_SENSE_ON_OFF_file = NULL;
+#endif
+
+#ifdef HX_RST_PIN_FUNC
+	void himax_HW_reset(uint8_t loadconfig,uint8_t int_off);
+#endif
+
+#ifdef HX_SMART_WAKEUP
+#define HIMAX_PROC_SMWP_FILE "SMWP"
+struct proc_dir_entry *himax_proc_SMWP_file = NULL;
+#define HIMAX_PROC_GESTURE_FILE "GESTURE"
+struct proc_dir_entry *himax_proc_GESTURE_file = NULL;
+uint8_t HX_SMWP_EN = 0;
+//extern bool FAKE_POWER_KEY_SEND;
+#endif
+
+#endif
+
diff --git a/drivers/input/touchscreen/hxchipset/himax_ic.c b/drivers/input/touchscreen/hxchipset/himax_ic.c
new file mode 100644
index 0000000..6ad8dc0
--- /dev/null
+++ b/drivers/input/touchscreen/hxchipset/himax_ic.c
@@ -0,0 +1,2118 @@
+/* Himax Android Driver Sample Code for HMX83100 chipset
+*
+* Copyright (C) 2015 Himax Corporation.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+*/
+
+#include "himax_ic.h"
+
+static unsigned char i_TP_CRC_FW_128K[]=
+{
+	#include "HX_CRC_128.i"
+};
+static unsigned char i_TP_CRC_FW_64K[]=
+{
+	#include "HX_CRC_64.i"
+};
+static unsigned char i_TP_CRC_FW_124K[]=
+{
+	#include "HX_CRC_124.i"
+};
+static unsigned char i_TP_CRC_FW_60K[]=
+{
+	#include "HX_CRC_60.i"
+};
+
+
+unsigned long	FW_VER_MAJ_FLASH_ADDR;
+unsigned long 	FW_VER_MAJ_FLASH_LENG;
+unsigned long 	FW_VER_MIN_FLASH_ADDR;
+unsigned long 	FW_VER_MIN_FLASH_LENG;
+unsigned long 	CFG_VER_MAJ_FLASH_ADDR;
+unsigned long 	CFG_VER_MAJ_FLASH_LENG;
+unsigned long 	CFG_VER_MIN_FLASH_ADDR;
+unsigned long 	CFG_VER_MIN_FLASH_LENG;
+
+unsigned char	IC_TYPE = 0;
+unsigned char	IC_CHECKSUM = 0;
+
+extern struct himax_ic_data* ic_data;
+
+int himax_hand_shaking(struct i2c_client *client)    //0:Running, 1:Stop, 2:I2C Fail
+{
+	int ret, result;
+	uint8_t hw_reset_check[1];
+	uint8_t hw_reset_check_2[1];
+	uint8_t buf0[2];
+	uint8_t	IC_STATUS_CHECK = 0xAA;	
+
+	memset(hw_reset_check, 0x00, sizeof(hw_reset_check));
+	memset(hw_reset_check_2, 0x00, sizeof(hw_reset_check_2));
+
+	buf0[0] = 0xF2;
+	if (IC_STATUS_CHECK == 0xAA) {
+		buf0[1] = 0xAA;
+		IC_STATUS_CHECK = 0x55;
+	} else {
+		buf0[1] = 0x55;
+		IC_STATUS_CHECK = 0xAA;
+	}
+
+	ret = i2c_himax_master_write(client, buf0, 2, HIMAX_I2C_RETRY_TIMES);
+	if (ret < 0) {
+		E("[Himax]:write 0xF2 failed line: %d \n",__LINE__);
+		goto work_func_send_i2c_msg_fail;
+	}
+	msleep(50); 
+  	
+	buf0[0] = 0xF2;
+	buf0[1] = 0x00;
+	ret = i2c_himax_master_write(client, buf0, 2, HIMAX_I2C_RETRY_TIMES);
+	if (ret < 0) {
+		E("[Himax]:write 0xF2 failed line: %d \n",__LINE__);
+		goto work_func_send_i2c_msg_fail;
+	}
+	usleep_range(2000, 4000);
+  	
+	ret = i2c_himax_read(client, 0xD1, hw_reset_check, 1, HIMAX_I2C_RETRY_TIMES);
+	if (ret < 0) {
+		E("[Himax]:i2c_himax_read 0xD1 failed line: %d \n",__LINE__);
+		goto work_func_send_i2c_msg_fail;
+	}
+	
+	if ((IC_STATUS_CHECK != hw_reset_check[0])) {
+		usleep_range(2000, 4000);
+		ret = i2c_himax_read(client, 0xD1, hw_reset_check_2, 1, HIMAX_I2C_RETRY_TIMES);
+		if (ret < 0) {
+			E("[Himax]:i2c_himax_read 0xD1 failed line: %d \n",__LINE__);
+			goto work_func_send_i2c_msg_fail;
+		}
+	
+		if (hw_reset_check[0] == hw_reset_check_2[0]) {
+			result = 1; 
+		} else {
+			result = 0; 
+		}
+	} else {
+		result = 0; 
+	}
+	
+	return result;
+
+work_func_send_i2c_msg_fail:
+	return 2;
+}
+
+void himax_diag_register_set(struct i2c_client *client, uint8_t diag_command)
+{
+	uint8_t tmp_addr[4];
+	uint8_t tmp_data[4];
+
+	if(diag_command != 0)
+		diag_command = diag_command + 5;
+
+	tmp_addr[3] = 0x80; tmp_addr[2] = 0x02; tmp_addr[1] = 0x01; tmp_addr[0] = 0x80;
+	tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = diag_command;
+	himax_flash_write_burst(client, tmp_addr, tmp_data);
+}
+
+void himax_flash_dump_func(struct i2c_client *client, uint8_t local_flash_command, int Flash_Size, uint8_t *flash_buffer)
+{
+	//struct himax_ts_data *ts = container_of(work, struct himax_ts_data, flash_work);
+
+//	uint8_t sector = 0;
+//	uint8_t page = 0;
+	uint8_t tmp_addr[4];
+	uint8_t tmp_data[4];
+	uint8_t out_buffer[20];
+	uint8_t in_buffer[260] = {0};
+	int page_prog_start = 0;
+	int i = 0;
+
+	himax_sense_off(client);
+	himax_burst_enable(client, 0);
+	/*=============Dump Flash Start=============*/
+	//=====================================
+	// SPI Transfer Format : 0x8000_0010 ==> 0x0002_0780
+	//=====================================
+	tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x10;
+	tmp_data[3] = 0x00; tmp_data[2] = 0x02; tmp_data[1] = 0x07; tmp_data[0] = 0x80;
+	himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+	for (page_prog_start = 0; page_prog_start < Flash_Size; page_prog_start = page_prog_start + 256)
+	{
+		//=================================
+		// SPI Transfer Control
+		// Set 256 bytes page read : 0x8000_0020 ==> 0x6940_02FF
+		// Set read start address  : 0x8000_0028 ==> 0x0000_0000
+		// Set command			   : 0x8000_0024 ==> 0x0000_003B
+		//=================================
+		tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x20;
+		tmp_data[3] = 0x69; tmp_data[2] = 0x40; tmp_data[1] = 0x02; tmp_data[0] = 0xFF;
+		himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+		tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x28;
+		if (page_prog_start < 0x100)
+		{
+			tmp_data[3] = 0x00;
+			tmp_data[2] = 0x00;
+			tmp_data[1] = 0x00;
+			tmp_data[0] = (uint8_t)page_prog_start;
+		}
+		else if (page_prog_start >= 0x100 && page_prog_start < 0x10000)
+		{
+			tmp_data[3] = 0x00;
+			tmp_data[2] = 0x00;
+			tmp_data[1] = (uint8_t)(page_prog_start >> 8);
+			tmp_data[0] = (uint8_t)page_prog_start;
+		}
+		else if (page_prog_start >= 0x10000 && page_prog_start < 0x1000000)
+		{
+			tmp_data[3] = 0x00;
+			tmp_data[2] = (uint8_t)(page_prog_start >> 16);
+			tmp_data[1] = (uint8_t)(page_prog_start >> 8);
+			tmp_data[0] = (uint8_t)page_prog_start;
+		}
+		himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+		tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x24;
+		tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x3B;
+		himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+		//==================================
+		// AHB_I2C Burst Read
+		// Set SPI data register : 0x8000_002C ==> 0x00
+		//==================================
+		out_buffer[0] = 0x2C;
+		out_buffer[1] = 0x00;
+		out_buffer[2] = 0x00;
+		out_buffer[3] = 0x80;
+		i2c_himax_write(client, 0x00 ,out_buffer, 4, 3);
+
+		//==================================
+		// Read access : 0x0C ==> 0x00
+		//==================================
+		out_buffer[0] = 0x00;
+		i2c_himax_write(client, 0x0C ,out_buffer, 1, 3);
+
+		//==================================
+		// Read 128 bytes two times
+		//==================================
+		i2c_himax_read(client, 0x08 ,in_buffer, 128, 3);
+		for (i = 0; i < 128; i++)
+			flash_buffer[i + page_prog_start] = in_buffer[i];
+
+		i2c_himax_read(client, 0x08 ,in_buffer, 128, 3);
+		for (i = 0; i < 128; i++)
+			flash_buffer[(i + 128) + page_prog_start] = in_buffer[i];
+
+		I("%s:Verify Progress: %x\n", __func__, page_prog_start);
+	}
+
+/*=============Dump Flash End=============*/
+		//msleep(100);
+		/*
+		for( i=0 ; i<8 ;i++)
+		{
+			for(j=0 ; j<64 ; j++)
+			{
+				setFlashDumpProgress(i*32 + j);
+			}
+		}
+		*/
+	himax_sense_on(client, 0x01);
+
+	return;
+
+}
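+
+/*
+ * Flow of the dump above: after sense-off, the SPI format register
+ * 0x8000_0010 is set to 0x0002_0780 once, then each 256-byte page is
+ * fetched by programming 0x8000_0020 (page-read control), 0x8000_0028
+ * (flash address) and 0x8000_0024 (read command 0x3B), and pulling the
+ * page back through the AHB data port (0x08) as two 128-byte reads.
+ */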
+
+int himax_chip_self_test(struct i2c_client *client)
+{
+	uint8_t tmp_addr[4];
+	uint8_t tmp_data[128];
+	int pf_value=0x00;
+	uint8_t test_result_id = 0;
+	int j;
+
+	memset(tmp_addr, 0x00, sizeof(tmp_addr));
+	memset(tmp_data, 0x00, sizeof(tmp_data));
+
+	himax_interface_on(client);
+	himax_sense_off(client);
+
+	//Set criteria
+	himax_burst_enable(client, 1);
+
+	tmp_addr[3] = 0x90; tmp_addr[2] = 0x08; tmp_addr[1] = 0x80; tmp_addr[0] = 0x94;
+	tmp_data[3] = 0x14; tmp_data[2] = 0xC8; tmp_data[1] = 0x00; tmp_data[0] = 0x00;
+	tmp_data[7] = 0x13; tmp_data[6] = 0x60; tmp_data[5] = 0x0A; tmp_data[4] = 0x99;
+
+	himax_flash_write_burst_lenth(client, tmp_addr, tmp_data, 8);
+
+	//start selftest
+	// 0x9008_805C ==> 0x0000_0001
+	tmp_addr[3] = 0x90; tmp_addr[2] = 0x08; tmp_addr[1] = 0x80; tmp_addr[0] = 0x5C;
+	tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x01;
+	himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+	himax_sense_on(client, 1);
+
+	msleep(2000);
+
+	himax_sense_off(client);
+	msleep(20);
+
+	//=====================================
+	// Read test result ID : 0x9008_8078 ==> 0xA/0xB/0xC/0xF
+	//=====================================
+	tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x00;
+	tmp_addr[3] = 0x90; tmp_addr[2] = 0x08; tmp_addr[1] = 0x80; tmp_addr[0] = 0x78;
+	himax_register_read(client, tmp_addr, 1, tmp_data);
+
+	test_result_id = tmp_data[0];
+
+	I("%s: check test result, test_result_id=%x, test_result=%x\n", __func__
+	,test_result_id,tmp_data[0]);
+
+	if (test_result_id==0xF) {
+		I("[Himax]: self-test pass\n");
+		pf_value = 0x1;
+	} else {
+		E("[Himax]: self-test fail\n");
+		pf_value = 0x0;
+	}
+	himax_burst_enable(client, 1);
+
+	for (j = 0; j < 10; j++) {
+		tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x00;
+		tmp_addr[3] = 0x90; tmp_addr[2] = 0x06; tmp_addr[1] = 0x00; tmp_addr[0] = 0x0C;
+		himax_register_read(client, tmp_addr, 1, tmp_data);
+		I("[Himax]: 9006000C = %d\n", tmp_data[0]);
+		if (tmp_data[0] != 0) {
+			tmp_data[3] = 0x90; tmp_data[2] = 0x06; tmp_data[1] = 0x00; tmp_data[0] = 0x00;
+			if (i2c_himax_write(client, 0x00, tmp_data, 4, HIMAX_I2C_RETRY_TIMES) < 0) {
+				E("%s: i2c access fail!\n", __func__);
+			}
+			tmp_data[0] = 0x00;
+			if (i2c_himax_write(client, 0x0C, tmp_data, 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+				E("%s: i2c access fail!\n", __func__);
+			}
+			i2c_himax_read(client, 0x08, tmp_data, 124, HIMAX_I2C_RETRY_TIMES);
+		} else {
+			break;
+		}
+	}
+
+	himax_sense_on(client, 1);
+	msleep(120);
+
+	return pf_value;
+}
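+
+/*
+ * Self-test sequence above: write the criteria to 0x9008_8094, start the
+ * test with 0x9008_805C = 1, sense on and wait 2 s, then read the result
+ * ID from 0x9008_8078 (0xF means pass).  The trailing loop polls
+ * 0x9006_000C up to ten times and drains 124 bytes from the AHB data port
+ * while it reads back non-zero.
+ */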
+
+void himax_set_HSEN_enable(struct i2c_client *client, uint8_t HSEN_enable)
+{
+	uint8_t tmp_addr[4];
+	uint8_t tmp_data[4];
+
+	himax_burst_enable(client, 0);
+	tmp_addr[3] = 0x90; tmp_addr[2] = 0x08; tmp_addr[1] = 0x80; tmp_addr[0] = 0x50;
+	tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = HSEN_enable;
+	himax_flash_write_burst(client, tmp_addr, tmp_data);
+}
+void himax_get_HSEN_enable(struct i2c_client *client,uint8_t *tmp_data)
+{
+	uint8_t tmp_addr[4];
+
+	tmp_addr[3] = 0x90; tmp_addr[2] = 0x08; tmp_addr[1] = 0x80; tmp_addr[0] = 0x50;
+	himax_register_read(client, tmp_addr, 1, tmp_data);
+}
+
+void himax_set_SMWP_enable(struct i2c_client *client, uint8_t SMWP_enable)
+{
+	uint8_t tmp_addr[4];
+	uint8_t tmp_data[4];
+
+	tmp_addr[3] = 0x90; tmp_addr[2] = 0x08; tmp_addr[1] = 0x80; tmp_addr[0] = 0x54;
+	tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = SMWP_enable;
+	himax_flash_write_burst(client, tmp_addr, tmp_data);
+}
+
+void himax_get_SMWP_enable(struct i2c_client *client,uint8_t *tmp_data)
+{
+	uint8_t tmp_addr[4];
+
+	tmp_addr[3] = 0x90; tmp_addr[2] = 0x08; tmp_addr[1] = 0x80; tmp_addr[0] = 0x54;
+	himax_register_read(client, tmp_addr, 1, tmp_data);
+}
+
+int himax_burst_enable(struct i2c_client *client, uint8_t auto_add_4_byte)
+{
+	uint8_t tmp_data[4];
+
+	tmp_data[0] = 0x31;
+	if ( i2c_himax_write(client, 0x13 ,tmp_data, 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		return -EBUSY;
+	}
+	
+	tmp_data[0] = (0x10 | auto_add_4_byte);
+	if ( i2c_himax_write(client, 0x0D ,tmp_data, 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		return -EBUSY;
+	}
+	return 0;
+
+}
+
+void himax_register_read(struct i2c_client *client, uint8_t *read_addr, int read_length, uint8_t *read_data)
+{
+	uint8_t tmp_data[4];
+	int i = 0;
+	int address = 0;
+
+	if(read_length>256)
+	{
+		E("%s: read len over 256!\n", __func__);
+		return;
+	}
+	if (read_length > 1)
+		himax_burst_enable(client, 1);
+	else
+		himax_burst_enable(client, 0);
+
+	address = (read_addr[3] << 24) + (read_addr[2] << 16) + (read_addr[1] << 8) + read_addr[0];
+	i = address;
+	tmp_data[0] = (uint8_t)i;
+	tmp_data[1] = (uint8_t)(i >> 8);
+	tmp_data[2] = (uint8_t)(i >> 16);
+	tmp_data[3] = (uint8_t)(i >> 24);
+	if (i2c_himax_write(client, 0x00, tmp_data, 4, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		return;
+	}
+	tmp_data[0] = 0x00;
+	if (i2c_himax_write(client, 0x0C, tmp_data, 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		return;
+	}
+
+	if (i2c_himax_read(client, 0x08, read_data, read_length * 4, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		return;
+	}
+	if (read_length > 1)
+		himax_burst_enable(client, 0);
+}
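+
+/*
+ * Indirect AHB read pattern used above: the 32-bit target address is
+ * written to register 0x00 (LSB first), 0x0C arms the access, and the data
+ * comes back through port 0x08.  A minimal usage sketch, mirroring the
+ * self-test result read in himax_chip_self_test():
+ *
+ *   uint8_t addr[4] = { 0x78, 0x80, 0x08, 0x90 };  // 0x9008_8078, byte 0 = LSB
+ *   uint8_t data[4] = { 0 };
+ *
+ *   himax_register_read(client, addr, 1, data);    // data[0..3] hold the 32-bit value
+ */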
+
+void himax_flash_read(struct i2c_client *client, uint8_t *reg_byte, uint8_t *read_data)
+{
+    uint8_t tmpbyte[2];
+    
+    if ( i2c_himax_write(client, 0x00 ,&reg_byte[0], 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		return;
+	}
+
+	if ( i2c_himax_write(client, 0x01 ,&reg_byte[1], 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		return;
+	}
+
+	if ( i2c_himax_write(client, 0x02 ,&reg_byte[2], 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		return;
+	}
+
+	if ( i2c_himax_write(client, 0x03 ,&reg_byte[3], 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		return;
+	}
+
+    tmpbyte[0] = 0x00;
+    if ( i2c_himax_write(client, 0x0C ,&tmpbyte[0], 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		return;
+	}
+
+	if ( i2c_himax_read(client, 0x08 ,&read_data[0], 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		return;
+	}
+
+	if ( i2c_himax_read(client, 0x09 ,&read_data[1], 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		return;
+	}
+
+	if ( i2c_himax_read(client, 0x0A ,&read_data[2], 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		return;
+	}
+
+	if ( i2c_himax_read(client, 0x0B ,&read_data[3], 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		return;
+	}
+
+	if ( i2c_himax_read(client, 0x18 ,&tmpbyte[0], 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		return;
+	}// No bus request
+
+	if ( i2c_himax_read(client, 0x0F ,&tmpbyte[1], 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		return;
+	}// idle state
+
+}
+
+void himax_flash_write_burst(struct i2c_client *client, uint8_t * reg_byte, uint8_t * write_data)
+{
+    uint8_t data_byte[8];
+	int i = 0, j = 0;
+
+     for (i = 0; i < 4; i++)
+     { 
+         data_byte[i] = reg_byte[i];
+     }
+     for (j = 4; j < 8; j++)
+     {
+         data_byte[j] = write_data[j-4];
+     }
+	 
+	 if ( i2c_himax_write(client, 0x00 ,data_byte, 8, HIMAX_I2C_RETRY_TIMES) < 0) {
+		 E("%s: i2c access fail!\n", __func__);
+		 return;
+	 }
+
+}
+
+int himax_flash_write_burst_lenth(struct i2c_client *client, uint8_t *reg_byte, uint8_t *write_data, int length)
+{
+    uint8_t data_byte[256];
+	int i = 0, j = 0;
+
+    for (i = 0; i < 4; i++)
+    {
+        data_byte[i] = reg_byte[i];
+    }
+    for (j = 4; j < length + 4; j++)
+    {
+        data_byte[j] = write_data[j - 4];
+    }
+   
+	if ( i2c_himax_write(client, 0x00 ,data_byte, length + 4, HIMAX_I2C_RETRY_TIMES) < 0) {
+		 E("%s: i2c access fail!\n", __func__);
+		 return -EBUSY;
+	}
+
+    return 0;
+}
+
+int himax_register_write(struct i2c_client *client, uint8_t *write_addr, int write_length, uint8_t *write_data)
+{
+	int i =0, address = 0;
+	int ret = 0;
+
+	address = (write_addr[3] << 24) + (write_addr[2] << 16) + (write_addr[1] << 8) + write_addr[0];
+
+	for (i = address; i < address + write_length * 4; i = i + 4)
+	{
+		if (write_length > 1)
+		{
+			ret = himax_burst_enable(client, 1);
+			if(ret)
+				return ret;
+		}
+		else
+		{
+			ret = himax_burst_enable(client, 0);
+			if(ret)
+				return ret;
+		}
+	ret = himax_flash_write_burst_lenth(client, write_addr, write_data, write_length * 4);
+	if(ret < 0)
+		return ret;
+	}
+
+	return 0;
+}
+
+void himax_sense_off(struct i2c_client *client)
+{
+	uint8_t wdt_off = 0x00;
+	uint8_t tmp_addr[4];
+	uint8_t tmp_data[5];	
+
+	himax_burst_enable(client, 0);
+
+	while(wdt_off == 0x00)
+	{
+		// 0x9000_800C ==> 0x0000_AC53
+		tmp_addr[3] = 0x90; tmp_addr[2] = 0x00; tmp_addr[1] = 0x80; tmp_addr[0] = 0x0C;
+		tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0xAC; tmp_data[0] = 0x53;
+		himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+		//=====================================
+    // Read Watch Dog disable password : 0x9000_800C ==> 0x0000_AC53
+    //=====================================
+		tmp_addr[3] = 0x90; tmp_addr[2] = 0x00; tmp_addr[1] = 0x80; tmp_addr[0] = 0x0C;
+		himax_register_read(client, tmp_addr, 1, tmp_data);
+		
+		//Check WDT
+		if (tmp_data[0] == 0x53 && tmp_data[1] == 0xAC && tmp_data[2] == 0x00 && tmp_data[3] == 0x00)
+			wdt_off = 0x01;
+		else
+			wdt_off = 0x00;
+	}
+
+	// VCOM		//0x9008_806C ==> 0x0000_0001
+	tmp_addr[3] = 0x90; tmp_addr[2] = 0x08; tmp_addr[1] = 0x80; tmp_addr[0] = 0x6C;
+	tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x01;
+	himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+	msleep(20);
+
+	// 0x9000_0010 ==> 0x0000_00DA
+	tmp_addr[3] = 0x90; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x10;
+	tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0xDA;
+	himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+	//=====================================
+	// Read CPU clock off password : 0x9000_0010 ==> 0x0000_00DA
+	//=====================================
+	tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x00;
+	tmp_addr[3] = 0x90; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x10;
+	himax_register_read(client, tmp_addr, 1, tmp_data);
+	I("%s: CPU clock off password data[0]=%x data[1]=%x data[2]=%x data[3]=%x\n", __func__
+		,tmp_data[0],tmp_data[1],tmp_data[2],tmp_data[3]);
+
+}
+
+void himax_interface_on(struct i2c_client *client)
+{
+	uint8_t tmp_addr[4];
+	uint8_t tmp_data[5];
+
+	//===========================================
+	//  Any Cmd for interface on : 0x9000_0000 ==> 0x0000_0000
+	//===========================================
+	tmp_addr[3] = 0x90; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x00;
+	himax_flash_read(client, tmp_addr, tmp_data); //avoid RD/WR fail
+}
+
+bool wait_wip(struct i2c_client *client, int Timing)
+{
+	uint8_t tmp_addr[4];
+	uint8_t tmp_data[4];
+	uint8_t in_buffer[10];
+	//uint8_t out_buffer[20];
+	int retry_cnt = 0;
+
+	//=====================================
+	// SPI Transfer Format : 0x8000_0010 ==> 0x0002_0780
+	//=====================================
+	tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x10;
+	tmp_data[3] = 0x00; tmp_data[2] = 0x02; tmp_data[1] = 0x07; tmp_data[0] = 0x80;
+	himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+	in_buffer[0] = 0x01;
+
+	do
+	{
+		//=====================================
+		// SPI Transfer Control : 0x8000_0020 ==> 0x4200_0003
+		//=====================================
+		tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x20;
+		tmp_data[3] = 0x42; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x03;
+		himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+		//=====================================
+		// SPI Command : 0x8000_0024 ==> 0x0000_0005
+		// read 0x8000_002C for 0x01, means wait success
+		//=====================================
+		tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x24;
+		tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x05;
+		himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+		in_buffer[0] = in_buffer[1] = in_buffer[2] = in_buffer[3] = 0xFF;
+		tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x2C;
+		himax_register_read(client, tmp_addr, 1, in_buffer);
+		
+		if ((in_buffer[0] & 0x01) == 0x00)
+			return true;
+
+		retry_cnt++;
+		
+		if (in_buffer[0] != 0x00 || in_buffer[1] != 0x00 || in_buffer[2] != 0x00 || in_buffer[3] != 0x00)
+        	I("%s:Wait wip retry_cnt:%d, buffer[0]=%d, buffer[1]=%d, buffer[2]=%d, buffer[3]=%d \n", __func__, 
+            retry_cnt,in_buffer[0],in_buffer[1],in_buffer[2],in_buffer[3]);
+
+		if (retry_cnt > 100)
+        {        	
+			E("%s: Wait wip error!\n", __func__);
+            return false;
+        }
+		msleep(Timing);
+	}while ((in_buffer[0] & 0x01) == 0x01);
+	return true;
+}
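+
+/*
+ * wait_wip() polls the flash status register: 0x8000_0020 = 0x4200_0003
+ * sets up a one-byte status transfer, 0x8000_0024 = 0x05 issues the
+ * read-status command, and the result is read back from 0x8000_002C.
+ * It returns true once bit 0 (write-in-progress) clears, sleeping Timing
+ * ms between polls and giving up after 100 retries.
+ */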
+
+void himax_sense_on(struct i2c_client *client, uint8_t FlashMode)
+{
+	uint8_t tmp_addr[4];
+	uint8_t tmp_data[128];
+
+	himax_interface_on(client);
+	himax_burst_enable(client, 0);
+	//CPU reset
+	// 0x9000_0014 ==> 0x0000_00CA
+	tmp_addr[3] = 0x90; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x14;
+	tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0xCA;
+	himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+	//=====================================
+	// Read pull low CPU reset signal : 0x9000_0014 ==> 0x0000_00CA
+	//=====================================
+	tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x00;
+	tmp_addr[3] = 0x90; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x14;
+	himax_register_read(client, tmp_addr, 1, tmp_data);
+
+	I("%s: check pull low CPU reset signal  data[0]=%x data[1]=%x data[2]=%x data[3]=%x\n", __func__
+	,tmp_data[0],tmp_data[1],tmp_data[2],tmp_data[3]);
+
+	// 0x9000_0014 ==> 0x0000_0000
+	tmp_addr[3] = 0x90; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x14;
+	tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x00;
+	himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+	//=====================================
+	// Read revert pull low CPU reset signal : 0x9000_0014 ==> 0x0000_0000
+	//=====================================
+	tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x00;
+	tmp_addr[3] = 0x90; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x14;
+	himax_register_read(client, tmp_addr, 1, tmp_data);
+
+	I("%s: revert pull low CPU reset signal data[0]=%x data[1]=%x data[2]=%x data[3]=%x\n", __func__
+	,tmp_data[0],tmp_data[1],tmp_data[2],tmp_data[3]);
+
+	//=====================================
+  // Reset TCON
+  //=====================================
+  tmp_addr[3] = 0x80; tmp_addr[2] = 0x02; tmp_addr[1] = 0x01; tmp_addr[0] = 0xE0;
+  tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x00;
+  himax_flash_write_burst(client, tmp_addr, tmp_data);
+	usleep_range(10000, 20000);
+  tmp_addr[3] = 0x80; tmp_addr[2] = 0x02; tmp_addr[1] = 0x01; tmp_addr[0] = 0xE0;
+  tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x01;
+  himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+	if (FlashMode == 0x00)	//SRAM
+	{
+		//=====================================
+		//			Re-map
+		//=====================================
+		tmp_addr[3] = 0x90; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x00;
+		tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0xF1;
+		himax_flash_write_burst_lenth(client, tmp_addr, tmp_data, 4);
+		I("%s:83100_Chip_Re-map ON\n", __func__);
+	}
+	else
+	{
+		//=====================================
+		//			Re-map off
+		//=====================================
+		tmp_addr[3] = 0x90; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x00;
+		tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x00;
+		himax_flash_write_burst_lenth(client, tmp_addr, tmp_data, 4);
+		I("%s:83100_Chip_Re-map OFF\n", __func__);
+	}
+	//=====================================
+	//			CPU clock on
+	//=====================================
+	tmp_addr[3] = 0x90; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x10;
+	tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x00;
+	himax_flash_write_burst_lenth(client, tmp_addr, tmp_data, 4);
+
+}
+
+void himax_chip_erase(struct i2c_client *client)
+{
+	uint8_t tmp_addr[4];
+	uint8_t tmp_data[4];
+
+	himax_burst_enable(client, 0);
+
+	//=====================================
+	// SPI Transfer Format : 0x8000_0010 ==> 0x0002_0780
+	//=====================================
+	tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x10;
+	tmp_data[3] = 0x00; tmp_data[2] = 0x02; tmp_data[1] = 0x07; tmp_data[0] = 0x80;
+	himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+	//=====================================
+	// Chip Erase
+	// Write Enable : 1. 0x8000_0020 ==> 0x4700_0000
+	//				  2. 0x8000_0024 ==> 0x0000_0006
+	//=====================================
+	tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x20;
+	tmp_data[3] = 0x47; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x00;
+	himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+	tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x24;
+	tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x06;
+	himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+	//=====================================
+	// Chip Erase
+	// Erase Command : 0x8000_0024 ==> 0x0000_00C7
+	//=====================================
+	tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x24;
+	tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0xC7;
+	himax_flash_write_burst(client, tmp_addr, tmp_data);
+	
+	msleep(2000);
+	
+	if (!wait_wip(client, 100))
+		E("%s:83100_Chip_Erase Fail\n", __func__);
+
+}
+
+bool himax_block_erase(struct i2c_client *client)
+{
+	uint8_t tmp_addr[4];
+	uint8_t tmp_data[4];
+
+	himax_burst_enable(client, 0);
+
+	//=====================================
+	// SPI Transfer Format : 0x8000_0010 ==> 0x0002_0780
+	//=====================================
+	tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x10;
+	tmp_data[3] = 0x00; tmp_data[2] = 0x02; tmp_data[1] = 0x07; tmp_data[0] = 0x80;
+	himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+	//=====================================
+	// Chip Erase
+	// Write Enable : 1. 0x8000_0020 ==> 0x4700_0000
+	//				  2. 0x8000_0024 ==> 0x0000_0006
+	//=====================================
+	tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x20;
+	tmp_data[3] = 0x47; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x00;
+	himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+	tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x24;
+	tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x06;
+	himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+	//=====================================
+	// Block Erase
+	// Erase Command : 0x8000_0028 ==> 0x0000_0000 //SPI addr
+	//				   0x8000_0020 ==> 0x6700_0000 //control
+	//				   0x8000_0024 ==> 0x0000_0052 //BE
+	//=====================================
+	tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x28;
+	tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x00;
+	himax_flash_write_burst(client, tmp_addr, tmp_data);
+	
+	tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x20;
+	tmp_data[3] = 0x67; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x00;
+	himax_flash_write_burst(client, tmp_addr, tmp_data);
+	
+	tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x24;
+	tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x52;
+	himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+	msleep(1000);
+
+	if (!wait_wip(client, 100))
+	{
+		E("%s:83100_Erase Fail\n", __func__);
+		return false;
+	}
+	else
+	{
+		return true;
+	}
+
+}
+
+bool himax_sector_erase(struct i2c_client *client, int start_addr)
+{
+	uint8_t tmp_addr[4];
+	uint8_t tmp_data[4];
+	int page_prog_start = 0;
+
+	himax_burst_enable(client, 0);
+
+	//=====================================
+	// SPI Transfer Format : 0x8000_0010 ==> 0x0002_0780
+	//=====================================
+	tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x10;
+	tmp_data[3] = 0x00; tmp_data[2] = 0x02; tmp_data[1] = 0x07; tmp_data[0] = 0x80;
+	himax_flash_write_burst(client, tmp_addr, tmp_data);
+	for (page_prog_start = start_addr; page_prog_start < start_addr + 0x0F000; page_prog_start = page_prog_start + 0x1000)
+		{
+			//=====================================
+			// Chip Erase
+			// Write Enable : 1. 0x8000_0020 ==> 0x4700_0000
+			//				  2. 0x8000_0024 ==> 0x0000_0006
+			//=====================================
+			tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x20;
+			tmp_data[3] = 0x47; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x00;
+			himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+			tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x24;
+			tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x06;
+			himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+			//=====================================
+			// Sector Erase
+			// Erase Command : 0x8000_0028 ==> 0x0000_0000 //SPI addr
+			// 				0x8000_0020 ==> 0x6700_0000 //control
+			// 				0x8000_0024 ==> 0x0000_0020 //SE
+			//=====================================
+			tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x28;
+			if (page_prog_start < 0x100)
+			{
+			 tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = (uint8_t)page_prog_start;
+			}
+			else if (page_prog_start >= 0x100 && page_prog_start < 0x10000)
+			{
+			 tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = (uint8_t)(page_prog_start >> 8); tmp_data[0] = (uint8_t)page_prog_start;
+			}
+			else if (page_prog_start >= 0x10000 && page_prog_start < 0x1000000)
+			{
+			 tmp_data[3] = 0x00; tmp_data[2] = (uint8_t)(page_prog_start >> 16); tmp_data[1] = (uint8_t)(page_prog_start >> 8); tmp_data[0] = (uint8_t)page_prog_start;
+			}
+			himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+			tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x20;
+			tmp_data[3] = 0x67; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x00;
+			himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+			tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x24;
+			tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x20;
+			himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+			msleep(200);
+
+			if (!wait_wip(client, 100))
+			{
+				E("%s:83100_Erase Fail\n", __func__);
+				return false;
+			}
+		}	
+		return true;
+}
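+
+/*
+ * Note on the erase helpers: both share the write-enable preamble
+ * (0x8000_0020 = 0x4700_0000, 0x8000_0024 = 0x06).  himax_block_erase()
+ * then issues a single 0x52 block erase at address 0, while
+ * himax_sector_erase() steps through 4k sectors (0x1000) over a 0x0F000
+ * span from start_addr, issuing a 0x20 sector erase and waiting for WIP
+ * after each one.
+ */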
+
+void himax_sram_write(struct i2c_client *client, uint8_t *FW_content)
+{
+	int i = 0;
+	uint8_t tmp_addr[4];
+	uint8_t tmp_data[128];
+	int FW_length = 0x4000; // 0x4000 = 16K bin file
+	
+	//himax_sense_off(client);
+
+	for (i = 0; i < FW_length; i = i + 128) 
+	{
+		himax_burst_enable(client, 1);
+
+		if (i < 0x100)
+		{
+			tmp_addr[3] = 0x08; 
+			tmp_addr[2] = 0x00; 
+			tmp_addr[1] = 0x00; 
+			tmp_addr[0] = i;
+		}
+		else if (i >= 0x100 && i < 0x10000)
+		{
+			tmp_addr[3] = 0x08; 
+			tmp_addr[2] = 0x00; 
+			tmp_addr[1] = (i >> 8); 
+			tmp_addr[0] = i;
+		}
+
+		memcpy(&tmp_data[0], &FW_content[i], 128);
+		himax_flash_write_burst_lenth(client, tmp_addr, tmp_data, 128);
+
+	}
+
+	if (!wait_wip(client, 100))
+		E("%s:83100_Sram_Write Fail\n", __func__);
+}
+
+bool himax_sram_verify(struct i2c_client *client, uint8_t *FW_File, int FW_Size)
+{
+	int i = 0;
+	uint8_t out_buffer[20];
+	uint8_t in_buffer[128];
+	uint8_t *get_fw_content;
+
+	get_fw_content = kzalloc(0x4000*sizeof(uint8_t), GFP_KERNEL);
+	if (!get_fw_content)
+		return false;
+
+	for (i = 0; i < 0x4000; i = i + 128)
+	{
+		himax_burst_enable(client, 1);
+
+		//==================================
+		//	AHB_I2C Burst Read
+		//==================================
+		if (i < 0x100)
+		{
+			out_buffer[3] = 0x08; 
+			out_buffer[2] = 0x00; 
+			out_buffer[1] = 0x00; 
+			out_buffer[0] = i;
+		}
+		else if (i >= 0x100 && i < 0x10000)
+		{
+			out_buffer[3] = 0x08; 
+			out_buffer[2] = 0x00; 
+			out_buffer[1] = (i >> 8); 
+			out_buffer[0] = i;
+		}
+
+		if (i2c_himax_write(client, 0x00, out_buffer, 4, HIMAX_I2C_RETRY_TIMES) < 0) {
+			E("%s: i2c access fail!\n", __func__);
+			kfree(get_fw_content);
+			return false;
+		}
+
+		out_buffer[0] = 0x00;
+		if (i2c_himax_write(client, 0x0C, out_buffer, 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+			E("%s: i2c access fail!\n", __func__);
+			kfree(get_fw_content);
+			return false;
+		}
+
+		if (i2c_himax_read(client, 0x08, in_buffer, 128, HIMAX_I2C_RETRY_TIMES) < 0) {
+			E("%s: i2c access fail!\n", __func__);
+			kfree(get_fw_content);
+			return false;
+		}
+		memcpy(&get_fw_content[i], &in_buffer[0], 128);
+	}
+
+	for (i = 0; i < FW_Size; i++)
+	{
+		if (FW_File[i] != get_fw_content[i])
+		{
+			E("%s: fail! SRAM[%x]=%x NOT CRC_ifile=%x\n", __func__, i, get_fw_content[i], FW_File[i]);
+			kfree(get_fw_content);
+			return false;
+		}
+	}
+
+	kfree(get_fw_content);
+
+	return true;
+}
+
+void himax_flash_programming(struct i2c_client *client, uint8_t *FW_content, int FW_Size)
+{
+	int page_prog_start = 0;
+	int program_length = 48;
+	int i = 0, j = 0, k = 0;
+	uint8_t tmp_addr[4];
+	uint8_t tmp_data[4];
+	uint8_t buring_data[256];	// I2C transfer buffer for programming data
+					// first 4 bytes hold the 0x8000_002C address header
+
+	//himax_interface_on(client);
+	himax_burst_enable(client, 0);
+
+	//=====================================
+	// SPI Transfer Format : 0x8000_0010 ==> 0x0002_0780
+	//=====================================
+	tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x10;
+	tmp_data[3] = 0x00; tmp_data[2] = 0x02; tmp_data[1] = 0x07; tmp_data[0] = 0x80;
+	himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+	for (page_prog_start = 0; page_prog_start < FW_Size; page_prog_start = page_prog_start + 256)
+	{
+		//msleep(5);
+		//=====================================
+		// Write Enable : 1. 0x8000_0020 ==> 0x4700_0000
+		//				  2. 0x8000_0024 ==> 0x0000_0006
+		//=====================================
+		tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x20;
+		tmp_data[3] = 0x47; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x00;
+		himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+		tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x24;
+		tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x06;
+		himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+		//=================================
+		// SPI Transfer Control
+		// Set 256 bytes page write : 0x8000_0020 ==> 0x610F_F000
+		// Set read start address	: 0x8000_0028 ==> 0x0000_0000			
+		//=================================
+		tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x20;
+		tmp_data[3] = 0x61; tmp_data[2] = 0x0F; tmp_data[1] = 0xF0; tmp_data[0] = 0x00;
+		// data bytes should be 0x6100_0000 + ((word_number)*4-1)*4096 = 0x6100_0000 + 0xFF000 = 0x610F_F000
+		// Programmable size = 1 page = 256 bytes, word_number = 256 byte / 4 = 64
+		himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+		tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x28;
+		//tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x00; // Flash start address 1st : 0x0000_0000
+
+		if (page_prog_start < 0x100)
+		{
+			tmp_data[3] = 0x00; 
+			tmp_data[2] = 0x00; 
+			tmp_data[1] = 0x00; 
+			tmp_data[0] = (uint8_t)page_prog_start;
+		}
+		else if (page_prog_start >= 0x100 && page_prog_start < 0x10000)
+		{
+			tmp_data[3] = 0x00; 
+			tmp_data[2] = 0x00; 
+			tmp_data[1] = (uint8_t)(page_prog_start >> 8); 
+			tmp_data[0] = (uint8_t)page_prog_start;
+		}
+		else if (page_prog_start >= 0x10000 && page_prog_start < 0x1000000)
+		{
+			tmp_data[3] = 0x00; 
+			tmp_data[2] = (uint8_t)(page_prog_start >> 16); 
+			tmp_data[1] = (uint8_t)(page_prog_start >> 8); 
+			tmp_data[0] = (uint8_t)page_prog_start;
+		}
+		
+		himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+
+		//=================================
+		// Send 16 bytes data : 0x8000_002C ==> 16 bytes data	  
+		//=================================
+		buring_data[0] = 0x2C;
+		buring_data[1] = 0x00;
+		buring_data[2] = 0x00;
+		buring_data[3] = 0x80;
+		
+		for (i = page_prog_start, j = 0; i < page_prog_start + 16; i++, j++)	// copy the first 16 bytes of this page from the bin file
+		{
+			buring_data[j + 4] = FW_content[i];
+		}
+
+
+		if ( i2c_himax_write(client, 0x00 ,buring_data, 20, HIMAX_I2C_RETRY_TIMES) < 0) {
+			E("%s: i2c access fail!\n", __func__);
+			return;
+		}
+		//=================================
+		// Write command : 0x8000_0024 ==> 0x0000_0002
+		//=================================
+		tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x24;
+		tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x02;
+		himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+		//=================================
+		// Send 240 bytes data : 0x8000_002C ==> 240 bytes data	 
+		//=================================
+
+		for (j = 0; j < 5; j++)
+		{
+			for (i = (page_prog_start + 16 + (j * 48)), k = 0; i < (page_prog_start + 16 + (j * 48)) + program_length; i++, k++)   /// <------ bin file
+			{
+				buring_data[k+4] = FW_content[i];
+			}
+
+			if ( i2c_himax_write(client, 0x00 ,buring_data, program_length+4, HIMAX_I2C_RETRY_TIMES) < 0) {
+				E("%s: i2c access fail!\n", __func__);
+				return;
+			}
+
+		}
+
+		if (!wait_wip(client, 1))
+			E("%s:83100_Flash_Programming Fail\n", __func__);
+	}
+}
+
+bool himax_check_chip_version(struct i2c_client *client)
+{
+	uint8_t tmp_addr[4];
+	uint8_t tmp_data[4];
+	uint8_t ret_data = 0x00;
+	int i = 0;
+	int ret = 0;
+	himax_sense_off(client);
+	for (i = 0; i < 5; i++)
+	{
+		// 1. Set DDREG_Req = 1 (0x9000_0020 = 0x0000_0001) (Lock register R/W from driver)
+		tmp_addr[3] = 0x90; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x20;
+		tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x01;
+		ret = himax_register_write(client, tmp_addr, 1, tmp_data);
+		if(ret)
+			return false;
+
+		// 2. Set bank as 0 (0x8001_BD01 = 0x0000_0000)
+		tmp_addr[3] = 0x80; tmp_addr[2] = 0x01; tmp_addr[1] = 0xBD; tmp_addr[0] = 0x01;
+		tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x00;
+		ret = himax_register_write(client, tmp_addr, 1, tmp_data);
+		if(ret)
+			return false;
+
+		// 3. Read driver ID register RF4H 1 byte (0x8001_F401)
+		//	  Driver register RF4H 1 byte value = 0x84H, read back value will become 0x84848484
+		tmp_addr[3] = 0x80; tmp_addr[2] = 0x01; tmp_addr[1] = 0xF4; tmp_addr[0] = 0x01;
+		himax_register_read(client, tmp_addr, 1, tmp_data);
+		ret_data = tmp_data[0];
+
+		I("%s:Read driver IC ID = %X\n", __func__, ret_data);
+		if (ret_data == 0x84)
+		{
+			IC_TYPE         = HX_83100_SERIES_PWON;
+			//himax_sense_on(client, 0x01);
+			ret_data = true;
+			break;
+		}
+		else
+		{
+			ret_data = false;
+			E("%s:Read driver ID register Fail:\n", __func__);
+		}
+	}
+	// 4. After read finish, set DDREG_Req = 0 (0x9000_0020 = 0x0000_0000) (Unlock register R/W from driver)
+	tmp_addr[3] = 0x90; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x20;
+	tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x00;
+	himax_register_write(client, tmp_addr, 1, tmp_data);
+	//himax_sense_on(client, 0x01);
+	return ret_data;
+}
+
+#if 1
+int himax_check_CRC(struct i2c_client *client, int mode)
+{
+	bool burnFW_success = false;
+	uint8_t tmp_addr[4];
+	uint8_t tmp_data[4];
+	int tmp_value;
+	int CRC_value = 0;
+
+	memset(tmp_data, 0x00, sizeof(tmp_data));
+
+	if (1)
+	{
+		if(mode == fw_image_60k)
+		{
+			himax_sram_write(client, (i_TP_CRC_FW_60K));
+			burnFW_success = himax_sram_verify(client, i_TP_CRC_FW_60K, 0x4000);
+		}
+		else if(mode == fw_image_64k)
+		{
+			himax_sram_write(client, (i_TP_CRC_FW_64K));
+			burnFW_success = himax_sram_verify(client, i_TP_CRC_FW_64K, 0x4000);
+		}
+		else if(mode == fw_image_124k)
+		{
+			himax_sram_write(client, (i_TP_CRC_FW_124K));
+			burnFW_success = himax_sram_verify(client, i_TP_CRC_FW_124K, 0x4000);
+		}
+		else if(mode == fw_image_128k)
+		{
+			himax_sram_write(client, (i_TP_CRC_FW_128K));
+			burnFW_success = himax_sram_verify(client, i_TP_CRC_FW_128K, 0x4000);
+		}
+		if (burnFW_success)
+		{
+			I("%s: Start to do CRC FW mode=%d \n", __func__,mode);
+			himax_sense_on(client, 0x00);	// run CRC firmware
+
+			while(true)
+			{
+				msleep(100);
+
+				tmp_addr[3] = 0x90; 
+				tmp_addr[2] = 0x08; 
+				tmp_addr[1] = 0x80; 
+				tmp_addr[0] = 0x94;
+				himax_register_read(client, tmp_addr, 1, tmp_data);
+
+				I("%s: CRC from firmware is %x, %x, %x, %x \n", __func__,tmp_data[3],
+					tmp_data[2],tmp_data[1],tmp_data[0]);
+
+				if (tmp_data[3] != 0xFF || tmp_data[2] != 0xFF || tmp_data[1] != 0xFF || tmp_data[0] != 0xFF)
+					break;
+			}
+
+			CRC_value = tmp_data[3];
+
+			tmp_value = tmp_data[2] << 8;
+			CRC_value += tmp_value;
+
+			tmp_value = tmp_data[1] << 16;
+			CRC_value += tmp_value;
+
+			tmp_value = tmp_data[0] << 24;
+			CRC_value += tmp_value;
+
+			I("%s: CRC Value is %x \n", __func__, CRC_value);
+
+			//=====================================
+			// Close re-mapping
+			//=====================================
+			tmp_addr[3] = 0x90; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x00;
+			tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x00;
+			himax_flash_write_burst_lenth(client, tmp_addr, tmp_data, 4);
+			return CRC_value;
+		}
+		else
+		{
+			E("%s: SRAM write fail\n", __func__);
+			return 0;
+		}		
+	}
+	else
+		I("%s: NO CRC Check File \n", __func__);
+
+	return 0;
+}
+
+bool Calculate_CRC_with_AP(unsigned char *FW_content , int CRC_from_FW, int mode)
+{
+	uint8_t tmp_data[4];
+	int i, j;
+	int fw_data;
+	int fw_data_2;
+	int CRC = 0xFFFFFFFF;
+	int PolyNomial = 0x82F63B78;
+	int length = 0;
+	
+	if (mode == fw_image_128k)
+		length = 0x8000;
+	else if (mode == fw_image_124k)
+		length = 0x7C00;
+	else if (mode == fw_image_64k)
+		length = 0x4000;
+	else //if (mode == fw_image_60k)
+		length = 0x3C00;
+
+	for (i = 0; i < length; i++)
+	{
+		fw_data = FW_content[i * 4 ];
+		
+		for (j = 1; j < 4; j++)
+		{
+			fw_data_2 = FW_content[i * 4 + j];
+			fw_data += (fw_data_2) << (8 * j);
+		}
+
+		CRC = fw_data ^ CRC;
+
+		for (j = 0; j < 32; j++)
+		{
+			if ((CRC % 2) != 0)
+			{
+				CRC = ((CRC >> 1) & 0x7FFFFFFF) ^ PolyNomial;				
+			}
+			else
+			{
+				CRC = (((CRC >> 1) ^ 0x7FFFFFFF)& 0x7FFFFFFF);				
+			}
+		}
+	}
+
+	I("%s: CRC calculate from bin file is %x \n", __func__, CRC);
+
+	tmp_data[0] = (uint8_t)(CRC >> 24);
+	tmp_data[1] = (uint8_t)(CRC >> 16);
+	tmp_data[2] = (uint8_t)(CRC >> 8);
+	tmp_data[3] = (uint8_t) CRC;
+
+	CRC = tmp_data[0];
+	CRC += tmp_data[1] << 8;			
+	CRC += tmp_data[2] << 16;
+	CRC += tmp_data[3] << 24;
+
+	I("%s: CRC calculate from bin file REVERSE %x \n", __func__, CRC);
+	I("%s: CRC calculate from FW is %x \n", __func__, CRC_from_FW);
+	if (CRC_from_FW == CRC)
+		return true;
+	else
+		return false;
+}
+#endif
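+
+/*
+ * Note on the byte swap at the end of Calculate_CRC_with_AP(): the host-side
+ * CRC is byte-reversed before being compared with the value that
+ * himax_check_CRC() assembles from register 0x9008_8094.  As a worked example
+ * with a hypothetical value, CRC = 0x12345678 becomes 0x78563412 after the
+ * tmp_data[] shuffle above.
+ */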
+
+int fts_ctpm_fw_upgrade_with_sys_fs_60k(struct i2c_client *client, unsigned char *fw, int len, bool change_iref)
+{
+	int CRC_from_FW = 0;
+	int burnFW_success = 0;
+
+	if (len != 0x10000)   //64k
+	{
+		E("%s: The file size is not 64K bytes\n", __func__);
+		return false;
+	}
+	himax_sense_off(client);
+	msleep(500);
+	himax_interface_on(client);
+	if (!himax_sector_erase(client, 0x00000))
+	{
+		E("%s:Sector erase fail! Please restart the IC.\n", __func__);
+		return false;
+	}
+	himax_flash_programming(client, fw, 0x0F000);
+
+	//burnFW_success = himax_83100_Verify(fw, len);
+	//if(burnFW_success==false)
+	//	return burnFW_success;
+
+	CRC_from_FW = himax_check_CRC(client,fw_image_60k);
+	burnFW_success = Calculate_CRC_with_AP(fw, CRC_from_FW,fw_image_60k);
+	//himax_sense_on(client, 0x01);
+	return burnFW_success;
+}
+
+int fts_ctpm_fw_upgrade_with_sys_fs_64k(struct i2c_client *client, unsigned char *fw, int len, bool change_iref)
+{
+	int CRC_from_FW = 0;
+	int burnFW_success = 0;
+
+	if (len != 0x10000)   //64k
+	{
+		E("%s: The file size is not 64K bytes\n", __func__);
+		return false;
+	}
+	himax_sense_off(client);
+	msleep(500);
+	himax_interface_on(client);
+	himax_chip_erase(client);
+	himax_flash_programming(client, fw, len);
+
+	//burnFW_success = himax_83100_Verify(fw, len);
+	//if(burnFW_success==false)
+	//	return burnFW_success;
+
+	CRC_from_FW = himax_check_CRC(client,fw_image_64k);
+	burnFW_success = Calculate_CRC_with_AP(fw, CRC_from_FW,fw_image_64k);
+	//himax_sense_on(client, 0x01);
+	return burnFW_success;
+}
+
+int fts_ctpm_fw_upgrade_with_sys_fs_124k(struct i2c_client *client, unsigned char *fw, int len, bool change_iref)
+{
+	int CRC_from_FW = 0;
+	int burnFW_success = 0;
+
+	if (len != 0x20000)   //128k
+	{
+		E("%s: The file size is not 128K bytes\n", __func__);
+		return false;
+	}
+	himax_sense_off(client);
+	msleep(500);
+	himax_interface_on(client);
+	if (!himax_block_erase(client))
+	{
+		E("%s:Block erase fail! Please restart the IC.\n", __func__);
+		return false;
+	}
+
+	if (!himax_sector_erase(client, 0x10000))
+	{
+		E("%s:Sector erase fail! Please restart the IC.\n", __func__);
+		return false;
+	}
+	himax_flash_programming(client, fw, 0x1F000);
+
+
+	//burnFW_success = himax_83100_Verify(fw, len);
+	//if(burnFW_success==false)
+	//	return burnFW_success;
+
+	CRC_from_FW = himax_check_CRC(client,fw_image_124k);
+	burnFW_success = Calculate_CRC_with_AP(fw, CRC_from_FW,fw_image_124k);
+	//himax_sense_on(client, 0x01);
+	return burnFW_success;
+}
+
+int fts_ctpm_fw_upgrade_with_sys_fs_128k(struct i2c_client *client, unsigned char *fw, int len, bool change_iref)
+{
+	int CRC_from_FW = 0;
+	int burnFW_success = 0;
+
+	if (len != 0x20000)   //128k
+	{
+		E("%s: The file size is not 128K bytes\n", __func__);
+		return false;
+	}
+	himax_sense_off(client);
+	msleep(500);
+	himax_interface_on(client);
+	himax_chip_erase(client);
+
+	himax_flash_programming(client, fw, len);
+
+	//burnFW_success = himax_83100_Verify(fw, len);
+	//if(burnFW_success==false)
+	//	return burnFW_success;
+
+	CRC_from_FW = himax_check_CRC(client,fw_image_128k);
+	burnFW_success = Calculate_CRC_with_AP(fw, CRC_from_FW,fw_image_128k);
+	//himax_sense_on(client, 0x01);
+	return burnFW_success;
+}
+
+void himax_touch_information(struct i2c_client *client)
+{
+	uint8_t cmd[4];
+	uint8_t data[12] = {0};
+
+	I("%s:IC_TYPE =%d\n", __func__,IC_TYPE);
+
+	if(IC_TYPE == HX_83100_SERIES_PWON)
+	{
+		cmd[3] = 0x08; cmd[2] = 0x00; cmd[1] = 0x00; cmd[0] = 0xF8;
+		himax_register_read(client, cmd, 1, data);
+
+		ic_data->HX_RX_NUM				= data[1];
+		ic_data->HX_TX_NUM				= data[2];
+		ic_data->HX_MAX_PT				= data[3];
+
+		cmd[3] = 0x08; cmd[2] = 0x00; cmd[1] = 0x00; cmd[0] = 0xFC;
+		himax_register_read(client, cmd, 1, data);
+
+		if ((data[1] & 0x04) == 0x04) {
+			ic_data->HX_XY_REVERSE = true;
+		} else {
+			ic_data->HX_XY_REVERSE = false;
+		}
+		ic_data->HX_Y_RES = data[3]*256;
+		cmd[3] = 0x08; cmd[2] = 0x00; cmd[1] = 0x01; cmd[0] = 0x00;
+		himax_register_read(client, cmd, 1, data);
+		ic_data->HX_Y_RES = ic_data->HX_Y_RES + data[0];
+		ic_data->HX_X_RES = data[1]*256 + data[2];
+		cmd[3] = 0x08; cmd[2] = 0x00; cmd[1] = 0x00; cmd[0] = 0x8C;
+		himax_register_read(client, cmd, 1, data);
+		if ((data[0] & 0x01) == 1) {
+			ic_data->HX_INT_IS_EDGE = true;
+		} else {
+			ic_data->HX_INT_IS_EDGE = false;
+		}
+		if (ic_data->HX_RX_NUM > 40)
+			ic_data->HX_RX_NUM = 29;
+		if (ic_data->HX_TX_NUM > 20)
+			ic_data->HX_TX_NUM = 16;
+		if (ic_data->HX_MAX_PT > 10)
+			ic_data->HX_MAX_PT = 10;
+		if (ic_data->HX_Y_RES > 2000)
+			ic_data->HX_Y_RES = 1280;
+		if (ic_data->HX_X_RES > 2000)
+			ic_data->HX_X_RES = 720;
+#ifdef HX_EN_MUT_BUTTON
+		cmd[3] = 0x08; cmd[2] = 0x00; cmd[1] = 0x00; cmd[0] = 0xE8;
+		himax_register_read(client, cmd, 1, data);
+		ic_data->HX_BT_NUM				= data[3];
+#endif
+		I("%s:HX_RX_NUM =%d,HX_TX_NUM =%d,HX_MAX_PT=%d \n", __func__,ic_data->HX_RX_NUM,ic_data->HX_TX_NUM,ic_data->HX_MAX_PT);
+		I("%s:HX_XY_REVERSE =%d,HX_Y_RES =%d,HX_X_RES=%d \n", __func__,ic_data->HX_XY_REVERSE,ic_data->HX_Y_RES,ic_data->HX_X_RES);
+		I("%s:HX_INT_IS_EDGE =%d \n", __func__,ic_data->HX_INT_IS_EDGE);
+	}
+	else
+	{
+		ic_data->HX_RX_NUM				= 0;
+		ic_data->HX_TX_NUM				= 0;
+		ic_data->HX_BT_NUM				= 0;
+		ic_data->HX_X_RES				= 0;
+		ic_data->HX_Y_RES				= 0;
+		ic_data->HX_MAX_PT				= 0;
+		ic_data->HX_XY_REVERSE		= false;
+		ic_data->HX_INT_IS_EDGE		= false;
+	}
+}
+
+void himax_read_FW_ver(struct i2c_client *client)
+{
+	uint8_t cmd[4];
+	uint8_t data[64] = {0};
+
+	//=====================================
+	// Read CFG version : 0x0000_E303, 0x0000_E304
+	//=====================================
+	cmd[3] = 0x00; cmd[2] = 0x00; cmd[1] = 0xE3; cmd[0] = 0x00;
+	himax_register_read(client, cmd, 1, data);
+
+	ic_data->vendor_config_ver = data[3]<<8;
+
+	cmd[3] = 0x00; cmd[2] = 0x00; cmd[1] = 0xE3; cmd[0] = 0x04;
+	himax_register_read(client, cmd, 1, data);
+
+	ic_data->vendor_config_ver = data[0] | ic_data->vendor_config_ver;
+	I("CFG_VER : %X \n",ic_data->vendor_config_ver);
+
+	cmd[3] = 0x08; cmd[2] = 0x00; cmd[1] = 0x00; cmd[0] = 0x28;
+	himax_register_read(client, cmd, 1, data);
+
+	ic_data->vendor_fw_ver = data[0]<<8 | data[1];
+	I("FW_VER : %X \n",ic_data->vendor_fw_ver);
+
+
+	return;
+}
+
+bool himax_ic_package_check(struct i2c_client *client)
+{
+#if 0
+	uint8_t cmd[3];
+	uint8_t data[3];
+
+	memset(cmd, 0x00, sizeof(cmd));
+	memset(data, 0x00, sizeof(data));
+
+	if (i2c_himax_read(client, 0xD1, cmd, 3, HIMAX_I2C_RETRY_TIMES) < 0)
+		return false ;
+
+	if (i2c_himax_read(client, 0x31, data, 3, HIMAX_I2C_RETRY_TIMES) < 0)
+		return false;
+
+	if((data[0] == 0x85 && data[1] == 0x29))
+		{
+			IC_TYPE         = HX_85XX_F_SERIES_PWON;
+    	IC_CHECKSUM 		= HX_TP_BIN_CHECKSUM_CRC;
+    	//Himax: Set FW and CFG Flash Address                                          
+    	FW_VER_MAJ_FLASH_ADDR   = 64901;  //0xFD85                              
+    	FW_VER_MAJ_FLASH_LENG   = 1;
+    	FW_VER_MIN_FLASH_ADDR   = 64902;  //0xFD86                                     
+    	FW_VER_MIN_FLASH_LENG   = 1;
+    	CFG_VER_MAJ_FLASH_ADDR 	= 64928;   //0xFDA0         
+    	CFG_VER_MAJ_FLASH_LENG 	= 12;
+    	CFG_VER_MIN_FLASH_ADDR 	= 64940;   //0xFDAC
+    	CFG_VER_MIN_FLASH_LENG 	= 12;
+			I("Himax IC package 852x F\n");
+			}
+	if((data[0] == 0x85 && data[1] == 0x30) || (cmd[0] == 0x05 && cmd[1] == 0x85 && cmd[2] == 0x29))
+		{
+			IC_TYPE 		= HX_85XX_E_SERIES_PWON;
+			IC_CHECKSUM = HX_TP_BIN_CHECKSUM_CRC;
+			//Himax: Set FW and CFG Flash Address                                          
+			FW_VER_MAJ_FLASH_ADDR	= 133;	//0x0085                              
+			FW_VER_MAJ_FLASH_LENG	= 1;                                               
+			FW_VER_MIN_FLASH_ADDR	= 134;  //0x0086                                     
+			FW_VER_MIN_FLASH_LENG	= 1;                                                
+			CFG_VER_MAJ_FLASH_ADDR = 160;	//0x00A0         
+			CFG_VER_MAJ_FLASH_LENG = 12; 	                                 
+			CFG_VER_MIN_FLASH_ADDR = 172;	//0x00AC
+			CFG_VER_MIN_FLASH_LENG = 12;   
+			I("Himax IC package 852x E\n");
+		}
+	else if((data[0] == 0x85 && data[1] == 0x31))
+		{
+			IC_TYPE         = HX_85XX_ES_SERIES_PWON;
+    	IC_CHECKSUM 		= HX_TP_BIN_CHECKSUM_CRC;
+    	//Himax: Set FW and CFG Flash Address                                          
+    	FW_VER_MAJ_FLASH_ADDR   = 133;  //0x0085                              
+    	FW_VER_MAJ_FLASH_LENG   = 1;
+    	FW_VER_MIN_FLASH_ADDR   = 134;  //0x0086                                     
+    	FW_VER_MIN_FLASH_LENG   = 1;
+    	CFG_VER_MAJ_FLASH_ADDR 	= 160;   //0x00A0         
+    	CFG_VER_MAJ_FLASH_LENG 	= 12;
+    	CFG_VER_MIN_FLASH_ADDR 	= 172;   //0x00AC
+    	CFG_VER_MIN_FLASH_LENG 	= 12;
+			I("Himax IC package 852x ES\n");
+			}
+	else if ((data[0] == 0x85 && data[1] == 0x28) || (cmd[0] == 0x04 && cmd[1] == 0x85 &&
+		(cmd[2] == 0x26 || cmd[2] == 0x27 || cmd[2] == 0x28))) {
+		IC_TYPE                = HX_85XX_D_SERIES_PWON;
+		IC_CHECKSUM            = HX_TP_BIN_CHECKSUM_CRC;
+		//Himax: Set FW and CFG Flash Address
+		FW_VER_MAJ_FLASH_ADDR  = 133;                    // 0x0085
+		FW_VER_MAJ_FLASH_LENG  = 1;
+		FW_VER_MIN_FLASH_ADDR  = 134;                    // 0x0086
+		FW_VER_MIN_FLASH_LENG  = 1;
+		CFG_VER_MAJ_FLASH_ADDR = 160;                    // 0x00A0
+		CFG_VER_MAJ_FLASH_LENG = 12;
+		CFG_VER_MIN_FLASH_ADDR = 172;                    // 0x00AC
+		CFG_VER_MIN_FLASH_LENG = 12;
+		I("Himax IC package 852x D\n");
+	} else if ((data[0] == 0x85 && data[1] == 0x23) || (cmd[0] == 0x03 && cmd[1] == 0x85 &&
+			(cmd[2] == 0x26 || cmd[2] == 0x27 || cmd[2] == 0x28 || cmd[2] == 0x29))) {
+		IC_TYPE                = HX_85XX_C_SERIES_PWON;
+		IC_CHECKSUM            = HX_TP_BIN_CHECKSUM_SW;
+		//Himax: Set FW and CFG Flash Address
+		FW_VER_MAJ_FLASH_ADDR  = 133;                   // 0x0085
+		FW_VER_MAJ_FLASH_LENG  = 1;
+		FW_VER_MIN_FLASH_ADDR  = 134;                   // 0x0086
+		FW_VER_MIN_FLASH_LENG  = 1;
+		CFG_VER_MAJ_FLASH_ADDR = 135;                   // 0x0087
+		CFG_VER_MAJ_FLASH_LENG = 12;
+		CFG_VER_MIN_FLASH_ADDR = 147;                   // 0x0093
+		CFG_VER_MIN_FLASH_LENG = 12;
+		I("Himax IC package 852x C\n");
+	} else if ((data[0] == 0x85 && data[1] == 0x26) ||
+		   (cmd[0] == 0x02 && cmd[1] == 0x85 &&
+		   (cmd[2] == 0x19 || cmd[2] == 0x25 || cmd[2] == 0x26))) {
+		IC_TYPE                = HX_85XX_B_SERIES_PWON;
+		IC_CHECKSUM            = HX_TP_BIN_CHECKSUM_SW;
+		//Himax: Set FW and CFG Flash Address
+		FW_VER_MAJ_FLASH_ADDR  = 133;                   // 0x0085
+		FW_VER_MAJ_FLASH_LENG  = 1;
+		FW_VER_MIN_FLASH_ADDR  = 728;                   // 0x02D8
+		FW_VER_MIN_FLASH_LENG  = 1;
+		CFG_VER_MAJ_FLASH_ADDR = 692;                   // 0x02B4
+		CFG_VER_MAJ_FLASH_LENG = 3;
+		CFG_VER_MIN_FLASH_ADDR = 704;                   // 0x02C0
+		CFG_VER_MIN_FLASH_LENG = 3;
+		I("Himax IC package 852x B\n");
+	} else if ((data[0] == 0x85 && data[1] == 0x20) || (cmd[0] == 0x01 &&
+			cmd[1] == 0x85 && cmd[2] == 0x19)) {
+		IC_TYPE     = HX_85XX_A_SERIES_PWON;
+		IC_CHECKSUM = HX_TP_BIN_CHECKSUM_SW;
+		I("Himax IC package 852x A\n");
+	} else {
+		E("Himax IC package incorrect!!\n");
+	}
+#else
+	IC_TYPE         = HX_83100_SERIES_PWON;
+	IC_CHECKSUM     = HX_TP_BIN_CHECKSUM_CRC;
+	//Himax: Set FW and CFG Flash Address
+	FW_VER_MAJ_FLASH_ADDR   = 57384;   //0xE028
+	FW_VER_MAJ_FLASH_LENG   = 1;
+	FW_VER_MIN_FLASH_ADDR   = 57385;   //0xE029
+	FW_VER_MIN_FLASH_LENG   = 1;
+	CFG_VER_MAJ_FLASH_ADDR  = 58115;   //0xE303
+	CFG_VER_MAJ_FLASH_LENG  = 1;
+	CFG_VER_MIN_FLASH_ADDR  = 58116;   //0xE304
+	CFG_VER_MIN_FLASH_LENG  = 1;
+	I("Himax IC package 83100_in\n");
+
+#endif
+	return true;
+}
+
+void himax_read_event_stack(struct i2c_client *client, uint8_t *buf, uint8_t length)
+{
+	uint8_t cmd[4];
+
+	cmd[3] = 0x90; cmd[2] = 0x06; cmd[1] = 0x00; cmd[0] = 0x00;	
+	if ( i2c_himax_write(client, 0x00 ,cmd, 4, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);		
+	}
+
+	cmd[0] = 0x00;		
+	if ( i2c_himax_write(client, 0x0C ,cmd, 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);		
+	}
+
+	i2c_himax_read(client, 0x08, buf, length, HIMAX_I2C_RETRY_TIMES);
+}
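+
+/*
+ * The read above follows the AHB-to-I2C bridge sequence used throughout this
+ * file: the 4-byte AHB address (here 0x9006_0000, the event stack) is written
+ * to bridge register 0x00, register 0x0C is then written with 0x00, and the
+ * data is read back from register 0x08.  The same three-step pattern appears
+ * in himax_sram_verify() and read_event_stack().
+ */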
+
+#if 0
+static void himax_83100_Flash_Write(uint8_t * reg_byte, uint8_t * write_data)
+{
+	uint8_t tmpbyte[2];
+
+    if ( i2c_himax_write(private_ts->client, 0x00 ,&reg_byte[0], 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		return;
+	}
+
+	if ( i2c_himax_write(private_ts->client, 0x01 ,&reg_byte[1], 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		return;
+	}
+
+	if ( i2c_himax_write(private_ts->client, 0x02 ,&reg_byte[2], 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		return;
+	}
+
+	if ( i2c_himax_write(private_ts->client, 0x03 ,&reg_byte[3], 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		return;
+	}
+
+	if ( i2c_himax_write(private_ts->client, 0x04 ,&write_data[0], 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		return;
+	}
+
+	if ( i2c_himax_write(private_ts->client, 0x05 ,&write_data[1], 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		return;
+	}
+
+	if ( i2c_himax_write(private_ts->client, 0x06 ,&write_data[2], 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		return;
+	}
+
+	if ( i2c_himax_write(private_ts->client, 0x07 ,&write_data[3], 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		return;
+	}
+
+    if (isBusrtOn == false)
+    {
+        tmpbyte[0] = 0x01;
+		if ( i2c_himax_write(private_ts->client, 0x0C ,&tmpbyte[0], 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		return;
+		}
+    }
+}
+#endif
+#if 0
+static void himax_83100_Flash_Burst_Write(uint8_t * reg_byte, uint8_t * write_data)
+{
+    //uint8_t tmpbyte[2];
+	int i = 0;
+
+	if ( i2c_himax_write(private_ts->client, 0x00 ,&reg_byte[0], 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		return;
+	}
+
+	if ( i2c_himax_write(private_ts->client, 0x01 ,&reg_byte[1], 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		return;
+	}
+
+	if ( i2c_himax_write(private_ts->client, 0x02 ,&reg_byte[2], 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		return;
+	}
+
+	if ( i2c_himax_write(private_ts->client, 0x03 ,&reg_byte[3], 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		return;
+	}
+
+    // Write 256 bytes with continue burst mode
+    for (i = 0; i < 256; i = i + 4)
+    {
+		if ( i2c_himax_write(private_ts->client, 0x04 ,&write_data[i], 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+			E("%s: i2c access fail!\n", __func__);
+			return;
+		}
+
+		if ( i2c_himax_write(private_ts->client, 0x05 ,&write_data[i+1], 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+			E("%s: i2c access fail!\n", __func__);
+			return;
+		}
+
+		if ( i2c_himax_write(private_ts->client, 0x06 ,&write_data[i+2], 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+			E("%s: i2c access fail!\n", __func__);
+			return;
+		}
+
+		if ( i2c_himax_write(private_ts->client, 0x07 ,&write_data[i+3], 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+			E("%s: i2c access fail!\n", __func__);
+			return;
+		}
+    }
+
+    //if (isBusrtOn == false)
+    //{
+    //   tmpbyte[0] = 0x01;
+	//	if ( i2c_himax_write(private_ts->client, 0x0C ,&tmpbyte[0], 1, 3) < 0) {
+	//	E("%s: i2c access fail!\n", __func__);
+	//	return;
+	//	}
+    //}
+
+}
+#endif
+
+#if 0
+static bool himax_83100_Verify(uint8_t *FW_File, int FW_Size)
+{
+	uint8_t tmp_addr[4];
+	uint8_t tmp_data[4];
+	uint8_t out_buffer[20];
+	uint8_t in_buffer[260];
+
+	int fail_addr=0, fail_cnt=0;
+	int page_prog_start = 0;
+	int i = 0;
+
+	himax_interface_on(private_ts->client);
+	himax_burst_enable(private_ts->client, 0);
+
+	//=====================================
+	// SPI Transfer Format : 0x8000_0010 ==> 0x0002_0780
+	//=====================================
+	tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x10;
+	tmp_data[3] = 0x00; tmp_data[2] = 0x02; tmp_data[1] = 0x07; tmp_data[0] = 0x80;
+	himax_83100_Flash_Write(tmp_addr, tmp_data);
+
+	for (page_prog_start = 0; page_prog_start < FW_Size; page_prog_start = page_prog_start + 256)
+	{
+		//=================================
+		// SPI Transfer Control
+		// Set 256 bytes page read : 0x8000_0020 ==> 0x6940_02FF
+		// Set read start address  : 0x8000_0028 ==> 0x0000_0000
+		// Set command			   : 0x8000_0024 ==> 0x0000_003B
+		//=================================
+		tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x20;
+		tmp_data[3] = 0x69; tmp_data[2] = 0x40; tmp_data[1] = 0x02; tmp_data[0] = 0xFF;
+		himax_83100_Flash_Write(tmp_addr, tmp_data);
+
+		tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x28;
+		if (page_prog_start < 0x100)
+		{
+			tmp_data[3] = 0x00;
+			tmp_data[2] = 0x00;
+			tmp_data[1] = 0x00;
+			tmp_data[0] = (uint8_t)page_prog_start;
+		}
+		else if (page_prog_start >= 0x100 && page_prog_start < 0x10000)
+		{
+			tmp_data[3] = 0x00;
+			tmp_data[2] = 0x00;
+			tmp_data[1] = (uint8_t)(page_prog_start >> 8);
+			tmp_data[0] = (uint8_t)page_prog_start;
+		}
+		else if (page_prog_start >= 0x10000 && page_prog_start < 0x1000000)
+		{
+			tmp_data[3] = 0x00;
+			tmp_data[2] = (uint8_t)(page_prog_start >> 16);
+			tmp_data[1] = (uint8_t)(page_prog_start >> 8);
+			tmp_data[0] = (uint8_t)page_prog_start;
+		}
+		himax_83100_Flash_Write(tmp_addr, tmp_data);
+
+		tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x24;
+		tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x3B;
+		himax_83100_Flash_Write(tmp_addr, tmp_data);
+
+		//==================================
+		// AHB_I2C Burst Read
+		// Set SPI data register : 0x8000_002C ==> 0x00
+		//==================================
+		out_buffer[0] = 0x2C;
+		out_buffer[1] = 0x00;
+		out_buffer[2] = 0x00;
+		out_buffer[3] = 0x80;
+		i2c_himax_write(private_ts->client, 0x00 ,out_buffer, 4, HIMAX_I2C_RETRY_TIMES);
+
+		//==================================
+		// Read access : 0x0C ==> 0x00
+		//==================================
+		out_buffer[0] = 0x00;
+		i2c_himax_write(private_ts->client, 0x0C ,out_buffer, 1, HIMAX_I2C_RETRY_TIMES);
+
+		//==================================
+		// Read 128 bytes two times
+		//==================================
+		i2c_himax_read(private_ts->client, 0x08 ,in_buffer, 128, HIMAX_I2C_RETRY_TIMES);
+		for (i = 0; i < 128; i++)
+			flash_buffer[i + page_prog_start] = in_buffer[i];
+
+		i2c_himax_read(private_ts->client, 0x08 ,in_buffer, 128, HIMAX_I2C_RETRY_TIMES);
+		for (i = 0; i < 128; i++)
+			flash_buffer[(i + 128) + page_prog_start] = in_buffer[i];
+
+		//tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x2C;
+		//himax_register_read(tmp_addr, 32, out in_buffer);
+		//for (int i = 0; i < 128; i++)
+		//	  flash_buffer[i + page_prog_start] = in_buffer[i];
+		//tmp_addr[3] = 0x80; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x2C;
+		//himax_register_read(tmp_addr, 32, out in_buffer);
+		//for (int i = 0; i < 128; i++)
+		//	  flash_buffer[i + page_prog_start] = in_buffer[i];
+
+		I("%s:Verify Progress: %x\n", __func__, page_prog_start);
+	}
+
+	fail_cnt = 0;
+	for (i = 0; i < FW_Size; i++)
+	{
+		if (FW_File[i] != flash_buffer[i])
+		{
+			if (fail_cnt == 0)
+				fail_addr = i;
+
+			fail_cnt++;
+			//E("%s Fail Block:%x\n", __func__, i);
+			//return false;
+		}
+	}
+	if (fail_cnt > 0)
+	{
+		E("%s:Start Fail Block:%x and fail block count=%x\n" , __func__,fail_addr,fail_cnt);
+		return false;
+	}
+
+	I("%s:Byte read verify pass.\n", __func__);
+	return true;
+
+}
+#endif
+
+void himax_get_DSRAM_data(struct i2c_client *client, uint8_t *info_data)
+{
+	int i;
+	int cnt = 0;
+	unsigned char tmp_addr[4];
+	unsigned char tmp_data[4];
+	uint8_t max_i2c_size = 32;
+	int total_size = ic_data->HX_TX_NUM * ic_data->HX_RX_NUM * 2;
+	int total_size_4bytes = total_size / 4;
+	int total_read_times = 0;
+	unsigned long address = 0x08000468;
+
+	tmp_addr[3] = 0x08; tmp_addr[2] = 0x00; tmp_addr[1] = 0x04; tmp_addr[0] = 0x64;
+	tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x5A; tmp_data[0] = 0xA5;
+	himax_flash_write_burst(client, tmp_addr, tmp_data);
+
+	do
+	{
+		cnt++;
+		himax_register_read(client, tmp_addr, 1, tmp_data);
+		usleep_range(10000, 20000);
+	} while ((tmp_data[1] != 0xA5 || tmp_data[0] != 0x5A) && cnt < 100);
+
+	tmp_addr[3] = 0x08; tmp_addr[2] = 0x00; tmp_addr[1] = 0x04; tmp_addr[0] = 0x68;
+	if (total_size_4bytes % max_i2c_size == 0)
+	{
+		total_read_times = total_size_4bytes / max_i2c_size;
+	}
+	else
+	{
+		total_read_times = total_size_4bytes / max_i2c_size + 1;
+	}
+	for (i = 0; i < (total_read_times); i++)
+	{
+		if ( total_size_4bytes >= max_i2c_size)
+		{
+			himax_register_read(client, tmp_addr, max_i2c_size, &info_data[i*max_i2c_size*4]);
+			total_size_4bytes = total_size_4bytes - max_i2c_size;
+		}
+		else
+		{
+			himax_register_read(client, tmp_addr, total_size_4bytes % max_i2c_size, &info_data[i*max_i2c_size*4]);
+		}
+		address += max_i2c_size*4;
+		tmp_addr[1] = (uint8_t)((address>>8)&0x00FF);
+		tmp_addr[0] = (uint8_t)((address)&0x00FF);
+	}
+	tmp_addr[3] = 0x08; tmp_addr[2] = 0x00; tmp_addr[1] = 0x04; tmp_addr[0] = 0x64;
+	tmp_data[3] = 0x11; tmp_data[2] = 0x22; tmp_data[1] = 0x33; tmp_data[0] = 0x44;
+	himax_flash_write_burst(client, tmp_addr, tmp_data);
+}
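+
+/*
+ * himax_get_DSRAM_data() above handshakes with the firmware before pulling
+ * the frame out of SRAM: it writes the magic 0x5AA5 to 0x0800_0464, polls
+ * (up to 100 times) until the firmware flips it to 0xA55A, reads TX*RX*2
+ * bytes from 0x0800_0468 in chunks of at most 32 words, and finally writes
+ * 0x1122_3344 back to 0x0800_0464, presumably to hand the buffer back to
+ * the firmware.
+ */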
+//ts_work
+int cal_data_len(int raw_cnt_rmd, int HX_MAX_PT, int raw_cnt_max)
+{
+	int RawDataLen;
+
+	if (raw_cnt_rmd != 0x00)
+		RawDataLen = 124 - ((HX_MAX_PT + raw_cnt_max + 3) * 4) - 1;
+	else
+		RawDataLen = 124 - ((HX_MAX_PT + raw_cnt_max + 2) * 4) - 1;
+
+	return RawDataLen;
+}
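+
+/*
+ * Worked example for cal_data_len() with hypothetical values: for
+ * HX_MAX_PT = 10, raw_cnt_max = 2 and a non-zero remainder,
+ * RawDataLen = 124 - (10 + 2 + 3) * 4 - 1 = 63 raw-data bytes per
+ * 124-byte event packet, the rest being taken up by the touch header
+ * and framing bytes.
+ */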
+
+bool read_event_stack(struct i2c_client *client, uint8_t *buf, int length)
+{
+	uint8_t cmd[4];
+
+	if(length > 56)
+		length = 124;
+	//=====================
+	//AHB I2C Burst Read
+	//=====================
+	cmd[0] = 0x31;
+	if ( i2c_himax_write(client, 0x13 ,cmd, 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		goto err_workqueue_out;
+	}
+
+	cmd[0] = 0x10;
+	if ( i2c_himax_write(client, 0x0D ,cmd, 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		goto err_workqueue_out;
+	}
+	//=====================
+	//Read event stack
+	//=====================
+	cmd[3] = 0x90; cmd[2] = 0x06; cmd[1] = 0x00; cmd[0] = 0x00;
+	if ( i2c_himax_write(client, 0x00 ,cmd, 4, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		goto err_workqueue_out;
+	}
+
+	cmd[0] = 0x00;
+	if ( i2c_himax_write(client, 0x0C ,cmd, 1, HIMAX_I2C_RETRY_TIMES) < 0) {
+		E("%s: i2c access fail!\n", __func__);
+		goto err_workqueue_out;
+	}
+	i2c_himax_read(client, 0x08, buf, length,HIMAX_I2C_RETRY_TIMES);
+	return 1;
+
+err_workqueue_out:
+	return 0;
+}
+
+bool post_read_event_stack(struct i2c_client *client)
+{
+	return 1;
+}
+bool diag_check_sum(uint8_t hx_touch_info_size, uint8_t *buf) //return 1 if the packet checksum is valid
+{
+	uint16_t check_sum_cal = 0;
+	int i;
+
+	//Check 124th byte CRC
+	for (i = hx_touch_info_size, check_sum_cal = 0; i < 124; i=i+2)
+	{
+		check_sum_cal += (buf[i+1]*256 + buf[i]);
+	}
+	if (check_sum_cal % 0x10000 != 0)
+	{
+		I("%s: diag check sum fail! check_sum_cal=%X, hx_touch_info_size=%d, \n",__func__,check_sum_cal, hx_touch_info_size);
+		return 0;
+	}
+	return 1;
+}
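+
+/*
+ * diag_check_sum() sums the little-endian 16-bit words from the end of the
+ * touch header up to byte 123 and passes only when the 16-bit total wraps to
+ * zero.  For example, a hypothetical two-word tail of 0x12 0x34 0xEE 0xCB
+ * sums to 0x3412 + 0xCBEE = 0x10000, i.e. 0 modulo 0x10000, so it would pass.
+ */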
+
+
+void diag_parse_raw_data(int hx_touch_info_size, int RawDataLen, int mul_num, int self_num, uint8_t *buf, uint8_t diag_cmd, int16_t *mutual_data, int16_t *self_data)
+{
+	int RawDataLen_word;
+	int index = 0;
+	int temp1, temp2,i;
+	
+	if (buf[hx_touch_info_size] == 0x3A && buf[hx_touch_info_size+1] == 0xA3 &&
+	    buf[hx_touch_info_size+2] > 0 && buf[hx_touch_info_size+3] == diag_cmd+5)
+	{
+		RawDataLen_word = RawDataLen/2;
+		index = (buf[hx_touch_info_size+2] - 1) * RawDataLen_word;
+		//I("Header[%d]: %x, %x, %x, %x, mutual: %d, self: %d\n", index, buf[56], buf[57], buf[58], buf[59], mul_num, self_num);
+		for (i = 0; i < RawDataLen_word; i++)
+		{
+			temp1 = index + i;
+
+			if (temp1 < mul_num)
+			{	//mutual
+				mutual_data[index + i] = buf[i*2 + hx_touch_info_size+4+1]*256 + buf[i*2 + hx_touch_info_size+4];	//4: RawData Header, 1:HSB
+			}
+			else
+			{	//self
+				temp1 = i + index;
+				temp2 = self_num + mul_num;
+
+				if (temp1 >= temp2)
+					break;
+
+				self_data[i+index-mul_num] = buf[i*2 + hx_touch_info_size+4];	//4: RawData Header
+				self_data[i+index-mul_num+1] = buf[i*2 + hx_touch_info_size+4+1];
+			}
+		}
+	}
+	else
+	{
+		I("[HIMAX TP MSG]%s: header format is wrong!\n", __func__);
+		I("Header[%d]: %x, %x, %x, %x, mutual: %d, self: %d\n", index, buf[56], buf[57], buf[58], buf[59], mul_num, self_num);
+	}
+}
diff --git a/drivers/input/touchscreen/hxchipset/himax_ic.h b/drivers/input/touchscreen/hxchipset/himax_ic.h
new file mode 100644
index 0000000..18cd12b
--- /dev/null
+++ b/drivers/input/touchscreen/hxchipset/himax_ic.h
@@ -0,0 +1,82 @@
+/* Himax Android Driver Sample Code for HMX83100 chipset
+*
+* Copyright (C) 2015 Himax Corporation.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+*/
+
+#include "himax_platform.h"
+#include "himax_common.h"
+
+#include <linux/slab.h>
+
+
+#define HX_85XX_A_SERIES_PWON		1
+#define HX_85XX_B_SERIES_PWON		2
+#define HX_85XX_C_SERIES_PWON		3
+#define HX_85XX_D_SERIES_PWON		4
+#define HX_85XX_E_SERIES_PWON		5
+#define HX_85XX_ES_SERIES_PWON		6
+#define HX_85XX_F_SERIES_PWON		7
+#define HX_83100_SERIES_PWON		8
+
+#define HX_TP_BIN_CHECKSUM_SW		1
+#define HX_TP_BIN_CHECKSUM_HW		2
+#define HX_TP_BIN_CHECKSUM_CRC	3
+
+enum fw_image_type {
+	fw_image_60k	= 0x01,
+	fw_image_64k,
+	fw_image_124k,
+	fw_image_128k,
+};
+
+int himax_hand_shaking(struct i2c_client *client);
+void himax_set_SMWP_enable(struct i2c_client *client,uint8_t SMWP_enable);
+void himax_get_SMWP_enable(struct i2c_client *client,uint8_t *tmp_data);
+void himax_set_HSEN_enable(struct i2c_client *client,uint8_t HSEN_enable);
+void himax_get_HSEN_enable(struct i2c_client *client,uint8_t *tmp_data);
+void himax_diag_register_set(struct i2c_client *client, uint8_t diag_command);
+void himax_flash_dump_func(struct i2c_client *client, uint8_t local_flash_command, int Flash_Size, uint8_t *flash_buffer);
+int himax_chip_self_test(struct i2c_client *client);
+int himax_burst_enable(struct i2c_client *client, uint8_t auto_add_4_byte); ////himax_83100_BURST_INC0_EN
+void himax_register_read(struct i2c_client *client, uint8_t *read_addr, int read_length, uint8_t *read_data); ////RegisterRead83100
+void himax_flash_read(struct i2c_client *client, uint8_t *reg_byte, uint8_t *read_data); ////himax_83100_Flash_Read
+void himax_flash_write_burst(struct i2c_client *client, uint8_t * reg_byte, uint8_t * write_data); ////himax_83100_Flash_Write_Burst
+int himax_flash_write_burst_lenth(struct i2c_client *client, uint8_t *reg_byte, uint8_t *write_data, int length); ////himax_83100_Flash_Write_Burst_lenth
+int himax_register_write(struct i2c_client *client, uint8_t *write_addr, int write_length, uint8_t *write_data); ////RegisterWrite83100
+void himax_sense_off(struct i2c_client *client); ////himax_83100_SenseOff
+void himax_interface_on(struct i2c_client *client); ////himax_83100_Interface_on
+bool wait_wip(struct i2c_client *client, int Timing);
+void himax_sense_on(struct i2c_client *client, uint8_t FlashMode); ////himax_83100_SenseOn
+void himax_chip_erase(struct i2c_client *client); ////himax_83100_Chip_Erase
+bool himax_block_erase(struct i2c_client *client); ////himax_83100_Block_Erase
+bool himax_sector_erase(struct i2c_client *client, int start_addr); ////himax_83100_Sector_Erase
+void himax_sram_write(struct i2c_client *client, uint8_t *FW_content); ////himax_83100_Sram_Write
+bool himax_sram_verify(struct i2c_client *client, uint8_t *FW_File, int FW_Size); ////himax_83100_Sram_Verify
+void himax_flash_programming(struct i2c_client *client, uint8_t *FW_content, int FW_Size); ////himax_83100_Flash_Programming
+bool himax_check_chip_version(struct i2c_client *client); ////himax_83100_CheckChipVersion
+int himax_check_CRC(struct i2c_client *client, int mode); ////himax_83100_Check_CRC
+bool Calculate_CRC_with_AP(unsigned char *FW_content , int CRC_from_FW, int mode);
+int fts_ctpm_fw_upgrade_with_sys_fs_60k(struct i2c_client *client, unsigned char *fw, int len, bool change_iref);
+int fts_ctpm_fw_upgrade_with_sys_fs_64k(struct i2c_client *client, unsigned char *fw, int len, bool change_iref);
+int fts_ctpm_fw_upgrade_with_sys_fs_124k(struct i2c_client *client, unsigned char *fw, int len, bool change_iref);
+int fts_ctpm_fw_upgrade_with_sys_fs_128k(struct i2c_client *client, unsigned char *fw, int len, bool change_iref);
+void himax_touch_information(struct i2c_client *client);
+void himax_read_FW_ver(struct i2c_client *client);
+bool himax_ic_package_check(struct i2c_client *client);
+void himax_read_event_stack(struct i2c_client *client, uint8_t *buf, uint8_t length);
+int cal_data_len(int raw_cnt_rmd, int HX_MAX_PT, int raw_cnt_max);
+bool read_event_stack(struct i2c_client *client, uint8_t *buf_ts, int length);
+bool post_read_event_stack(struct i2c_client *client);
+bool diag_check_sum(uint8_t hx_touch_info_size, uint8_t *buf_ts); //return 1 if the packet checksum is valid
+void diag_parse_raw_data(int hx_touch_info_size, int RawDataLen, int mul_num, int self_num, uint8_t *buf_ts, uint8_t diag_cmd, int16_t *mutual_data,	int16_t *self_data);
+void himax_get_DSRAM_data(struct i2c_client *client, uint8_t *info_data);
\ No newline at end of file
diff --git a/drivers/input/touchscreen/hxchipset/himax_platform.c b/drivers/input/touchscreen/hxchipset/himax_platform.c
new file mode 100644
index 0000000..7e8a1d6
--- /dev/null
+++ b/drivers/input/touchscreen/hxchipset/himax_platform.c
@@ -0,0 +1,796 @@
+/* Himax Android Driver Sample Code for HIMAX chipset
+*
+* Copyright (C) 2015 Himax Corporation.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+*/
+
+#include "himax_platform.h"
+#include "himax_common.h"
+
+int irq_enable_count = 0;
+#ifdef HX_SMART_WAKEUP
+#define TS_WAKE_LOCK_TIMEOUT		(2 * HZ)
+#endif
+
+#define PINCTRL_STATE_ACTIVE	"pmx_ts_active"
+#define PINCTRL_STATE_SUSPEND	"pmx_ts_suspend"
+#define PINCTRL_STATE_RELEASE	"pmx_ts_release"
+
+extern struct himax_ic_data* ic_data;
+extern void himax_ts_work(struct himax_ts_data *ts);
+extern enum hrtimer_restart himax_ts_timer_func(struct hrtimer *timer);
+extern int himax_ts_init(struct himax_ts_data *ts);
+
+extern int tp_rst_gpio;
+
+#ifdef HX_TP_PROC_DIAG
+extern uint8_t getDiagCommand(void);
+#endif
+
+void himax_vk_parser(struct device_node *dt,
+				struct himax_i2c_platform_data *pdata)
+{
+	u32 data = 0;
+	uint8_t cnt = 0, i = 0;
+	uint32_t coords[4] = {0};
+	struct device_node *node, *pp = NULL;
+	struct himax_virtual_key *vk;
+
+	node = of_parse_phandle(dt, "virtualkey", 0);
+	if (node == NULL) {
+		I(" DT-No vk info in DT");
+		return;
+	} else {
+		while ((pp = of_get_next_child(node, pp)))
+			cnt++;
+		if (!cnt)
+			return;
+
+		vk = kzalloc(cnt * (sizeof *vk), GFP_KERNEL);
+		if (!vk)
+			return;
+		pp = NULL;
+		while ((pp = of_get_next_child(node, pp))) {
+			if (of_property_read_u32(pp, "idx", &data) == 0)
+				vk[i].index = data;
+			if (of_property_read_u32_array(pp, "range", coords, 4) == 0) {
+				vk[i].x_range_min = coords[0], vk[i].x_range_max = coords[1];
+				vk[i].y_range_min = coords[2], vk[i].y_range_max = coords[3];
+			} else
+				I(" range fail");
+			i++;
+		}
+		pdata->virtual_key = vk;
+		for (i = 0; i < cnt; i++)
+			I(" vk[%d] idx:%d x_min:%d, y_max:%d", i,pdata->virtual_key[i].index,
+				pdata->virtual_key[i].x_range_min, pdata->virtual_key[i].y_range_max);
+	}
+}
+
+int himax_parse_dt(struct himax_ts_data *ts,
+				struct himax_i2c_platform_data *pdata)
+{
+	int rc, coords_size = 0;
+	uint32_t coords[4] = {0};
+	struct property *prop;
+	struct device_node *dt = ts->client->dev.of_node;
+	u32 data = 0;
+
+	prop = of_find_property(dt, "himax,panel-coords", NULL);
+	if (prop) {
+		coords_size = prop->length / sizeof(u32);
+		if (coords_size != 4)
+			D(" %s:Invalid panel coords size %d", __func__, coords_size);
+	}
+
+	if (of_property_read_u32_array(dt, "himax,panel-coords", coords, coords_size) == 0) {
+		pdata->abs_x_min = coords[0], pdata->abs_x_max = coords[1];
+		pdata->abs_y_min = coords[2], pdata->abs_y_max = coords[3];
+		I(" DT-%s:panel-coords = %d, %d, %d, %d\n", __func__, pdata->abs_x_min,
+				pdata->abs_x_max, pdata->abs_y_min, pdata->abs_y_max);
+	}
+
+	prop = of_find_property(dt, "himax,display-coords", NULL);
+	if (prop) {
+		coords_size = prop->length / sizeof(u32);
+		if (coords_size != 4)
+			D(" %s:Invalid display coords size %d", __func__, coords_size);
+	}
+	rc = of_property_read_u32_array(dt, "himax,display-coords", coords, coords_size);
+	if (rc && (rc != -EINVAL)) {
+		D(" %s:Fail to read display-coords %d\n", __func__, rc);
+		return rc;
+	}
+	pdata->screenWidth  = coords[1];
+	pdata->screenHeight = coords[3];
+	I(" DT-%s:display-coords = (%d, %d)", __func__, pdata->screenWidth,
+		pdata->screenHeight);
+
+	pdata->gpio_irq = of_get_named_gpio(dt, "himax,irq-gpio", 0);
+	if (!gpio_is_valid(pdata->gpio_irq)) {
+		I(" DT:gpio_irq value is not valid\n");
+	}
+
+	pdata->gpio_reset = of_get_named_gpio(dt, "himax,rst-gpio", 0);
+	if (!gpio_is_valid(pdata->gpio_reset)) {
+		I(" DT:gpio_rst value is not valid\n");
+	}
+	pdata->gpio_3v3_en = of_get_named_gpio(dt, "himax,3v3-gpio", 0);
+	if (!gpio_is_valid(pdata->gpio_3v3_en)) {
+		I(" DT:gpio_3v3_en value is not valid\n");
+	}
+	I(" DT:gpio_irq=%d, gpio_rst=%d, gpio_3v3_en=%d", pdata->gpio_irq, pdata->gpio_reset, pdata->gpio_3v3_en);
+
+	if (of_property_read_u32(dt, "report_type", &data) == 0) {
+		pdata->protocol_type = data;
+		I(" DT:protocol_type=%d", pdata->protocol_type);
+	}
+
+	himax_vk_parser(dt, pdata);
+
+	return 0;
+}
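+
+/*
+ * Device-tree properties consumed above (the node below is a sketch with
+ * hypothetical values, for illustration only):
+ *
+ *	touchscreen@48 {
+ *		compatible = "himax,hxcommon";
+ *		himax,panel-coords = <0 720 0 1280>;
+ *		himax,display-coords = <0 720 0 1280>;
+ *		himax,irq-gpio = <&tlmm 13 0x00>;
+ *		himax,rst-gpio = <&tlmm 12 0x00>;
+ *		himax,3v3-gpio = <&tlmm 10 0x00>;
+ *		report_type = <1>;
+ *	};
+ *
+ * An optional "virtualkey" phandle may point at a node whose children each
+ * carry an "idx" cell and a 4-cell "range" (x_min x_max y_min y_max),
+ * parsed by himax_vk_parser().
+ */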
+
+int i2c_himax_read(struct i2c_client *client, uint8_t command, uint8_t *data, uint8_t length, uint8_t toRetry)
+{
+	int retry;
+	struct i2c_msg msg[] = {
+		{
+			.addr = client->addr,
+			.flags = 0,
+			.len = 1,
+			.buf = &command,
+		},
+		{
+			.addr = client->addr,
+			.flags = I2C_M_RD,
+			.len = length,
+			.buf = data,
+		}
+	};
+
+	for (retry = 0; retry < toRetry; retry++) {
+		if (i2c_transfer(client->adapter, msg, 2) == 2)
+			break;
+		msleep(20);
+	}
+	if (retry == toRetry) {
+		E("%s: i2c_read_block retry over %d\n",
+			__func__, toRetry);
+		return -EIO;
+	}
+	return 0;
+
+}
+
+int i2c_himax_write(struct i2c_client *client, uint8_t command, uint8_t *data, uint8_t length, uint8_t toRetry)
+{
+	int retry/*, loop_i*/;
+	uint8_t buf[length + 1];
+
+	struct i2c_msg msg[] = {
+		{
+			.addr = client->addr,
+			.flags = 0,
+			.len = length + 1,
+			.buf = buf,
+		}
+	};
+
+	buf[0] = command;
+	memcpy(buf+1, data, length);
+	
+	for (retry = 0; retry < toRetry; retry++) {
+		if (i2c_transfer(client->adapter, msg, 1) == 1)
+			break;
+		msleep(20);
+	}
+
+	if (retry == toRetry) {
+		E("%s: i2c_write_block retry over %d\n",
+			__func__, toRetry);
+		return -EIO;
+	}
+	return 0;
+
+}
+
+int i2c_himax_read_command(struct i2c_client *client, uint8_t length, uint8_t *data, uint8_t *readlength, uint8_t toRetry)
+{
+	int retry;
+	struct i2c_msg msg[] = {
+		{
+		.addr = client->addr,
+		.flags = I2C_M_RD,
+		.len = length,
+		.buf = data,
+		}
+	};
+
+	for (retry = 0; retry < toRetry; retry++) {
+		if (i2c_transfer(client->adapter, msg, 1) == 1)
+			break;
+		msleep(20);
+	}
+	if (retry == toRetry) {
+		E("%s: i2c_read_block retry over %d\n",
+		       __func__, toRetry);
+		return -EIO;
+	}
+	return 0;
+}
+
+int i2c_himax_write_command(struct i2c_client *client, uint8_t command, uint8_t toRetry)
+{
+	return i2c_himax_write(client, command, NULL, 0, toRetry);
+}
+
+int i2c_himax_master_write(struct i2c_client *client, uint8_t *data, uint8_t length, uint8_t toRetry)
+{
+	int retry/*, loop_i*/;
+	uint8_t buf[length];
+
+	struct i2c_msg msg[] = {
+		{
+			.addr = client->addr,
+			.flags = 0,
+			.len = length,
+			.buf = buf,
+		}
+	};
+
+	memcpy(buf, data, length);
+	
+	for (retry = 0; retry < toRetry; retry++) {
+		if (i2c_transfer(client->adapter, msg, 1) == 1)
+			break;
+		msleep(20);
+	}
+
+	if (retry == toRetry) {
+		E("%s: i2c_write_block retry over %d\n",
+		       __func__, toRetry);
+		return -EIO;
+	}
+	return 0;
+}
+
+void himax_int_enable(int irqnum, int enable)
+{
+	if (enable == 1 && irq_enable_count == 0) {
+		enable_irq(irqnum);
+		irq_enable_count++;
+	} else if (enable == 0 && irq_enable_count == 1) {
+		disable_irq_nosync(irqnum);
+		irq_enable_count--;
+	}
+	I("irq_enable_count = %d", irq_enable_count);
+}
+
+void himax_rst_gpio_set(int pinnum, uint8_t value)
+{
+	gpio_direction_output(pinnum, value);
+}
+
+uint8_t himax_int_gpio_read(int pinnum)
+{
+	return gpio_get_value(pinnum);
+}
+
+#if defined(CONFIG_HMX_DB)
+static int himax_regulator_configure(struct i2c_client *client,struct himax_i2c_platform_data *pdata)
+{
+    int retval;
+    pdata->vcc_dig = regulator_get(&client->dev,
+                                   "vdd");
+    if (IS_ERR(pdata->vcc_dig))
+    {
+        E("%s: Failed to get regulator vdd\n",
+          __func__);
+        retval = PTR_ERR(pdata->vcc_dig);
+        return retval;
+    }
+    pdata->vcc_ana = regulator_get(&client->dev,
+                                   "avdd");
+    if (IS_ERR(pdata->vcc_ana))
+    {
+        E("%s: Failed to get regulator avdd\n",
+          __func__);
+        retval = PTR_ERR(pdata->vcc_ana);
+        regulator_put(pdata->vcc_dig);
+        return retval;
+    }
+
+    return 0;
+}
+
+static int himax_power_on(struct himax_i2c_platform_data *pdata, bool on)
+{
+    int retval;
+
+    if (on)
+    {
+        retval = regulator_enable(pdata->vcc_dig);
+        if (retval)
+        {
+            E("%s: Failed to enable regulator vdd\n",
+              __func__);
+            return retval;
+        }
+        msleep(100);
+        retval = regulator_enable(pdata->vcc_ana);
+        if (retval)
+        {
+            E("%s: Failed to enable regulator avdd\n",
+              __func__);
+            regulator_disable(pdata->vcc_dig);
+            return retval;
+        }
+    }
+    else
+    {
+        regulator_disable(pdata->vcc_dig);
+        regulator_disable(pdata->vcc_ana);
+    }
+
+    return 0;
+}
+
+int himax_ts_pinctrl_init(struct himax_ts_data *ts)
+{
+	int retval;
+
+	/* Get pinctrl if target uses pinctrl */
+	ts->ts_pinctrl = devm_pinctrl_get(&(ts->client->dev));
+	if (IS_ERR_OR_NULL(ts->ts_pinctrl)) {
+		retval = PTR_ERR(ts->ts_pinctrl);
+		dev_dbg(&ts->client->dev,
+			"Target does not use pinctrl %d\n", retval);
+		goto err_pinctrl_get;
+	}
+
+	ts->pinctrl_state_active
+		= pinctrl_lookup_state(ts->ts_pinctrl,
+				PINCTRL_STATE_ACTIVE);
+	if (IS_ERR_OR_NULL(ts->pinctrl_state_active)) {
+		retval = PTR_ERR(ts->pinctrl_state_active);
+		dev_err(&ts->client->dev,
+			"Can not lookup %s pinstate %d\n",
+			PINCTRL_STATE_ACTIVE, retval);
+		goto err_pinctrl_lookup;
+	}
+
+	ts->pinctrl_state_suspend
+		= pinctrl_lookup_state(ts->ts_pinctrl,
+			PINCTRL_STATE_SUSPEND);
+	if (IS_ERR_OR_NULL(ts->pinctrl_state_suspend)) {
+		retval = PTR_ERR(ts->pinctrl_state_suspend);
+		dev_err(&ts->client->dev,
+			"Can not lookup %s pinstate %d\n",
+			PINCTRL_STATE_SUSPEND, retval);
+		goto err_pinctrl_lookup;
+	}
+
+	ts->pinctrl_state_release
+		= pinctrl_lookup_state(ts->ts_pinctrl,
+			PINCTRL_STATE_RELEASE);
+	if (IS_ERR_OR_NULL(ts->pinctrl_state_release)) {
+		retval = PTR_ERR(ts->pinctrl_state_release);
+		dev_dbg(&ts->client->dev,
+			"Can not lookup %s pinstate %d\n",
+			PINCTRL_STATE_RELEASE, retval);
+	}
+
+	return 0;
+
+err_pinctrl_lookup:
+	devm_pinctrl_put(ts->ts_pinctrl);
+err_pinctrl_get:
+	ts->ts_pinctrl = NULL;
+	return retval;
+}
+
+int himax_gpio_power_config(struct i2c_client *client,struct himax_i2c_platform_data *pdata)
+{
+    int error;
+
+    error = himax_regulator_configure(client, pdata);
+    if (error)
+    {
+        E("Failed to initialize hardware\n");
+        goto err_regulator_not_on;
+    }
+
+#ifdef HX_RST_PIN_FUNC
+    if (gpio_is_valid(pdata->gpio_reset))
+    {
+        /* configure touchscreen reset out gpio */
+        error = gpio_request(pdata->gpio_reset, "hmx_reset_gpio");
+        if (error)
+        {
+            E("unable to request gpio [%d]\n",
+              pdata->gpio_reset);
+            goto err_regulator_on;
+        }
+
+        error = gpio_direction_output(pdata->gpio_reset, 0);
+        if (error)
+        {
+            E("unable to set direction for gpio [%d]\n",
+              pdata->gpio_reset);
+            goto err_gpio_reset_req;
+        }
+    }
+#endif
+
+    error = himax_power_on(pdata, true);
+    if (error)
+    {
+        E("Failed to power on hardware\n");
+        goto err_gpio_reset_req;
+    }
+#ifdef HX_IRQ_PIN_FUNC
+    if (gpio_is_valid(pdata->gpio_irq))
+    {
+        /* configure touchscreen irq gpio */
+        error = gpio_request(pdata->gpio_irq, "hmx_gpio_irq");
+        if (error)
+        {
+            E("unable to request gpio [%d]\n",
+              pdata->gpio_irq);
+            goto err_power_on;
+        }
+        error = gpio_direction_input(pdata->gpio_irq);
+        if (error)
+        {
+            E("unable to set direction for gpio [%d]\n",
+              pdata->gpio_irq);
+            goto err_gpio_irq_req;
+        }
+        client->irq = gpio_to_irq(pdata->gpio_irq);
+    }
+    else
+    {
+        E("irq gpio not provided\n");
+        goto err_power_on;
+    }
+#endif
+
+    msleep(20);
+
+#ifdef HX_RST_PIN_FUNC
+    if (gpio_is_valid(pdata->gpio_reset))
+    {
+        error = gpio_direction_output(pdata->gpio_reset, 1);
+        if (error)
+        {
+            E("unable to set direction for gpio [%d]\n",
+              pdata->gpio_reset);
+            goto err_gpio_irq_req;
+        }
+    }
+#endif
+    return 0;
+#ifdef HX_RST_PIN_FUNC
+	err_gpio_irq_req:
+#endif
+#ifdef HX_IRQ_PIN_FUNC
+    if (gpio_is_valid(pdata->gpio_irq))
+        gpio_free(pdata->gpio_irq);
+	err_power_on:
+#endif
+    himax_power_on(pdata, false);
+	err_gpio_reset_req:
+#ifdef HX_RST_PIN_FUNC
+    if (gpio_is_valid(pdata->gpio_reset))
+        gpio_free(pdata->gpio_reset);
+	err_regulator_on:
+#endif
+	err_regulator_not_on:
+
+    return error;
+}
+
+#else
+int himax_gpio_power_config(struct i2c_client *client,struct himax_i2c_platform_data *pdata)
+{
+		int error=0;
+	
+#ifdef HX_RST_PIN_FUNC
+		if (pdata->gpio_reset >= 0)
+		{
+			error = gpio_request(pdata->gpio_reset, "himax-reset");
+			if (error < 0)
+			{
+				E("%s: request reset pin failed\n", __func__);
+				return error;
+			}
+			error = gpio_direction_output(pdata->gpio_reset, 0);
+			if (error)
+			{
+				E("unable to set direction for gpio [%d]\n",
+				  pdata->gpio_reset);
+				return error;
+			}
+		}
+#endif
+		if (pdata->gpio_3v3_en >= 0)
+		{
+			error = gpio_request(pdata->gpio_3v3_en, "himax-3v3_en");
+			if (error < 0)
+			{
+				E("%s: request 3v3_en pin failed\n", __func__);
+				return error;
+			}
+			gpio_direction_output(pdata->gpio_3v3_en, 1);
+			I("3v3_en pin =%d\n", gpio_get_value(pdata->gpio_3v3_en));
+		}
+
+#ifdef HX_IRQ_PIN_FUNC
+		if (gpio_is_valid(pdata->gpio_irq))
+		{
+			/* configure touchscreen irq gpio */
+			error = gpio_request(pdata->gpio_irq, "himax_gpio_irq");
+			if (error)
+			{
+				E("unable to request gpio [%d]\n",pdata->gpio_irq);
+				return error;
+			}
+			error = gpio_direction_input(pdata->gpio_irq);
+			if (error)
+			{
+				E("unable to set direction for gpio [%d]\n",pdata->gpio_irq);
+				return error;
+			}
+			client->irq = gpio_to_irq(pdata->gpio_irq);
+		}
+		else
+		{
+			E("irq gpio not provided\n");
+			return error;
+		}
+#endif
+
+		msleep(20);
+	
+#ifdef HX_RST_PIN_FUNC
+		if (pdata->gpio_reset >= 0)
+		{
+			error = gpio_direction_output(pdata->gpio_reset, 1);
+			if (error)
+			{
+				E("unable to set direction for gpio [%d]\n",
+				  pdata->gpio_reset);
+				return error;
+			}
+		}
+		msleep(20);
+#endif
+	
+		return error;
+	}
+#endif
+
+static void himax_ts_isr_func(struct himax_ts_data *ts)
+{
+	himax_ts_work(ts);
+}
+
+irqreturn_t himax_ts_thread(int irq, void *ptr)
+{
+	uint8_t diag_cmd;
+	struct himax_ts_data *ts = ptr;
+	struct timespec timeStart, timeEnd, timeDelta;
+
+	diag_cmd = getDiagCommand();
+
+	if (ts->debug_log_level & BIT(2)) {
+			getnstimeofday(&timeStart);
+			usleep_range(5000, 7000);
+			//I(" Irq start time = %ld.%06ld s\n",
+			//	timeStart.tv_sec, timeStart.tv_nsec/1000);
+	}
+
+#ifdef HX_SMART_WAKEUP
+	if (atomic_read(&ts->suspend_mode)&&(!FAKE_POWER_KEY_SEND)&&(ts->SMWP_enable)&&(!diag_cmd)) {
+		wake_lock_timeout(&ts->ts_SMWP_wake_lock, TS_WAKE_LOCK_TIMEOUT);
+		msleep(200);
+		himax_wake_check_func();
+		return IRQ_HANDLED;
+	}
+#endif
+	himax_ts_isr_func((struct himax_ts_data *)ptr);
+	if (ts->debug_log_level & BIT(2)) {
+		getnstimeofday(&timeEnd);
+		timeDelta.tv_nsec = (timeEnd.tv_sec * 1000000000 + timeEnd.tv_nsec)
+			- (timeStart.tv_sec * 1000000000 + timeStart.tv_nsec);
+		//I("Irq finish time = %ld.%06ld s\n",
+		//	timeEnd.tv_sec, timeEnd.tv_nsec/1000);
+		//I("Touch latency = %ld us\n", timeDelta.tv_nsec/1000);
+	}
+	return IRQ_HANDLED;
+}
+
+static void himax_ts_work_func(struct work_struct *work)
+{
+	struct himax_ts_data *ts = container_of(work, struct himax_ts_data, work);
+	himax_ts_work(ts);
+}
+
+int tp_irq = -1;
+
+int himax_ts_register_interrupt(struct i2c_client *client)
+{
+	struct himax_ts_data *ts = i2c_get_clientdata(client);
+	int ret = 0;
+
+	ts->irq_enabled = 0;
+	//Work function
+	if (client->irq) {/*INT mode*/
+		ts->use_irq = 1;
+		if (ic_data->HX_INT_IS_EDGE) {
+			I("%s: edge trigger falling\n", __func__);
+			ret = request_threaded_irq(client->irq, NULL, himax_ts_thread,
+				IRQF_TRIGGER_FALLING | IRQF_ONESHOT, client->name, ts);
+		} else {
+			I("%s: level trigger low\n", __func__);
+			ret = request_threaded_irq(client->irq, NULL, himax_ts_thread,
+				IRQF_TRIGGER_LOW | IRQF_ONESHOT, client->name, ts);
+		}
+
+		if (ret == 0) {
+			ts->irq_enabled = 1;
+			irq_enable_count = 1;
+			tp_irq = client->irq;
+			I("%s: irq enabled at qpio: %d\n", __func__, client->irq);
+#ifdef HX_SMART_WAKEUP
+			irq_set_irq_wake(client->irq, 1);
+#endif
+		} else {
+			ts->use_irq = 0;
+			E("%s: request_irq failed\n", __func__);
+		}
+	} else {
+		I("%s: client->irq is empty, use polling mode.\n", __func__);
+	}
+
+	if (!ts->use_irq) { /* polling mode requires HX_ESD_WORKAROUND to be disabled */
+		ts->himax_wq = create_singlethread_workqueue("himax_touch");
+
+		INIT_WORK(&ts->work, himax_ts_work_func);
+
+		hrtimer_init(&ts->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		ts->timer.function = himax_ts_timer_func;
+		hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
+		I("%s: polling mode enabled\n", __func__);
+	}
+	return ret;
+}
+
+static int himax_common_suspend(struct device *dev)
+{
+	struct himax_ts_data *ts = dev_get_drvdata(dev);
+
+	I("%s: enter \n", __func__);
+
+	himax_chip_common_suspend(ts);
+	return 0;
+}
+
+static int himax_common_resume(struct device *dev)
+{
+	struct himax_ts_data *ts = dev_get_drvdata(dev);
+
+	I("%s: enter \n", __func__);
+
+	himax_chip_common_resume(ts);
+	return 0;
+}
+
+#if defined(CONFIG_FB)
+int fb_notifier_callback(struct notifier_block *self,
+				 unsigned long event, void *data)
+{
+	struct fb_event *evdata = data;
+	int *blank;
+	struct himax_ts_data *ts =
+		container_of(self, struct himax_ts_data, fb_notif);
+
+	I(" %s\n", __func__);
+	if (evdata && evdata->data && event == FB_EVENT_BLANK && ts &&
+			ts->client) {
+		blank = evdata->data;
+
+		mutex_lock(&ts->fb_mutex);
+		switch (*blank) {
+		case FB_BLANK_UNBLANK:
+			if (!ts->probe_done) {
+				himax_ts_init(ts);
+				ts->probe_done = true;
+			} else {
+				himax_common_resume(&ts->client->dev);
+			}
+		break;
+
+		case FB_BLANK_POWERDOWN:
+		case FB_BLANK_HSYNC_SUSPEND:
+		case FB_BLANK_VSYNC_SUSPEND:
+		case FB_BLANK_NORMAL:
+			himax_common_suspend(&ts->client->dev);
+		break;
+		}
+		mutex_unlock(&ts->fb_mutex);
+	}
+
+	return 0;
+}
+#endif
+
+static const struct i2c_device_id himax_common_ts_id[] = {
+	{HIMAX_common_NAME, 0 },
+	{}
+};
+
+static const struct dev_pm_ops himax_common_pm_ops = {
+#if (!defined(CONFIG_FB))
+	.suspend = himax_common_suspend,
+	.resume  = himax_common_resume,
+#endif
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id himax_match_table[] = {
+	{.compatible = "himax,hxcommon" },
+	{},
+};
+#else
+#define himax_match_table NULL
+#endif
+
+static struct i2c_driver himax_common_driver = {
+	.id_table	= himax_common_ts_id,
+	.probe		= himax_chip_common_probe,
+	.remove		= himax_chip_common_remove,
+	.driver		= {
+		.name = HIMAX_common_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = himax_match_table,
+#ifdef CONFIG_PM
+		.pm = &himax_common_pm_ops,
+#endif
+	},
+};
+
+static void __init himax_common_init_async(void *unused, async_cookie_t cookie)
+{
+	I("%s:Enter \n", __func__);
+	i2c_add_driver(&himax_common_driver);
+}
+
+static int __init himax_common_init(void)
+{
+	I("Himax common touch panel driver init\n");
+	async_schedule(himax_common_init_async, NULL);
+	return 0;
+}
+
+static void __exit himax_common_exit(void)
+{
+	i2c_del_driver(&himax_common_driver);
+}
+
+module_init(himax_common_init);
+module_exit(himax_common_exit);
+
+MODULE_DESCRIPTION("Himax_common driver");
+MODULE_LICENSE("GPL");
+
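Editorial aside, not part of the patch: himax_ts_register_interrupt() above prefers a threaded interrupt, edge- or level-triggered depending on the IC, and only falls back to hrtimer-driven polling when no IRQ is wired up. A minimal sketch of that request_threaded_irq() pattern, with illustrative names that do not appear in the driver:

#include <linux/i2c.h>
#include <linux/interrupt.h>

/* Illustrative sketch only: the threaded-IRQ registration pattern used by
 * himax_ts_register_interrupt(). The hard handler is left NULL so all touch
 * processing runs in the IRQ thread, where it may sleep (e.g. for I2C).
 */
static irqreturn_t example_ts_thread(int irq, void *dev_id)
{
	/* read and report touch data here */
	return IRQ_HANDLED;
}

static int example_register_irq(struct i2c_client *client, void *priv,
				bool edge_triggered)
{
	unsigned long flags = IRQF_ONESHOT |
		(edge_triggered ? IRQF_TRIGGER_FALLING : IRQF_TRIGGER_LOW);

	return request_threaded_irq(client->irq, NULL, example_ts_thread,
				    flags, client->name, priv);
}

With IRQF_ONESHOT and a NULL hard handler the line stays masked until the thread function returns, which is what makes sleeping I2C transfers safe from the handler.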
diff --git a/drivers/input/touchscreen/hxchipset/himax_platform.h b/drivers/input/touchscreen/hxchipset/himax_platform.h
new file mode 100644
index 0000000..1223685
--- /dev/null
+++ b/drivers/input/touchscreen/hxchipset/himax_platform.h
@@ -0,0 +1,135 @@
+/* Himax Android Driver Sample Code for Himax chipset
+*
+* Copyright (C) 2015 Himax Corporation.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+*/
+
+#ifndef HIMAX_PLATFORM_H
+#define HIMAX_PLATFORM_H
+
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/gpio.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#if defined(CONFIG_HMX_DB)
+#include <linux/regulator/consumer.h>
+#endif
+
+#define QCT
+
+#define HIMAX_I2C_RETRY_TIMES 10
+
+#if defined(CONFIG_TOUCHSCREEN_HIMAX_DEBUG)
+#define D(x...) pr_debug("[HXTP] " x)
+#define I(x...) pr_info("[HXTP] " x)
+#define W(x...) pr_warning("[HXTP][WARNING] " x)
+#define E(x...) pr_err("[HXTP][ERROR] " x)
+#define DIF(x...) \
+do { \
+	if (debug_flag) \
+		pr_debug("[HXTP][DEBUG] " x); \
+} while (0)
+#else
+#define D(x...)
+#define I(x...)
+#define W(x...)
+#define E(x...)
+#define DIF(x...)
+#endif
+
+#if defined(CONFIG_HMX_DB)
+/* Analog voltage @2.7 V */
+#define HX_VTG_MIN_UV			2700000
+#define HX_VTG_MAX_UV			3300000
+#define HX_ACTIVE_LOAD_UA		15000
+#define HX_LPM_LOAD_UA 			10
+/* Digital voltage @1.8 V */
+#define HX_VTG_DIG_MIN_UV		1800000
+#define HX_VTG_DIG_MAX_UV		1800000
+#define HX_ACTIVE_LOAD_DIG_UA	10000
+#define HX_LPM_LOAD_DIG_UA 		10
+
+#define HX_I2C_VTG_MIN_UV		1800000
+#define HX_I2C_VTG_MAX_UV		1800000
+#define HX_I2C_LOAD_UA 			10000
+#define HX_I2C_LPM_LOAD_UA 		10
+#endif
+
+#define HIMAX_common_NAME 				"himax_tp"
+#define HIMAX_I2C_ADDR					0x48
+#define INPUT_DEV_NAME					"himax-touchscreen"
+
+struct himax_i2c_platform_data {	
+	int abs_x_min;
+	int abs_x_max;
+	int abs_x_fuzz;
+	int abs_y_min;
+	int abs_y_max;
+	int abs_y_fuzz;
+	int abs_pressure_min;
+	int abs_pressure_max;
+	int abs_pressure_fuzz;
+	int abs_width_min;
+	int abs_width_max;
+	int screenWidth;
+	int screenHeight;
+	uint8_t fw_version;
+	uint8_t tw_id;
+	uint8_t powerOff3V3;
+	uint8_t cable_config[2];
+	uint8_t protocol_type;
+	int gpio_irq;
+	int gpio_reset;
+	int gpio_3v3_en;
+	int (*power)(int on);
+	void (*reset)(void);
+	struct himax_virtual_key *virtual_key;
+	struct kobject *vk_obj;
+	struct kobj_attribute *vk2Use;
+
+	struct himax_config *hx_config;
+	int hx_config_size;
+#if defined(CONFIG_HMX_DB)
+	bool	i2c_pull_up;
+	bool	digital_pwr_regulator;
+	int reset_gpio;
+	u32 reset_gpio_flags;
+	int irq_gpio;
+	u32 irq_gpio_flags;
+
+	struct regulator *vcc_ana; //For Dragon Board
+	struct regulator *vcc_dig; //For Dragon Board
+	struct regulator *vcc_i2c; //For Dragon Board
+#endif	
+};
+
+
+extern int irq_enable_count;
+extern int i2c_himax_read(struct i2c_client *client, uint8_t command, uint8_t *data, uint8_t length, uint8_t toRetry);
+extern int i2c_himax_write(struct i2c_client *client, uint8_t command, uint8_t *data, uint8_t length, uint8_t toRetry);
+extern int i2c_himax_write_command(struct i2c_client *client, uint8_t command, uint8_t toRetry);
+extern int i2c_himax_master_write(struct i2c_client *client, uint8_t *data, uint8_t length, uint8_t toRetry);
+extern int i2c_himax_read_command(struct i2c_client *client, uint8_t length, uint8_t *data, uint8_t *readlength, uint8_t toRetry);
+extern void himax_int_enable(int irqnum, int enable);
+extern int himax_ts_register_interrupt(struct i2c_client *client);
+extern void himax_rst_gpio_set(int pinnum, uint8_t value);
+extern uint8_t himax_int_gpio_read(int pinnum);
+
+extern int himax_gpio_power_config(struct i2c_client *client,struct himax_i2c_platform_data *pdata);
+
+#if defined(CONFIG_FB)
+extern int fb_notifier_callback(struct notifier_block *self, unsigned long event, void *data);
+#endif
+
+#endif
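A note on the DIF() macro above: the do { } while (0) wrapper, together with the semicolon after pr_debug(), is what lets a conditional multi-line macro be used like a single statement. A minimal sketch of the idiom, assuming the same debug_flag variable the header relies on (this fragment is illustrative and not part of the patch):

/* Illustrative sketch only. Without the do { } while (0) wrapper, the
 * conditional body would pair incorrectly with a following else:
 *
 *	if (cond)
 *		DIF("msg\n");
 *	else
 *		do_other_thing();
 */
#define DIF_EXAMPLE(x...)			\
do {						\
	if (debug_flag)				\
		pr_debug("[HXTP][DEBUG] " x);	\
} while (0)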
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.c b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.c
index 21a9e8f..21236e9 100644
--- a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.c
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.c
@@ -113,6 +113,13 @@
 #define F12_WAKEUP_GESTURE_MODE 0x02
 #define F12_UDG_DETECT 0x0f
 
+#define PWR_VTG_MIN_UV		2700000
+#define PWR_VTG_MAX_UV		3600000
+#define PWR_ACTIVE_LOAD_UA	2000
+#define I2C_VTG_MIN_UV		1710000
+#define I2C_VTG_MAX_UV		2000000
+#define I2C_ACTIVE_LOAD_UA	7000
+
 static int synaptics_rmi4_check_status(struct synaptics_rmi4_data *rmi4_data,
 		bool *was_in_bl_mode);
 static int synaptics_rmi4_free_fingers(struct synaptics_rmi4_data *rmi4_data);
@@ -3407,6 +3414,66 @@
 	return retval;
 }
 
+static int reg_set_optimum_mode_check(struct regulator *reg, int load_uA)
+{
+	return (regulator_count_voltages(reg) > 0) ?
+		regulator_set_load(reg, load_uA) : 0;
+}
+
+static int synaptics_rmi4_configure_reg(struct synaptics_rmi4_data *rmi4_data,
+				bool on)
+{
+	int retval;
+
+	if (on == false)
+		goto hw_shutdown;
+
+	if (rmi4_data->pwr_reg) {
+		if (regulator_count_voltages(rmi4_data->pwr_reg) > 0) {
+			retval = regulator_set_voltage(rmi4_data->pwr_reg,
+				PWR_VTG_MIN_UV, PWR_VTG_MAX_UV);
+			if (retval) {
+				dev_err(rmi4_data->pdev->dev.parent,
+					"regulator set_vtg failed retval =%d\n",
+					retval);
+				goto err_set_vtg_pwr;
+			}
+		}
+	}
+
+	if (rmi4_data->bus_reg) {
+		if (regulator_count_voltages(rmi4_data->bus_reg) > 0) {
+			retval = regulator_set_voltage(rmi4_data->bus_reg,
+				I2C_VTG_MIN_UV, I2C_VTG_MAX_UV);
+			if (retval) {
+				dev_err(rmi4_data->pdev->dev.parent,
+					"regulator set_vtg failed retval =%d\n",
+					retval);
+				goto err_set_vtg_bus;
+			}
+		}
+	}
+
+	return 0;
+
+err_set_vtg_bus:
+	if (rmi4_data->pwr_reg &&
+		regulator_count_voltages(rmi4_data->pwr_reg) > 0)
+		regulator_set_voltage(rmi4_data->pwr_reg, 0, PWR_VTG_MAX_UV);
+err_set_vtg_pwr:
+	return retval;
+
+hw_shutdown:
+	if (rmi4_data->pwr_reg &&
+		regulator_count_voltages(rmi4_data->pwr_reg) > 0)
+		regulator_set_voltage(rmi4_data->pwr_reg, 0, PWR_VTG_MAX_UV);
+	if (rmi4_data->bus_reg &&
+		regulator_count_voltages(rmi4_data->bus_reg) > 0)
+		regulator_set_voltage(rmi4_data->bus_reg, 0, I2C_VTG_MAX_UV);
+
+	return 0;
+}
+
 static int synaptics_rmi4_get_reg(struct synaptics_rmi4_data *rmi4_data,
 		bool get)
 {
@@ -3472,37 +3539,66 @@
 	}
 
 	if (rmi4_data->bus_reg) {
+		retval = reg_set_optimum_mode_check(rmi4_data->bus_reg,
+					I2C_ACTIVE_LOAD_UA);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Regulator set_opt failed rc=%d\n",
+					__func__, retval);
+			return retval;
+		}
+
 		retval = regulator_enable(rmi4_data->bus_reg);
 		if (retval < 0) {
 			dev_err(rmi4_data->pdev->dev.parent,
 					"%s: Failed to enable bus pullup regulator\n",
 					__func__);
-			goto exit;
+			goto err_bus_reg_en;
 		}
 	}
 
 	if (rmi4_data->pwr_reg) {
+		retval = reg_set_optimum_mode_check(rmi4_data->pwr_reg,
+					PWR_ACTIVE_LOAD_UA);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Regulator set_opt failed rc=%d\n",
+					__func__, retval);
+			goto disable_bus_reg;
+		}
+
 		retval = regulator_enable(rmi4_data->pwr_reg);
 		if (retval < 0) {
 			dev_err(rmi4_data->pdev->dev.parent,
 					"%s: Failed to enable power regulator\n",
 					__func__);
-			goto disable_bus_reg;
+			goto err_pwr_reg_en;
 		}
 		msleep(bdata->power_delay_ms);
 	}
 
 	return 0;
 
+err_pwr_reg_en:
+	reg_set_optimum_mode_check(rmi4_data->pwr_reg, 0);
+	goto disable_bus_reg;
+err_bus_reg_en:
+	reg_set_optimum_mode_check(rmi4_data->bus_reg, 0);
+
+	return retval;
+
 disable_pwr_reg:
-	if (rmi4_data->pwr_reg)
+	if (rmi4_data->pwr_reg) {
+		reg_set_optimum_mode_check(rmi4_data->pwr_reg, 0);
 		regulator_disable(rmi4_data->pwr_reg);
+	}
 
 disable_bus_reg:
-	if (rmi4_data->bus_reg)
+	if (rmi4_data->bus_reg) {
+		reg_set_optimum_mode_check(rmi4_data->bus_reg, 0);
 		regulator_disable(rmi4_data->bus_reg);
+	}
 
-exit:
 	return retval;
 }
 
@@ -3976,6 +4072,14 @@
 		goto err_get_reg;
 	}
 
+	retval = synaptics_rmi4_configure_reg(rmi4_data, true);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+				"%s: Failed to configure regulators\n",
+				__func__);
+		goto err_configure_reg;
+	}
+
 	retval = synaptics_rmi4_enable_reg(rmi4_data, true);
 	if (retval < 0) {
 		dev_err(&pdev->dev,
@@ -4202,6 +4306,8 @@
 err_enable_reg:
 	synaptics_rmi4_get_reg(rmi4_data, false);
 
+err_configure_reg:
+	synaptics_rmi4_configure_reg(rmi4_data, false);
 err_get_reg:
 	kfree(rmi4_data);
 
@@ -4284,6 +4390,7 @@
 	}
 
 	synaptics_rmi4_enable_reg(rmi4_data, false);
+	synaptics_rmi4_configure_reg(rmi4_data, false);
 	synaptics_rmi4_get_reg(rmi4_data, false);
 
 	kfree(rmi4_data);
@@ -4557,6 +4664,7 @@
 	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
 	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
 	int retval;
+	int lpm_uA;
 
 	if (rmi4_data->stay_awake)
 		return 0;
@@ -4565,6 +4673,17 @@
 
 	if (rmi4_data->enable_wakeup_gesture) {
 		if (!rmi4_data->suspend) {
+			/* Set lpm current for bus regulator */
+			lpm_uA = rmi4_data->hw_if->board_data->bus_lpm_cur_uA;
+			if (lpm_uA) {
+				retval = reg_set_optimum_mode_check(
+						rmi4_data->bus_reg, lpm_uA);
+				if (retval < 0)
+					dev_err(dev,
+					"Bus Regulator set_opt failed rc=%d\n",
+					retval);
+			}
+
 			synaptics_rmi4_wakeup_gesture(rmi4_data, true);
 			enable_irq_wake(rmi4_data->irq);
 		}
@@ -4594,7 +4713,8 @@
 	}
 	mutex_unlock(&exp_data.mutex);
 
-	if (!rmi4_data->suspend && !rmi4_data->enable_wakeup_gesture)
+	if (!rmi4_data->suspend && !rmi4_data->enable_wakeup_gesture &&
+			!rmi4_data->hw_if->board_data->dont_disable_regs)
 		synaptics_rmi4_enable_reg(rmi4_data, false);
 
 	rmi4_data->suspend = true;
@@ -4623,6 +4743,18 @@
 
 	if (rmi4_data->enable_wakeup_gesture) {
 		if (rmi4_data->suspend) {
+			/* Set active current for the bus regulator */
+			if (rmi4_data->hw_if->board_data->bus_lpm_cur_uA) {
+				retval = reg_set_optimum_mode_check(
+						rmi4_data->bus_reg,
+						I2C_ACTIVE_LOAD_UA);
+				if (retval < 0)
+					dev_err(dev,
+					"Bus regulator set_opt failed rc=%d\n",
+					retval);
+			}
+
+
 			synaptics_rmi4_wakeup_gesture(rmi4_data, false);
 			disable_irq_wake(rmi4_data->irq);
 		}
@@ -4631,7 +4763,8 @@
 
 	rmi4_data->current_page = MASK_8BIT;
 
-	if (rmi4_data->suspend)
+	if (rmi4_data->suspend &&
+			!rmi4_data->hw_if->board_data->dont_disable_regs)
 		synaptics_rmi4_enable_reg(rmi4_data, true);
 
 	synaptics_rmi4_sleep_enable(rmi4_data, false);
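Editorial aside, not part of the patch: reg_set_optimum_mode_check() above guards regulator_set_load() with regulator_count_voltages() so fixed or always-on supplies are silently skipped. A minimal sketch of that guard, with illustrative names:

#include <linux/regulator/consumer.h>

/* Illustrative sketch only: request a load from a regulator, but skip
 * supplies that expose no voltage control (e.g. fixed regulators),
 * mirroring reg_set_optimum_mode_check() above.
 */
static int example_set_load(struct regulator *reg, int load_uA)
{
	if (regulator_count_voltages(reg) <= 0)
		return 0;

	return regulator_set_load(reg, load_uA);
}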
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_i2c.c b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_i2c.c
index f634f17..e078853 100644
--- a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_i2c.c
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_i2c.c
@@ -84,6 +84,9 @@
 	bdata->wakeup_gesture_en = of_property_read_bool(np,
 			"synaptics,wakeup-gestures-en");
 
+	bdata->dont_disable_regs = of_property_read_bool(np,
+			"synaptics,do-not-disable-regulators");
+
 	retval = of_property_read_string(np, "synaptics,pwr-reg-name", &name);
 	if (retval < 0)
 		bdata->pwr_reg_name = NULL;
@@ -184,6 +187,10 @@
 		bdata->max_y_for_2d = -1;
 	}
 
+	retval = of_property_read_u32(np, "synaptics,bus-lpm-cur-uA",
+			&value);
+	bdata->bus_lpm_cur_uA = retval < 0 ? 0 : value;
+
 	bdata->swap_axes = of_property_read_bool(np, "synaptics,swap-axes");
 	bdata->x_flip = of_property_read_bool(np, "synaptics,x-flip");
 	bdata->y_flip = of_property_read_bool(np, "synaptics,y-flip");
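Editorial aside, not part of the patch: the synaptics,bus-lpm-cur-uA handling above treats the property as optional and falls back to 0 when it is absent. A minimal sketch of that optional-property pattern, with an illustrative helper name:

#include <linux/of.h>

/* Illustrative sketch only: read an optional u32 devicetree property and
 * fall back to a default when it is missing or malformed, as done for
 * bus-lpm-cur-uA above.
 */
static u32 example_read_optional_u32(const struct device_node *np,
				     const char *prop, u32 def)
{
	u32 value;

	if (of_property_read_u32(np, prop, &value))
		return def;

	return value;
}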
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index d52b534..c1a07a5 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -60,7 +60,7 @@
 
 config IOMMU_IO_PGTABLE_FAST
 	bool "Fast ARMv7/v8 Long Descriptor Format"
-	select IOMMU_IO_PGTABLE
+	depends on ARM64_DMA_USE_IOMMU || ARM_DMA_USE_IOMMU
 	help
           Enable support for a subset of the ARM long descriptor pagetable
 	  format.  This allocator achieves fast performance by
diff --git a/drivers/iommu/arm-smmu-errata.c b/drivers/iommu/arm-smmu-errata.c
index 2ee2028..e1cb1a4 100644
--- a/drivers/iommu/arm-smmu-errata.c
+++ b/drivers/iommu/arm-smmu-errata.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -41,7 +41,7 @@
 		ret = hyp_assign_phys(page_to_phys(page), PAGE_ALIGN(size),
 				&source_vm, 1,
 				&dest_vm, &dest_perm, 1);
-		if (ret) {
+		if (ret && (ret != -EIO)) {
 			__free_pages(page, get_order(size));
 			page = NULL;
 		}
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 1719336..c3376df 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -579,7 +579,6 @@
 	{ ARM_SMMU_OPT_MMU500_ERRATA1, "qcom,mmu500-errata-1" },
 	{ ARM_SMMU_OPT_STATIC_CB, "qcom,enable-static-cb"},
 	{ ARM_SMMU_OPT_HALT, "qcom,enable-smmu-halt"},
-	{ ARM_SMMU_OPT_HIBERNATION, "qcom,hibernation-support"},
 	{ 0, NULL},
 };
 
@@ -607,6 +606,7 @@
 static bool arm_smmu_is_static_cb(struct arm_smmu_device *smmu);
 static bool arm_smmu_is_master_side_secure(struct arm_smmu_domain *smmu_domain);
 static bool arm_smmu_is_slave_side_secure(struct arm_smmu_domain *smmu_domain);
+static bool arm_smmu_opt_hibernation(struct arm_smmu_device *smmu);
 
 static int msm_secure_smmu_map(struct iommu_domain *domain, unsigned long iova,
 			       phys_addr_t paddr, size_t size, int prot);
@@ -635,6 +635,13 @@
 				arm_smmu_options[i].prop);
 		}
 	} while (arm_smmu_options[++i].opt);
+
+	if (arm_smmu_opt_hibernation(smmu) &&
+	    (smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
+		dev_info(smmu->dev,
+			 "Disabling incompatible option: skip-init\n");
+		smmu->options &= ~ARM_SMMU_OPT_SKIP_INIT;
+	}
 }
 
 static bool is_dynamic_domain(struct iommu_domain *domain)
@@ -707,7 +714,7 @@
 
 static bool arm_smmu_opt_hibernation(struct arm_smmu_device *smmu)
 {
-	return smmu->options & ARM_SMMU_OPT_HIBERNATION;
+	return IS_ENABLED(CONFIG_HIBERNATION);
 }
 
 /*
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 88bbc8c..1612d3a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1612,8 +1612,7 @@
 	 * flush. However, device IOTLB doesn't need to be flushed in this case.
 	 */
 	if (!cap_caching_mode(iommu->cap) || !map)
-		iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
-				      addr, mask);
+		iommu_flush_dev_iotlb(domain, addr, mask);
 }
 
 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 3a1c406..f846f01 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -389,6 +389,7 @@
 				pasid_max - 1, GFP_KERNEL);
 		if (ret < 0) {
 			kfree(svm);
+			kfree(sdev);
 			goto out;
 		}
 		svm->pasid = ret;
diff --git a/drivers/iommu/io-pgtable-msm-secure.c b/drivers/iommu/io-pgtable-msm-secure.c
index d0a8a79..e0314f9 100644
--- a/drivers/iommu/io-pgtable-msm-secure.c
+++ b/drivers/iommu/io-pgtable-msm-secure.c
@@ -47,6 +47,11 @@
 
 int msm_iommu_sec_pgtbl_init(void)
 {
+	struct msm_scm_ptbl_init {
+		unsigned int paddr;
+		unsigned int size;
+		unsigned int spare;
+	} pinit = {0};
 	int psize[2] = {0, 0};
 	unsigned int spare = 0;
 	int ret, ptbl_ret = 0;
@@ -55,7 +60,12 @@
 	dma_addr_t paddr;
 	unsigned long attrs = 0;
 
-	if (is_scm_armv8()) {
+	struct scm_desc desc = {0};
+
+	if (!is_scm_armv8()) {
+		ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_SIZE, &spare,
+				sizeof(spare), psize, sizeof(psize));
+	} else {
 		struct scm_desc desc = {0};
 
 		desc.args[0] = spare;
@@ -64,12 +74,11 @@
 				IOMMU_SECURE_PTBL_SIZE), &desc);
 		psize[0] = desc.ret[0];
 		psize[1] = desc.ret[1];
-		if (ret || psize[1]) {
-			pr_err("scm call IOMMU_SECURE_PTBL_SIZE failed\n");
-			return ret;
-		}
 	}
-
+	if (ret || psize[1]) {
+		pr_err("scm call IOMMU_SECURE_PTBL_SIZE failed\n");
+		goto fail;
+	}
 	/* Now allocate memory for the secure page tables */
 	attrs = DMA_ATTR_NO_KERNEL_MAPPING;
 	dev.coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
@@ -78,34 +87,42 @@
 	if (!cpu_addr) {
 		pr_err("%s: Failed to allocate %d bytes for PTBL\n",
 				__func__, psize[0]);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto fail;
 	}
 
-	if (is_scm_armv8()) {
-		struct scm_desc desc = {0};
+	pinit.paddr = (unsigned int)paddr;
+	/* paddr may be a physical address > 4GB */
+	desc.args[0] = paddr;
+	desc.args[1] = pinit.size = psize[0];
+	desc.args[2] = pinit.spare;
+	desc.arginfo = SCM_ARGS(3, SCM_RW, SCM_VAL, SCM_VAL);
 
-		desc.args[0] = paddr;
-		desc.args[1] = psize[0];
-		desc.args[2] = 0;
-		desc.arginfo = SCM_ARGS(3, SCM_RW, SCM_VAL, SCM_VAL);
-
+	if (!is_scm_armv8()) {
+		ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_INIT, &pinit,
+				sizeof(pinit), &ptbl_ret, sizeof(ptbl_ret));
+	} else {
 		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
 				IOMMU_SECURE_PTBL_INIT), &desc);
 		ptbl_ret = desc.ret[0];
-
-		if (ret) {
-			pr_err("scm call IOMMU_SECURE_PTBL_INIT failed\n");
-			return ret;
-		}
-
-		if (ptbl_ret) {
-			pr_err("scm call IOMMU_SECURE_PTBL_INIT extended ret fail\n");
-			return ret;
-		}
+	}
+	if (ret) {
+		pr_err("scm call IOMMU_SECURE_PTBL_INIT failed\n");
+		goto fail_mem;
+	}
+	if (ptbl_ret) {
+		pr_err("scm call IOMMU_SECURE_PTBL_INIT extended ret fail\n");
+		goto fail_mem;
 	}
 
 	return 0;
+
+fail_mem:
+	dma_free_attrs(&dev, psize[0], cpu_addr, paddr, attrs);
+fail:
+	return ret;
 }
+
 EXPORT_SYMBOL(msm_iommu_sec_pgtbl_init);
 
 static int msm_secure_map(struct io_pgtable_ops *ops, unsigned long iova,
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
index 9ae7180..1c2ca8d 100644
--- a/drivers/irqchip/irq-gic-common.c
+++ b/drivers/irqchip/irq-gic-common.c
@@ -21,6 +21,8 @@
 
 #include "irq-gic-common.h"
 
+static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+
 static const struct gic_kvm_info *gic_kvm_info;
 
 const struct gic_kvm_info *gic_get_kvm_info(void)
@@ -52,11 +54,13 @@
 	u32 confoff = (irq / 16) * 4;
 	u32 val, oldval;
 	int ret = 0;
+	unsigned long flags;
 
 	/*
 	 * Read current configuration register, and insert the config
 	 * for "irq", depending on "type".
 	 */
+	raw_spin_lock_irqsave(&irq_controller_lock, flags);
 	val = oldval = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
 	if (type & IRQ_TYPE_LEVEL_MASK)
 		val &= ~confmask;
@@ -64,8 +68,10 @@
 		val |= confmask;
 
 	/* If the current configuration is the same, then we are done */
-	if (val == oldval)
+	if (val == oldval) {
+		raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
 		return 0;
+	}
 
 	/*
 	 * Write back the new configuration, and possibly re-enable
@@ -83,6 +89,7 @@
 			pr_warn("GIC: PPI%d is secure or misconfigured\n",
 				irq - 16);
 	}
+	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
 
 	if (sync_access)
 		sync_access();
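Editorial aside, not part of the patch: the gic_configure_irq() change above serializes the read-modify-write of GIC_DIST_CONFIG behind a raw spinlock so concurrent callers cannot clobber each other's 2-bit trigger fields. The locking shape, as a minimal sketch with made-up register and lock names:

#include <linux/io.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);

/* Illustrative sketch only: protect a read-modify-write of a shared MMIO
 * register, as done for GIC_DIST_CONFIG above.
 */
static void example_rmw(void __iomem *reg, u32 clear, u32 set)
{
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&example_lock, flags);
	val = readl_relaxed(reg);
	val &= ~clear;
	val |= set;
	writel_relaxed(val, reg);
	raw_spin_unlock_irqrestore(&example_lock, flags);
}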
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
index aee1c60..cc58b1b 100644
--- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
@@ -133,6 +133,8 @@
 
 	for (np = of_find_matching_node(NULL, its_device_id); np;
 	     np = of_find_matching_node(np, its_device_id)) {
+		if (!of_device_is_available(np))
+			continue;
 		if (!of_property_read_bool(np, "msi-controller"))
 			continue;
 
diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
index 470b4aa..e4768fc 100644
--- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
@@ -80,6 +80,8 @@
 
 	for (np = of_find_matching_node(NULL, its_device_id); np;
 	     np = of_find_matching_node(np, its_device_id)) {
+		if (!of_device_is_available(np))
+			continue;
 		if (!of_property_read_bool(np, "msi-controller"))
 			continue;
 
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index ac15e5d..558c758 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1807,6 +1807,8 @@
 
 	for (np = of_find_matching_node(node, its_device_id); np;
 	     np = of_find_matching_node(np, its_device_id)) {
+		if (!of_device_is_available(np))
+			continue;
 		if (!of_property_read_bool(np, "msi-controller")) {
 			pr_warn("%s: no msi-controller property, ITS ignored\n",
 				np->full_name);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 307d545..cb839bd 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -44,6 +44,34 @@
 
 #include "irq-gic-common.h"
 
+#define MAX_IRQ			1020U	/* Max number of SGI+PPI+SPI */
+#define SPI_START_IRQ		32	/* SPI start irq number */
+#define GICD_ICFGR_BITS		2	/* 2 bits per irq in GICD_ICFGR */
+#define GICD_ISENABLER_BITS	1	/* 1 bit per irq in GICD_ISENABLER */
+#define GICD_IPRIORITYR_BITS	8	/* 8 bits per irq in GICD_IPRIORITYR */
+
+/* 32 bit mask with lower n bits set */
+#define UMASK_LOW(n)		(~0U >> (32 - (n)))
+
+/* Number of 32-bit words required to store all irqs, for
+ * registers where each word stores configuration for each irq
+ * in bits_per_irq bits.
+ */
+#define NUM_IRQ_WORDS(bits_per_irq)	(DIV_ROUND_UP(MAX_IRQ, \
+						      32 / (bits_per_irq)))
+#define MAX_IRQS_IGNORE		10
+
+#define IRQ_NR_BOUND(nr)	min((nr), MAX_IRQ)
+
+/* Bitmap to irqs, which are restored */
+static DECLARE_BITMAP(irqs_restore, MAX_IRQ);
+
+/* Bitmap to irqs, for which restore is ignored.
+ * Presently, only GICD_IROUTER mismatches are
+ * ignored.
+ */
+static DECLARE_BITMAP(irqs_ignore_restore, MAX_IRQ);
+
 struct redist_region {
 	void __iomem		*redist_base;
 	phys_addr_t		phys_base;
@@ -60,6 +88,16 @@
 	u32			nr_redist_regions;
 	unsigned int		irq_nr;
 	struct partition_desc	*ppi_descs[16];
+
+	u64 saved_spi_router[MAX_IRQ];
+	u32 saved_spi_enable[NUM_IRQ_WORDS(GICD_ISENABLER_BITS)];
+	u32 saved_spi_cfg[NUM_IRQ_WORDS(GICD_ICFGR_BITS)];
+	u32 saved_spi_priority[NUM_IRQ_WORDS(GICD_IPRIORITYR_BITS)];
+
+	u64 changed_spi_router[MAX_IRQ];
+	u32 changed_spi_enable[NUM_IRQ_WORDS(GICD_ISENABLER_BITS)];
+	u32 changed_spi_cfg[NUM_IRQ_WORDS(GICD_ICFGR_BITS)];
+	u32 changed_spi_priority[NUM_IRQ_WORDS(GICD_IPRIORITYR_BITS)];
 };
 
 static struct gic_chip_data gic_data __read_mostly;
@@ -67,6 +105,58 @@
 
 static struct gic_kvm_info gic_v3_kvm_info;
 
+enum gicd_save_restore_reg {
+	SAVED_ICFGR,
+	SAVED_IS_ENABLER,
+	SAVED_IPRIORITYR,
+	NUM_SAVED_GICD_REGS,
+};
+
+/* Stores start address of spi config for saved gicd regs */
+static u32 *saved_spi_regs_start[NUM_SAVED_GICD_REGS] = {
+	[SAVED_ICFGR] = gic_data.saved_spi_cfg,
+	[SAVED_IS_ENABLER] = gic_data.saved_spi_enable,
+	[SAVED_IPRIORITYR] = gic_data.saved_spi_priority,
+};
+
+/* Stores start address of spi config for changed gicd regs */
+static u32 *changed_spi_regs_start[NUM_SAVED_GICD_REGS] = {
+	[SAVED_ICFGR] = gic_data.changed_spi_cfg,
+	[SAVED_IS_ENABLER] = gic_data.changed_spi_enable,
+	[SAVED_IPRIORITYR] = gic_data.changed_spi_priority,
+};
+
+/* GICD offset for saved registers */
+static u32 gicd_offset[NUM_SAVED_GICD_REGS] = {
+	[SAVED_ICFGR] = GICD_ICFGR,
+	[SAVED_IS_ENABLER] = GICD_ISENABLER,
+	[SAVED_IPRIORITYR] = GICD_IPRIORITYR,
+};
+
+/* Bits per irq word, for gicd saved registers */
+static u32 gicd_reg_bits_per_irq[NUM_SAVED_GICD_REGS] = {
+	[SAVED_ICFGR] = GICD_ICFGR_BITS,
+	[SAVED_IS_ENABLER] = GICD_ISENABLER_BITS,
+	[SAVED_IPRIORITYR] = GICD_IPRIORITYR_BITS,
+};
+
+#define for_each_spi_irq_word(i, reg) \
+	for (i = 0; \
+	    i < DIV_ROUND_UP(IRQ_NR_BOUND(gic_data.irq_nr) - SPI_START_IRQ, \
+			     32 / gicd_reg_bits_per_irq[reg]); \
+	    i++)
+
+#define read_spi_word_offset(base, reg, i) \
+	readl_relaxed_no_log(	\
+			base + gicd_offset[reg] + i * 4 +	\
+			SPI_START_IRQ * gicd_reg_bits_per_irq[reg] / 8)
+
+#define restore_spi_word_offset(base, reg, i) \
+	writel_relaxed_no_log(	\
+			saved_spi_regs_start[reg][i],\
+			base + gicd_offset[reg] + i * 4 +	\
+			SPI_START_IRQ * gicd_reg_bits_per_irq[reg] / 8)
+
 #define gic_data_rdist()		(this_cpu_ptr(gic_data.rdists.rdist))
 #define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
 #define gic_data_rdist_sgi_base()	(gic_data_rdist_rd_base() + SZ_64K)
@@ -134,6 +224,229 @@
 }
 #endif
 
+void gic_v3_dist_save(void)
+{
+	void __iomem *base = gic_data.dist_base;
+	int reg, i;
+
+	for (reg = SAVED_ICFGR; reg < NUM_SAVED_GICD_REGS; reg++) {
+		for_each_spi_irq_word(i, reg) {
+			saved_spi_regs_start[reg][i] =
+				read_spi_word_offset(base, reg, i);
+		}
+	}
+
+	for (i = 32; i < IRQ_NR_BOUND(gic_data.irq_nr); i++)
+		gic_data.saved_spi_router[i] =
+			gic_read_irouter(base + GICD_IROUTER + i * 8);
+}
+
+static void _gicd_check_reg(enum gicd_save_restore_reg reg)
+{
+	void __iomem *base = gic_data.dist_base;
+	u32 *saved_spi_cfg = saved_spi_regs_start[reg];
+	u32 *changed_spi_cfg = changed_spi_regs_start[reg];
+	u32 bits_per_irq = gicd_reg_bits_per_irq[reg];
+	u32 current_cfg = 0;
+	int i, j = SPI_START_IRQ, l;
+	u32 k;
+
+	for_each_spi_irq_word(i, reg) {
+		current_cfg = read_spi_word_offset(base, reg, i);
+		if (current_cfg != saved_spi_cfg[i]) {
+			for (k = current_cfg ^ saved_spi_cfg[i],
+			     l = 0; k ; k >>= bits_per_irq, l++) {
+				if (k & UMASK_LOW(bits_per_irq))
+					set_bit(j+l, irqs_restore);
+			}
+			changed_spi_cfg[i] = current_cfg ^ saved_spi_cfg[i];
+		}
+		j += 32 / bits_per_irq;
+	}
+}
+
+#define _gic_v3_dist_check_icfgr()	\
+		_gicd_check_reg(SAVED_ICFGR)
+#define _gic_v3_dist_check_ipriorityr()	\
+		_gicd_check_reg(SAVED_IPRIORITYR)
+#define _gic_v3_dist_check_isenabler()	\
+		_gicd_check_reg(SAVED_IS_ENABLER)
+
+static void _gic_v3_dist_check_irouter(void)
+{
+	void __iomem *base = gic_data.dist_base;
+	u64 current_irouter_cfg = 0;
+	int i;
+
+	for (i = 32; i < IRQ_NR_BOUND(gic_data.irq_nr); i++) {
+		if (test_bit(i, irqs_ignore_restore))
+			continue;
+		current_irouter_cfg = gic_read_irouter(
+					base + GICD_IROUTER + i * 8);
+		if (current_irouter_cfg != gic_data.saved_spi_router[i]) {
+			set_bit(i, irqs_restore);
+			gic_data.changed_spi_router[i] =
+			    current_irouter_cfg ^ gic_data.saved_spi_router[i];
+		}
+	}
+}
+
+static void _gic_v3_dist_restore_reg(enum gicd_save_restore_reg reg)
+{
+	void __iomem *base = gic_data.dist_base;
+	int i;
+
+	for_each_spi_irq_word(i, reg) {
+		if (changed_spi_regs_start[reg][i])
+			restore_spi_word_offset(base, reg, i);
+	}
+
+	/* Commit all restored configurations before subsequent writes */
+	wmb();
+}
+
+#define _gic_v3_dist_restore_icfgr()	_gic_v3_dist_restore_reg(SAVED_ICFGR)
+#define _gic_v3_dist_restore_ipriorityr()		\
+		_gic_v3_dist_restore_reg(SAVED_IPRIORITYR)
+
+static void _gic_v3_dist_restore_set_reg(u32 offset)
+{
+	void __iomem *base = gic_data.dist_base;
+	int i, j = SPI_START_IRQ, l;
+	int irq_nr = IRQ_NR_BOUND(gic_data.irq_nr) - SPI_START_IRQ;
+
+	for (i = 0; i < DIV_ROUND_UP(irq_nr, 32); i++, j += 32) {
+		u32 reg_val = readl_relaxed_no_log(base + offset + i * 4 + 4);
+		bool irqs_restore_updated = 0;
+
+		for (l = 0; l < 32; l++) {
+			if (test_bit(j+l, irqs_restore)) {
+				reg_val |= BIT(l);
+				irqs_restore_updated = 1;
+			}
+		}
+
+		if (irqs_restore_updated) {
+			writel_relaxed_no_log(
+				reg_val, base + offset + i * 4 + 4);
+		}
+	}
+
+	/* Commit restored configuration updates before subsequent writes */
+	wmb();
+}
+
+#define _gic_v3_dist_restore_isenabler()		\
+		_gic_v3_dist_restore_set_reg(GICD_ISENABLER)
+
+#define _gic_v3_dist_restore_ispending()		\
+		_gic_v3_dist_restore_set_reg(GICD_ISPENDR)
+
+static void _gic_v3_dist_restore_irouter(void)
+{
+	void __iomem *base = gic_data.dist_base;
+	int i;
+
+	for (i = 32; i < IRQ_NR_BOUND(gic_data.irq_nr); i++) {
+		if (test_bit(i, irqs_ignore_restore))
+			continue;
+		if (gic_data.changed_spi_router[i]) {
+			gic_write_irouter(gic_data.saved_spi_router[i],
+						base + GICD_IROUTER + i * 8);
+		}
+	}
+
+	/* Commit GICD_IROUTER writes before subsequent writes */
+	wmb();
+}
+
+static void _gic_v3_dist_clear_reg(u32 offset)
+{
+	void __iomem *base = gic_data.dist_base;
+	int i, j = SPI_START_IRQ, l;
+	int irq_nr = IRQ_NR_BOUND(gic_data.irq_nr) - SPI_START_IRQ;
+
+	for (i = 0; i < DIV_ROUND_UP(irq_nr, 32); i++, j += 32) {
+		u32 clear = 0;
+		bool irqs_restore_updated = 0;
+
+		for (l = 0; l < 32; l++) {
+			if (test_bit(j+l, irqs_restore)) {
+				clear |= BIT(l);
+				irqs_restore_updated = 1;
+			}
+		}
+
+		if (irqs_restore_updated) {
+			writel_relaxed_no_log(
+				clear, base + offset + i * 4 + 4);
+		}
+	}
+
+	/* Commit clearing of irq config before subsequent writes */
+	wmb();
+}
+
+#define _gic_v3_dist_set_icenabler()		\
+		_gic_v3_dist_clear_reg(GICD_ICENABLER)
+
+#define _gic_v3_dist_set_icpending()		\
+		_gic_v3_dist_clear_reg(GICD_ICPENDR)
+
+#define _gic_v3_dist_set_icactive()		\
+		_gic_v3_dist_clear_reg(GICD_ICACTIVER)
+
+/* Restore GICD state for SPIs. SPI configuration is restored
+ * for GICD_ICFGR, GICD_ISENABLER, GICD_IPRIORITYR, GICD_IROUTER
+ * registers. Following is the sequence for restore:
+ *
+ * 1. For SPIs, check whether any of GICD_ICFGR, GICD_ISENABLER,
+ *    GICD_IPRIORITYR, GICD_IROUTER, current configuration is
+ *    different from saved configuration.
+ *
+ * For all irqs, with mismatched configurations,
+ *
+ * 2. Set GICD_ICENABLER and wait for its completion.
+ *
+ * 3. Restore any changed GICD_ICFGR, GICD_IPRIORITYR, GICD_IROUTER
+ *    configurations.
+ *
+ * 4. Set GICD_ICACTIVER.
+ *
+ * 5. Set pending for the interrupt.
+ *
+ * 6. Enable interrupt and wait for its completion.
+ *
+ */
+void gic_v3_dist_restore(void)
+{
+	_gic_v3_dist_check_icfgr();
+	_gic_v3_dist_check_ipriorityr();
+	_gic_v3_dist_check_isenabler();
+	_gic_v3_dist_check_irouter();
+
+	if (bitmap_empty(irqs_restore, IRQ_NR_BOUND(gic_data.irq_nr)))
+		return;
+
+	_gic_v3_dist_set_icenabler();
+	gic_dist_wait_for_rwp();
+
+	_gic_v3_dist_restore_icfgr();
+	_gic_v3_dist_restore_ipriorityr();
+	_gic_v3_dist_restore_irouter();
+
+	_gic_v3_dist_set_icactive();
+
+	_gic_v3_dist_set_icpending();
+	_gic_v3_dist_restore_ispending();
+
+	_gic_v3_dist_restore_isenabler();
+	gic_dist_wait_for_rwp();
+
+	/* Commit all writes before proceeding */
+	wmb();
+}
+
 /*
  * gic_show_pending_irq - Shows the pending interrupts
  * Note: Interrupts should be disabled on the cpu from which
@@ -706,7 +1019,7 @@
 	       MPIDR_TO_SGI_AFFINITY(cluster_id, 1)	|
 	       tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
 
-	pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
+	pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
 	gic_write_sgi1r(val);
 }
 
@@ -1244,7 +1557,8 @@
 	struct redist_region *rdist_regs;
 	u64 redist_stride;
 	u32 nr_redist_regions;
-	int err, i;
+	int err, i, ignore_irqs_len;
+	u32 ignore_restore_irqs[MAX_IRQS_IGNORE] = {0};
 
 	dist_base = of_iomap(node, 0);
 	if (!dist_base) {
@@ -1294,6 +1608,14 @@
 
 	gic_populate_ppi_partitions(node);
 	gic_of_setup_kvm_info(node);
+
+	ignore_irqs_len = of_property_read_variable_u32_array(node,
+				"ignored-save-restore-irqs",
+				ignore_restore_irqs,
+				0, MAX_IRQS_IGNORE);
+	for (i = 0; i < ignore_irqs_len; i++)
+		set_bit(ignore_restore_irqs[i], irqs_ignore_restore);
+
 	return 0;
 
 out_unmap_rdist:
@@ -1359,6 +1681,10 @@
 	u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
 	void __iomem *redist_base;
 
+	/* A GICC entry with !ACPI_MADT_ENABLED is not usable, so skip it */
+	if (!(gicc->flags & ACPI_MADT_ENABLED))
+		return 0;
+
 	redist_base = ioremap(gicc->gicr_base_address, size);
 	if (!redist_base)
 		return -ENOMEM;
@@ -1408,6 +1734,13 @@
 	if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
 		return 0;
 
+	/*
+	 * Firmware may legitimately pass a disabled GICC entry; do not treat
+	 * this as an error, just skip the entry instead of failing the probe.
+	 */
+	if (!(gicc->flags & ACPI_MADT_ENABLED))
+		return 0;
+
 	return -ENODEV;
 }
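Editorial aside, not part of the patch: the distributor save/restore code above packs several interrupts into each 32-bit register word (2 bits per IRQ for GICD_ICFGR, 1 for GICD_ISENABLER, 8 for GICD_IPRIORITYR) and compares saved words against current words to build the irqs_restore bitmap. A minimal sketch of the word/mask arithmetic that read_spi_word_offset() and _gicd_check_reg() rely on, with illustrative helper names:

#include <linux/types.h>

/* Illustrative sketch only: given an IRQ number and the number of
 * configuration bits per IRQ, compute the 32-bit word index and the field
 * mask for that IRQ.
 */
static inline unsigned int example_irq_word(unsigned int irq,
					    unsigned int bits_per_irq)
{
	return irq / (32 / bits_per_irq);
}

static inline u32 example_irq_mask(unsigned int irq, unsigned int bits_per_irq)
{
	unsigned int shift = (irq % (32 / bits_per_irq)) * bits_per_irq;

	return (~0U >> (32 - bits_per_irq)) << shift;
}

For example, with 2 bits per IRQ (GICD_ICFGR), IRQ 45 lands in word 2 at bit offset 26.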
 
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 03b79b0..05d87f6 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -105,10 +105,7 @@
 static inline void get_mbigen_clear_reg(irq_hw_number_t hwirq,
 					u32 *mask, u32 *addr)
 {
-	unsigned int ofst;
-
-	hwirq -= RESERVED_IRQ_PER_MBIGEN_CHIP;
-	ofst = hwirq / 32 * 4;
+	unsigned int ofst = (hwirq / 32) * 4;
 
 	*mask = 1 << (hwirq % 32);
 	*addr = ofst + REG_MBIGEN_CLEAR_OFFSET;
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index 9cb4b62..b92a19a 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -72,7 +72,7 @@
 		if (sk->sk_state != MISDN_BOUND)
 			continue;
 		if (!cskb)
-			cskb = skb_copy(skb, GFP_KERNEL);
+			cskb = skb_copy(skb, GFP_ATOMIC);
 		if (!cskb) {
 			printk(KERN_WARNING "%s no skb\n", __func__);
 			break;
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 8cb3265..2522a3d 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -56,6 +56,7 @@
 	if (state == LED_OFF && !(led_cdev->flags & LED_KEEP_TRIGGER))
 		led_trigger_remove(led_cdev);
 	led_set_brightness(led_cdev, state);
+	led_cdev->usr_brightness_req = state;
 
 	ret = size;
 unlock:
@@ -71,7 +72,24 @@
 
 	return sprintf(buf, "%u\n", led_cdev->max_brightness);
 }
-static DEVICE_ATTR_RO(max_brightness);
+
+static ssize_t max_brightness_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	unsigned long state;
+	ssize_t ret = -EINVAL;
+
+	ret = kstrtoul(buf, 10, &state);
+	if (ret)
+		return ret;
+
+	led_cdev->max_brightness = state;
+	led_set_brightness(led_cdev, led_cdev->usr_brightness_req);
+
+	return size;
+}
+static DEVICE_ATTR_RW(max_brightness);
 
 #ifdef CONFIG_LEDS_TRIGGERS
 static DEVICE_ATTR(trigger, 0644, led_trigger_show, led_trigger_store);
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index 840401a..f672648 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -266,7 +266,7 @@
 			"slave address 0x%02x\n",
 			id->name, chip->bits, client->addr);
 
-	if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
+	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
 		return -EIO;
 
 	if (pdata) {
diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c
index 759d853..d773ec5 100644
--- a/drivers/leds/leds-qpnp-flash-v2.c
+++ b/drivers/leds/leds-qpnp-flash-v2.c
@@ -123,6 +123,7 @@
 
 #define	FLASH_LED_REG_MULTI_STROBE_CTRL(base)	(base + 0x71)
 #define	LED3_FLASH_ONCE_ONLY_BIT		BIT(1)
+#define LED1N2_FLASH_ONCE_ONLY_BIT		BIT(0)
 
 #define	FLASH_LED_REG_LPG_INPUT_CTRL(base)	(base + 0x72)
 #define	LPG_INPUT_SEL_BIT			BIT(0)
@@ -425,7 +426,7 @@
 static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led)
 {
 	int rc, i, addr_offset;
-	u8 val = 0, mask;
+	u8 val = 0, mask, strobe_mask = 0, strobe_ctrl;
 
 	for (i = 0; i < led->num_fnodes; i++) {
 		addr_offset = led->fnode[i].id;
@@ -436,6 +437,51 @@
 			return rc;
 
 		val |= 0x1 << led->fnode[i].id;
+
+		if (led->fnode[i].strobe_sel == HW_STROBE) {
+			if (led->fnode[i].id == LED3)
+				strobe_mask |= LED3_FLASH_ONCE_ONLY_BIT;
+			else
+				strobe_mask |= LED1N2_FLASH_ONCE_ONLY_BIT;
+		}
+
+		if (led->fnode[i].id == LED3 &&
+				led->fnode[i].strobe_sel == LPG_STROBE)
+			strobe_mask |= LED3_FLASH_ONCE_ONLY_BIT;
+		/*
+		 * As per the hardware recommendation, to use LED2/LED3 in HW
+		 * strobe mode, LED1 should be set to HW strobe mode as well.
+		 */
+		if (led->fnode[i].strobe_sel == HW_STROBE &&
+		      (led->fnode[i].id == LED2 || led->fnode[i].id == LED3)) {
+			mask = FLASH_HW_STROBE_MASK;
+			addr_offset = led->fnode[LED1].id;
+			/*
+			 * HW_STROBE: enable, TRIGGER: level,
+			 * POLARITY: active high
+			 */
+			strobe_ctrl = BIT(2) | BIT(0);
+			rc = qpnp_flash_led_masked_write(led,
+				FLASH_LED_REG_STROBE_CTRL(
+				led->base + addr_offset),
+				mask, strobe_ctrl);
+			if (rc < 0)
+				return rc;
+		}
+	}
+
+	rc = qpnp_flash_led_masked_write(led,
+		FLASH_LED_REG_MULTI_STROBE_CTRL(led->base),
+		strobe_mask, 0);
+	if (rc < 0)
+		return rc;
+
+	if (led->fnode[LED3].strobe_sel == LPG_STROBE) {
+		rc = qpnp_flash_led_masked_write(led,
+			FLASH_LED_REG_LPG_INPUT_CTRL(led->base),
+			LPG_INPUT_SEL_BIT, LPG_INPUT_SEL_BIT);
+		if (rc < 0)
+			return rc;
 	}
 
 	rc = qpnp_flash_led_write(led,
@@ -629,19 +675,6 @@
 			return rc;
 	}
 
-	if (led->fnode[LED3].strobe_sel == LPG_STROBE) {
-		rc = qpnp_flash_led_masked_write(led,
-			FLASH_LED_REG_MULTI_STROBE_CTRL(led->base),
-			LED3_FLASH_ONCE_ONLY_BIT, 0);
-		if (rc < 0)
-			return rc;
-
-		rc = qpnp_flash_led_masked_write(led,
-			FLASH_LED_REG_LPG_INPUT_CTRL(led->base),
-			LPG_INPUT_SEL_BIT, LPG_INPUT_SEL_BIT);
-		if (rc < 0)
-			return rc;
-	}
 	return 0;
 }
 
diff --git a/drivers/leds/leds-qpnp-haptics.c b/drivers/leds/leds-qpnp-haptics.c
index 764657a..8a850c5 100644
--- a/drivers/leds/leds-qpnp-haptics.c
+++ b/drivers/leds/leds-qpnp-haptics.c
@@ -24,6 +24,7 @@
 #include <linux/platform_device.h>
 #include <linux/pwm.h>
 #include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
 #include <linux/slab.h>
 #include <linux/qpnp/qpnp-misc.h>
 #include <linux/qpnp/qpnp-revid.h>
@@ -321,6 +322,7 @@
 	int				sc_irq;
 	struct pwm_param		pwm_data;
 	struct hap_lra_ares_param	ares_cfg;
+	struct regulator		*vcc_pon;
 	u32				play_time_ms;
 	u32				max_play_time_ms;
 	u32				vmax_mv;
@@ -355,6 +357,7 @@
 	bool				lra_auto_mode;
 	bool				play_irq_en;
 	bool				auto_res_err_recovery_hw;
+	bool				vcc_pon_enabled;
 };
 
 static int qpnp_haptics_parse_buffer_dt(struct hap_chip *chip);
@@ -801,10 +804,29 @@
 
 	enable = atomic_read(&chip->state);
 	pr_debug("state: %d\n", enable);
+
+	if (chip->vcc_pon && enable && !chip->vcc_pon_enabled) {
+		rc = regulator_enable(chip->vcc_pon);
+		if (rc < 0)
+			pr_err("%s: could not enable vcc_pon regulator rc=%d\n",
+				 __func__, rc);
+		else
+			chip->vcc_pon_enabled = true;
+	}
+
 	rc = qpnp_haptics_play(chip, enable);
 	if (rc < 0)
 		pr_err("Error in %sing haptics, rc=%d\n",
 			enable ? "play" : "stopp", rc);
+
+	if (chip->vcc_pon && !enable && chip->vcc_pon_enabled) {
+		rc = regulator_disable(chip->vcc_pon);
+		if (rc)
+			pr_err("%s: could not disable vcc_pon regulator rc=%d\n",
+				 __func__, rc);
+		else
+			chip->vcc_pon_enabled = false;
+	}
 }
 
 static enum hrtimer_restart hap_stop_timer(struct hrtimer *timer)
@@ -2054,6 +2076,7 @@
 	struct device_node *revid_node, *misc_node;
 	const char *temp_str;
 	int rc, temp;
+	struct regulator *vcc_pon;
 
 	rc = of_property_read_u32(node, "reg", &temp);
 	if (rc < 0) {
@@ -2381,6 +2404,16 @@
 	else if (chip->play_mode == HAP_PWM)
 		rc = qpnp_haptics_parse_pwm_dt(chip);
 
+	if (of_find_property(node, "vcc_pon-supply", NULL)) {
+		vcc_pon = regulator_get(&chip->pdev->dev, "vcc_pon");
+		if (IS_ERR(vcc_pon)) {
+			rc = PTR_ERR(vcc_pon);
+			dev_err(&chip->pdev->dev,
+				"regulator get failed vcc_pon rc=%d\n", rc);
+		}
+		chip->vcc_pon = vcc_pon;
+	}
+
 	return rc;
 }
 
diff --git a/drivers/leds/leds-qpnp-vibrator-ldo.c b/drivers/leds/leds-qpnp-vibrator-ldo.c
index 6a14324..dd19dd1 100644
--- a/drivers/leds/leds-qpnp-vibrator-ldo.c
+++ b/drivers/leds/leds-qpnp-vibrator-ldo.c
@@ -65,9 +65,29 @@
 	bool			disable_overdrive;
 };
 
-static int qpnp_vib_ldo_set_voltage(struct vib_ldo_chip *chip, int new_uV)
+static inline int qpnp_vib_ldo_poll_status(struct vib_ldo_chip *chip)
 {
 	unsigned int val;
+	int ret;
+
+	ret = regmap_read_poll_timeout(chip->regmap,
+			chip->base + QPNP_VIB_LDO_REG_STATUS1, val,
+			val & QPNP_VIB_LDO_VREG_READY, 100, 1000);
+	if (ret < 0) {
+		pr_err("Vibrator LDO vreg_ready timeout, status=0x%02x, ret=%d\n",
+			val, ret);
+
+		/* Keep VIB_LDO disabled */
+		regmap_update_bits(chip->regmap,
+			chip->base + QPNP_VIB_LDO_REG_EN_CTL,
+			QPNP_VIB_LDO_EN, 0);
+	}
+
+	return ret;
+}
+
+static int qpnp_vib_ldo_set_voltage(struct vib_ldo_chip *chip, int new_uV)
+{
 	u32 vlevel;
 	u8 reg[2];
 	int ret;
@@ -86,13 +106,9 @@
 	}
 
 	if (chip->vib_enabled) {
-		ret = regmap_read_poll_timeout(chip->regmap,
-					chip->base + QPNP_VIB_LDO_REG_STATUS1,
-					val, val & QPNP_VIB_LDO_VREG_READY,
-					100, 1000);
+		ret = qpnp_vib_ldo_poll_status(chip);
 		if (ret < 0) {
-			pr_err("Vibrator LDO vreg_ready timeout, status=0x%02x, ret=%d\n",
-				val, ret);
+			pr_err("Vibrator LDO status polling timedout\n");
 			return ret;
 		}
 	}
@@ -103,7 +119,6 @@
 
 static inline int qpnp_vib_ldo_enable(struct vib_ldo_chip *chip, bool enable)
 {
-	unsigned int val;
 	int ret;
 
 	if (chip->vib_enabled == enable)
@@ -120,13 +135,9 @@
 	}
 
 	if (enable) {
-		ret = regmap_read_poll_timeout(chip->regmap,
-					chip->base + QPNP_VIB_LDO_REG_STATUS1,
-					val, val & QPNP_VIB_LDO_VREG_READY,
-					100, 1000);
+		ret = qpnp_vib_ldo_poll_status(chip);
 		if (ret < 0) {
-			pr_err("Vibrator LDO vreg_ready timeout, status=0x%02x, ret=%d\n",
-				val, ret);
+			pr_err("Vibrator LDO status polling timedout\n");
 			return ret;
 		}
 	}
@@ -430,6 +441,7 @@
 	}
 	hrtimer_cancel(&chip->stop_timer);
 	cancel_work_sync(&chip->vib_work);
+	qpnp_vib_ldo_enable(chip, false);
 	mutex_unlock(&chip->lock);
 
 	return 0;
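Editorial aside, not part of the patch: the refactor above moves the duplicated regmap_read_poll_timeout() call into qpnp_vib_ldo_poll_status(). A minimal sketch of that polling-helper pattern; the register offset and bit name below are made up:

#include <linux/bitops.h>
#include <linux/regmap.h>

#define EXAMPLE_STATUS_REG	0x08
#define EXAMPLE_READY_BIT	BIT(7)

/* Illustrative sketch only: poll a status register until a ready bit is
 * set, the pattern factored into qpnp_vib_ldo_poll_status() above.
 */
static int example_poll_ready(struct regmap *regmap, u32 base)
{
	unsigned int val;

	/* sleep 100us between reads, give up after 1000us total */
	return regmap_read_poll_timeout(regmap, base + EXAMPLE_STATUS_REG,
					val, val & EXAMPLE_READY_BIT,
					100, 1000);
}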
diff --git a/drivers/leds/leds-qpnp-wled.c b/drivers/leds/leds-qpnp-wled.c
index d2e576d..861d987 100644
--- a/drivers/leds/leds-qpnp-wled.c
+++ b/drivers/leds/leds-qpnp-wled.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -2215,12 +2215,16 @@
 			return rc;
 
 		if (wled->en_ext_pfet_sc_pro) {
-			reg = QPNP_WLED_EXT_FET_DTEST2;
-			rc = qpnp_wled_sec_write_reg(wled,
+			if (!(wled->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE
+				&& wled->pmic_rev_id->rev4 ==
+					PMI8998_V2P0_REV4)) {
+				reg = QPNP_WLED_EXT_FET_DTEST2;
+				rc = qpnp_wled_sec_write_reg(wled,
 					QPNP_WLED_TEST1_REG(wled->ctrl_base),
 					reg);
-			if (rc)
-				return rc;
+				if (rc)
+					return rc;
+			}
 		}
 	} else {
 		rc = qpnp_wled_read_reg(wled,
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 7755271..25852e3 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -154,8 +154,8 @@
 		DBDMA_DO_STOP(rm->dma_regs);
 		return;
 	}
-	memset(rdma->buf1, 0, ARRAY_SIZE(rdma->buf1));
-	memset(rdma->buf2, 0, ARRAY_SIZE(rdma->buf2));
+	memset(rdma->buf1, 0, sizeof(rdma->buf1));
+	memset(rdma->buf2, 0, sizeof(rdma->buf2));
 
 	rm->dma_buf_v->mark = 0;
 
diff --git a/drivers/mailbox/qcom-rpmh-mailbox.c b/drivers/mailbox/qcom-rpmh-mailbox.c
index 00ebed9..9f699db 100644
--- a/drivers/mailbox/qcom-rpmh-mailbox.c
+++ b/drivers/mailbox/qcom-rpmh-mailbox.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -129,6 +129,7 @@
 	unsigned long addr;
 	void __iomem *base; /* start address of the RSC's registers */
 	void __iomem *reg_base; /* start address for DRV specific register */
+	int irq;
 	int drv_id;
 	struct platform_device *pdev;
 	struct tcs_mbox tcs[TCS_TYPE_NR];
@@ -694,9 +695,10 @@
 		}
 		/* sanity check to ensure the seq is same */
 		for (j = 1; j < len; j++) {
-			WARN((tcs->cmd_addr[i + j] != cmd[j].addr),
-				"Message does not match previous sequence.\n");
+			if (tcs->cmd_addr[i + j] != cmd[j].addr) {
+				pr_debug("Message does not match previous sequence.\n");
 				return -EINVAL;
+			}
 		}
 		found = true;
 		break;
@@ -724,12 +726,12 @@
 	do {
 		slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
 						n, msg->num_payload, 0);
-		if (slot == MAX_TCS_SLOTS)
+		if (slot >= MAX_TCS_SLOTS)
 			break;
 		n += tcs->ncpt;
 	} while (slot + msg->num_payload - 1 >= n);
 
-	return (slot != MAX_TCS_SLOTS) ? slot : -ENOMEM;
+	return (slot < MAX_TCS_SLOTS) ? slot : -ENOMEM;
 }
 
 static int tcs_mbox_write(struct mbox_chan *chan, struct tcs_mbox_msg *msg,
@@ -878,6 +880,8 @@
 {
 	int i;
 	unsigned long long curr = arch_counter_get_cntvct();
+	struct irq_data *rsc_irq_data = irq_get_irq_data(drv->irq);
+	bool irq_sts;
 
 	for (i = 0; i < drv->num_tcs; i++) {
 		if (!atomic_read(&drv->tcs_in_use[i]))
@@ -891,6 +895,20 @@
 		print_tcs_regs(drv, i);
 		print_response(drv, i);
 	}
+
+	if (rsc_irq_data) {
+		irq_get_irqchip_state(drv->irq, IRQCHIP_STATE_PENDING,
+								&irq_sts);
+		pr_warn("HW IRQ %lu is %s at GIC\n", rsc_irq_data->hwirq,
+					irq_sts ? "PENDING" : "NOT PENDING");
+	}
+
+	if (test_bit(TASKLET_STATE_SCHED, &drv->tasklet.state))
+		pr_warn("Tasklet is scheduled for execution\n");
+	else if (test_bit(TASKLET_STATE_RUN, &drv->tasklet.state))
+		pr_warn("Tasklet is running\n");
+	else
+		pr_warn("Tasklet is not active\n");
 }
 
 static void chan_debug(struct mbox_chan *chan)
@@ -980,7 +998,8 @@
 
 	/* If we were just busy waiting for TCS, dump the state and return */
 	if (ret == -EBUSY) {
-		pr_info_ratelimited("TCS Busy, retrying RPMH message send\n");
+		dev_err_ratelimited(chan->cl->dev,
+				"TCS Busy, retrying RPMH message send\n");
 		ret = -EAGAIN;
 	}
 
@@ -1261,6 +1280,8 @@
 	if (ret)
 		return ret;
 
+	drv->irq = irq;
+
 	/* Enable interrupts for AMC TCS */
 	write_tcs_reg(drv->reg_base, RSC_DRV_IRQ_ENABLE, 0, 0,
 					drv->tcs[ACTIVE_TCS].tcs_mask);
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 537903b..dd344ee 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -284,8 +284,10 @@
 			break;						\
 									\
 		mutex_unlock(&(ca)->set->bucket_lock);			\
-		if (kthread_should_stop())				\
+		if (kthread_should_stop()) {				\
+			set_current_state(TASK_RUNNING);		\
 			return 0;					\
+		}							\
 									\
 		schedule();						\
 		mutex_lock(&(ca)->set->bucket_lock);			\
@@ -512,15 +514,21 @@
 
 /*
  * We keep multiple buckets open for writes, and try to segregate different
- * write streams for better cache utilization: first we look for a bucket where
- * the last write to it was sequential with the current write, and failing that
- * we look for a bucket that was last used by the same task.
+ * write streams for better cache utilization: first we try to segregate
+ * flash-only volume write streams from cached-device write streams, then
+ * we look for a bucket where the last write to it was sequential with the
+ * current write, failing that a bucket that was last used by the same task.
  *
  * The ideas is if you've got multiple tasks pulling data into the cache at the
  * same time, you'll get better cache utilization if you try to segregate their
  * data and preserve locality.
  *
- * For example, say you've starting Firefox at the same time you're copying a
+ * For example, dirty sectors of a flash-only volume are not reclaimable: if
+ * they share buckets with dirty sectors of a cached device, those buckets
+ * stay marked dirty and are never reclaimed, even after the cached device's
+ * dirty data has been written back to the backing device.
+ *
+ * And say you're starting Firefox at the same time you're copying a
  * bunch of files. Firefox will likely end up being fairly hot and stay in the
  * cache awhile, but the data you copied might not be; if you wrote all that
  * data to the same buckets it'd get invalidated at the same time.
@@ -537,7 +545,10 @@
 	struct open_bucket *ret, *ret_task = NULL;
 
 	list_for_each_entry_reverse(ret, &c->data_buckets, list)
-		if (!bkey_cmp(&ret->key, search))
+		if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) !=
+		    UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)]))
+			continue;
+		else if (!bkey_cmp(&ret->key, search))
 			goto found;
 		else if (ret->last_write_point == write_point)
 			ret_task = ret;
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 02619ca..7fe7df5 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -904,7 +904,7 @@
 
 int bch_flash_dev_create(struct cache_set *c, uint64_t size);
 
-int bch_cached_dev_attach(struct cached_dev *, struct cache_set *);
+int bch_cached_dev_attach(struct cached_dev *, struct cache_set *, uint8_t *);
 void bch_cached_dev_detach(struct cached_dev *);
 void bch_cached_dev_run(struct cached_dev *);
 void bcache_device_stop(struct bcache_device *);
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index cac297f..cf7c689 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1864,14 +1864,17 @@
 	 */
 	for_each_cache(ca, c, i) {
 		for_each_bucket(b, ca) {
-			if (fifo_full(&ca->free[RESERVE_PRIO]))
+			if (fifo_full(&ca->free[RESERVE_PRIO]) &&
+			    fifo_full(&ca->free[RESERVE_BTREE]))
 				break;
 
 			if (bch_can_invalidate_bucket(ca, b) &&
 			    !GC_MARK(b)) {
 				__bch_invalidate_one_bucket(ca, b);
-				fifo_push(&ca->free[RESERVE_PRIO],
-					  b - ca->buckets);
+				if (!fifo_push(&ca->free[RESERVE_PRIO],
+				   b - ca->buckets))
+					fifo_push(&ca->free[RESERVE_BTREE],
+						  b - ca->buckets);
 			}
 		}
 	}
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 723302c..faa9bfe 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -633,11 +633,11 @@
 static void search_free(struct closure *cl)
 {
 	struct search *s = container_of(cl, struct search, cl);
-	bio_complete(s);
 
 	if (s->iop.bio)
 		bio_put(s->iop.bio);
 
+	bio_complete(s);
 	closure_debug_destroy(cl);
 	mempool_free(s, s->d->c->search);
 }
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 28864ad..11c9953 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -892,6 +892,12 @@
 
 	mutex_lock(&bch_register_lock);
 
+	cancel_delayed_work_sync(&dc->writeback_rate_update);
+	if (!IS_ERR_OR_NULL(dc->writeback_thread)) {
+		kthread_stop(dc->writeback_thread);
+		dc->writeback_thread = NULL;
+	}
+
 	memset(&dc->sb.set_uuid, 0, 16);
 	SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);
 
@@ -932,7 +938,8 @@
 	cached_dev_put(dc);
 }
 
-int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
+int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
+			  uint8_t *set_uuid)
 {
 	uint32_t rtime = cpu_to_le32(get_seconds());
 	struct uuid_entry *u;
@@ -941,7 +948,8 @@
 
 	bdevname(dc->bdev, buf);
 
-	if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))
+	if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) ||
+	    (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)))
 		return -ENOENT;
 
 	if (dc->disk.c) {
@@ -1185,7 +1193,7 @@
 
 	list_add(&dc->list, &uncached_devices);
 	list_for_each_entry(c, &bch_cache_sets, list)
-		bch_cached_dev_attach(dc, c);
+		bch_cached_dev_attach(dc, c, NULL);
 
 	if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
 	    BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
@@ -1708,7 +1716,7 @@
 	bcache_write_super(c);
 
 	list_for_each_entry_safe(dc, t, &uncached_devices, list)
-		bch_cached_dev_attach(dc, c);
+		bch_cached_dev_attach(dc, c, NULL);
 
 	flash_devs_run(c);
 
@@ -1825,6 +1833,7 @@
 static int cache_alloc(struct cache *ca)
 {
 	size_t free;
+	size_t btree_buckets;
 	struct bucket *b;
 
 	__module_get(THIS_MODULE);
@@ -1834,9 +1843,19 @@
 	ca->journal.bio.bi_max_vecs = 8;
 	ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
 
+	/*
+	 * When ca->sb.njournal_buckets is not zero, a journal exists, and
+	 * during bch_journal_replay() btree nodes may split, so buckets of
+	 * the RESERVE_BTREE type are needed.
+	 * In the worst case every journal bucket holds valid journal
+	 * entries and all of the keys need to be replayed, so the number
+	 * of RESERVE_BTREE buckets should be at least as large as the
+	 * number of journal buckets.
+	 */
+	btree_buckets = ca->sb.njournal_buckets ?: 8;
 	free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
 
-	if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
+	if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets, GFP_KERNEL) ||
 	    !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
 	    !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
 	    !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
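The two bcache hunks above work together: cache_alloc() now sizes the RESERVE_BTREE fifo from the journal bucket count (defaulting to 8 when there is no journal), and the GC path overflows invalidated buckets into that fifo once RESERVE_PRIO is full. A minimal sketch of the same pattern, written against the bcache helpers visible in these hunks; it is not a drop-in implementation and will not compile outside the bcache tree.

/* Hedged sketch of the reserve sizing and fallback push. */
static int sketch_alloc_btree_reserve(struct cache *ca)
{
	/* worst case: every journal bucket holds keys that must be replayed */
	size_t btree_buckets = ca->sb.njournal_buckets ?: 8;

	if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets, GFP_KERNEL))
		return -ENOMEM;
	return 0;
}

static void sketch_stash_invalidated_bucket(struct cache *ca, struct bucket *b)
{
	/* prefer the prio reserve; overflow into the btree reserve when full */
	if (!fifo_push(&ca->free[RESERVE_PRIO], b - ca->buckets))
		fifo_push(&ca->free[RESERVE_BTREE], b - ca->buckets);
}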
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 4fbb553..5a5c1f1 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -191,7 +191,7 @@
 {
 	struct cached_dev *dc = container_of(kobj, struct cached_dev,
 					     disk.kobj);
-	ssize_t v = size;
+	ssize_t v;
 	struct cache_set *c;
 	struct kobj_uevent_env *env;
 
@@ -263,17 +263,20 @@
 	}
 
 	if (attr == &sysfs_attach) {
-		if (bch_parse_uuid(buf, dc->sb.set_uuid) < 16)
+		uint8_t		set_uuid[16];
+
+		if (bch_parse_uuid(buf, set_uuid) < 16)
 			return -EINVAL;
 
+		v = -ENOENT;
 		list_for_each_entry(c, &bch_cache_sets, list) {
-			v = bch_cached_dev_attach(dc, c);
+			v = bch_cached_dev_attach(dc, c, set_uuid);
 			if (!v)
 				return size;
 		}
 
 		pr_err("Can't attach %s: cache set not found", buf);
-		size = v;
+		return v;
 	}
 
 	if (attr == &sysfs_detach && dc->disk.c)
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 4ce2b19..bb7aa31 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -420,18 +420,27 @@
 
 	while (!kthread_should_stop()) {
 		down_write(&dc->writeback_lock);
-		if (!atomic_read(&dc->has_dirty) ||
-		    (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
-		     !dc->writeback_running)) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		/*
+		 * If the bcache device is detaching, skip ahead and continue
+		 * to perform writeback. Otherwise, if there is no dirty data
+		 * on the cache, or there is dirty data but writeback is
+		 * disabled, the writeback thread should sleep here and wait
+		 * for someone else to wake it up.
+		 */
+		if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
+		    (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {
 			up_write(&dc->writeback_lock);
-			set_current_state(TASK_INTERRUPTIBLE);
 
-			if (kthread_should_stop())
+			if (kthread_should_stop()) {
+				set_current_state(TASK_RUNNING);
 				return 0;
+			}
 
 			schedule();
 			continue;
 		}
+		set_current_state(TASK_RUNNING);
 
 		searched_full_index = refill_dirty(dc);
 
@@ -441,6 +450,14 @@
 			cached_dev_put(dc);
 			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
 			bch_write_bdev_super(dc, NULL);
+			/*
+			 * If the bcache device is detaching via the sysfs
+			 * interface, the writeback thread should stop once no
+			 * dirty data remains on the cache. The
+			 * BCACHE_DEV_DETACHING flag is set in
+			 * bch_cached_dev_detach().
+			 */
+			if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
+				break;
 		}
 
 		up_write(&dc->writeback_lock);
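The writeback hunk above moves set_current_state(TASK_INTERRUPTIBLE) ahead of the condition check, which is the usual way to avoid a lost wakeup in a kthread loop: a wakeup that races with the test leaves the task runnable instead of letting it sleep indefinitely. A minimal sketch of that pattern, assuming hypothetical should_work()/do_work() helpers.

#include <linux/kthread.h>
#include <linux/sched.h>

static int sketch_thread(void *arg)
{
	while (!kthread_should_stop()) {
		/* mark ourselves sleeping *before* testing the condition so
		 * a racing wake_up_process() is not lost */
		set_current_state(TASK_INTERRUPTIBLE);

		if (!should_work(arg)) {	/* hypothetical predicate */
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			schedule();
			continue;
		}
		set_current_state(TASK_RUNNING);

		do_work(arg);			/* hypothetical work item */
	}
	return 0;
}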
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 3ec647e..809a4df 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -827,7 +827,8 @@
 	 * dm-bufio is resistant to allocation failures (it just keeps
 	 * one buffer reserved in cases all the allocations fail).
 	 * So set flags to not try too hard:
-	 *	GFP_NOIO: don't recurse into the I/O layer
+	 *	GFP_NOWAIT: don't wait; if we need to sleep we'll release our
+	 *		    mutex and wait ourselves.
 	 *	__GFP_NORETRY: don't retry and rather return failure
 	 *	__GFP_NOMEMALLOC: don't use emergency reserves
 	 *	__GFP_NOWARN: don't print a warning in case of failure
@@ -837,7 +838,7 @@
 	 */
 	while (1) {
 		if (dm_bufio_cache_size_latch != 1) {
-			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+			b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
 			if (b)
 				return b;
 		}
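The dm-bufio hunk only changes the opportunistic attempt: with GFP_NOWAIT the allocator never sleeps while the client mutex is held, and the comment's fallback (release the mutex and wait ourselves) covers the failure case. A hedged sketch of that two-step shape; the mutex and size parameters are illustrative, not dm-bufio's real fields.

#include <linux/slab.h>
#include <linux/mutex.h>

static void *sketch_alloc(struct mutex *cache_lock, size_t size)
{
	void *p;

	/* cheap, non-sleeping attempt while the lock is held */
	p = kmalloc(size, GFP_NOWAIT | __GFP_NORETRY |
			  __GFP_NOMEMALLOC | __GFP_NOWARN);
	if (p)
		return p;

	/* drop the lock before a sleeping allocation, then reacquire it */
	mutex_unlock(cache_lock);
	p = kmalloc(size, GFP_NOIO);
	mutex_lock(cache_lock);
	return p;
}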
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index be13ebf..f688bfe 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1777,12 +1777,12 @@
 	    cmd == DM_LIST_VERSIONS_CMD)
 		return 0;
 
-	if ((cmd == DM_DEV_CREATE_CMD)) {
+	if (cmd == DM_DEV_CREATE_CMD) {
 		if (!*param->name) {
 			DMWARN("name not supplied when creating device");
 			return -EINVAL;
 		}
-	} else if ((*param->uuid && *param->name)) {
+	} else if (*param->uuid && *param->name) {
 		DMWARN("only supply one of name or uuid, cmd(%u)", cmd);
 		return -EINVAL;
 	}
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 5d0a996..d96aa84 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -19,6 +19,7 @@
 
 #include <linux/module.h>
 #include <linux/reboot.h>
+#include <linux/vmalloc.h>
 
 #define DM_MSG_PREFIX			"verity"
 
@@ -32,6 +33,7 @@
 #define DM_VERITY_OPT_LOGGING		"ignore_corruption"
 #define DM_VERITY_OPT_RESTART		"restart_on_corruption"
 #define DM_VERITY_OPT_IGN_ZEROES	"ignore_zero_blocks"
+#define DM_VERITY_OPT_AT_MOST_ONCE	"check_at_most_once"
 
 #define DM_VERITY_OPTS_MAX		(2 + DM_VERITY_OPTS_FEC)
 
@@ -395,6 +397,18 @@
 }
 
 /*
+ * Moves the bio iter one data block forward.
+ */
+static inline void verity_bv_skip_block(struct dm_verity *v,
+					struct dm_verity_io *io,
+					struct bvec_iter *iter)
+{
+	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
+
+	bio_advance_iter(bio, iter, 1 << v->data_dev_block_bits);
+}
+
+/*
  * Verify one "dm_verity_io" structure.
  */
 static int verity_verify_io(struct dm_verity_io *io)
@@ -406,9 +420,16 @@
 
 	for (b = 0; b < io->n_blocks; b++) {
 		int r;
+		sector_t cur_block = io->block + b;
 		struct shash_desc *desc = verity_io_hash_desc(v, io);
 
-		r = verity_hash_for_block(v, io, io->block + b,
+		if (v->validated_blocks &&
+		    likely(test_bit(cur_block, v->validated_blocks))) {
+			verity_bv_skip_block(v, io, &io->iter);
+			continue;
+		}
+
+		r = verity_hash_for_block(v, io, cur_block,
 					  verity_io_want_digest(v, io),
 					  &is_zero);
 		if (unlikely(r < 0))
@@ -441,13 +462,16 @@
 			return r;
 
 		if (likely(memcmp(verity_io_real_digest(v, io),
-				  verity_io_want_digest(v, io), v->digest_size) == 0))
+				  verity_io_want_digest(v, io), v->digest_size) == 0)) {
+			if (v->validated_blocks)
+				set_bit(cur_block, v->validated_blocks);
 			continue;
+		}
 		else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
-					   io->block + b, NULL, &start) == 0)
+					   cur_block, NULL, &start) == 0)
 			continue;
 		else if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
-					   io->block + b))
+					   cur_block))
 			return -EIO;
 	}
 
@@ -641,6 +665,8 @@
 			args += DM_VERITY_OPTS_FEC;
 		if (v->zero_digest)
 			args++;
+		if (v->validated_blocks)
+			args++;
 		if (!args)
 			return;
 		DMEMIT(" %u", args);
@@ -659,6 +685,8 @@
 		}
 		if (v->zero_digest)
 			DMEMIT(" " DM_VERITY_OPT_IGN_ZEROES);
+		if (v->validated_blocks)
+			DMEMIT(" " DM_VERITY_OPT_AT_MOST_ONCE);
 		sz = verity_fec_status_table(v, sz, result, maxlen);
 		break;
 	}
@@ -712,6 +740,7 @@
 	if (v->bufio)
 		dm_bufio_client_destroy(v->bufio);
 
+	vfree(v->validated_blocks);
 	kfree(v->salt);
 	kfree(v->root_digest);
 	kfree(v->zero_digest);
@@ -733,6 +762,26 @@
 }
 EXPORT_SYMBOL_GPL(verity_dtr);
 
+static int verity_alloc_most_once(struct dm_verity *v)
+{
+	struct dm_target *ti = v->ti;
+
+	/* the bitset can only handle INT_MAX blocks */
+	if (v->data_blocks > INT_MAX) {
+		ti->error = "device too large to use check_at_most_once";
+		return -E2BIG;
+	}
+
+	v->validated_blocks = vzalloc(BITS_TO_LONGS(v->data_blocks) *
+				       sizeof(unsigned long));
+	if (!v->validated_blocks) {
+		ti->error = "failed to allocate bitset for check_at_most_once";
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
 static int verity_alloc_zero_digest(struct dm_verity *v)
 {
 	int r = -ENOMEM;
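check_at_most_once keeps one bit per data block: the bitset is vzalloc'd at BITS_TO_LONGS(data_blocks) longs, tested before hashing a block and set once the block verifies cleanly. A small sketch of that lifecycle; the function names are illustrative, while vzalloc/test_bit/set_bit/vfree are the kernel primitives the hunks above actually use.

#include <linux/vmalloc.h>
#include <linux/bitops.h>

static unsigned long *sketch_bitset_alloc(sector_t nr_blocks)
{
	/* mirrors the INT_MAX guard in verity_alloc_most_once() above */
	if (nr_blocks > INT_MAX)
		return NULL;

	return vzalloc(BITS_TO_LONGS(nr_blocks) * sizeof(unsigned long));
}

static bool sketch_already_verified(unsigned long *bits, sector_t block)
{
	return test_bit(block, bits);
}

static void sketch_mark_verified(unsigned long *bits, sector_t block)
{
	set_bit(block, bits);	/* atomic, safe from concurrent completions */
}

/* pair with vfree(bits) at teardown, as the verity_dtr() hunk above does */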
@@ -802,6 +851,12 @@
 			}
 			continue;
 
+		} else if (!strcasecmp(arg_name, DM_VERITY_OPT_AT_MOST_ONCE)) {
+			r = verity_alloc_most_once(v);
+			if (r)
+				return r;
+			continue;
+
 		} else if (verity_is_fec_opt_arg(arg_name)) {
 			r = verity_fec_parse_opt_args(as, v, &argc, arg_name);
 			if (r)
@@ -1070,7 +1125,7 @@
 
 static struct target_type verity_target = {
 	.name		= "verity",
-	.version	= {1, 3, 0},
+	.version	= {1, 4, 0},
 	.module		= THIS_MODULE,
 	.ctr		= verity_ctr,
 	.dtr		= verity_dtr,
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index 75effca..6d6d8df 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -63,6 +63,7 @@
 	sector_t hash_level_block[DM_VERITY_MAX_LEVELS];
 
 	struct dm_verity_fec *fec;	/* forward error correction */
+	unsigned long *validated_blocks; /* bitset blocks validated */
 };
 
 struct dm_verity_io {
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index ba7edcd..fcc2b57 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -1122,8 +1122,10 @@
 	cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
 	lock_comm(cinfo);
 	ret = __sendmsg(cinfo, &cmsg);
-	if (ret)
+	if (ret) {
+		unlock_comm(cinfo);
 		return ret;
+	}
 	cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE;
 	ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX);
 	cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 93059dd..de0f8de 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8200,6 +8200,19 @@
 	set_mask_bits(&mddev->flags, 0,
 		      BIT(MD_CHANGE_PENDING) | BIT(MD_CHANGE_DEVS));
 
+	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+			!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
+			mddev->delta_disks > 0 &&
+			mddev->pers->finish_reshape &&
+			mddev->pers->size &&
+			mddev->queue) {
+		mddev_lock_nointr(mddev);
+		md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
+		mddev_unlock(mddev);
+		set_capacity(mddev->gendisk, mddev->array_sectors);
+		revalidate_disk(mddev->gendisk);
+	}
+
 	spin_lock(&mddev->lock);
 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
 		/* We completed so min/max setting can be forgotten if used. */
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 208fbf7..eb145ec 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1673,6 +1673,17 @@
 			struct md_rdev *repl =
 				conf->mirrors[conf->raid_disks + number].rdev;
 			freeze_array(conf, 0);
+			if (atomic_read(&repl->nr_pending)) {
+				/* It means that some queued IO of retry_list
+				/* Some queued IO on retry_list still holds a
+				 * reference to repl, so we cannot clear the
+				 * replacement here; doing so could cause a
+				 * NULL rdev dereference in sync_request_write
+				 * and handle_write_finished.
+				err = -EBUSY;
+				unfreeze_array(conf);
+				goto abort;
+			}
 			clear_bit(Replacement, &repl->flags);
 			p->rdev = repl;
 			conf->mirrors[conf->raid_disks + number].rdev = NULL;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c17c882..9b982d4 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2636,7 +2636,8 @@
 		for (m = 0; m < conf->copies; m++) {
 			int dev = r10_bio->devs[m].devnum;
 			rdev = conf->mirrors[dev].rdev;
-			if (r10_bio->devs[m].bio == NULL)
+			if (r10_bio->devs[m].bio == NULL ||
+				r10_bio->devs[m].bio->bi_end_io == NULL)
 				continue;
 			if (!r10_bio->devs[m].bio->bi_error) {
 				rdev_clear_badblocks(
@@ -2651,7 +2652,8 @@
 					md_error(conf->mddev, rdev);
 			}
 			rdev = conf->mirrors[dev].replacement;
-			if (r10_bio->devs[m].repl_bio == NULL)
+			if (r10_bio->devs[m].repl_bio == NULL ||
+				r10_bio->devs[m].repl_bio->bi_end_io == NULL)
 				continue;
 
 			if (!r10_bio->devs[m].repl_bio->bi_error) {
@@ -3681,6 +3683,7 @@
 
 		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
 			discard_supported = true;
+		first = 0;
 	}
 
 	if (mddev->queue) {
@@ -4681,17 +4684,11 @@
 		return;
 
 	if (mddev->delta_disks > 0) {
-		sector_t size = raid10_size(mddev, 0, 0);
-		md_set_array_sectors(mddev, size);
 		if (mddev->recovery_cp > mddev->resync_max_sectors) {
 			mddev->recovery_cp = mddev->resync_max_sectors;
 			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 		}
-		mddev->resync_max_sectors = size;
-		if (mddev->queue) {
-			set_capacity(mddev->gendisk, mddev->array_sectors);
-			revalidate_disk(mddev->gendisk);
-		}
+		mddev->resync_max_sectors = mddev->array_sectors;
 	} else {
 		int d;
 		rcu_read_lock();
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index edf37fd..8de95a5 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -110,8 +110,7 @@
 static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
 {
 	int i;
-	local_irq_disable();
-	spin_lock(conf->hash_locks);
+	spin_lock_irq(conf->hash_locks);
 	for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
 		spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
 	spin_lock(&conf->device_lock);
@@ -121,9 +120,9 @@
 {
 	int i;
 	spin_unlock(&conf->device_lock);
-	for (i = NR_STRIPE_HASH_LOCKS; i; i--)
-		spin_unlock(conf->hash_locks + i - 1);
-	local_irq_enable();
+	for (i = NR_STRIPE_HASH_LOCKS - 1; i; i--)
+		spin_unlock(conf->hash_locks + i);
+	spin_unlock_irq(conf->hash_locks);
 }
 
 /* bio's attached to a stripe+device for I/O are linked together in bi_sector
@@ -732,12 +731,11 @@
 
 static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
 {
-	local_irq_disable();
 	if (sh1 > sh2) {
-		spin_lock(&sh2->stripe_lock);
+		spin_lock_irq(&sh2->stripe_lock);
 		spin_lock_nested(&sh1->stripe_lock, 1);
 	} else {
-		spin_lock(&sh1->stripe_lock);
+		spin_lock_irq(&sh1->stripe_lock);
 		spin_lock_nested(&sh2->stripe_lock, 1);
 	}
 }
@@ -745,8 +743,7 @@
 static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
 {
 	spin_unlock(&sh1->stripe_lock);
-	spin_unlock(&sh2->stripe_lock);
-	local_irq_enable();
+	spin_unlock_irq(&sh2->stripe_lock);
 }
 
 /* Only freshly new full stripe normal write stripe can be added to a batch list */
@@ -2052,15 +2049,16 @@
 static int grow_stripes(struct r5conf *conf, int num)
 {
 	struct kmem_cache *sc;
+	size_t namelen = sizeof(conf->cache_name[0]);
 	int devs = max(conf->raid_disks, conf->previous_raid_disks);
 
 	if (conf->mddev->gendisk)
-		sprintf(conf->cache_name[0],
+		snprintf(conf->cache_name[0], namelen,
 			"raid%d-%s", conf->level, mdname(conf->mddev));
 	else
-		sprintf(conf->cache_name[0],
+		snprintf(conf->cache_name[0], namelen,
 			"raid%d-%p", conf->level, conf->mddev);
-	sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
+	snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]);
 
 	conf->active_name = 0;
 	sc = kmem_cache_create(conf->cache_name[conf->active_name],
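The grow_stripes() change bounds both names with snprintf() and clamps the base name to %.27s so that the "-alt" suffix plus the terminating NUL still fits in the same buffer. A standalone illustration, assuming the 32-byte cache_name buffers that the %.27s precision implies.

#include <stdio.h>

int main(void)
{
	char name[2][32];

	/* the base name is truncated to fit its buffer */
	snprintf(name[0], sizeof(name[0]),
		 "raid%d-%s", 5, "md_with_a_very_long_device_name");

	/* "%.27s" leaves room for "-alt" (4 chars) plus the NUL in 32 bytes */
	snprintf(name[1], sizeof(name[1]), "%.27s-alt", name[0]);

	printf("%s\n%s\n", name[0], name[1]);
	return 0;
}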
@@ -7617,13 +7615,7 @@
 
 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
 
-		if (mddev->delta_disks > 0) {
-			md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
-			if (mddev->queue) {
-				set_capacity(mddev->gendisk, mddev->array_sectors);
-				revalidate_disk(mddev->gendisk);
-			}
-		} else {
+		if (mddev->delta_disks <= 0) {
 			int d;
 			spin_lock_irq(&conf->device_lock);
 			mddev->degraded = calc_degraded(conf);
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index 142ae28..d558ed3 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -420,11 +420,13 @@
 	INIT_WORK(&state->fw_work, cx25840_work_handler);
 	init_waitqueue_head(&state->fw_wait);
 	q = create_singlethread_workqueue("cx25840_fw");
-	prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
-	queue_work(q, &state->fw_work);
-	schedule();
-	finish_wait(&state->fw_wait, &wait);
-	destroy_workqueue(q);
+	if (q) {
+		prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
+		queue_work(q, &state->fw_work);
+		schedule();
+		finish_wait(&state->fw_wait, &wait);
+		destroy_workqueue(q);
+	}
 
 	/* 6. */
 	cx25840_write(client, 0x115, 0x8c);
@@ -634,11 +636,13 @@
 	INIT_WORK(&state->fw_work, cx25840_work_handler);
 	init_waitqueue_head(&state->fw_wait);
 	q = create_singlethread_workqueue("cx25840_fw");
-	prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
-	queue_work(q, &state->fw_work);
-	schedule();
-	finish_wait(&state->fw_wait, &wait);
-	destroy_workqueue(q);
+	if (q) {
+		prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
+		queue_work(q, &state->fw_work);
+		schedule();
+		finish_wait(&state->fw_wait, &wait);
+		destroy_workqueue(q);
+	}
 
 	/* Call the cx23888 specific std setup func, we no longer rely on
 	 * the generic cx24840 func.
@@ -752,11 +756,13 @@
 	INIT_WORK(&state->fw_work, cx25840_work_handler);
 	init_waitqueue_head(&state->fw_wait);
 	q = create_singlethread_workqueue("cx25840_fw");
-	prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
-	queue_work(q, &state->fw_work);
-	schedule();
-	finish_wait(&state->fw_wait, &wait);
-	destroy_workqueue(q);
+	if (q) {
+		prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
+		queue_work(q, &state->fw_work);
+		schedule();
+		finish_wait(&state->fw_wait, &wait);
+		destroy_workqueue(q);
+	}
 
 	cx25840_std_setup(client);
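All three cx25840 hunks add the same guard: create_singlethread_workqueue() can fail under memory pressure, so the firmware load is only queued when the workqueue actually exists. A hedged sketch of the guard; it uses flush_workqueue() for brevity instead of the driver's prepare_to_wait()/schedule() handshake, and the handler name is illustrative.

#include <linux/workqueue.h>

static void sketch_fw_handler(struct work_struct *work)
{
	/* ... load firmware ... */
}

static void sketch_load_fw(void)
{
	struct workqueue_struct *q;
	struct work_struct fw_work;

	INIT_WORK(&fw_work, sketch_fw_handler);

	q = create_singlethread_workqueue("sketch_fw");
	if (!q)			/* allocation can fail; skip rather than oops */
		return;

	queue_work(q, &fw_work);
	flush_workqueue(q);	/* wait for the handler before tearing down */
	destroy_workqueue(q);
}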
 
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 99ba8d6..427ece1 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -2282,6 +2282,10 @@
 				&dev->i2c_bus[2].i2c_adap,
 				"cx25840", 0x88 >> 1, NULL);
 		if (dev->sd_cx25840) {
+			/* set host data for clk_freq configuration */
+			v4l2_set_subdev_hostdata(dev->sd_cx25840,
+						&dev->clk_freq);
+
 			dev->sd_cx25840->grp_id = CX23885_HW_AV_CORE;
 			v4l2_subdev_call(dev->sd_cx25840, core, load_fw);
 		}
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index c86b109..dcbb3a2 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -872,6 +872,16 @@
 	if (cx23885_boards[dev->board].clk_freq > 0)
 		dev->clk_freq = cx23885_boards[dev->board].clk_freq;
 
+	if (dev->board == CX23885_BOARD_HAUPPAUGE_IMPACTVCBE &&
+		dev->pci->subsystem_device == 0x7137) {
+		/* The Hauppauge ImpactVCBe with device ID 0x7137 is populated
+		 * with an 888 and a 25MHz crystal, instead of the usual
+		 * third-overtone 50MHz crystal. The default clock rate must
+		 * be overridden so the cx25840 is configured properly.
+		 */
+		dev->clk_freq = 25000000;
+	}
+
 	dev->pci_bus  = dev->pci->bus->number;
 	dev->pci_slot = PCI_SLOT(dev->pci->devfn);
 	cx23885_irq_add(dev, 0x001f00);
diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
index 9a5f912..0d4cacb 100644
--- a/drivers/media/pci/cx25821/cx25821-core.c
+++ b/drivers/media/pci/cx25821/cx25821-core.c
@@ -871,6 +871,10 @@
 	dev->nr = ++cx25821_devcount;
 	sprintf(dev->name, "cx25821[%d]", dev->nr);
 
+	if (dev->nr >= ARRAY_SIZE(card)) {
+		CX25821_INFO("dev->nr >= %zd", ARRAY_SIZE(card));
+		return -ENODEV;
+	}
 	if (dev->pci->device != 0x8210) {
 		pr_info("%s(): Exiting. Incorrect Hardware device = 0x%02x\n",
 			__func__, dev->pci->device);
@@ -886,9 +890,6 @@
 		dev->channels[i].sram_channels = &cx25821_sram_channels[i];
 	}
 
-	if (dev->nr > 1)
-		CX25821_INFO("dev->nr > 1!");
-
 	/* board config */
 	dev->board = 1;		/* card[dev->nr]; */
 	dev->_max_num_decoders = MAX_DECODERS;
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.c b/drivers/media/platform/msm/camera/cam_core/cam_context.c
index 8beffc4..891b738 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.c
@@ -355,7 +355,7 @@
 {
 	int rc = 0;
 
-	if (!ctx->state_machine) {
+	if (!ctx || !ctx->state_machine) {
 		CAM_ERR(CAM_CORE, "Context is not ready");
 		return -EINVAL;
 	}
@@ -384,7 +384,7 @@
 {
 	int rc = 0;
 
-	if (!ctx->state_machine) {
+	if (!ctx || !ctx->state_machine) {
 		CAM_ERR(CAM_CORE, "Context is not ready");
 		return -EINVAL;
 	}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
index 5e4ff0d..90603de 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
@@ -22,11 +22,6 @@
 #include "cam_cpas_hw_intf.h"
 #include "cam_cpas_soc.h"
 
-#define CAM_CPAS_AXI_MIN_MNOC_AB_BW   (2048 * 1024)
-#define CAM_CPAS_AXI_MIN_MNOC_IB_BW   (2048 * 1024)
-#define CAM_CPAS_AXI_MIN_CAMNOC_AB_BW (2048 * 1024)
-#define CAM_CPAS_AXI_MIN_CAMNOC_IB_BW (3000000000L)
-
 static uint cam_min_camnoc_ib_bw;
 module_param(cam_min_camnoc_ib_bw, uint, 0644);
 
@@ -82,8 +77,8 @@
 	if (level == bus_client->curr_vote_level)
 		return 0;
 
-	CAM_DBG(CAM_CPAS, "Bus client[%d] index[%d]", bus_client->client_id,
-		level);
+	CAM_DBG(CAM_CPAS, "Bus client=[%d][%s] index[%d]",
+		bus_client->client_id, bus_client->name, level);
 	msm_bus_scale_client_update_request(bus_client->client_id, level);
 	bus_client->curr_vote_level = level;
 
@@ -152,8 +147,8 @@
 	path->vectors[0].ab = ab;
 	path->vectors[0].ib = ib;
 
-	CAM_DBG(CAM_CPAS, "Bus client[%d] :ab[%llu] ib[%llu], index[%d]",
-		bus_client->client_id, ab, ib, idx);
+	CAM_DBG(CAM_CPAS, "Bus client=[%d][%s] :ab[%llu] ib[%llu], index[%d]",
+		bus_client->client_id, bus_client->name, ab, ib, idx);
 	msm_bus_scale_client_update_request(bus_client->client_id, idx);
 
 	return 0;
@@ -208,10 +203,12 @@
 	bus_client->num_paths = pdata->usecase[0].num_paths;
 	bus_client->curr_vote_level = 0;
 	bus_client->valid = true;
+	bus_client->name = pdata->name;
 	mutex_init(&bus_client->lock);
 
-	CAM_DBG(CAM_CPAS, "Bus Client : src=%d, dst=%d, bus_client=%d",
-		bus_client->src, bus_client->dst, bus_client->client_id);
+	CAM_DBG(CAM_CPAS, "Bus Client=[%d][%s] : src=%d, dst=%d",
+		bus_client->client_id, bus_client->name,
+		bus_client->src, bus_client->dst);
 
 	return 0;
 fail_unregister_client:
@@ -463,6 +460,7 @@
 {
 	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
 	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_cpas_client *cpas_client = NULL;
 	int reg_base_index = cpas_core->regbase_index[reg_base];
 	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
 	int rc = 0;
@@ -478,9 +476,12 @@
 		return -EINVAL;
 
 	mutex_lock(&cpas_core->client_mutex[client_indx]);
+	cpas_client = cpas_core->cpas_client[client_indx];
 
 	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
-		CAM_ERR(CAM_CPAS, "client has not started%d", client_indx);
+		CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] has not started",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index);
 		rc = -EPERM;
 		goto unlock_client;
 	}
@@ -503,6 +504,7 @@
 {
 	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
 	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_cpas_client *cpas_client = NULL;
 	int reg_base_index = cpas_core->regbase_index[reg_base];
 	uint32_t reg_value;
 	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
@@ -522,9 +524,12 @@
 		return -EINVAL;
 
 	mutex_lock(&cpas_core->client_mutex[client_indx]);
+	cpas_client = cpas_core->cpas_client[client_indx];
 
 	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
-		CAM_ERR(CAM_CPAS, "client has not started%d", client_indx);
+		CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] has not started",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index);
 		rc = -EPERM;
 		goto unlock_client;
 	}
@@ -580,8 +585,9 @@
 			soc_private->camnoc_axi_clk_bw_margin) / 100;
 
 		if ((required_camnoc_bw > 0) &&
-			(required_camnoc_bw < CAM_CPAS_AXI_MIN_CAMNOC_IB_BW))
-			required_camnoc_bw = CAM_CPAS_AXI_MIN_CAMNOC_IB_BW;
+			(required_camnoc_bw <
+			soc_private->camnoc_axi_min_ib_bw))
+			required_camnoc_bw = soc_private->camnoc_axi_min_ib_bw;
 
 		clk_rate = required_camnoc_bw / soc_private->camnoc_bus_width;
 
@@ -695,6 +701,7 @@
 {
 	struct cam_axi_vote axi_vote;
 	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_cpas_client *cpas_client = NULL;
 	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
 	int rc = 0;
 
@@ -719,16 +726,20 @@
 
 	mutex_lock(&cpas_hw->hw_mutex);
 	mutex_lock(&cpas_core->client_mutex[client_indx]);
+	cpas_client = cpas_core->cpas_client[client_indx];
 
 	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
-		CAM_ERR(CAM_CPAS, "client has not started %d", client_indx);
+		CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] has not started",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index);
 		rc = -EPERM;
 		goto unlock_client;
 	}
 
 	CAM_DBG(CAM_CPAS,
-		"Client[%d] Requested compressed[%llu], uncompressed[%llu]",
-		client_indx, axi_vote.compressed_bw,
+		"Client=[%d][%s][%d] Requested compressed[%llu], uncompressed[%llu]",
+		client_indx, cpas_client->data.identifier,
+		cpas_client->data.cell_index, axi_vote.compressed_bw,
 		axi_vote.uncompressed_bw);
 
 	rc = cam_cpas_util_apply_client_axi_vote(cpas_hw,
@@ -809,7 +820,8 @@
 	mutex_lock(&ahb_bus_client->lock);
 	cpas_client->ahb_level = required_level;
 
-	CAM_DBG(CAM_CPAS, "Clients required level[%d], curr_level[%d]",
+	CAM_DBG(CAM_CPAS, "Client=[%d][%s] required level[%d], curr_level[%d]",
+		ahb_bus_client->client_id, ahb_bus_client->name,
 		required_level, ahb_bus_client->curr_vote_level);
 
 	if (required_level == ahb_bus_client->curr_vote_level)
@@ -853,6 +865,7 @@
 {
 	struct cam_ahb_vote ahb_vote;
 	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_cpas_client *cpas_client = NULL;
 	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
 	int rc = 0;
 
@@ -875,17 +888,21 @@
 
 	mutex_lock(&cpas_hw->hw_mutex);
 	mutex_lock(&cpas_core->client_mutex[client_indx]);
+	cpas_client = cpas_core->cpas_client[client_indx];
 
 	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
-		CAM_ERR(CAM_CPAS, "client has not started %d", client_indx);
+		CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] has not started",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index);
 		rc = -EPERM;
 		goto unlock_client;
 	}
 
 	CAM_DBG(CAM_CPAS,
-		"client[%d] : type[%d], level[%d], freq[%ld], applied[%d]",
-		client_indx, ahb_vote.type, ahb_vote.vote.level,
-		ahb_vote.vote.freq,
+		"client=[%d][%s][%d] : type[%d], level[%d], freq[%ld], applied[%d]",
+		client_indx, cpas_client->data.identifier,
+		cpas_client->data.cell_index, ahb_vote.type,
+		ahb_vote.vote.level, ahb_vote.vote.freq,
 		cpas_core->cpas_client[client_indx]->ahb_level);
 
 	rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw,
@@ -948,32 +965,37 @@
 
 	mutex_lock(&cpas_hw->hw_mutex);
 	mutex_lock(&cpas_core->client_mutex[client_indx]);
+	cpas_client = cpas_core->cpas_client[client_indx];
 
 	if (!CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
-		CAM_ERR(CAM_CPAS, "client is not registered %d", client_indx);
+		CAM_ERR(CAM_CPAS, "client=[%d] is not registered",
+			client_indx);
 		rc = -EPERM;
 		goto done;
 	}
 
 	if (CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
-		CAM_ERR(CAM_CPAS, "Client %d is in start state", client_indx);
+		CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] is in start state",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index);
 		rc = -EPERM;
 		goto done;
 	}
 
-	cpas_client = cpas_core->cpas_client[client_indx];
-
-	CAM_DBG(CAM_CPAS, "AHB :client[%d] type[%d], level[%d], applied[%d]",
-		client_indx, ahb_vote->type, ahb_vote->vote.level,
-		cpas_client->ahb_level);
+	CAM_DBG(CAM_CPAS,
+		"AHB :client=[%d][%s][%d] type[%d], level[%d], applied[%d]",
+		client_indx, cpas_client->data.identifier,
+		cpas_client->data.cell_index,
+		ahb_vote->type, ahb_vote->vote.level, cpas_client->ahb_level);
 	rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw, cpas_client,
 		ahb_vote, &applied_level);
 	if (rc)
 		goto done;
 
 	CAM_DBG(CAM_CPAS,
-		"AXI client[%d] compressed_bw[%llu], uncompressed_bw[%llu]",
-		client_indx, axi_vote->compressed_bw,
+		"AXI client=[%d][%s][%d] compressed_bw[%llu], uncompressed_bw[%llu]",
+		client_indx, cpas_client->data.identifier,
+		cpas_client->data.cell_index, axi_vote->compressed_bw,
 		axi_vote->uncompressed_bw);
 	rc = cam_cpas_util_apply_client_axi_vote(cpas_hw,
 		cpas_client, axi_vote);
@@ -1010,9 +1032,9 @@
 	cpas_client->started = true;
 	cpas_core->streamon_clients++;
 
-	CAM_DBG(CAM_CPAS, "client=%s, streamon_clients=%d",
-		soc_private->client_name[client_indx],
-		cpas_core->streamon_clients);
+	CAM_DBG(CAM_CPAS, "client=[%d][%s][%d] streamon_clients=%d",
+		client_indx, cpas_client->data.identifier,
+		cpas_client->data.cell_index, cpas_core->streamon_clients);
 done:
 	mutex_unlock(&cpas_core->client_mutex[client_indx]);
 	mutex_unlock(&cpas_hw->hw_mutex);
@@ -1062,18 +1084,20 @@
 
 	mutex_lock(&cpas_hw->hw_mutex);
 	mutex_lock(&cpas_core->client_mutex[client_indx]);
+	cpas_client = cpas_core->cpas_client[client_indx];
 
-	CAM_DBG(CAM_CPAS, "client=%s, streamon_clients=%d",
-		soc_private->client_name[client_indx],
-		cpas_core->streamon_clients);
+	CAM_DBG(CAM_CPAS, "Client=[%d][%s][%d] streamon_clients=%d",
+		client_indx, cpas_client->data.identifier,
+		cpas_client->data.cell_index, cpas_core->streamon_clients);
 
 	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
-		CAM_ERR(CAM_CPAS, "Client %d is not started", client_indx);
+		CAM_ERR(CAM_CPAS, "Client=[%d][%s][%d] is not started",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index);
 		rc = -EPERM;
 		goto done;
 	}
 
-	cpas_client = cpas_core->cpas_client[client_indx];
 	cpas_client->started = false;
 	cpas_core->streamon_clients--;
 
@@ -1207,8 +1231,9 @@
 		cpas_client, client_indx);
 	if (rc) {
 		CAM_ERR(CAM_CPAS,
-			"axi_port_insert failed client_indx=%d, rc=%d",
-			client_indx, rc);
+			"axi_port_insert failed Client=[%d][%s][%d], rc=%d",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index, rc);
 		kfree(cpas_client);
 		mutex_unlock(&cpas_hw->hw_mutex);
 		return -EINVAL;
@@ -1223,8 +1248,9 @@
 
 	mutex_unlock(&cpas_hw->hw_mutex);
 
-	CAM_DBG(CAM_CPAS, "client_indx=%d, registered_clients=%d",
-		client_indx, cpas_core->registered_clients);
+	CAM_DBG(CAM_CPAS, "client=[%d][%s][%d], registered_clients=%d",
+		client_indx, cpas_client->data.identifier,
+		cpas_client->data.cell_index, cpas_core->registered_clients);
 
 	return 0;
 }
@@ -1233,6 +1259,7 @@
 	uint32_t client_handle)
 {
 	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_cpas_client *cpas_client = NULL;
 	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
 	int rc = 0;
 
@@ -1241,15 +1268,20 @@
 
 	mutex_lock(&cpas_hw->hw_mutex);
 	mutex_lock(&cpas_core->client_mutex[client_indx]);
+	cpas_client = cpas_core->cpas_client[client_indx];
 
 	if (!CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
-		CAM_ERR(CAM_CPAS, "client not registered %d", client_indx);
+		CAM_ERR(CAM_CPAS, "Client=[%d][%s][%d] not registered",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index);
 		rc = -EPERM;
 		goto done;
 	}
 
 	if (CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
-		CAM_ERR(CAM_CPAS, "Client %d is not stopped", client_indx);
+		CAM_ERR(CAM_CPAS, "Client=[%d][%s][%d] is not stopped",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index);
 		rc = -EPERM;
 		goto done;
 	}
@@ -1257,8 +1289,9 @@
 	cam_cpas_util_remove_client_from_axi_port(
 		cpas_core->cpas_client[client_indx]);
 
-	CAM_DBG(CAM_CPAS, "client_indx=%d, registered_clients=%d",
-		client_indx, cpas_core->registered_clients);
+	CAM_DBG(CAM_CPAS, "client=[%d][%s][%d], registered_clients=%d",
+		client_indx, cpas_client->data.identifier,
+		cpas_client->data.cell_index, cpas_core->registered_clients);
 
 	kfree(cpas_core->cpas_client[client_indx]);
 	cpas_core->cpas_client[client_indx] = NULL;
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
index 2e660b1..d51b152 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
@@ -20,6 +20,11 @@
 #define CAM_CPAS_MAX_CLIENTS 30
 #define CAM_CPAS_INFLIGHT_WORKS 5
 
+#define CAM_CPAS_AXI_MIN_MNOC_AB_BW   (2048 * 1024)
+#define CAM_CPAS_AXI_MIN_MNOC_IB_BW   (2048 * 1024)
+#define CAM_CPAS_AXI_MIN_CAMNOC_AB_BW (2048 * 1024)
+#define CAM_CPAS_AXI_MIN_CAMNOC_IB_BW (3000000000L)
+
 #define CAM_CPAS_GET_CLIENT_IDX(handle) (handle)
 #define CAM_CPAS_GET_CLIENT_HANDLE(indx) (indx)
 
@@ -118,6 +123,7 @@
  * @dyn_vote: Whether dynamic voting enabled
  * @lock: Mutex lock used while voting on this client
  * @valid: Whether bus client is valid
+ * @name: Name of the bus client
  *
  */
 struct cam_cpas_bus_client {
@@ -131,6 +137,7 @@
 	bool dyn_vote;
 	struct mutex lock;
 	bool valid;
+	const char *name;
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
index 8f9ec14..b73b32a 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
@@ -56,6 +56,27 @@
 
 	CAM_DBG(CAM_CPAS, "CPAS HW VERSION %x", soc_private->hw_version);
 
+	soc_private->camnoc_axi_min_ib_bw = 0;
+	rc = of_property_read_u64(of_node,
+		"camnoc-axi-min-ib-bw",
+		&soc_private->camnoc_axi_min_ib_bw);
+	if (rc == -EOVERFLOW) {
+		soc_private->camnoc_axi_min_ib_bw = 0;
+		rc = of_property_read_u32(of_node,
+			"camnoc-axi-min-ib-bw",
+			(u32 *)&soc_private->camnoc_axi_min_ib_bw);
+	}
+
+	if (rc) {
+		CAM_DBG(CAM_CPAS,
+			"failed to read camnoc-axi-min-ib-bw rc:%d", rc);
+		soc_private->camnoc_axi_min_ib_bw =
+			CAM_CPAS_AXI_MIN_CAMNOC_IB_BW;
+	}
+
+	CAM_DBG(CAM_CPAS, "camnoc-axi-min-ib-bw = %llu",
+		soc_private->camnoc_axi_min_ib_bw);
+
 	soc_private->client_id_based = of_property_read_bool(of_node,
 		"client-id-based");
 
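The soc hunk above tries the camnoc-axi-min-ib-bw property as a u64 first, retries it as a single u32 cell when the read reports -EOVERFLOW, and falls back to the CAM_CPAS_AXI_MIN_CAMNOC_IB_BW default when the property is absent. A hedged sketch of that lookup order (kernel/OF context; the wrapper name is illustrative).

#include <linux/of.h>

static u64 sketch_read_min_ib_bw(struct device_node *of_node)
{
	u64 bw = 0;
	u32 bw32 = 0;
	int rc;

	rc = of_property_read_u64(of_node, "camnoc-axi-min-ib-bw", &bw);
	if (rc == -EOVERFLOW) {
		/* the DT author used a single u32 cell; read it as such */
		rc = of_property_read_u32(of_node, "camnoc-axi-min-ib-bw",
					  &bw32);
		bw = bw32;
	}
	if (rc)
		bw = CAM_CPAS_AXI_MIN_CAMNOC_IB_BW; /* default from cam_cpas_hw.h */

	return bw;
}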
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
index 91e8d0c0..f6ae8a8 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
@@ -48,6 +48,7 @@
  * @camnoc_bus_width : CAMNOC Bus width
  * @camnoc_axi_clk_bw_margin : BW Margin in percentage to add while calculating
  *      camnoc axi clock
+ * @camnoc_axi_min_ib_bw: Min camnoc BW which varies based on target
  *
  */
 struct cam_cpas_private_soc {
@@ -66,6 +67,7 @@
 	bool control_camnoc_axi_clk;
 	uint32_t camnoc_bus_width;
 	uint32_t camnoc_axi_clk_bw_margin;
+	uint64_t camnoc_axi_min_ib_bw;
 };
 
 int cam_cpas_soc_init_resources(struct cam_hw_soc_info *soc_info,
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
index f9985eb..7b02aac 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -504,6 +504,18 @@
 
 }
 
+static int __cam_isp_ctx_reg_upd_in_epoch_state(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	if (ctx_isp->frame_id == 1)
+		CAM_DBG(CAM_ISP, "Reg update for early PCR");
+	else
+		CAM_WARN(CAM_ISP,
+			"Unexpected reg update in activated substate:%d for frame_id:%lld",
+			ctx_isp->substate_activated, ctx_isp->frame_id);
+	return 0;
+}
+
 static int __cam_isp_ctx_reg_upd_in_activated_state(
 	struct cam_isp_context *ctx_isp, void *evt_data)
 {
@@ -1119,7 +1131,7 @@
 		.irq_ops = {
 			__cam_isp_ctx_handle_error,
 			__cam_isp_ctx_sof_in_epoch,
-			NULL,
+			__cam_isp_ctx_reg_upd_in_epoch_state,
 			__cam_isp_ctx_notify_sof_in_actived_state,
 			__cam_isp_ctx_notify_eof_in_actived_state,
 			__cam_isp_ctx_buf_done_in_epoch,
@@ -1325,9 +1337,7 @@
 	struct list_head                  flush_list;
 
 	INIT_LIST_HEAD(&flush_list);
-	spin_lock_bh(&ctx->lock);
 	if (list_empty(req_list)) {
-		spin_unlock_bh(&ctx->lock);
 		CAM_DBG(CAM_ISP, "request list is empty");
 		return 0;
 	}
@@ -1346,7 +1356,6 @@
 		list_del_init(&req->list);
 		list_add_tail(&req->list, &flush_list);
 	}
-	spin_unlock_bh(&ctx->lock);
 
 	list_for_each_entry_safe(req, req_temp, &flush_list, list) {
 		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
@@ -1364,16 +1373,16 @@
 				req_isp->fence_map_out[i].sync_id = -1;
 			}
 		}
-		spin_lock_bh(&ctx->lock);
 		list_add_tail(&req->list, &ctx->free_req_list);
-		spin_unlock_bh(&ctx->lock);
 	}
 
 	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ &&
-		!cancel_req_id_found)
-		CAM_DBG(CAM_ISP,
+		!cancel_req_id_found) {
+		CAM_INFO(CAM_ISP,
 			"Flush request id:%lld is not found in the list",
 			flush_req->req_id);
+		return -EINVAL;
+	}
 
 	return 0;
 }
@@ -1385,7 +1394,9 @@
 	int rc = 0;
 
 	CAM_DBG(CAM_ISP, "try to flush pending list");
+	spin_lock_bh(&ctx->lock);
 	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
+	spin_unlock_bh(&ctx->lock);
 	CAM_DBG(CAM_ISP, "Flush request in top state %d",
 		 ctx->state);
 	return rc;
@@ -1399,13 +1410,17 @@
 	struct cam_isp_context *ctx_isp;
 
 	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
-	spin_lock_bh(&ctx->lock);
-	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_FLUSH;
-	ctx_isp->frame_skip_count = 2;
-	spin_unlock_bh(&ctx->lock);
 
 	CAM_DBG(CAM_ISP, "Flush request in state %d", ctx->state);
 	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
+
+	/* Move to flush state only if the request is in the pending queue */
+	if (!rc) {
+		spin_lock_bh(&ctx->lock);
+		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_FLUSH;
+		ctx_isp->frame_skip_count = 2;
+		spin_unlock_bh(&ctx->lock);
+	}
 	return rc;
 }
 
@@ -1416,10 +1431,10 @@
 	int rc = 0;
 
 	CAM_DBG(CAM_ISP, "try to flush pending list");
+	spin_lock_bh(&ctx->lock);
 	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
 
 	/* if nothing is in pending req list, change state to acquire*/
-	spin_lock_bh(&ctx->lock);
 	if (list_empty(&ctx->pending_req_list))
 		ctx->state = CAM_CTX_ACQUIRED;
 	spin_unlock_bh(&ctx->lock);
@@ -1995,8 +2010,9 @@
 	flush_req.dev_hdl = ctx->dev_hdl;
 
 	CAM_DBG(CAM_ISP, "try to flush pending list");
+	spin_lock_bh(&ctx->lock);
 	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, &flush_req);
-
+	spin_unlock_bh(&ctx->lock);
 	ctx->state = CAM_CTX_AVAILABLE;
 
 	trace_cam_context_state("ISP", ctx);
@@ -2381,7 +2397,9 @@
 	ctx_isp->active_req_cnt = 0;
 	ctx_isp->reported_req_id = 0;
 	ctx_isp->substate_activated = ctx_isp->rdi_only_context ?
-		CAM_ISP_CTX_ACTIVATED_APPLIED : CAM_ISP_CTX_ACTIVATED_SOF;
+		CAM_ISP_CTX_ACTIVATED_APPLIED :
+		(req_isp->num_fence_map_out) ? CAM_ISP_CTX_ACTIVATED_EPOCH :
+		CAM_ISP_CTX_ACTIVATED_SOF;
 
 	/*
 	 * Only place to change state before calling the hw due to
@@ -2399,6 +2417,11 @@
 		goto end;
 	}
 	CAM_DBG(CAM_ISP, "start device success");
+
+	if (req_isp->num_fence_map_out) {
+		list_del_init(&req->list);
+		list_add_tail(&req->list, &ctx->active_req_list);
+	}
 end:
 	return rc;
 }
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index 38a4497..c1aa501 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -210,7 +210,8 @@
 	int rc = -1;
 	struct cam_hw_intf      *hw_intf;
 
-	for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+	/* Start the slave (the right split) first */
+	for (i = CAM_ISP_HW_SPLIT_MAX - 1; i >= 0; i--) {
 		if (!isp_hw_res->hw_res[i])
 			continue;
 		hw_intf = isp_hw_res->hw_res[i]->hw_intf;
@@ -959,6 +960,7 @@
 {
 	int rc = -1;
 	int i;
+	int master_idx = -1;
 
 	struct cam_ife_hw_mgr               *ife_hw_mgr;
 	struct cam_ife_hw_mgr_res           *csid_res;
@@ -1010,18 +1012,27 @@
 			if (!cid_res->hw_res[i])
 				continue;
 
-			csid_acquire.node_res = NULL;
-			if (csid_res->is_dual_vfe) {
-				if (i == CAM_ISP_HW_SPLIT_LEFT)
-					csid_acquire.sync_mode =
-						CAM_ISP_HW_SYNC_MASTER;
-				else
-					csid_acquire.sync_mode =
-						CAM_ISP_HW_SYNC_SLAVE;
-			}
-
 			hw_intf = ife_hw_mgr->csid_devices[
 				cid_res->hw_res[i]->hw_intf->hw_idx];
+
+			csid_acquire.node_res = NULL;
+			if (csid_res->is_dual_vfe) {
+				if (i == CAM_ISP_HW_SPLIT_LEFT) {
+					master_idx = hw_intf->hw_idx;
+					csid_acquire.sync_mode =
+						CAM_ISP_HW_SYNC_MASTER;
+				} else {
+					if (master_idx == -1) {
+						CAM_ERR(CAM_ISP,
+							"No Master found");
+						goto err;
+					}
+					csid_acquire.sync_mode =
+						CAM_ISP_HW_SYNC_SLAVE;
+					csid_acquire.master_idx = master_idx;
+				}
+			}
+
 			rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
 				&csid_acquire, sizeof(csid_acquire));
 			if (rc) {
@@ -1356,6 +1367,7 @@
 	uint32_t                           num_rdi_port_per_in = 0;
 	uint32_t                           total_pix_port = 0;
 	uint32_t                           total_rdi_port = 0;
+	uint32_t                           in_port_length = 0;
 
 	CAM_DBG(CAM_ISP, "Enter...");
 
@@ -1416,9 +1428,27 @@
 			isp_resource[i].res_hdl,
 			isp_resource[i].length);
 
+		in_port_length = sizeof(struct cam_isp_in_port_info);
+
+		if (in_port_length > isp_resource[i].length) {
+			CAM_ERR(CAM_ISP, "buffer size is not enough");
+			rc = -EINVAL;
+			goto free_res;
+		}
+
 		in_port = memdup_user((void __user *)isp_resource[i].res_hdl,
 			isp_resource[i].length);
 		if (!IS_ERR(in_port)) {
+			in_port_length = sizeof(struct cam_isp_in_port_info) +
+				(in_port->num_out_res - 1) *
+				sizeof(struct cam_isp_out_port_info);
+			if (in_port_length > isp_resource[i].length) {
+				CAM_ERR(CAM_ISP, "buffer size is not enough");
+				rc = -EINVAL;
+				kfree(in_port);
+				goto free_res;
+			}
+
 			rc = cam_ife_mgr_acquire_hw_for_ctx(ife_ctx, in_port,
 				&num_pix_port_per_in, &num_rdi_port_per_in);
 			total_pix_port += num_pix_port_per_in;
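The acquire path now validates isp_resource[i].length twice: once against the fixed header before memdup_user(), and again after the copy, when num_out_res makes the real size known. That two-stage check is the standard way to copy variable-length structures from userspace safely; below is a hedged sketch with an illustrative struct (it uses a flexible array member rather than the driver's trailing one-element array).

#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/err.h>

struct sketch_in_port {			/* illustrative layout */
	u32 num_out_res;
	u32 out_res[];
};

static struct sketch_in_port *sketch_copy_in_port(u64 user_hdl, u32 length)
{
	struct sketch_in_port *in;
	size_t need = sizeof(*in);

	if (length < need)		/* the fixed header must fit */
		return ERR_PTR(-EINVAL);

	in = memdup_user((void __user *)(unsigned long)user_hdl, length);
	if (IS_ERR(in))
		return in;

	/* re-check with the element count that was just copied in */
	need = sizeof(*in) +
	       (size_t)in->num_out_res * sizeof(in->out_res[0]);
	if (need > length) {
		kfree(in);
		return ERR_PTR(-EINVAL);
	}
	return in;
}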
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
index d20450c..3edae4a 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -851,7 +851,6 @@
 	path_data->cid = reserve->cid;
 	path_data->in_format = reserve->in_port->format;
 	path_data->out_format = reserve->out_port->format;
-	path_data->master_idx = reserve->master_idx;
 	path_data->sync_mode = reserve->sync_mode;
 	path_data->height  = reserve->in_port->height;
 	path_data->start_line = reserve->in_port->line_start;
@@ -877,9 +876,11 @@
 		goto end;
 	}
 
-	CAM_DBG(CAM_ISP, "Res id: %d height:%d line_start %d line_stop %d",
+	CAM_DBG(CAM_ISP,
+		"Res id: %d height:%d line_start %d line_stop %d crop_en %d",
 		reserve->res_id, reserve->in_port->height,
-		reserve->in_port->line_start, reserve->in_port->line_stop);
+		reserve->in_port->line_start, reserve->in_port->line_stop,
+		path_data->crop_enable);
 
 	if (reserve->in_port->res_type == CAM_ISP_IFE_IN_RES_TPG) {
 		path_data->dt = CAM_IFE_CSID_TPG_DT_VAL;
@@ -893,20 +894,23 @@
 		path_data->start_pixel = reserve->in_port->left_start;
 		path_data->end_pixel = reserve->in_port->left_stop;
 		path_data->width  = reserve->in_port->left_width;
-		CAM_DBG(CAM_ISP, "CSID:%dmaster:startpixel 0x%x endpixel:0x%x",
+		CAM_DBG(CAM_ISP, "CSID:%d master:startpixel 0x%x endpixel:0x%x",
 			csid_hw->hw_intf->hw_idx, path_data->start_pixel,
 			path_data->end_pixel);
-		CAM_DBG(CAM_ISP, "CSID:%dmaster:line start:0x%x line end:0x%x",
+		CAM_DBG(CAM_ISP, "CSID:%d master:line start:0x%x line end:0x%x",
 			csid_hw->hw_intf->hw_idx, path_data->start_line,
 			path_data->end_line);
 	} else if (reserve->sync_mode == CAM_ISP_HW_SYNC_SLAVE) {
+		path_data->master_idx = reserve->master_idx;
+		CAM_DBG(CAM_ISP, "CSID:%d master_idx=%d",
+			csid_hw->hw_intf->hw_idx, path_data->master_idx);
 		path_data->start_pixel = reserve->in_port->right_start;
 		path_data->end_pixel = reserve->in_port->right_stop;
 		path_data->width  = reserve->in_port->right_width;
 		CAM_DBG(CAM_ISP, "CSID:%d slave:start:0x%x end:0x%x width 0x%x",
 			csid_hw->hw_intf->hw_idx, path_data->start_pixel,
 			path_data->end_pixel, path_data->width);
-		CAM_DBG(CAM_ISP, "CSID:%dmaster:line start:0x%x line end:0x%x",
+		CAM_DBG(CAM_ISP, "CSID:%d slave:line start:0x%x line end:0x%x",
 			csid_hw->hw_intf->hw_idx, path_data->start_line,
 			path_data->end_line);
 	} else {
@@ -1431,19 +1435,6 @@
 	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
 		csid_reg->ipp_reg->csid_ipp_line_drop_period_addr);
 
-	/*Set master or slave IPP */
-	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER)
-		/*Set halt mode as master */
-		val = CSID_HALT_MODE_MASTER << 2;
-	else if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
-		/*Set halt mode as slave and set master idx */
-		val = path_data->master_idx  << 4 | CSID_HALT_MODE_SLAVE << 2;
-	else
-		/* Default is internal halt mode */
-		val = 0;
-
-	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
-		csid_reg->ipp_reg->csid_ipp_ctrl_addr);
 
 	/* Enable the IPP path */
 	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
@@ -1545,19 +1536,31 @@
 
 	CAM_DBG(CAM_ISP, "Enable IPP path");
 
-	/* Resume at frame boundary */
-	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
-		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
-			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
+
+	/* Set master or slave IPP */
+	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER)
+		/*Set halt mode as master */
+		val = CSID_HALT_MODE_MASTER << 2;
+	else if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
+		/*Set halt mode as slave and set master idx */
+		val = path_data->master_idx  << 4 | CSID_HALT_MODE_SLAVE << 2;
+	else
+		/* Default is internal halt mode */
+		val = 0;
+
+	/*
+	 * Resume at frame boundary if Master or No Sync.
+	 * Slave will get resume command from Master.
+	 */
+	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER ||
+		path_data->sync_mode == CAM_ISP_HW_SYNC_NONE)
 		val |= CAM_CSID_RESUME_AT_FRAME_BOUNDARY;
-		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
-			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
-	} else if (path_data->sync_mode == CAM_ISP_HW_SYNC_NONE) {
-		cam_io_w_mb(CAM_CSID_RESUME_AT_FRAME_BOUNDARY,
-			soc_info->reg_map[0].mem_base +
-			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
-	}
-	/* for slave mode, not need to resume for slave device */
+
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_ctrl_addr);
+
+	CAM_DBG(CAM_ISP, "CSID:%d IPP Ctrl val: 0x%x",
+			csid_hw->hw_intf->hw_idx, val);
 
 	/* Enable the required ipp interrupts */
 	val = CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW;
@@ -1569,6 +1572,7 @@
 
 	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
 		csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+	CAM_DBG(CAM_ISP, "Enable IPP IRQ mask 0x%x", val);
 
 	res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
 
@@ -2618,38 +2622,46 @@
 
 	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOT_IRQ) {
 		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL0_EOT_CAPTURED) {
-			CAM_ERR(CAM_ISP, "CSID:%d PHY_DL0_EOT_CAPTURED",
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL0_EOT_CAPTURED",
 				csid_hw->hw_intf->hw_idx);
 		}
 		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL1_EOT_CAPTURED) {
-			CAM_ERR(CAM_ISP, "CSID:%d PHY_DL1_EOT_CAPTURED",
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL1_EOT_CAPTURED",
 				csid_hw->hw_intf->hw_idx);
 		}
 		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL2_EOT_CAPTURED) {
-			CAM_ERR(CAM_ISP, "CSID:%d PHY_DL2_EOT_CAPTURED",
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL2_EOT_CAPTURED",
 				csid_hw->hw_intf->hw_idx);
 		}
 		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL3_EOT_CAPTURED) {
-			CAM_ERR(CAM_ISP, "CSID:%d PHY_DL3_EOT_CAPTURED",
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL3_EOT_CAPTURED",
 				csid_hw->hw_intf->hw_idx);
 		}
 	}
 
 	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOT_IRQ) {
 		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL0_SOT_CAPTURED) {
-			CAM_ERR(CAM_ISP, "CSID:%d PHY_DL0_SOT_CAPTURED",
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL0_SOT_CAPTURED",
 				csid_hw->hw_intf->hw_idx);
 		}
 		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL1_SOT_CAPTURED) {
-			CAM_ERR(CAM_ISP, "CSID:%d PHY_DL1_SOT_CAPTURED",
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL1_SOT_CAPTURED",
 				csid_hw->hw_intf->hw_idx);
 		}
 		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL2_SOT_CAPTURED) {
-			CAM_ERR(CAM_ISP, "CSID:%d PHY_DL2_SOT_CAPTURED",
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL2_SOT_CAPTURED",
 				csid_hw->hw_intf->hw_idx);
 		}
 		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL3_SOT_CAPTURED) {
-			CAM_ERR(CAM_ISP, "CSID:%d PHY_DL3_SOT_CAPTURED",
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL3_SOT_CAPTURED",
 				csid_hw->hw_intf->hw_idx);
 		}
 	}
@@ -2709,16 +2721,17 @@
 
 		if ((irq_status_ipp & CSID_PATH_INFO_INPUT_SOF) &&
 			(csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ))
-			CAM_ERR(CAM_ISP, "CSID:%d IPP SOF received",
+			CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d IPP SOF received",
 				csid_hw->hw_intf->hw_idx);
 
 		if ((irq_status_ipp & CSID_PATH_INFO_INPUT_EOF) &&
 			(csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ))
-			CAM_ERR(CAM_ISP, "CSID:%d IPP EOF received",
+			CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d IPP EOF received",
 				csid_hw->hw_intf->hw_idx);
 
 		if (irq_status_ipp & CSID_PATH_ERROR_FIFO_OVERFLOW) {
-			CAM_ERR(CAM_ISP, "CSID:%d IPP fifo over flow",
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID:%d IPP fifo over flow",
 				csid_hw->hw_intf->hw_idx);
 			/*Stop IPP path immediately */
 			cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
@@ -2736,14 +2749,17 @@
 
 		if ((irq_status_rdi[i] & CSID_PATH_INFO_INPUT_SOF) &&
 			(csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ))
-			CAM_ERR(CAM_ISP, "CSID RDI:%d SOF received", i);
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID RDI:%d SOF received", i);
 
 		if ((irq_status_rdi[i]  & CSID_PATH_INFO_INPUT_EOF) &&
 			(csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ))
-			CAM_ERR(CAM_ISP, "CSID RDI:%d EOF received", i);
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID RDI:%d EOF received", i);
 
 		if (irq_status_rdi[i] & CSID_PATH_ERROR_FIFO_OVERFLOW) {
-			CAM_ERR(CAM_ISP, "CSID:%d RDI fifo over flow",
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID:%d RDI fifo over flow",
 				csid_hw->hw_intf->hw_idx);
 			/*Stop RDI path immediately */
 			cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
index a4ba2e1..984adf7 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
index 734cbdb..8bc9bd2 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
@@ -138,6 +138,9 @@
 	camif_data->first_line  = acquire_data->vfe_in.in_port->line_start;
 	camif_data->last_line   = acquire_data->vfe_in.in_port->line_stop;
 
+	CAM_DBG(CAM_ISP, "hw id:%d pix_pattern:%d dsp_mode=%d",
+		camif_res->hw_intf->hw_idx,
+		camif_data->pix_pattern, camif_data->dsp_mode);
 	return rc;
 }
 
@@ -201,6 +204,9 @@
 {
 	struct cam_vfe_mux_camif_data       *rsrc_data;
 	uint32_t                             val = 0;
+	uint32_t                             epoch0_irq_mask;
+	uint32_t                             epoch1_irq_mask;
+	uint32_t                             computed_epoch_line_cfg;
 
 	if (!camif_res) {
 		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
@@ -240,15 +246,24 @@
 		rsrc_data->common_reg->module_ctrl[
 		CAM_VFE_TOP_VER2_MODULE_STATS]->cgc_ovd);
 
-	/* epoch config with 20 line */
-	cam_io_w_mb(rsrc_data->reg_data->epoch_line_cfg,
+	/* epoch config */
+	epoch0_irq_mask = ((rsrc_data->last_line - rsrc_data->first_line) / 2) +
+		rsrc_data->first_line;
+	epoch1_irq_mask = rsrc_data->reg_data->epoch_line_cfg & 0xFFFF;
+	computed_epoch_line_cfg = (epoch0_irq_mask << 16) | epoch1_irq_mask;
+	cam_io_w_mb(computed_epoch_line_cfg,
 		rsrc_data->mem_base + rsrc_data->camif_reg->epoch_irq);
+	CAM_DBG(CAM_ISP, "first_line:%u last_line:%u epoch_line_cfg: 0x%x",
+		rsrc_data->first_line, rsrc_data->last_line,
+		computed_epoch_line_cfg);
 
 	camif_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
 
 	/* Reg Update */
 	cam_io_w_mb(rsrc_data->reg_data->reg_update_cmd_data,
 		rsrc_data->mem_base + rsrc_data->camif_reg->reg_update_cmd);
+	CAM_DBG(CAM_ISP, "hw id:%d RUP val:%d", camif_res->hw_intf->hw_idx,
+		rsrc_data->reg_data->reg_update_cmd_data);
 
 	CAM_DBG(CAM_ISP, "Start Camif IFE %d Done", camif_res->hw_intf->hw_idx);
 	return 0;
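The CAMIF start hunk derives EPOCH0 from the frame geometry (the line halfway between first_line and last_line) instead of a fixed 20-line offset, keeps EPOCH1 from the low 16 bits of the existing default, and packs both into one register value. A standalone worked example of that arithmetic; the frame height and default value are made-up inputs.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t first_line = 0, last_line = 2159;	/* e.g. a 2160-line frame */
	uint32_t default_epoch_cfg = 0x00140014;	/* illustrative default */

	uint32_t epoch0 = (last_line - first_line) / 2 + first_line;
	uint32_t epoch1 = default_epoch_cfg & 0xFFFF;	/* keep the low half */
	uint32_t epoch_irq = (epoch0 << 16) | epoch1;

	printf("epoch0=%u epoch1=%u epoch_irq=0x%08x\n",
	       epoch0, epoch1, epoch_irq);
	return 0;
}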
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
index 5d6045b..be0ca18 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
@@ -34,11 +34,12 @@
 	struct cam_vfe_top_ver2_common_data common_data;
 	struct cam_isp_resource_node        mux_rsrc[CAM_VFE_TOP_VER2_MUX_MAX];
 	unsigned long                       hw_clk_rate;
-	struct cam_axi_vote                 to_be_applied_axi_vote;
 	struct cam_axi_vote                 applied_axi_vote;
-	uint32_t                            counter_to_update_axi_vote;
 	struct cam_axi_vote             req_axi_vote[CAM_VFE_TOP_VER2_MUX_MAX];
 	unsigned long                   req_clk_rate[CAM_VFE_TOP_VER2_MUX_MAX];
+	struct cam_axi_vote             last_vote[CAM_VFE_TOP_VER2_MUX_MAX *
+					CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES];
+	uint32_t                        last_counter;
 	enum cam_vfe_bw_control_action
 		axi_vote_control[CAM_VFE_TOP_VER2_MUX_MAX];
 };
@@ -128,6 +129,7 @@
 	bool start_stop)
 {
 	struct cam_axi_vote sum = {0, 0};
+	struct cam_axi_vote to_be_applied_axi_vote = {0, 0};
 	int i, rc = 0;
 	struct cam_hw_soc_info   *soc_info =
 		top_priv->common_data.soc_info;
@@ -156,6 +158,11 @@
 		sum.uncompressed_bw,
 		sum.compressed_bw);
 
+	top_priv->last_vote[top_priv->last_counter] = sum;
+	top_priv->last_counter = (top_priv->last_counter + 1) %
+		(CAM_VFE_TOP_VER2_MUX_MAX *
+		CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES);
+
 	if ((top_priv->applied_axi_vote.uncompressed_bw ==
 		sum.uncompressed_bw) &&
 		(top_priv->applied_axi_vote.compressed_bw ==
@@ -163,77 +170,60 @@
 		CAM_DBG(CAM_ISP, "BW config unchanged %llu %llu",
 			top_priv->applied_axi_vote.uncompressed_bw,
 			top_priv->applied_axi_vote.compressed_bw);
-		top_priv->counter_to_update_axi_vote = 0;
 		return 0;
 	}
 
-	if ((top_priv->to_be_applied_axi_vote.uncompressed_bw !=
-		sum.uncompressed_bw) ||
-		(top_priv->to_be_applied_axi_vote.compressed_bw !=
-		sum.compressed_bw)) {
-		// we got a new bw value to apply
-		top_priv->counter_to_update_axi_vote = 0;
-
-		top_priv->to_be_applied_axi_vote.uncompressed_bw =
-			sum.uncompressed_bw;
-		top_priv->to_be_applied_axi_vote.compressed_bw =
-			sum.compressed_bw;
-	}
-
 	if (start_stop == true) {
-		CAM_DBG(CAM_ISP,
-			"New bw in start/stop, applying bw now, counter=%d",
-			top_priv->counter_to_update_axi_vote);
-		top_priv->counter_to_update_axi_vote = 0;
-		apply_bw_update = true;
-	} else if ((top_priv->to_be_applied_axi_vote.uncompressed_bw <
-		top_priv->applied_axi_vote.uncompressed_bw) ||
-		(top_priv->to_be_applied_axi_vote.compressed_bw <
-		top_priv->applied_axi_vote.compressed_bw)) {
-		if (top_priv->counter_to_update_axi_vote >=
+		/* need to vote current request immediately */
+		to_be_applied_axi_vote = sum;
+		/* Reset everything, we can start afresh */
+		memset(top_priv->last_vote, 0x0, sizeof(struct cam_axi_vote) *
 			(CAM_VFE_TOP_VER2_MUX_MAX *
-			CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES)) {
-			CAM_DBG(CAM_ISP,
-				"New bw is less, applying bw now, counter=%d",
-				top_priv->counter_to_update_axi_vote);
-			top_priv->counter_to_update_axi_vote = 0;
-			apply_bw_update = true;
-		} else {
-			CAM_DBG(CAM_ISP,
-				"New bw is less, Defer applying bw, counter=%d",
-				top_priv->counter_to_update_axi_vote);
-
-			top_priv->counter_to_update_axi_vote++;
-			apply_bw_update = false;
-		}
+			CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES));
+		top_priv->last_counter = 0;
+		top_priv->last_vote[top_priv->last_counter] = sum;
+		top_priv->last_counter = (top_priv->last_counter + 1) %
+			(CAM_VFE_TOP_VER2_MUX_MAX *
+			CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES);
 	} else {
-		CAM_DBG(CAM_ISP,
-			"New bw is more, applying bw now, counter=%d",
-			top_priv->counter_to_update_axi_vote);
-		top_priv->counter_to_update_axi_vote = 0;
-		apply_bw_update = true;
+		/*
+		 * Find the max bw request in the last few frames. This will be
+		 * the bw that we want to vote to CPAS now.
+		 */
+		for (i = 0; i < (CAM_VFE_TOP_VER2_MUX_MAX *
+			CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES); i++) {
+			if (to_be_applied_axi_vote.compressed_bw <
+				top_priv->last_vote[i].compressed_bw)
+				to_be_applied_axi_vote.compressed_bw =
+					top_priv->last_vote[i].compressed_bw;
+
+			if (to_be_applied_axi_vote.uncompressed_bw <
+				top_priv->last_vote[i].uncompressed_bw)
+				to_be_applied_axi_vote.uncompressed_bw =
+					top_priv->last_vote[i].uncompressed_bw;
+		}
 	}
 
-	CAM_DBG(CAM_ISP,
-		"counter=%d, apply_bw_update=%d",
-		top_priv->counter_to_update_axi_vote,
-		apply_bw_update);
+	if ((to_be_applied_axi_vote.uncompressed_bw !=
+		top_priv->applied_axi_vote.uncompressed_bw) ||
+		(to_be_applied_axi_vote.compressed_bw !=
+		top_priv->applied_axi_vote.compressed_bw))
+		apply_bw_update = true;
+
+	CAM_DBG(CAM_ISP, "apply_bw_update=%d", apply_bw_update);
 
 	if (apply_bw_update == true) {
 		rc = cam_cpas_update_axi_vote(
 			soc_private->cpas_handle,
-			&top_priv->to_be_applied_axi_vote);
+			&to_be_applied_axi_vote);
 		if (!rc) {
 			top_priv->applied_axi_vote.uncompressed_bw =
-				top_priv->
-				to_be_applied_axi_vote.uncompressed_bw;
+			to_be_applied_axi_vote.uncompressed_bw;
 			top_priv->applied_axi_vote.compressed_bw =
-				top_priv->
 				to_be_applied_axi_vote.compressed_bw;
 		} else {
 			CAM_ERR(CAM_ISP, "BW request failed, rc=%d", rc);
 		}
-		top_priv->counter_to_update_axi_vote = 0;
 	}
 
 	return rc;
@@ -706,11 +696,12 @@
 	}
 	vfe_top->top_priv = top_priv;
 	top_priv->hw_clk_rate = 0;
-	top_priv->to_be_applied_axi_vote.compressed_bw = 0;
-	top_priv->to_be_applied_axi_vote.uncompressed_bw = 0;
 	top_priv->applied_axi_vote.compressed_bw = 0;
 	top_priv->applied_axi_vote.uncompressed_bw = 0;
-	top_priv->counter_to_update_axi_vote = 0;
+	memset(top_priv->last_vote, 0x0, sizeof(struct cam_axi_vote) *
+		(CAM_VFE_TOP_VER2_MUX_MAX *
+		CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES));
+	top_priv->last_counter = 0;
 
 	for (i = 0, j = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
 		top_priv->mux_rsrc[i].res_type = CAM_ISP_RESOURCE_VFE_IN;
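
The hunk above replaces the single deferred-vote counter with a small history of recent AXI votes and always applies the per-field maximum of that history, so a transient drop in requested bandwidth is not propagated to CPAS right away. A minimal standalone C sketch of that scheme follows; the names (BW_HISTORY_LEN, struct bw_vote, and so on) are illustrative and are not the driver's own.

#include <stdint.h>

#define BW_HISTORY_LEN 8	/* stands in for MUX_MAX * DELAY_BW_REDUCTION_NUM_FRAMES */

struct bw_vote {
	uint64_t uncompressed_bw;
	uint64_t compressed_bw;
};

struct bw_tracker {
	struct bw_vote history[BW_HISTORY_LEN];
	uint32_t next;
};

/* Record the latest per-frame sum of requests in the ring buffer. */
static void bw_tracker_record(struct bw_tracker *t, struct bw_vote v)
{
	t->history[t->next] = v;
	t->next = (t->next + 1) % BW_HISTORY_LEN;
}

/* The value actually voted is the per-field maximum over the history. */
static struct bw_vote bw_tracker_peak(const struct bw_tracker *t)
{
	struct bw_vote peak = {0, 0};
	uint32_t i;

	for (i = 0; i < BW_HISTORY_LEN; i++) {
		if (peak.uncompressed_bw < t->history[i].uncompressed_bw)
			peak.uncompressed_bw = t->history[i].uncompressed_bw;
		if (peak.compressed_bw < t->history[i].compressed_bw)
			peak.compressed_bw = t->history[i].compressed_bw;
	}
	return peak;
}
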
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
index f172a79..97d076a 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
@@ -770,13 +770,6 @@
 			} else {
 				CAM_ERR(CAM_JPEG, "process_cmd null ");
 			}
-			rc = hw_mgr->devices[dev_type][0]->hw_ops.process_cmd(
-				hw_mgr->devices[dev_type][0]->hw_priv,
-				CAM_JPEG_CMD_SET_IRQ_CB,
-				&irq_cb, sizeof(irq_cb));
-			if (rc)
-				CAM_ERR(CAM_JPEG,
-					"CMD_SET_IRQ_CB failed %d", rc);
 
 			if (hw_mgr->devices[dev_type][0]->hw_ops.stop) {
 				rc = hw_mgr->devices[dev_type][0]->hw_ops.stop(
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
index 898997a..a60661e 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
@@ -879,6 +879,7 @@
 	if (args->num_in_map_entries == 0 || args->num_out_map_entries == 0) {
 		CAM_ERR(CAM_LRME, "Error in port number in %d, out %d",
 			args->num_in_map_entries, args->num_out_map_entries);
+		rc = -EINVAL;
 		goto error;
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index adfac57..4602d6c 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -163,6 +163,34 @@
 }
 
 /**
+ * __cam_req_mgr_validate_inject_delay()
+ *
+ * @brief    : Check if any pd device is introducing inject delay
+ * @tbl      : cam_req_mgr_req_tbl
+ * @curr_idx : slot idx
+ *
+ * @return   : 0 for success, negative for failure
+ */
+static int __cam_req_mgr_validate_inject_delay(
+	struct cam_req_mgr_req_tbl  *tbl,
+	int32_t curr_idx)
+{
+	struct cam_req_mgr_tbl_slot *slot = NULL;
+
+	while (tbl) {
+		slot = &tbl->slot[curr_idx];
+		if (slot->inject_delay > 0) {
+			slot->inject_delay--;
+			return -EAGAIN;
+		}
+		__cam_req_mgr_dec_idx(&curr_idx, tbl->pd_delta,
+			tbl->num_slots);
+		tbl = tbl->next;
+	}
+	return 0;
+}
+
+/**
  * __cam_req_mgr_traverse()
  *
  * @brief    : Traverse through pd tables, it will internally cover all linked
@@ -201,14 +229,17 @@
 		tbl->skip_traverse, traverse_data->in_q->slot[curr_idx].status,
 		traverse_data->in_q->slot[curr_idx].skip_idx);
 
-	if ((slot->inject_delay > 0) &&
-		(traverse_data->self_link == true)) {
-		CAM_DBG(CAM_CRM, "Injecting Delay of one frame");
-		apply_data[tbl->pd].req_id = -1;
-		slot->inject_delay--;
-		/* This pd table is not ready to proceed with asked idx */
-		SET_FAILURE_BIT(traverse_data->result, tbl->pd);
-		return -EAGAIN;
+	if ((traverse_data->self_link == true) &&
+		(!traverse_data->inject_delay_chk)) {
+		rc = __cam_req_mgr_validate_inject_delay(tbl, curr_idx);
+		if (rc) {
+			CAM_DBG(CAM_CRM, "Injecting Delay of one frame");
+			apply_data[tbl->pd].req_id = -1;
+			/* This pd tbl not ready to proceed with asked idx */
+			SET_FAILURE_BIT(traverse_data->result, tbl->pd);
+			return -EAGAIN;
+		}
+		traverse_data->inject_delay_chk = true;
 	}
 
 	/* Check if req is ready or in skip mode or pd tbl is in skip mode */
@@ -520,6 +551,7 @@
 	traverse_data.result = 0;
 	traverse_data.validate_only = validate_only;
 	traverse_data.self_link = self_link;
+	traverse_data.inject_delay_chk = false;
 	traverse_data.open_req_cnt = link->open_req_cnt;
 	/*
 	 *  Traverse through all pd tables, if result is success,
@@ -1447,6 +1479,8 @@
 		if (idx < 0) {
 			CAM_ERR(CAM_CRM, "req_id %lld not found in input queue",
 			flush_info->req_id);
+			mutex_unlock(&link->req.lock);
+			return -EINVAL;
 		} else {
 			CAM_DBG(CAM_CRM, "req_id %lld found at idx %d",
 				flush_info->req_id, idx);
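
The new __cam_req_mgr_validate_inject_delay() above walks every pipeline-delay (pd) table linked off the request table, stepping the slot index down by each table's pd_delta, and defers the apply by one frame as soon as any slot still carries an inject delay. A hedged standalone sketch of that walk, using illustrative names rather than the driver's structures:

#include <stdbool.h>

struct pd_slot {
	int inject_delay;
};

struct pd_table {
	struct pd_slot *slots;
	int num_slots;
	int pd_delta;			/* index offset to the next table */
	struct pd_table *next;
};

/*
 * Returns true when no table still has a pending delay at its slot;
 * otherwise decrements the first pending delay and asks the caller to
 * defer the request by one frame.
 */
static bool pd_tables_ready(struct pd_table *tbl, int idx)
{
	while (tbl) {
		if (tbl->slots[idx].inject_delay > 0) {
			tbl->slots[idx].inject_delay--;
			return false;
		}
		idx = (idx - tbl->pd_delta + tbl->num_slots) % tbl->num_slots;
		tbl = tbl->next;
	}
	return true;
}
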
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
index 73ffb81..025c16a 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
@@ -126,15 +126,16 @@
 
 /**
  * struct cam_req_mgr_traverse
- * @idx           : slot index
- * @result        : contains which all tables were able to apply successfully
- * @tbl           : pointer of pipeline delay based request table
- * @apply_data    : pointer which various tables will update during traverse
- * @in_q          : input request queue pointer
- * @validate_only : Whether to validate only and/or update settings
- * @self_link     : To indicate whether the check is for the given link or the
- *                  other sync link
- * @open_req_cnt  : Count of open requests yet to be serviced in the kernel.
+ * @idx              : slot index
+ * @result           : contains which all tables were able to apply successfully
+ * @tbl              : pointer of pipeline delay based request table
+ * @apply_data       : pointer which various tables will update during traverse
+ * @in_q             : input request queue pointer
+ * @validate_only    : Whether to validate only and/or update settings
+ * @self_link        : To indicate whether the check is for the given link or
+ *                     the other sync link
+ * @inject_delay_chk : if inject delay has been validated for all pd devices
+ * @open_req_cnt     : Count of open requests yet to be serviced in the kernel.
  */
 struct cam_req_mgr_traverse {
 	int32_t                       idx;
@@ -144,7 +145,8 @@
 	struct cam_req_mgr_req_queue *in_q;
 	bool                          validate_only;
 	bool                          self_link;
-	int32_t                      open_req_cnt;
+	bool                          inject_delay_chk;
+	int32_t                       open_req_cnt;
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
index 966b573..066efd6 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
@@ -59,11 +59,11 @@
 		(struct cam_req_mgr_core_workq *)task->parent;
 	unsigned long flags = 0;
 
-	WORKQ_ACQUIRE_LOCK(workq, flags);
 	list_del_init(&task->entry);
 	task->cancel = 0;
 	task->process_cb = NULL;
 	task->priv = NULL;
+	WORKQ_ACQUIRE_LOCK(workq, flags);
 	list_add_tail(&task->entry,
 		&workq->task.empty_head);
 	atomic_add(1, &workq->task.free_cnt);
@@ -127,28 +127,6 @@
 	}
 }
 
-void crm_workq_clear_q(struct cam_req_mgr_core_workq *workq)
-{
-	int32_t                 i = CRM_TASK_PRIORITY_0;
-	struct crm_workq_task  *task, *task_save;
-
-	CAM_DBG(CAM_CRM, "pending_cnt %d",
-		atomic_read(&workq->task.pending_cnt));
-
-	while (i < CRM_TASK_PRIORITY_MAX) {
-		if (!list_empty(&workq->task.process_head[i])) {
-			list_for_each_entry_safe(task, task_save,
-				&workq->task.process_head[i], entry) {
-				cam_req_mgr_workq_put_task(task);
-				CAM_WARN(CAM_CRM, "flush task %pK, %d, cnt %d",
-					task, i, atomic_read(
-					&workq->task.free_cnt));
-			}
-		}
-		i++;
-	}
-}
-
 int cam_req_mgr_workq_enqueue_task(struct crm_workq_task *task,
 	void *priv, int32_t prio)
 {
@@ -167,10 +145,6 @@
 		rc = -EINVAL;
 		goto end;
 	}
-	if (!workq->job) {
-		rc = -EINVAL;
-		goto end;
-	}
 
 	if (task->cancel == 1) {
 		cam_req_mgr_workq_put_task(task);
@@ -184,16 +158,21 @@
 		? prio : CRM_TASK_PRIORITY_0;
 
 	WORKQ_ACQUIRE_LOCK(workq, flags);
+	if (!workq->job) {
+		rc = -EINVAL;
+		WORKQ_RELEASE_LOCK(workq, flags);
+		goto end;
+	}
+
 	list_add_tail(&task->entry,
 		&workq->task.process_head[task->priority]);
-	WORKQ_RELEASE_LOCK(workq, flags);
 
 	atomic_add(1, &workq->task.pending_cnt);
 	CAM_DBG(CAM_CRM, "enq task %pK pending_cnt %d",
 		task, atomic_read(&workq->task.pending_cnt));
 
 	queue_work(workq->job, &workq->work);
-
+	WORKQ_RELEASE_LOCK(workq, flags);
 end:
 	return rc;
 }
@@ -252,8 +231,7 @@
 			task = &crm_workq->task.pool[i];
 			task->parent = (void *)crm_workq;
 			/* Put all tasks in free pool */
-			list_add_tail(&task->entry,
-			&crm_workq->task.process_head[CRM_TASK_PRIORITY_0]);
+			INIT_LIST_HEAD(&task->entry);
 			cam_req_mgr_workq_put_task(task);
 		}
 		*workq = crm_workq;
@@ -266,13 +244,18 @@
 
 void cam_req_mgr_workq_destroy(struct cam_req_mgr_core_workq **crm_workq)
 {
+	unsigned long flags = 0;
+	struct workqueue_struct   *job;
 	CAM_DBG(CAM_CRM, "destroy workque %pK", crm_workq);
 	if (*crm_workq) {
-		crm_workq_clear_q(*crm_workq);
+		WORKQ_ACQUIRE_LOCK(*crm_workq, flags);
 		if ((*crm_workq)->job) {
-			destroy_workqueue((*crm_workq)->job);
+			job = (*crm_workq)->job;
 			(*crm_workq)->job = NULL;
-		}
+			WORKQ_RELEASE_LOCK(*crm_workq, flags);
+			destroy_workqueue(job);
+		} else
+			WORKQ_RELEASE_LOCK(*crm_workq, flags);
 		kfree((*crm_workq)->task.pool);
 		kfree(*crm_workq);
 		*crm_workq = NULL;
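
The workq changes above move the workq->job liveness check and the enqueue under the same lock that cam_req_mgr_workq_destroy() now takes before clearing job, so a producer can no longer queue work onto a workqueue that is being torn down. A minimal user-space sketch of the same check-then-enqueue-under-lock pattern (pthread mutex standing in for the spinlock; names are illustrative, not the driver's):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

struct simple_workq {
	pthread_mutex_t lock;
	bool alive;			/* cleared by destroy under the lock */
	/* ... task lists elided ... */
};

static int simple_workq_enqueue(struct simple_workq *wq)
{
	pthread_mutex_lock(&wq->lock);
	if (!wq->alive) {		/* destroy already ran */
		pthread_mutex_unlock(&wq->lock);
		return -EINVAL;
	}
	/* add the task and kick the worker while still holding the lock */
	pthread_mutex_unlock(&wq->lock);
	return 0;
}

static void simple_workq_destroy(struct simple_workq *wq)
{
	pthread_mutex_lock(&wq->lock);
	wq->alive = false;		/* no new tasks can be queued past here */
	pthread_mutex_unlock(&wq->lock);
	/* safe to flush and free the worker outside the lock */
}
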
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
index 2f74765..a34d70c 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
@@ -603,7 +603,11 @@
 
 void cam_actuator_shutdown(struct cam_actuator_ctrl_t *a_ctrl)
 {
-	int rc;
+	int rc = 0;
+	struct cam_actuator_soc_private  *soc_private =
+		(struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
+	struct cam_sensor_power_ctrl_t *power_info =
+		&soc_private->power_info;
 
 	if (a_ctrl->cam_act_state == CAM_ACTUATOR_INIT)
 		return;
@@ -612,6 +616,7 @@
 		rc = cam_actuator_power_down(a_ctrl);
 		if (rc < 0)
 			CAM_ERR(CAM_ACTUATOR, "Actuator Power down failed");
+		a_ctrl->cam_act_state = CAM_ACTUATOR_ACQUIRE;
 	}
 
 	if (a_ctrl->cam_act_state >= CAM_ACTUATOR_ACQUIRE) {
@@ -622,6 +627,12 @@
 		a_ctrl->bridge_intf.link_hdl = -1;
 		a_ctrl->bridge_intf.session_hdl = -1;
 	}
+
+	kfree(power_info->power_setting);
+	kfree(power_info->power_down_setting);
+	power_info->power_setting = NULL;
+	power_info->power_down_setting = NULL;
+
 	a_ctrl->cam_act_state = CAM_ACTUATOR_INIT;
 }
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
index fe69fcb..b47f4f3 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
@@ -205,8 +205,6 @@
 	CAM_INFO(CAM_CCI, "****CCI MASTER %d Registers ****",
 		master);
 	for (i = 0; i < DEBUG_MASTER_REG_COUNT; i++) {
-		if (i == 6)
-			continue;
 		reg_offset = DEBUG_MASTER_REG_START + master*0x100 + i * 4;
 		read_val = cam_io_r_mb(base + reg_offset);
 		CAM_INFO(CAM_CCI, "offset = 0x%X value = 0x%X",
@@ -868,6 +866,180 @@
 	return rc;
 }
 
+static int32_t cam_cci_burst_read(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *c_ctrl)
+{
+	int32_t rc = 0;
+	uint32_t val = 0, i = 0;
+	unsigned long rem_jiffies;
+	int32_t read_words = 0, exp_words = 0;
+	int32_t index = 0, first_byte = 0, total_read_words = 0;
+	enum cci_i2c_master_t master;
+	enum cci_i2c_queue_t queue = QUEUE_1;
+	struct cci_device                  *cci_dev = NULL;
+	struct cam_cci_read_cfg            *read_cfg = NULL;
+	struct cam_hw_soc_info             *soc_info = NULL;
+	void __iomem                       *base = NULL;
+
+	cci_dev = v4l2_get_subdevdata(sd);
+	master = c_ctrl->cci_info->cci_i2c_master;
+	read_cfg = &c_ctrl->cfg.cci_i2c_read_cfg;
+
+	if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX
+		|| c_ctrl->cci_info->cci_i2c_master < 0) {
+		CAM_ERR(CAM_CCI, "Invalid I2C master addr");
+		return -EINVAL;
+	}
+
+	soc_info = &cci_dev->soc_info;
+	base = soc_info->reg_map[0].mem_base;
+	mutex_lock(&cci_dev->cci_master_info[master].mutex_q[queue]);
+
+	/*
+	 * TODO: if there is a change in the frequency of operation,
+	 * wait for the previous transaction to complete.
+	 */
+
+	/* Set the I2C Frequency */
+	rc = cam_cci_set_clk_param(cci_dev, c_ctrl);
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "cam_cci_set_clk_param failed rc = %d", rc);
+		goto rel_mutex;
+	}
+
+	/*
+	 * Call validate queue to make sure queue is empty before starting.
+	 * If this call fails, don't proceed with i2c_read call. This is to
+	 * avoid overflow / underflow of queue
+	 */
+	rc = cam_cci_validate_queue(cci_dev,
+		cci_dev->cci_i2c_queue_info[master][queue].max_queue_size - 1,
+		master, queue);
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "Initial validation failed rc %d", rc);
+		goto rel_mutex;
+	}
+
+	if (c_ctrl->cci_info->retries > CCI_I2C_READ_MAX_RETRIES) {
+		CAM_ERR(CAM_CCI, "More than max retries");
+		rc = -EINVAL;
+		goto rel_mutex;
+	}
+
+	if (read_cfg->data == NULL) {
+		CAM_ERR(CAM_CCI, "Data ptr is NULL");
+		rc = -EINVAL;
+		goto rel_mutex;
+	}
+
+	if (read_cfg->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
+		CAM_ERR(CAM_CCI, "failed : Invalid addr type: %u",
+			read_cfg->addr_type);
+		rc = -EINVAL;
+		goto rel_mutex;
+	}
+
+	CAM_DBG(CAM_CCI, "set param sid 0x%x retries %d id_map %d",
+		c_ctrl->cci_info->sid, c_ctrl->cci_info->retries,
+		c_ctrl->cci_info->id_map);
+	val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 |
+		c_ctrl->cci_info->retries << 16 |
+		c_ctrl->cci_info->id_map << 18;
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+		goto rel_mutex;
+	}
+
+	val = CCI_I2C_LOCK_CMD;
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+		goto rel_mutex;
+	}
+
+	val = CCI_I2C_WRITE_DISABLE_P_CMD | (read_cfg->addr_type << 4);
+	for (i = 0; i < read_cfg->addr_type; i++) {
+		val |= ((read_cfg->addr >> (i << 3)) & 0xFF) <<
+			((read_cfg->addr_type - i) << 3);
+	}
+
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+		goto rel_mutex;
+	}
+
+	val = CCI_I2C_READ_CMD | (read_cfg->num_byte << 4);
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+		goto rel_mutex;
+	}
+
+	val = CCI_I2C_UNLOCK_CMD;
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+		goto rel_mutex;
+	}
+
+	val = cam_io_r_mb(base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR
+			+ master * 0x200 + queue * 0x100);
+	CAM_DBG(CAM_CCI, "cur word cnt 0x%x", val);
+	cam_io_w_mb(val, base + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR
+			+ master * 0x200 + queue * 0x100);
+
+	val = 1 << ((master * 2) + queue);
+	cam_io_w_mb(val, base + CCI_QUEUE_START_ADDR);
+	exp_words = ((read_cfg->num_byte / 4) + 1);
+
+	while (exp_words != total_read_words) {
+		rem_jiffies = wait_for_completion_timeout(
+			&cci_dev->cci_master_info[master].reset_complete,
+			CCI_TIMEOUT);
+		if (!rem_jiffies) {
+			rc = -ETIMEDOUT;
+			val = cam_io_r_mb(base +
+				CCI_I2C_M0_READ_BUF_LEVEL_ADDR +
+				master * 0x100);
+			CAM_ERR(CAM_CCI,
+				"wait_for_completion_timeout rc = %d FIFO buf_lvl:0x%x",
+				rc, val);
+#ifdef DUMP_CCI_REGISTERS
+			cam_cci_dump_registers(cci_dev, master, queue);
+#endif
+			cam_cci_flush_queue(cci_dev, master);
+			goto rel_mutex;
+		}
+
+		read_words = cam_io_r_mb(base +
+			CCI_I2C_M0_READ_BUF_LEVEL_ADDR + master * 0x100);
+		total_read_words += read_words;
+		do {
+			val = cam_io_r_mb(base +
+				CCI_I2C_M0_READ_DATA_ADDR + master * 0x100);
+			for (i = 0; (i < 4) &&
+				(index < read_cfg->num_byte); i++) {
+				CAM_DBG(CAM_CCI, "i:%d index:%d", i, index);
+				if (!first_byte) {
+					CAM_DBG(CAM_CCI, "sid 0x%x",
+						val & 0xFF);
+					first_byte++;
+				} else {
+					read_cfg->data[index] =
+						(val  >> (i * 8)) & 0xFF;
+					CAM_DBG(CAM_CCI, "data[%d] 0x%x", index,
+						read_cfg->data[index]);
+					index++;
+				}
+			}
+		} while (--read_words > 0);
+	}
+
+rel_mutex:
+	mutex_unlock(&cci_dev->cci_master_info[master].mutex_q[queue]);
+	return rc;
+}
+
 static int32_t cam_cci_read(struct v4l2_subdev *sd,
 	struct cam_cci_ctrl *c_ctrl)
 {
@@ -1004,7 +1176,11 @@
 #endif
 		if (rc == 0)
 			rc = -ETIMEDOUT;
-		CAM_ERR(CAM_CCI, "wait_for_completion_timeout rc = %d", rc);
+		val = cam_io_r_mb(base +
+			CCI_I2C_M0_READ_BUF_LEVEL_ADDR + master * 0x100);
+		CAM_ERR(CAM_CCI,
+			"wait_for_completion_timeout rc = %d FIFO buf_lvl: 0x%x",
+			rc, val);
 		cam_cci_flush_queue(cci_dev, master);
 		goto rel_mutex;
 	} else {
@@ -1224,19 +1400,26 @@
 
 	read_bytes = read_cfg->num_byte;
 	do {
-		if (read_bytes > CCI_READ_MAX)
-			read_cfg->num_byte = CCI_READ_MAX;
+		if (read_bytes > CCI_I2C_MAX_BYTE_COUNT)
+			read_cfg->num_byte = CCI_I2C_MAX_BYTE_COUNT;
 		else
 			read_cfg->num_byte = read_bytes;
-		rc = cam_cci_read(sd, c_ctrl);
-		if (rc < 0) {
-			CAM_ERR(CAM_CCI, "failed rc %d", rc);
+
+		if (read_cfg->num_byte > CCI_READ_MAX)
+			rc = cam_cci_burst_read(sd, c_ctrl);
+		else
+			rc = cam_cci_read(sd, c_ctrl);
+
+		if (rc) {
+			CAM_ERR(CAM_CCI, "failed to read rc:%d", rc);
 			goto ERROR;
 		}
-		if (read_bytes > CCI_READ_MAX) {
-			read_cfg->addr += CCI_READ_MAX;
-			read_cfg->data += CCI_READ_MAX;
-			read_bytes -= CCI_READ_MAX;
+
+		if (read_bytes > CCI_I2C_MAX_BYTE_COUNT) {
+			read_cfg->addr += (CCI_I2C_MAX_BYTE_COUNT /
+				read_cfg->data_type);
+			read_cfg->data += CCI_I2C_MAX_BYTE_COUNT;
+			read_bytes -= CCI_I2C_MAX_BYTE_COUNT;
 		} else {
 			read_bytes = 0;
 		}
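
In cam_cci_burst_read() above, each 32-bit word popped from the read FIFO packs up to four payload bytes, least-significant byte first, and the very first byte of the transfer is the slave-id echo, which is skipped rather than stored. A self-contained C sketch of that unpacking loop (function and parameter names are illustrative):

#include <stddef.h>
#include <stdint.h>

/* Copy payload bytes out of 32-bit FIFO words; returns bytes produced. */
static size_t unpack_fifo_words(const uint32_t *words, size_t num_words,
				uint8_t *out, size_t num_bytes)
{
	size_t w, i, idx = 0;
	int skipped_sid = 0;

	for (w = 0; w < num_words; w++) {
		for (i = 0; i < 4 && idx < num_bytes; i++) {
			uint8_t byte = (words[w] >> (i * 8)) & 0xFF;

			if (!skipped_sid) {
				skipped_sid = 1;	/* slave-id echo, dropped */
				continue;
			}
			out[idx++] = byte;
		}
	}
	return idx;
}
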
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
index da08bc7..c8ca85d 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
@@ -58,18 +58,23 @@
 
 irqreturn_t cam_cci_irq(int irq_num, void *data)
 {
-	uint32_t irq;
+	uint32_t irq_status0 = 0;
+	uint32_t irq_status1 = 0;
 	struct cci_device *cci_dev = data;
 	struct cam_hw_soc_info *soc_info =
 		&cci_dev->soc_info;
 	void __iomem *base = soc_info->reg_map[0].mem_base;
 	unsigned long flags;
+	bool burst_read_assert = false;
 
-	irq = cam_io_r_mb(base + CCI_IRQ_STATUS_0_ADDR);
-	cam_io_w_mb(irq, base + CCI_IRQ_CLEAR_0_ADDR);
+	irq_status0 = cam_io_r_mb(base + CCI_IRQ_STATUS_0_ADDR);
+	irq_status1 = cam_io_r_mb(base + CCI_IRQ_STATUS_1_ADDR);
+	cam_io_w_mb(irq_status0, base + CCI_IRQ_CLEAR_0_ADDR);
+	cam_io_w_mb(irq_status1, base + CCI_IRQ_CLEAR_1_ADDR);
 	cam_io_w_mb(0x1, base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
 
-	if (irq & CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK) {
+	CAM_DBG(CAM_CCI, "irq0:%x irq1:%x", irq_status0, irq_status1);
+	if (irq_status0 & CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK) {
 		if (cci_dev->cci_master_info[MASTER_0].reset_pending == TRUE) {
 			cci_dev->cci_master_info[MASTER_0].reset_pending =
 				FALSE;
@@ -83,11 +88,24 @@
 			&cci_dev->cci_master_info[MASTER_1].reset_complete);
 		}
 	}
-	if (irq & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) {
+
+	if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) &&
+		(irq_status1 & CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD)) {
+		cci_dev->cci_master_info[MASTER_0].status = 0;
+		complete(&cci_dev->cci_master_info[MASTER_0].reset_complete);
+		burst_read_assert = true;
+	}
+	if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) &&
+		(!burst_read_assert)) {
 		cci_dev->cci_master_info[MASTER_0].status = 0;
 		complete(&cci_dev->cci_master_info[MASTER_0].reset_complete);
 	}
-	if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK) {
+	if ((irq_status1 & CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD) &&
+		(!burst_read_assert)) {
+		cci_dev->cci_master_info[MASTER_0].status = 0;
+		complete(&cci_dev->cci_master_info[MASTER_0].reset_complete);
+	}
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK) {
 		struct cam_cci_master_info *cci_master_info;
 
 		cci_master_info = &cci_dev->cci_master_info[MASTER_0];
@@ -104,7 +122,7 @@
 			&cci_dev->cci_master_info[MASTER_0].lock_q[QUEUE_0],
 			flags);
 	}
-	if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK) {
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK) {
 		struct cam_cci_master_info *cci_master_info;
 
 		cci_master_info = &cci_dev->cci_master_info[MASTER_0];
@@ -121,11 +139,23 @@
 			&cci_dev->cci_master_info[MASTER_0].lock_q[QUEUE_1],
 			flags);
 	}
-	if (irq & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) {
+	if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) &&
+		(irq_status1 & CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD)) {
+		cci_dev->cci_master_info[MASTER_1].status = 0;
+		complete(&cci_dev->cci_master_info[MASTER_1].reset_complete);
+		burst_read_assert = true;
+	}
+	if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) &&
+		(!burst_read_assert)) {
 		cci_dev->cci_master_info[MASTER_1].status = 0;
 		complete(&cci_dev->cci_master_info[MASTER_1].reset_complete);
 	}
-	if (irq & CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK) {
+	if ((irq_status1 & CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD) &&
+		(!burst_read_assert)) {
+		cci_dev->cci_master_info[MASTER_1].status = 0;
+		complete(&cci_dev->cci_master_info[MASTER_1].reset_complete);
+	}
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK) {
 		struct cam_cci_master_info *cci_master_info;
 
 		cci_master_info = &cci_dev->cci_master_info[MASTER_1];
@@ -142,7 +172,7 @@
 			&cci_dev->cci_master_info[MASTER_1].lock_q[QUEUE_0],
 			flags);
 	}
-	if (irq & CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT_BMSK) {
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT_BMSK) {
 		struct cam_cci_master_info *cci_master_info;
 
 		cci_master_info = &cci_dev->cci_master_info[MASTER_1];
@@ -159,27 +189,27 @@
 			&cci_dev->cci_master_info[MASTER_1].lock_q[QUEUE_1],
 			flags);
 	}
-	if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK) {
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK) {
 		cci_dev->cci_master_info[MASTER_0].reset_pending = TRUE;
 		cam_io_w_mb(CCI_M0_RESET_RMSK,
 			base + CCI_RESET_CMD_ADDR);
 	}
-	if (irq & CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK) {
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK) {
 		cci_dev->cci_master_info[MASTER_1].reset_pending = TRUE;
 		cam_io_w_mb(CCI_M1_RESET_RMSK,
 			base + CCI_RESET_CMD_ADDR);
 	}
-	if (irq & CCI_IRQ_STATUS_0_I2C_M0_ERROR_BMSK) {
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_ERROR_BMSK) {
 		cci_dev->cci_master_info[MASTER_0].status = -EINVAL;
 		cam_io_w_mb(CCI_M0_HALT_REQ_RMSK,
 			base + CCI_HALT_REQ_ADDR);
-		CAM_DBG(CAM_CCI, "MASTER_0 error 0x%x", irq);
+		CAM_DBG(CAM_CCI, "MASTER_0 error 0x%x", irq_status0);
 	}
-	if (irq & CCI_IRQ_STATUS_0_I2C_M1_ERROR_BMSK) {
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_ERROR_BMSK) {
 		cci_dev->cci_master_info[MASTER_1].status = -EINVAL;
 		cam_io_w_mb(CCI_M1_HALT_REQ_RMSK,
 			base + CCI_HALT_REQ_ADDR);
-		CAM_DBG(CAM_CCI, "MASTER_1 error 0x%x", irq);
+		CAM_DBG(CAM_CCI, "MASTER_1 error 0x%x", irq_status0);
 	}
 	return IRQ_HANDLED;
 }
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
index 7cde619..d48ffd1 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
@@ -65,19 +65,14 @@
 #define MAX_LRME_V4l2_EVENTS 30
 
 /* Max bytes that can be read per CCI read transaction */
-#define CCI_READ_MAX 12
+#define CCI_READ_MAX 256
 #define CCI_I2C_READ_MAX_RETRIES 3
 #define CCI_I2C_MAX_READ 8192
 #define CCI_I2C_MAX_WRITE 8192
+#define CCI_I2C_MAX_BYTE_COUNT 65535
 
 #define CAMX_CCI_DEV_NAME "cam-cci-driver"
 
-/* Max bytes that can be read per CCI read transaction */
-#define CCI_READ_MAX 12
-#define CCI_I2C_READ_MAX_RETRIES 3
-#define CCI_I2C_MAX_READ 8192
-#define CCI_I2C_MAX_WRITE 8192
-
 #define PRIORITY_QUEUE (QUEUE_0)
 #define SYNC_QUEUE (QUEUE_1)
 
@@ -125,6 +120,7 @@
 	uint16_t addr_type;
 	uint8_t *data;
 	uint16_t num_byte;
+	uint16_t data_type;
 };
 
 struct cam_cci_i2c_queue_info {
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_hwreg.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_hwreg.h
index c18593e..31c8e26 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_hwreg.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_hwreg.h
@@ -43,27 +43,36 @@
 #define CCI_I2C_M0_Q0_LOAD_DATA_ADDR                                0x00000310
 #define CCI_IRQ_MASK_0_ADDR                                         0x00000c04
 #define CCI_IRQ_MASK_0_RMSK                                         0x7fff7ff7
+#define CCI_IRQ_MASK_1_ADDR                                         0x00000c10
+#define CCI_IRQ_MASK_1_RMSK                                         0x00110000
 #define CCI_IRQ_CLEAR_0_ADDR                                        0x00000c08
+#define CCI_IRQ_CLEAR_1_ADDR                                        0x00000c14
 #define CCI_IRQ_STATUS_0_ADDR                                       0x00000c0c
+#define CCI_IRQ_STATUS_1_ADDR                                       0x00000c18
 #define CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK                   0x4000000
 #define CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK                   0x2000000
 #define CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK                           0x1000000
 #define CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT_BMSK                        0x100000
 #define CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK                         0x10000
 #define CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK                            0x1000
+#define CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD                          0x100000
 #define CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK                           0x100
 #define CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK                            0x10
 #define CCI_IRQ_STATUS_0_I2C_M0_ERROR_BMSK                          0x18000EE6
 #define CCI_IRQ_STATUS_0_I2C_M1_ERROR_BMSK                          0x60EE6000
 #define CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK                               0x1
+#define CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD                           0x10000
+#define CCI_I2C_M0_RD_THRESHOLD_ADDR                                0x00000120
+#define CCI_I2C_M1_RD_THRESHOLD_ADDR                                0x00000220
+#define CCI_I2C_RD_THRESHOLD_VALUE                                        0x38
 #define CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR                               0x00000c00
 
 #define DEBUG_TOP_REG_START                                                0x0
 #define DEBUG_TOP_REG_COUNT                                                 14
 #define DEBUG_MASTER_REG_START                                           0x100
-#define DEBUG_MASTER_REG_COUNT                                               8
+#define DEBUG_MASTER_REG_COUNT                                               9
 #define DEBUG_MASTER_QUEUE_REG_START                                     0x300
-#define DEBUG_MASTER_QUEUE_REG_COUNT                                         6
+#define DEBUG_MASTER_QUEUE_REG_COUNT                                         7
 #define DEBUG_INTR_REG_START                                             0xC00
 #define DEBUG_INTR_REG_COUNT                                                 7
 #endif /* _CAM_CCI_HWREG_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
index 295259d..e0b27ca 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
@@ -146,6 +146,10 @@
 		base + CCI_IRQ_MASK_0_ADDR);
 	cam_io_w_mb(CCI_IRQ_MASK_0_RMSK,
 		base + CCI_IRQ_CLEAR_0_ADDR);
+	cam_io_w_mb(CCI_IRQ_MASK_1_RMSK,
+		base + CCI_IRQ_MASK_1_ADDR);
+	cam_io_w_mb(CCI_IRQ_MASK_1_RMSK,
+		base + CCI_IRQ_CLEAR_1_ADDR);
 	cam_io_w_mb(0x1, base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
 
 	for (i = 0; i < MASTER_MAX; i++) {
@@ -157,6 +161,13 @@
 			flush_workqueue(cci_dev->write_wq[i]);
 		}
 	}
+
+	/* Set RD FIFO threshold for M0 & M1 */
+	cam_io_w_mb(CCI_I2C_RD_THRESHOLD_VALUE,
+		base + CCI_I2C_M0_RD_THRESHOLD_ADDR);
+	cam_io_w_mb(CCI_I2C_RD_THRESHOLD_VALUE,
+		base + CCI_I2C_M1_RD_THRESHOLD_ADDR);
+
 	cci_dev->cci_state = CCI_STATE_ENABLED;
 
 	return 0;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
index dbbac08..2688cd5 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
@@ -614,6 +614,13 @@
 
 		if (csiphy_dev->acquire_count == 0)
 			csiphy_dev->csiphy_state = CAM_CSIPHY_INIT;
+
+		if (csiphy_dev->config_count == 0) {
+			CAM_DBG(CAM_CSIPHY, "reset csiphy_info");
+			csiphy_dev->csiphy_info.lane_mask = 0;
+			csiphy_dev->csiphy_info.lane_cnt = 0;
+			csiphy_dev->csiphy_info.combo_mode = 0;
+		}
 	}
 		break;
 	case CAM_CONFIG_DEV: {
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_0_hwreg.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_0_hwreg.h
index 3f743fc..3245093 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_0_hwreg.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_0_hwreg.h
@@ -152,7 +152,7 @@
 		{0x0008, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
 		{0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0060, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0060, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0064, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
 	},
 	{
@@ -168,7 +168,7 @@
 		{0x070C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0760, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0760, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0764, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
 	},
 	{
@@ -183,7 +183,7 @@
 		{0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
 		{0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0260, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0260, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0264, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
 	},
 	{
@@ -198,7 +198,7 @@
 		{0x0408, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
 		{0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0460, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0460, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0464, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
 	},
 	{
@@ -214,7 +214,7 @@
 		{0x060C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0638, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0660, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0660, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0664, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
 	},
 };
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
index 30b9d96..7f94f8d 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
@@ -109,6 +109,7 @@
 			rc = camera_io_dev_read_seq(&e_ctrl->io_master_info,
 				emap[j].mem.addr, memptr,
 				emap[j].mem.addr_type,
+				emap[j].mem.data_type,
 				emap[j].mem.valid_size);
 			if (rc) {
 				CAM_ERR(CAM_EEPROM, "read failed rc %d",
@@ -314,8 +315,8 @@
 power_down:
 	cam_eeprom_power_down(e_ctrl);
 data_mem_free:
-	kfree(e_ctrl->cal_data.mapdata);
-	kfree(e_ctrl->cal_data.map);
+	vfree(e_ctrl->cal_data.mapdata);
+	vfree(e_ctrl->cal_data.map);
 	e_ctrl->cal_data.num_data = 0;
 	e_ctrl->cal_data.num_map = 0;
 	e_ctrl->cam_eeprom_state = CAM_EEPROM_ACQUIRE;
@@ -542,9 +543,9 @@
 		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
 	struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
 
-	e_ctrl->cal_data.map = kcalloc((MSM_EEPROM_MEMORY_MAP_MAX_SIZE *
-		MSM_EEPROM_MAX_MEM_MAP_CNT),
-		(sizeof(struct cam_eeprom_memory_map_t)), GFP_KERNEL);
+	e_ctrl->cal_data.map = vzalloc((MSM_EEPROM_MEMORY_MAP_MAX_SIZE *
+		MSM_EEPROM_MAX_MEM_MAP_CNT) *
+		(sizeof(struct cam_eeprom_memory_map_t)));
 	if (!e_ctrl->cal_data.map) {
 		rc = -ENOMEM;
 		CAM_ERR(CAM_EEPROM, "failed");
@@ -737,8 +738,8 @@
 				return rc;
 			}
 			rc = cam_eeprom_get_cal_data(e_ctrl, csl_packet);
-			kfree(e_ctrl->cal_data.mapdata);
-			kfree(e_ctrl->cal_data.map);
+			vfree(e_ctrl->cal_data.mapdata);
+			vfree(e_ctrl->cal_data.map);
 			e_ctrl->cal_data.num_data = 0;
 			e_ctrl->cal_data.num_map = 0;
 			CAM_DBG(CAM_EEPROM,
@@ -753,7 +754,7 @@
 		}
 
 		e_ctrl->cal_data.mapdata =
-			kzalloc(e_ctrl->cal_data.num_data, GFP_KERNEL);
+			vzalloc(e_ctrl->cal_data.num_data);
 		if (!e_ctrl->cal_data.mapdata) {
 			rc = -ENOMEM;
 			CAM_ERR(CAM_EEPROM, "failed");
@@ -778,8 +779,12 @@
 		rc = cam_eeprom_get_cal_data(e_ctrl, csl_packet);
 		rc = cam_eeprom_power_down(e_ctrl);
 		e_ctrl->cam_eeprom_state = CAM_EEPROM_ACQUIRE;
-		kfree(e_ctrl->cal_data.mapdata);
-		kfree(e_ctrl->cal_data.map);
+		vfree(e_ctrl->cal_data.mapdata);
+		vfree(e_ctrl->cal_data.map);
+		kfree(power_info->power_setting);
+		kfree(power_info->power_down_setting);
+		power_info->power_setting = NULL;
+		power_info->power_down_setting = NULL;
 		e_ctrl->cal_data.num_data = 0;
 		e_ctrl->cal_data.num_map = 0;
 		break;
@@ -790,11 +795,13 @@
 power_down:
 	cam_eeprom_power_down(e_ctrl);
 memdata_free:
-	kfree(e_ctrl->cal_data.mapdata);
+	vfree(e_ctrl->cal_data.mapdata);
 error:
 	kfree(power_info->power_setting);
 	kfree(power_info->power_down_setting);
-	kfree(e_ctrl->cal_data.map);
+	power_info->power_setting = NULL;
+	power_info->power_down_setting = NULL;
+	vfree(e_ctrl->cal_data.map);
 	e_ctrl->cal_data.num_data = 0;
 	e_ctrl->cal_data.num_map = 0;
 	e_ctrl->cam_eeprom_state = CAM_EEPROM_INIT;
@@ -829,6 +836,8 @@
 
 		kfree(power_info->power_setting);
 		kfree(power_info->power_down_setting);
+		power_info->power_setting = NULL;
+		power_info->power_down_setting = NULL;
 	}
 
 	e_ctrl->cam_eeprom_state = CAM_EEPROM_INIT;
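
The eeprom hunks above switch the large calibration map and map-data buffers from kcalloc/kzalloc to vzalloc, since the worst-case map (MSM_EEPROM_MEMORY_MAP_MAX_SIZE * MSM_EEPROM_MAX_MEM_MAP_CNT entries) may be too big for a physically contiguous allocation, and the frees change to vfree() to match. A short kernel-side sketch of the pairing, using an illustrative struct rather than the driver's cal_data:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

struct cal_block {			/* illustrative, not the driver's struct */
	void *map;
};

static int cal_block_alloc(struct cal_block *blk, size_t bytes)
{
	/* virtually contiguous memory is enough for a parse-only map */
	blk->map = vzalloc(bytes);
	if (!blk->map)
		return -ENOMEM;
	return 0;
}

static void cal_block_free(struct cal_block *blk)
{
	vfree(blk->map);		/* must pair with vzalloc(), not kfree() */
	blk->map = NULL;
}
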
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
index 196df08..dfcb9fc 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
@@ -636,7 +636,11 @@
 
 void cam_ois_shutdown(struct cam_ois_ctrl_t *o_ctrl)
 {
-	int rc;
+	int rc = 0;
+	struct cam_ois_soc_private  *soc_private =
+		(struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private;
+	struct cam_sensor_power_ctrl_t *power_info =
+		&soc_private->power_info;
 
 	if (o_ctrl->cam_ois_state == CAM_OIS_INIT)
 		return;
@@ -645,6 +649,7 @@
 		rc = cam_ois_power_down(o_ctrl);
 		if (rc < 0)
 			CAM_ERR(CAM_OIS, "OIS Power down failed");
+		o_ctrl->cam_ois_state = CAM_OIS_ACQUIRE;
 	}
 
 	if (o_ctrl->cam_ois_state >= CAM_OIS_ACQUIRE) {
@@ -656,6 +661,11 @@
 		o_ctrl->bridge_intf.session_hdl = -1;
 	}
 
+	kfree(power_info->power_setting);
+	kfree(power_info->power_down_setting);
+	power_info->power_setting = NULL;
+	power_info->power_down_setting = NULL;
+
 	o_ctrl->cam_ois_state = CAM_OIS_INIT;
 }
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
index d58834c..2133932 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
@@ -518,6 +518,8 @@
 
 	kfree(power_info->power_setting);
 	kfree(power_info->power_down_setting);
+	power_info->power_setting = NULL;
+	power_info->power_down_setting = NULL;
 
 	s_ctrl->streamon_count = 0;
 	s_ctrl->streamoff_count = 0;
@@ -584,24 +586,6 @@
 				"Already Sensor Probed in the slot");
 			break;
 		}
-		/* Allocate memory for power up setting */
-		pu = kzalloc(sizeof(struct cam_sensor_power_setting) *
-			MAX_POWER_CONFIG, GFP_KERNEL);
-		if (!pu) {
-			rc = -ENOMEM;
-			goto release_mutex;
-		}
-
-		pd = kzalloc(sizeof(struct cam_sensor_power_setting) *
-			MAX_POWER_CONFIG, GFP_KERNEL);
-		if (!pd) {
-			kfree(pu);
-			rc = -ENOMEM;
-			goto release_mutex;
-		}
-
-		power_info->power_setting = pu;
-		power_info->power_down_setting = pd;
 
 		if (cmd->handle_type ==
 			CAM_HANDLE_MEM_HANDLE) {
@@ -618,6 +602,9 @@
 			return -EINVAL;
 		}
 
+		pu = power_info->power_setting;
+		pd = power_info->power_down_setting;
+
 		/* Parse and fill vreg params for powerup settings */
 		rc = msm_camera_fill_vreg_params(
 			&s_ctrl->soc_info,
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c
index 2c1f520..1ec7d9a 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -33,6 +33,7 @@
 	cci_ctrl.cci_info = cci_client;
 	cci_ctrl.cfg.cci_i2c_read_cfg.addr = addr;
 	cci_ctrl.cfg.cci_i2c_read_cfg.addr_type = addr_type;
+	cci_ctrl.cfg.cci_i2c_read_cfg.data_type = data_type;
 	cci_ctrl.cfg.cci_i2c_read_cfg.data = buf;
 	cci_ctrl.cfg.cci_i2c_read_cfg.num_byte = data_type;
 	rc = v4l2_subdev_call(cci_client->cci_subdev,
@@ -59,6 +60,7 @@
 int32_t cam_camera_cci_i2c_read_seq(struct cam_sensor_cci_client *cci_client,
 	uint32_t addr, uint8_t *data,
 	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type,
 	uint32_t num_byte)
 {
 	int32_t                    rc = -EFAULT;
@@ -67,6 +69,7 @@
 	struct cam_cci_ctrl        cci_ctrl;
 
 	if ((addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
+		|| (data_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
 		|| (num_byte > I2C_REG_DATA_MAX)) {
 		CAM_ERR(CAM_SENSOR, "addr_type %d num_byte %d", addr_type,
 			num_byte);
@@ -81,6 +84,7 @@
 	cci_ctrl.cci_info = cci_client;
 	cci_ctrl.cfg.cci_i2c_read_cfg.addr = addr;
 	cci_ctrl.cfg.cci_i2c_read_cfg.addr_type = addr_type;
+	cci_ctrl.cfg.cci_i2c_read_cfg.data_type = data_type;
 	cci_ctrl.cfg.cci_i2c_read_cfg.data = buf;
 	cci_ctrl.cfg.cci_i2c_read_cfg.num_byte = num_byte;
 	cci_ctrl.status = -EFAULT;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
index 7cddcf9..e79fffb 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -46,6 +46,7 @@
  * @addr: I2c address
  * @data: I2C data
  * @addr_type: I2c address type
+ * @data_type: I2c data type
  * @num_byte: number of bytes
  *
  * This API handles CCI sequential read
@@ -53,6 +54,7 @@
 int32_t cam_camera_cci_i2c_read_seq(struct cam_sensor_cci_client *client,
 	uint32_t addr, uint8_t *data,
 	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type,
 	uint32_t num_byte);
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
index 89aad4e..ed490fd 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
@@ -69,11 +69,12 @@
 
 int32_t camera_io_dev_read_seq(struct camera_io_master *io_master_info,
 	uint32_t addr, uint8_t *data,
-	enum camera_sensor_i2c_type addr_type, int32_t num_bytes)
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type, int32_t num_bytes)
 {
 	if (io_master_info->master_type == CCI_MASTER) {
 		return cam_camera_cci_i2c_read_seq(io_master_info->cci_client,
-			addr, data, addr_type, num_bytes);
+			addr, data, addr_type, data_type, num_bytes);
 	} else if (io_master_info->master_type == I2C_MASTER) {
 		return cam_qup_i2c_read_seq(io_master_info->client,
 			addr, data, addr_type, num_bytes);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
index ec5ed25..f1143c8 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -52,6 +52,7 @@
  * @io_master_info: I2C/SPI master information
  * @addr: I2C address
  * @data: I2C data
+ * @addr_type: I2C addr type
  * @data_type: I2C data type
  * @num_bytes: number of bytes
  *
@@ -60,6 +61,7 @@
 int32_t camera_io_dev_read_seq(struct camera_io_master *io_master_info,
 	uint32_t addr, uint8_t *data,
 	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type,
 	int32_t num_bytes);
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
index 10d29c9..73a0cf7 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
@@ -860,8 +860,10 @@
 	return rc;
 free_power_down_settings:
 	kfree(power_info->power_down_setting);
+	power_info->power_down_setting = NULL;
 free_power_settings:
 	kfree(power_info->power_setting);
+	power_info->power_setting = NULL;
 	return rc;
 }
 
@@ -1304,7 +1306,9 @@
 		CAM_DBG(CAM_SENSOR, "index: %d", index);
 		power_setting = &ctrl->power_setting[index];
 		if (!power_setting) {
-			CAM_ERR(CAM_SENSOR, "Invalid power up settings");
+			CAM_ERR(CAM_SENSOR,
+				"Invalid power up settings for index %d",
+				index);
 			return -EINVAL;
 		}
 
@@ -1548,7 +1552,7 @@
 		if (ret)
 			CAM_ERR(CAM_SENSOR, "cannot set pin to suspend state");
 		cam_res_mgr_shared_pinctrl_select_state(false);
-		pinctrl_put(ctrl->pinctrl_info.pinctrl);
+		devm_pinctrl_put(ctrl->pinctrl_info.pinctrl);
 		cam_res_mgr_shared_pinctrl_put();
 	}
 
@@ -1594,11 +1598,6 @@
 
 	pd = &ctrl->power_down_setting[index];
 
-	if (!pd) {
-		CAM_ERR(CAM_SENSOR, "Invalid power down setting");
-		return -EINVAL;
-	}
-
 	for (j = 0; j < num_vreg; j++) {
 		if (!strcmp(soc_info->rgltr_name[j], "cam_clk")) {
 
@@ -1641,7 +1640,7 @@
 {
 	int index = 0, ret = 0, num_vreg = 0, i;
 	struct cam_sensor_power_setting *pd = NULL;
-	struct cam_sensor_power_setting *ps;
+	struct cam_sensor_power_setting *ps = NULL;
 	struct msm_camera_gpio_num_info *gpio_num_info = NULL;
 
 	CAM_DBG(CAM_SENSOR, "Enter");
@@ -1661,6 +1660,13 @@
 	for (index = 0; index < ctrl->power_down_setting_size; index++) {
 		CAM_DBG(CAM_SENSOR, "index %d",  index);
 		pd = &ctrl->power_down_setting[index];
+		if (!pd) {
+			CAM_ERR(CAM_SENSOR,
+				"Invalid power down settings for index %d",
+				index);
+			return -EINVAL;
+		}
+
 		ps = NULL;
 		CAM_DBG(CAM_SENSOR, "type %d",  pd->seq_type);
 		switch (pd->seq_type) {
@@ -1760,7 +1766,7 @@
 			CAM_ERR(CAM_SENSOR, "cannot set pin to suspend state");
 
 		cam_res_mgr_shared_pinctrl_select_state(false);
-		pinctrl_put(ctrl->pinctrl_info.pinctrl);
+		devm_pinctrl_put(ctrl->pinctrl_info.pinctrl);
 		cam_res_mgr_shared_pinctrl_put();
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
index ecfc566..5cd3008 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
@@ -3343,6 +3343,7 @@
 		rc = cam_populate_smmu_context_banks(dev, CAM_ARM_SMMU);
 		if (rc < 0) {
 			CAM_ERR(CAM_SMMU, "Error: populating context banks");
+			cam_smmu_release_cb(pdev);
 			return -ENOMEM;
 		}
 		return rc;
diff --git a/drivers/media/platform/msm/camera_v2/Kconfig b/drivers/media/platform/msm/camera_v2/Kconfig
index cabc612..e5439d8 100644
--- a/drivers/media/platform/msm/camera_v2/Kconfig
+++ b/drivers/media/platform/msm/camera_v2/Kconfig
@@ -95,13 +95,22 @@
           and also provides support for writing data in case of FLASH ROM.
 	  Currently supports I2C, CCI and SPI protocol
 
+config MSM_ISP_V1
+        bool "QTI MSM Image Signal Processing v1 support"
+        depends on MSMB_CAMERA
+        ---help---
+          Enable support for the legacy (v1) Image Signal Processing
+          module. When this option is selected, the ISP driver is built
+          from the VFE32-based msm_isp_32 sources instead of the newer
+          VFE4x sources.
+
 config MSM_ISPIF
         bool "QTI MSM Image Signal Processing interface support"
         depends on MSMB_CAMERA
         ---help---
           Enable support for Image Signal Processing interface module.
           This module acts as a crossbar between CSID and VFE. Output
-          of any CID of CSID can be routed to of of pix or raw
+          of any CID of CSID can be routed to either the pix or raw
           data interface in VFE.
 
 config MSM_ISPIF_V1
diff --git a/drivers/media/platform/msm/camera_v2/common/msm_camera_tz_util.c b/drivers/media/platform/msm/camera_v2/common/msm_camera_tz_util.c
index 22f4891..d32a0f2 100644
--- a/drivers/media/platform/msm/camera_v2/common/msm_camera_tz_util.c
+++ b/drivers/media/platform/msm/camera_v2/common/msm_camera_tz_util.c
@@ -20,8 +20,6 @@
 #define EMPTY_QSEECOM_HANDLE    NULL
 #define QSEECOM_SBUFF_SIZE      SZ_128K
 
-#define MSM_CAMERA_TZ_UTIL_VERBOSE
-
 #define MSM_CAMERA_TZ_BOOT_PROTECTED (false)
 
 /* Update version major number in case the HLOS-TA interface is changed*/
diff --git a/drivers/media/platform/msm/camera_v2/isp/Makefile b/drivers/media/platform/msm/camera_v2/isp/Makefile
index 621d81d..d36b1e2 100644
--- a/drivers/media/platform/msm/camera_v2/isp/Makefile
+++ b/drivers/media/platform/msm/camera_v2/isp/Makefile
@@ -1,5 +1,10 @@
 ccflags-y += -Idrivers/media/platform/msm/camera_v2
 ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
 ccflags-y += -Idrivers/media/platform/msm/camera_v2/common/
+ifeq ($(CONFIG_MSM_ISP_V1),y)
+obj-$(CONFIG_MSMB_CAMERA) += msm_isp_32.o msm_buf_mgr.o msm_isp_util_32.o msm_isp_axi_util_32.o msm_isp_stats_util_32.o
+obj-$(CONFIG_MSMB_CAMERA) += msm_isp32.o
+else
 obj-$(CONFIG_MSMB_CAMERA) += msm_buf_mgr.o msm_isp_util.o msm_isp_axi_util.o msm_isp_stats_util.o
 obj-$(CONFIG_MSMB_CAMERA) += msm_isp48.o msm_isp47.o msm_isp46.o msm_isp44.o msm_isp40.o msm_isp.o
+endif
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
index 691b492..6196a8c 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
@@ -189,7 +189,9 @@
 	int i, rc = -1;
 	int ret;
 	struct msm_isp_buffer_mapped_info *mapped_info;
+#ifndef CONFIG_MSM_ISP_V1
 	uint32_t accu_length = 0;
+#endif
 	struct msm_isp_bufq *bufq = NULL;
 
 	bufq = msm_isp_get_bufq(buf_mgr, buf_info->bufq_handle);
@@ -228,8 +230,12 @@
 			goto get_phy_err;
 		}
 
+#ifdef CONFIG_MSM_ISP_V1
+		mapped_info->paddr += qbuf_buf->planes[i].offset;
+#else
 		mapped_info->paddr += accu_length;
 		accu_length += qbuf_buf->planes[i].length;
+#endif
 
 		CDBG("%s: plane: %d addr:%pK\n",
 			__func__, i, (void *)mapped_info->paddr);
@@ -732,10 +738,17 @@
 	spin_lock_irqsave(&bufq->bufq_lock, flags);
 
 	buf_info->frame_id = frame_id;
+#ifdef CONFIG_MSM_ISP_V1
+	if (buf_info->state == MSM_ISP_BUFFER_STATE_DEQUEUED) {
+		buf_info->state = MSM_ISP_BUFFER_STATE_DIVERTED;
+		buf_info->tv = tv;
+	}
+#else
 	if (BUF_SRC(bufq->stream_id) == MSM_ISP_BUFFER_SRC_NATIVE) {
 		buf_info->state = MSM_ISP_BUFFER_STATE_DIVERTED;
 		buf_info->tv = tv;
 	}
+#endif
 	spin_unlock_irqrestore(&bufq->bufq_lock, flags);
 	return 0;
 }
@@ -1077,7 +1090,6 @@
 	}
 }
 
-
 /**
  * msm_isp_buf_put_scratch() - Release scratch buffers
  * @buf_mgr: The buffer structure for h/w
@@ -1220,7 +1232,6 @@
 	return rc;
 }
 
-
 static int msm_isp_init_isp_buf_mgr(struct msm_isp_buf_mgr *buf_mgr,
 	const char *ctx_name)
 {
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
index 6da1360..a95917c 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
@@ -12,21 +12,15 @@
 
 #include <linux/module.h>
 #include <linux/platform_device.h>
-
 #include "msm_isp32.h"
-#include "msm_isp_util.h"
-#include "msm_isp_axi_util.h"
-#include "msm_isp_stats_util.h"
-#include "msm_isp.h"
+#include "msm_isp_util_32.h"
+#include "msm_isp_axi_util_32.h"
+#include "msm_isp_stats_util_32.h"
+#include "msm_isp_32.h"
 #include "msm.h"
 #include "msm_camera_io_util.h"
 
-static const struct platform_device_id msm_vfe32_dev_id[] = {
-	{"msm_vfe32", (kernel_ulong_t) &vfe32_hw_info},
-	{}
-};
-
-#define VFE32_BURST_LEN 2
+#define VFE32_BURST_LEN 3
 #define VFE32_UB_SIZE 1024
 #define VFE32_UB_SIZE_32KB 2048
 #define VFE32_EQUAL_SLICE_UB 194
@@ -36,7 +30,7 @@
 #define VFE32_XBAR_BASE(idx) (0x40 + 0x4 * (idx / 4))
 #define VFE32_XBAR_SHIFT(idx) ((idx % 4) * 8)
 #define VFE32_PING_PONG_BASE(wm, ping_pong) \
-	(VFE32_WM_BASE(wm) + 0x4 * (1 + ((~ping_pong) & 0x1)))
+	(VFE32_WM_BASE(wm) + 0x4 * (1 + (~(ping_pong >> wm) & 0x1)))
 
 static uint8_t stats_pingpong_offset_map[] = {
 	7, 8, 9, 10, 11, 12, 13};
@@ -60,16 +54,6 @@
 	{"csi_vfe_clk", -1},
 };
 
-static uint32_t msm_vfe32_ub_reg_offset(struct vfe_device *vfe_dev, int idx)
-{
-	return (VFE32_WM_BASE(idx) + 0x10);
-}
-
-static uint32_t msm_vfe32_get_ub_size(struct vfe_device *vfe_dev)
-{
-	return MSM_ISP32_TOTAL_WM_UB;
-}
-
 static int32_t msm_vfe32_init_qos_parms(struct vfe_device *vfe_dev,
 				struct msm_vfe_hw_init_parms *qos_parms,
 				struct msm_vfe_hw_init_parms *ds_parms)
@@ -284,8 +268,6 @@
 		pr_err("%s: vfe ioremap failed\n", __func__);
 		goto vfe_remap_failed;
 	}
-	vfe_dev->common_data->dual_vfe_res->vfe_base[vfe_dev->pdev->id] =
-		vfe_dev->vfe_base;
 
 	vfe_dev->vfe_vbif_base = ioremap(vfe_dev->vfe_vbif_mem->start,
 		resource_size(vfe_dev->vfe_vbif_mem));
@@ -330,14 +312,12 @@
 
 static void msm_vfe32_release_hardware(struct vfe_device *vfe_dev)
 {
-	msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x1C);
-	msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x20);
-	disable_irq(vfe_dev->vfe_irq->start);
 	free_irq(vfe_dev->vfe_irq->start, vfe_dev);
 	tasklet_kill(&vfe_dev->vfe_tasklet);
-	msm_isp_flush_tasklet(vfe_dev);
 	iounmap(vfe_dev->vfe_vbif_base);
 	vfe_dev->vfe_vbif_base = NULL;
+	iounmap(vfe_dev->vfe_base);
+	vfe_dev->vfe_base = NULL;
 	if (vfe_dev->vfe_clk_idx == 1)
 		msm_cam_clk_enable(&vfe_dev->pdev->dev,
 				msm_vfe32_1_clk_info, vfe_dev->vfe_clk,
@@ -346,9 +326,6 @@
 		msm_cam_clk_enable(&vfe_dev->pdev->dev,
 				msm_vfe32_2_clk_info, vfe_dev->vfe_clk,
 				ARRAY_SIZE(msm_vfe32_2_clk_info), 0);
-	vfe_dev->common_data->dual_vfe_res->vfe_base[vfe_dev->pdev->id] = NULL;
-	iounmap(vfe_dev->vfe_base);
-	vfe_dev->vfe_base = NULL;
 	kfree(vfe_dev->vfe_clk);
 	regulator_disable(vfe_dev->fs_vfe);
 	msm_isp_deinit_bandwidth_mgr(ISP_VFE0 + vfe_dev->pdev->id);
@@ -369,7 +346,6 @@
 	ds_parms.entries = "ds-entries";
 	ds_parms.regs = "ds-regs";
 	ds_parms.settings = "ds-settings";
-
 	msm_vfe32_init_qos_parms(vfe_dev, &qos_parms, &ds_parms);
 	msm_vfe32_init_vbif_parms(vfe_dev, &vbif_parms);
 
@@ -381,6 +357,11 @@
 	msm_camera_io_w_mb(0x1CFFFFFF, vfe_dev->vfe_base + 0x20);
 	msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x24);
 	msm_camera_io_w_mb(0x1FFFFFFF, vfe_dev->vfe_base + 0x28);
+	msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x6FC);
+	msm_camera_io_w(0x10000000, vfe_dev->vfe_base + VFE32_RDI_BASE(1));
+	msm_camera_io_w(0x10000000, vfe_dev->vfe_base + VFE32_RDI_BASE(2));
+	msm_camera_io_w(0x0, vfe_dev->vfe_base + VFE32_XBAR_BASE(0));
+	msm_camera_io_w(0x0, vfe_dev->vfe_base + VFE32_XBAR_BASE(4));
 
 }
 
@@ -396,13 +377,30 @@
 static void msm_vfe32_process_reset_irq(struct vfe_device *vfe_dev,
 	uint32_t irq_status0, uint32_t irq_status1)
 {
-	if (irq_status1 & BIT(23))
+	if (irq_status1 & BIT(23)) {
+		if (vfe_dev->vfe_reset_timeout_processed == 1) {
+			pr_err("%s: vfe reset was already processed\n", __func__);
+			return;
+		}
 		complete(&vfe_dev->reset_complete);
+	}
 }
 
 static void msm_vfe32_process_halt_irq(struct vfe_device *vfe_dev,
 	uint32_t irq_status0, uint32_t irq_status1)
 {
+	if (irq_status1 & (1 << 24))
+		msm_camera_io_w_mb(0, vfe_dev->vfe_base + 0x1D8);
+}
+
+static void msm_vfe32_process_epoch_irq(struct vfe_device *vfe_dev,
+	uint32_t irq_status0, uint32_t irq_status1,
+	struct msm_isp_timestamp *ts)
+{
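+	/* bits 3-4 of irq_status0 are the epoch IRQs; epoch0 (bit 3) is
+	 * forwarded as the PIX_0 SOF notification
+	 */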
+	if (!(irq_status0 & 0x18))
+		return;
+	if (irq_status0 & (1 << 3))
+		msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
 }
 
 static void msm_vfe32_process_camif_irq(struct vfe_device *vfe_dev,
@@ -416,10 +414,12 @@
 		ISP_DBG("%s: SOF IRQ\n", __func__);
 		if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
 			&& vfe_dev->axi_data.src_info[VFE_PIX_0].
-			stream_count == 0) {
+			pix_stream_count == 0) {
 			msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
-			msm_isp_axi_stream_update(vfe_dev, VFE_PIX_0, ts);
-			msm_isp_update_framedrop_reg(vfe_dev, VFE_PIX_0);
+			if (vfe_dev->axi_data.stream_update)
+				msm_isp_axi_stream_update(vfe_dev,
+					(1 << VFE_PIX_0));
+			msm_isp_update_framedrop_reg(vfe_dev, (1 << VFE_PIX_0));
 		}
 	}
 }
@@ -485,7 +485,20 @@
 
 static void msm_vfe32_get_overflow_mask(uint32_t *overflow_mask)
 {
-	*overflow_mask = 0x0;
+	*overflow_mask = 0x003FFF7E;
+}
+
+static void msm_vfe32_get_rdi_wm_mask(struct vfe_device *vfe_dev,
+	uint32_t *rdi_wm_mask)
+{
+	*rdi_wm_mask = vfe_dev->axi_data.rdi_wm_mask;
+}
+
+static void msm_vfe32_get_irq_mask(struct vfe_device *vfe_dev,
+	uint32_t *irq0_mask, uint32_t *irq1_mask)
+{
+	*irq0_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
+	*irq1_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x20);
 }
 
 static void msm_vfe32_process_error_status(struct vfe_device *vfe_dev)
@@ -571,7 +584,7 @@
 		pr_err("%s: axi error\n", __func__);
 }
 
-static void msm_vfe32_read_and_clear_irq_status(struct vfe_device *vfe_dev,
+static void msm_vfe32_read_irq_status(struct vfe_device *vfe_dev,
 	uint32_t *irq_status0, uint32_t *irq_status1)
 {
 	*irq_status0 = msm_camera_io_r(vfe_dev->vfe_base + 0x2C);
@@ -589,122 +602,127 @@
 			msm_camera_io_r(vfe_dev->vfe_base + 0x7B4);
 }
 
-static void msm_vfe32_read_irq_status(struct vfe_device *vfe_dev,
-	uint32_t *irq_status0, uint32_t irq_status1)
-{
-	*irq_status0 = msm_camera_io_r(vfe_dev->vfe_base + 0x2C);
-	*irq_status1 = msm_camera_io_r(vfe_dev->vfe_base + 0x30);
-}
-
 static void msm_vfe32_process_reg_update(struct vfe_device *vfe_dev,
 	uint32_t irq_status0, uint32_t irq_status1,
 	struct msm_isp_timestamp *ts)
 {
-	uint32_t rdi_status;
-	enum msm_vfe_input_src i;
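+	/* bitmask of input sources (PIX_0/RDI_0-2) that signalled a reg
+	 * update or SOF in this IRQ
+	 */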
+	uint8_t input_src = 0x0;
 
 	if (!(irq_status0 & 0x20) && !(irq_status1 & 0x1C000000))
 		return;
 
 	if (irq_status0 & BIT(5)) {
-		msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
-		vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev,
-			VFE_PIX_0);
-		if (vfe_dev->axi_data.stream_update[VFE_PIX_0]) {
-			rdi_status = msm_camera_io_r(vfe_dev->vfe_base +
-				VFE32_XBAR_BASE(0));
-			rdi_status |= msm_camera_io_r(vfe_dev->vfe_base +
-				VFE32_XBAR_BASE(4));
-
-			if ((rdi_status & BIT(7)) && (!(irq_status0 & 0x20)))
-				return;
-		}
-		msm_isp_process_stats_reg_upd_epoch_irq(vfe_dev,
-				MSM_ISP_COMP_IRQ_REG_UPD);
+		msm_isp_notify(vfe_dev, ISP_EVENT_REG_UPDATE, VFE_PIX_0, ts);
+		input_src |= (1 << VFE_PIX_0);
+	}
+	if (irq_status1 & BIT(26)) {
+		msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_RAW_0, ts);
+		input_src |= (1 << VFE_RAW_0);
+	}
+	if (irq_status1 & BIT(27)) {
+		msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_RAW_1, ts);
+		input_src |= (1 << VFE_RAW_1);
+	}
+	if (irq_status1 & BIT(28)) {
+		msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_RAW_2, ts);
+		input_src |= (1 << VFE_RAW_2);
 	}
 
-	for (i = VFE_RAW_0; i <= VFE_RAW_2; i++) {
-		if (irq_status1 & BIT(26 + (i - VFE_RAW_0))) {
-			msm_isp_notify(vfe_dev, ISP_EVENT_SOF, i, ts);
-			msm_isp_axi_stream_update(vfe_dev, i, ts);
-			msm_isp_update_framedrop_reg(vfe_dev, i);
-
-			vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev,
-				i);
+	if (vfe_dev->axi_data.stream_update)
+		msm_isp_axi_stream_update(vfe_dev, input_src);
+	if (atomic_read(&vfe_dev->stats_data.stats_update))
+		msm_isp_stats_stream_update(vfe_dev);
+	if (vfe_dev->axi_data.stream_update ||
+		atomic_read(&vfe_dev->stats_data.stats_update)) {
+		if (input_src & (1 << VFE_PIX_0)) {
+			vfe_dev->hw_info->vfe_ops.core_ops.
+				reg_update(vfe_dev, (1 << VFE_PIX_0));
 		}
 	}
-
+	msm_isp_update_framedrop_reg(vfe_dev, input_src);
+	msm_isp_update_stats_framedrop_reg(vfe_dev);
 	msm_isp_update_error_frame_count(vfe_dev);
-
-}
-
-static void msm_vfe32_process_epoch_irq(struct vfe_device *vfe_dev,
-	uint32_t irq_status0, uint32_t irq_status1,
-	struct msm_isp_timestamp *ts)
-{
-	/* Not supported */
-}
-
-static void msm_vfe32_reg_update(struct vfe_device *vfe_dev,
-	enum msm_vfe_input_src frame_src)
-{
-	if (vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE1) {
-		msm_camera_io_w_mb(0xF,
-			vfe_dev->common_data->dual_vfe_res->vfe_base[ISP_VFE0]
-			+ 0x260);
-		msm_camera_io_w_mb(0xF, vfe_dev->vfe_base + 0x260);
-	} else if (!vfe_dev->is_split) {
-		msm_camera_io_w_mb(0xF, vfe_dev->vfe_base + 0x260);
+	if ((input_src & (1 << VFE_RAW_0)) ||
+		(input_src & (1 << VFE_RAW_1)) ||
+		(input_src & (1 << VFE_RAW_2))) {
+		vfe_dev->hw_info->vfe_ops.core_ops.
+		reg_update(vfe_dev, input_src);
 	}
+
+	return;
+}
+
+static void msm_vfe32_reg_update(
+	struct vfe_device *vfe_dev, uint32_t input_src)
+{
+	msm_camera_io_w_mb(input_src, vfe_dev->vfe_base + 0x260);
 }
 
 static long msm_vfe32_reset_hardware(struct vfe_device *vfe_dev,
 	uint32_t first_start, uint32_t blocking)
 {
-	init_completion(&vfe_dev->reset_complete);
-	msm_camera_io_w_mb(0x3FF, vfe_dev->vfe_base + 0x4);
-	return wait_for_completion_timeout(
-	   &vfe_dev->reset_complete, msecs_to_jiffies(50));
+	long rc = 0;
+	uint32_t irq_status1;
+
+	if (blocking) {
+		init_completion(&vfe_dev->reset_complete);
+		msm_camera_io_w_mb(0x3FF, vfe_dev->vfe_base + 0x4);
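+		/* clear any stale timeout marker so the reset-done IRQ
+		 * completes this new reset (see msm_vfe32_process_reset_irq)
+		 */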
+		vfe_dev->vfe_reset_timeout_processed = 0;
+		rc = wait_for_completion_timeout(
+			&vfe_dev->reset_complete, msecs_to_jiffies(500));
+	} else {
+		msm_camera_io_w_mb(0x3FF, vfe_dev->vfe_base + 0x4);
+	}
+
+	if (blocking && rc <= 0) {
+		/*read ISP status register*/
+		irq_status1 = msm_camera_io_r(vfe_dev->vfe_base + 0x30);
+		pr_err("%s: handling vfe reset timeout error, irq_status1 0x%x\n",
+			__func__, irq_status1);
+		if (irq_status1 & BIT(23)) {
+			pr_err("%s: vfe reset actually completed\n", __func__);
+			vfe_dev->vfe_reset_timeout_processed = 1;
+			return 1;
+		}
+	}
+	return rc;
 }
 
 static void msm_vfe32_axi_reload_wm(
-	struct vfe_device *vfe_dev, void __iomem *vfe_base,
-	uint32_t reload_mask)
+	struct vfe_device *vfe_dev, uint32_t reload_mask)
 {
 	if (!vfe_dev->pdev->dev.of_node) {
 		/*vfe32 A-family: 8960*/
-		msm_camera_io_w_mb(reload_mask, vfe_base + 0x38);
+		msm_camera_io_w_mb(reload_mask, vfe_dev->vfe_base + 0x38);
 	} else {
 		/*vfe32 B-family: 8610*/
-		msm_camera_io_w(0x0, vfe_base + 0x24);
-		msm_camera_io_w(0x0, vfe_base + 0x28);
-		msm_camera_io_w(0x0, vfe_base + 0x20);
-		msm_camera_io_w_mb(0x1, vfe_base + 0x18);
-		msm_camera_io_w(0x9AAAAAAA, vfe_base + 0x600);
-		msm_camera_io_w(reload_mask, vfe_base + 0x38);
+		msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x28);
+		msm_camera_io_w(0x1C800000, vfe_dev->vfe_base + 0x20);
+		msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x18);
+		msm_camera_io_w(0x9AAAAAAA, vfe_dev->vfe_base + 0x600);
+		msm_camera_io_w(reload_mask, vfe_dev->vfe_base + 0x38);
 	}
 }
 
-static void msm_vfe32_axi_enable_wm(void __iomem *vfe_base,
+static void msm_vfe32_axi_enable_wm(struct vfe_device *vfe_dev,
 	uint8_t wm_idx, uint8_t enable)
 {
 	uint32_t val = msm_camera_io_r(
-	   vfe_base + VFE32_WM_BASE(wm_idx));
+	   vfe_dev->vfe_base + VFE32_WM_BASE(wm_idx));
 	if (enable)
 		val |= 0x1;
 	else
 		val &= ~0x1;
 	msm_camera_io_w_mb(val,
-		vfe_base + VFE32_WM_BASE(wm_idx));
+		vfe_dev->vfe_base + VFE32_WM_BASE(wm_idx));
 }
 
 static void msm_vfe32_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
 	struct msm_vfe_axi_stream *stream_info)
 {
 	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
-	int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
 	uint32_t comp_mask, comp_mask_index =
-		stream_info->comp_mask_index[vfe_idx];
+		stream_info->comp_mask_index;
 	uint32_t irq_mask;
 
 	comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x34);
@@ -721,9 +739,7 @@
 static void msm_vfe32_axi_clear_comp_mask(struct vfe_device *vfe_dev,
 	struct msm_vfe_axi_stream *stream_info)
 {
-	int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
-	uint32_t comp_mask, comp_mask_index =
-			stream_info->comp_mask_index[vfe_idx];
+	uint32_t comp_mask, comp_mask_index = stream_info->comp_mask_index;
 	uint32_t irq_mask;
 
 	comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x34);
@@ -739,10 +755,9 @@
 	struct msm_vfe_axi_stream *stream_info)
 {
 	uint32_t irq_mask;
-	int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
 
 	irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
-	irq_mask |= BIT(stream_info->wm[vfe_idx][0] + 6);
+	irq_mask |= BIT(stream_info->wm[0] + 6);
 	msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x1C);
 }
 
@@ -750,31 +765,40 @@
 	struct msm_vfe_axi_stream *stream_info)
 {
 	uint32_t irq_mask;
-	int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
 
 	irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
-	irq_mask &= ~BIT(stream_info->wm[vfe_idx][0] + 6);
+	irq_mask &= ~BIT(stream_info->wm[0] + 6);
 	msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x1C);
 }
 
 static void msm_vfe32_cfg_framedrop(struct vfe_device *vfe_dev,
-	struct msm_vfe_axi_stream *stream_info, uint32_t framedrop_pattern,
-	uint32_t framedrop_period)
+	struct msm_vfe_axi_stream *stream_info)
 {
-	void __iomem *vfe_base = vfe_dev->vfe_base;
+	uint32_t framedrop_pattern = 0, framedrop_period = 0;
+
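+	/* pattern/period are left at 0 (drop every frame) while the initial
+	 * frame drop is still counting down, and once a burst stream has
+	 * captured all of its frames
+	 */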
+	if (stream_info->runtime_init_frame_drop == 0) {
+		framedrop_pattern = stream_info->framedrop_pattern;
+		framedrop_period = stream_info->framedrop_period;
+	}
+
+	if (stream_info->stream_type == BURST_STREAM &&
+		stream_info->runtime_burst_frame_count == 0) {
+		framedrop_pattern = 0;
+		framedrop_period = 0;
+	}
 
 	if (stream_info->stream_src == PIX_ENCODER) {
-		msm_camera_io_w(framedrop_period - 1, vfe_base + 0x504);
-		msm_camera_io_w(framedrop_period - 1, vfe_base + 0x508);
-		msm_camera_io_w(framedrop_pattern, vfe_base + 0x50C);
-		msm_camera_io_w(framedrop_pattern, vfe_base + 0x510);
+		msm_camera_io_w(framedrop_period, vfe_dev->vfe_base + 0x504);
+		msm_camera_io_w(framedrop_period, vfe_dev->vfe_base + 0x508);
+		msm_camera_io_w(framedrop_pattern, vfe_dev->vfe_base + 0x50C);
+		msm_camera_io_w(framedrop_pattern, vfe_dev->vfe_base + 0x510);
 	} else if (stream_info->stream_src == PIX_VIEWFINDER) {
-		msm_camera_io_w(framedrop_period - 1, vfe_base + 0x514);
-		msm_camera_io_w(framedrop_period - 1, vfe_base + 0x518);
-		msm_camera_io_w(framedrop_pattern, vfe_base + 0x51C);
-		msm_camera_io_w(framedrop_pattern, vfe_base + 0x520);
+		msm_camera_io_w(framedrop_period, vfe_dev->vfe_base + 0x514);
+		msm_camera_io_w(framedrop_period, vfe_dev->vfe_base + 0x518);
+		msm_camera_io_w(framedrop_pattern, vfe_dev->vfe_base + 0x51C);
+		msm_camera_io_w(framedrop_pattern, vfe_dev->vfe_base + 0x520);
 	}
-	msm_camera_io_w_mb(0x1, vfe_base + 0x260);
+	msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x260);
 }
 
 static void msm_vfe32_clear_framedrop(struct vfe_device *vfe_dev,
@@ -877,6 +901,7 @@
 	struct msm_vfe_pix_cfg *pix_cfg)
 {
 	pr_err("%s: Fetch engine not supported\n", __func__);
+	return;
 }
 
 static void msm_vfe32_cfg_camif(struct vfe_device *vfe_dev,
@@ -924,6 +949,7 @@
 		pr_err("%s: Unsupported input mux %d\n",
 			__func__, pix_cfg->input_mux);
 	}
+	return;
 }
 
 static void msm_vfe32_update_camif_state(
@@ -938,19 +964,20 @@
 
 	if (update_state == ENABLE_CAMIF) {
 		val = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
-		val |= 0x1;
+		val |= 0x19;
 		msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x1C);
-
+		msm_camera_io_w_mb(0xA, vfe_dev->vfe_base + 0x200);
 		val = msm_camera_io_r(vfe_dev->vfe_base + 0x1E4);
 		bus_en =
 		((vfe_dev->axi_data.src_info[
 			VFE_PIX_0].raw_stream_count > 0) ? 1 : 0);
 		vfe_en =
 		((vfe_dev->axi_data.src_info[
-			VFE_PIX_0].stream_count > 0) ? 1 : 0);
+			VFE_PIX_0].pix_stream_count > 0) ? 1 : 0);
 		val &= 0xFFFFFF3F;
 		val = val | bus_en << 7 | vfe_en << 6;
 		msm_camera_io_w(val, vfe_dev->vfe_base + 0x1E4);
+		msm_camera_io_w_mb(0x4, vfe_dev->vfe_base + 0x1E0);
 		msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x1E0);
 		vfe_dev->axi_data.src_info[VFE_PIX_0].active = 1;
 	} else if (update_state == DISABLE_CAMIF) {
@@ -990,17 +1017,16 @@
 	uint8_t plane_idx)
 {
 	uint32_t val;
-	int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
-	uint32_t wm_base = VFE32_WM_BASE(stream_info->wm[vfe_idx][plane_idx]);
+	uint32_t wm_base = VFE32_WM_BASE(stream_info->wm[plane_idx]);
 
 	if (!stream_info->frame_based) {
 		/*WR_IMAGE_SIZE*/
 		val =
 			((msm_isp_cal_word_per_line(
 			stream_info->output_format,
-			stream_info->plane_cfg[vfe_idx][plane_idx].
+			stream_info->plane_cfg[plane_idx].
 			output_width)+1)/2 - 1) << 16 |
-			(stream_info->plane_cfg[vfe_idx][plane_idx].
+			(stream_info->plane_cfg[plane_idx].
 			output_height - 1);
 		msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x10);
 
@@ -1008,9 +1034,9 @@
 		val =
 			msm_isp_cal_word_per_line(
 			stream_info->output_format,
-			stream_info->plane_cfg[vfe_idx][plane_idx].
+			stream_info->plane_cfg[plane_idx].
 			output_stride) << 16 |
-			(stream_info->plane_cfg[vfe_idx][plane_idx].
+			(stream_info->plane_cfg[plane_idx].
 			output_height - 1) << 4 | VFE32_BURST_LEN;
 		msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
 	} else {
@@ -1018,12 +1044,13 @@
 		val =
 			msm_isp_cal_word_per_line(
 			stream_info->output_format,
-			stream_info->plane_cfg[vfe_idx][plane_idx].
+			stream_info->plane_cfg[plane_idx].
 			output_width) << 16 |
-			(stream_info->plane_cfg[vfe_idx][plane_idx].
+			(stream_info->plane_cfg[plane_idx].
 			output_height - 1) << 4 | VFE32_BURST_LEN;
 		msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
 	}
+	return;
 }
 
 static void msm_vfe32_axi_clear_wm_reg(
@@ -1031,22 +1058,24 @@
 	struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
 {
 	uint32_t val = 0;
-	int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
-	uint32_t wm_base = VFE32_WM_BASE(stream_info->wm[vfe_idx][plane_idx]);
+	uint32_t wm_base = VFE32_WM_BASE(stream_info->wm[plane_idx]);
+
+	/* FRAME BASED */
+	msm_camera_io_w(val, vfe_dev->vfe_base + wm_base);
 	/*WR_IMAGE_SIZE*/
 	msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x10);
 	/*WR_BUFFER_CFG*/
 	msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
+	return;
 }
 
 static void msm_vfe32_axi_cfg_wm_xbar_reg(
 	struct vfe_device *vfe_dev,
 	struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
 {
-	int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
 	struct msm_vfe_axi_plane_cfg *plane_cfg =
-		&stream_info->plane_cfg[vfe_idx][plane_idx];
-	uint8_t wm = stream_info->wm[vfe_idx][plane_idx];
+		&stream_info->plane_cfg[plane_idx];
+	uint8_t wm = stream_info->wm[plane_idx];
 	uint32_t xbar_cfg = 0;
 	uint32_t xbar_reg_cfg = 0;
 
@@ -1093,14 +1122,14 @@
 	xbar_reg_cfg &= ~(0xFF << VFE32_XBAR_SHIFT(wm));
 	xbar_reg_cfg |= (xbar_cfg << VFE32_XBAR_SHIFT(wm));
 	msm_camera_io_w(xbar_reg_cfg, vfe_dev->vfe_base + VFE32_XBAR_BASE(wm));
+	return;
 }
 
 static void msm_vfe32_axi_clear_wm_xbar_reg(
 	struct vfe_device *vfe_dev,
 	struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
 {
-	int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
-	uint8_t wm = stream_info->wm[vfe_idx][plane_idx];
+	uint8_t wm = stream_info->wm[plane_idx];
 	uint32_t xbar_reg_cfg = 0;
 
 	xbar_reg_cfg = msm_camera_io_r(vfe_dev->vfe_base + VFE32_XBAR_BASE(wm));
@@ -1108,21 +1137,95 @@
 	msm_camera_io_w(xbar_reg_cfg, vfe_dev->vfe_base + VFE32_XBAR_BASE(wm));
 }
 
-static void msm_vfe32_update_ping_pong_addr(void __iomem *vfe_base,
-	uint8_t wm_idx, uint32_t pingpong_bit, dma_addr_t paddr,
-	int32_t buf_size)
+static void msm_vfe32_cfg_axi_ub_equal_default(struct vfe_device *vfe_dev)
+{
+	int i;
+	uint32_t ub_offset = 0;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+	uint32_t total_image_size = 0;
+	uint32_t num_used_wms = 0;
+	uint32_t prop_size = 0;
+	uint32_t wm_ub_size;
+	uint64_t delta;
+
+	for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+		if (axi_data->free_wm[i] > 0) {
+			num_used_wms++;
+			total_image_size += axi_data->wm_image_size[i];
+		}
+	}
+	prop_size = MSM_ISP32_TOTAL_WM_UB -
+		axi_data->hw_info->min_wm_ub * num_used_wms;
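+	/* each active WM gets min_wm_ub plus a share of the remaining UB
+	 * proportional to its image size; unused WMs get no UB
+	 */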
+	for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+		if (axi_data->free_wm[i]) {
+			delta =
+				(uint64_t)(axi_data->wm_image_size[i] *
+					prop_size);
+			do_div(delta, total_image_size);
+			wm_ub_size = axi_data->hw_info->min_wm_ub +
+				(uint32_t)delta;
+			msm_camera_io_w(ub_offset << 16 |
+				(wm_ub_size - 1), vfe_dev->vfe_base +
+					VFE32_WM_BASE(i) + 0xC);
+			ub_offset += wm_ub_size;
+		} else {
+			msm_camera_io_w(0,
+				vfe_dev->vfe_base + VFE32_WM_BASE(i) + 0xC);
+		}
+	}
+}
+
+static void msm_vfe32_cfg_axi_ub_equal_slicing(struct vfe_device *vfe_dev)
+{
+	int i;
+	uint32_t ub_offset = 0;
+	uint32_t final_ub_slice_size;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+
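+	/* give every WM a fixed slice, clipping the last one so the total
+	 * never exceeds VFE32_AXI_SLICE_UB
+	 */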
+	for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+		if (ub_offset + VFE32_EQUAL_SLICE_UB > VFE32_AXI_SLICE_UB) {
+			final_ub_slice_size = VFE32_AXI_SLICE_UB - ub_offset;
+			msm_camera_io_w(ub_offset << 16 |
+				(final_ub_slice_size - 1), vfe_dev->vfe_base +
+				VFE32_WM_BASE(i) + 0xC);
+			ub_offset += final_ub_slice_size;
+		} else {
+			msm_camera_io_w(ub_offset << 16 |
+				(VFE32_EQUAL_SLICE_UB - 1), vfe_dev->vfe_base +
+				VFE32_WM_BASE(i) + 0xC);
+			ub_offset += VFE32_EQUAL_SLICE_UB;
+		}
+	}
+}
+
+static void msm_vfe32_cfg_axi_ub(struct vfe_device *vfe_dev)
+{
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+
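+	/* the policy is hard-coded to the proportional default, so the
+	 * equal-slicing branch is never taken
+	 */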
+	axi_data->wm_ub_cfg_policy = MSM_WM_UB_CFG_DEFAULT;
+	if (axi_data->wm_ub_cfg_policy == MSM_WM_UB_EQUAL_SLICING)
+		msm_vfe32_cfg_axi_ub_equal_slicing(vfe_dev);
+	else
+		msm_vfe32_cfg_axi_ub_equal_default(vfe_dev);
+}
+
+static void msm_vfe32_update_ping_pong_addr(struct vfe_device *vfe_dev,
+		uint8_t wm_idx, uint32_t pingpong_status, dma_addr_t paddr)
 {
 	uint32_t paddr32 = (paddr & 0xFFFFFFFF);
 
-	msm_camera_io_w(paddr32, vfe_base +
-		VFE32_PING_PONG_BASE(wm_idx, pingpong_bit));
+	msm_camera_io_w(paddr32, vfe_dev->vfe_base +
+		VFE32_PING_PONG_BASE(wm_idx, pingpong_status));
 }
 
 static int msm_vfe32_axi_halt(struct vfe_device *vfe_dev, uint32_t blocking)
 {
-	uint32_t halt_mask;
 	uint32_t axi_busy_flag = true;
 
+	/* Keep only halt and restart mask */
+	msm_camera_io_w(0x01800000, vfe_dev->vfe_base + 0x20);
+	/*Clear IRQ Status */
+	msm_camera_io_w(0xFE7FFFFF, vfe_dev->vfe_base + 0x28);
 	msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x1D8);
 	while (axi_busy_flag) {
 		if (msm_camera_io_r(
@@ -1130,10 +1233,27 @@
 			axi_busy_flag = false;
 	}
 	msm_camera_io_w_mb(0, vfe_dev->vfe_base + 0x1D8);
-	halt_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x20);
-	halt_mask &= 0xFEFFFFFF;
-	/* Disable AXI IRQ */
-	msm_camera_io_w_mb(halt_mask, vfe_dev->vfe_base + 0x20);
+	return 0;
+}
+
+static int msm_vfe32_axi_restart(struct vfe_device *vfe_dev,
+	uint32_t blocking, uint32_t enable_camif)
+{
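+	/* reinstate the IRQ masks stashed in error_info, clear stale
+	 * status, restart the AXI bridge and optionally re-enable CAMIF
+	 */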
+	vfe_dev->hw_info->vfe_ops.core_ops.restore_irq_mask(vfe_dev);
+
+	/*Clear IRQ Status */
+	msm_camera_io_w(0xFE7FFFFF, vfe_dev->vfe_base + 0x28);
+	msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x1D8);
+	msm_camera_io_w_mb(0xA, vfe_dev->vfe_base + 0x200);
+	/* Start AXI */
+	msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x1D8);
+	vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev, 0xF);
+	memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
+	atomic_set(&vfe_dev->error_info.overflow_state, NO_OVERFLOW);
+	if (enable_camif) {
+		vfe_dev->hw_info->vfe_ops.core_ops.
+		update_camif_state(vfe_dev, ENABLE_CAMIF);
+	}
 	return 0;
 }
 
@@ -1187,20 +1307,36 @@
 }
 
 static void msm_vfe32_stats_cfg_comp_mask(struct vfe_device *vfe_dev,
-	uint32_t stats_mask, uint8_t comp_idx, uint8_t enable)
+	uint32_t stats_mask, uint8_t enable)
 {
+	uint32_t i = 0;
+	atomic_t *stats_comp;
+	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+
+	stats_mask = stats_mask & 0x7F;
+
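+	/* apply the requested stats bits to every composite mask counter */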
+	for (i = 0;
+		i < vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask; i++) {
+		stats_comp = &stats_data->stats_comp_mask[i];
+		if (enable)
+			atomic_add(stats_mask, stats_comp);
+		else
+			atomic_sub(stats_mask, stats_comp);
+		ISP_DBG("%s: comp_mask: %x\n",
+			__func__, atomic_read(&stats_data->stats_comp_mask[i]));
+	}
+	return;
 }
 
 static void msm_vfe32_stats_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
 	struct msm_vfe_stats_stream *stream_info)
 {
-	int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
-				stream_info);
 	uint32_t irq_mask;
 
 	irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
-	irq_mask |= BIT(STATS_IDX(stream_info->stream_handle[vfe_idx]) + 13);
+	irq_mask |= BIT(STATS_IDX(stream_info->stream_handle) + 13);
 	msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x1C);
+	return;
 }
 
 static void msm_vfe32_stats_clear_wm_irq_mask(struct vfe_device *vfe_dev,
@@ -1211,18 +1347,21 @@
 	irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
 	irq_mask &= ~(BIT(STATS_IDX(stream_info->stream_handle) + 13));
 	msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x1C);
+	return;
 }
 
 static void msm_vfe32_stats_cfg_wm_reg(struct vfe_device *vfe_dev,
 	struct msm_vfe_stats_stream *stream_info)
 {
 	/*Nothing to configure for VFE3.x*/
+	return;
 }
 
 static void msm_vfe32_stats_clear_wm_reg(struct vfe_device *vfe_dev,
 	struct msm_vfe_stats_stream *stream_info)
 {
 	/*Nothing to configure for VFE3.x*/
+	return;
 }
 
 static void msm_vfe32_stats_cfg_ub(struct vfe_device *vfe_dev)
@@ -1247,12 +1386,7 @@
 		msm_camera_io_w(ub_offset << 16 | (ub_size[i] - 1),
 			vfe_dev->vfe_base + VFE32_STATS_BASE(i) + 0x8);
 	}
-}
-
-static bool msm_vfe32_is_module_cfg_lock_needed(
-	uint32_t reg_offset)
-{
-	return false;
+	return;
 }
 
 static void msm_vfe32_stats_enable_module(struct vfe_device *vfe_dev,
@@ -1294,15 +1428,13 @@
 
 static void msm_vfe32_stats_update_ping_pong_addr(struct vfe_device *vfe_dev,
 	struct msm_vfe_stats_stream *stream_info, uint32_t pingpong_status,
-	dma_addr_t paddr, uint32_t buf_sz)
+	dma_addr_t paddr)
 {
-	void __iomem *vfe_base = vfe_dev->vfe_base;
-	int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
-				stream_info);
 	uint32_t paddr32 = (paddr & 0xFFFFFFFF);
-	int stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
 
-	msm_camera_io_w(paddr32, vfe_base +
+	int stats_idx = STATS_IDX(stream_info->stream_handle);
+
+	msm_camera_io_w(paddr32, vfe_dev->vfe_base +
 		VFE32_STATS_PING_PONG_BASE(stats_idx, pingpong_status));
 }
 
@@ -1361,28 +1493,6 @@
 		goto vfe_no_resource;
 	}
 
-	if (!vfe_dev->pdev->dev.of_node)
-		vfe_dev->iommu_ctx[0] = msm_iommu_get_ctx("vfe_imgwr");
-	else
-		vfe_dev->iommu_ctx[0] = msm_iommu_get_ctx("vfe");
-
-	if (!vfe_dev->iommu_ctx[0]) {
-		pr_err("%s: no iommux ctx resource?\n", __func__);
-		rc = -ENODEV;
-		goto vfe_no_resource;
-	}
-
-	if (!vfe_dev->pdev->dev.of_node)
-		vfe_dev->iommu_ctx[1] = msm_iommu_get_ctx("vfe_misc");
-	else
-		vfe_dev->iommu_ctx[1] = msm_iommu_get_ctx("vfe");
-
-	if (!vfe_dev->iommu_ctx[1]) {
-		pr_err("%s: no iommux ctx resource?\n", __func__);
-		rc = -ENODEV;
-		goto vfe_no_resource;
-	}
-
 vfe_no_resource:
 	return rc;
 }
@@ -1394,13 +1504,27 @@
 	*error_mask1 = 0x007FFFFF;
 }
 
-struct msm_vfe_axi_hardware_info msm_vfe32_axi_hw_info = {
-	.num_wm = 5,
+
+static void msm_vfe32_restore_irq_mask(struct vfe_device *vfe_dev)
+{
+	msm_camera_io_w(vfe_dev->error_info.overflow_recover_irq_mask0,
+		vfe_dev->vfe_base + 0x1C);
+	msm_camera_io_w(vfe_dev->error_info.overflow_recover_irq_mask1,
+		vfe_dev->vfe_base + 0x20);
+}
+
+static void msm_vfe32_get_halt_restart_mask(uint32_t *irq0_mask,
+	uint32_t *irq1_mask)
+{
+	*irq1_mask = 0x01800000;
+}
+
+static struct msm_vfe_axi_hardware_info msm_vfe32_axi_hw_info = {
+	.num_wm = 6,
 	.num_comp_mask = 3,
 	.num_rdi = 3,
 	.num_rdi_master = 3,
 	.min_wm_ub = 64,
-	.scratch_buf_range = SZ_32M,
 };
 
 static struct msm_vfe_stats_hardware_info msm_vfe32_stats_hw_info = {
@@ -1412,7 +1536,22 @@
 		1 << MSM_ISP_STATS_SKIN | 1 << MSM_ISP_STATS_BHIST,
 	.stats_ping_pong_offset = stats_pingpong_offset_map,
 	.num_stats_type = VFE32_NUM_STATS_TYPE,
-	.num_stats_comp_mask = 0,
+	.num_stats_comp_mask = 1,
+};
+
+static struct v4l2_subdev_core_ops msm_vfe32_subdev_core_ops = {
+	.ioctl = msm_isp_ioctl,
+	.subscribe_event = msm_isp_subscribe_event,
+	.unsubscribe_event = msm_isp_unsubscribe_event,
+};
+
+static struct v4l2_subdev_ops msm_vfe32_subdev_ops = {
+	.core = &msm_vfe32_subdev_core_ops,
+};
+
+static struct v4l2_subdev_internal_ops msm_vfe32_internal_ops = {
+	.open = msm_isp_open_node,
+	.close = msm_isp_close_node,
 };
 
 struct msm_vfe_hardware_info vfe32_hw_info = {
@@ -1421,16 +1560,14 @@
 	.vfe_clk_idx = VFE32_CLK_IDX,
 	.vfe_ops = {
 		.irq_ops = {
-			.read_and_clear_irq_status =
-				msm_vfe32_read_and_clear_irq_status,
 			.read_irq_status = msm_vfe32_read_irq_status,
 			.process_camif_irq = msm_vfe32_process_camif_irq,
 			.process_reset_irq = msm_vfe32_process_reset_irq,
 			.process_halt_irq = msm_vfe32_process_halt_irq,
 			.process_reg_update = msm_vfe32_process_reg_update,
+			.process_epoch_irq = msm_vfe32_process_epoch_irq,
 			.process_axi_irq = msm_isp_process_axi_irq,
 			.process_stats_irq = msm_isp_process_stats_irq,
-			.process_epoch_irq = msm_vfe32_process_epoch_irq,
 		},
 		.axi_ops = {
 			.reload_wm = msm_vfe32_axi_reload_wm,
@@ -1446,15 +1583,14 @@
 			.clear_wm_reg = msm_vfe32_axi_clear_wm_reg,
 			.cfg_wm_xbar_reg = msm_vfe32_axi_cfg_wm_xbar_reg,
 			.clear_wm_xbar_reg = msm_vfe32_axi_clear_wm_xbar_reg,
-			.cfg_ub = msm_vfe47_cfg_axi_ub,
+			.cfg_ub = msm_vfe32_cfg_axi_ub,
 			.update_ping_pong_addr =
 				msm_vfe32_update_ping_pong_addr,
 			.get_comp_mask = msm_vfe32_get_comp_mask,
 			.get_wm_mask = msm_vfe32_get_wm_mask,
 			.get_pingpong_status = msm_vfe32_get_pingpong_status,
 			.halt = msm_vfe32_axi_halt,
-			.ub_reg_offset = msm_vfe40_ub_reg_offset,
-			.get_ub_size = msm_vfe40_get_ub_size,
+			.restart = msm_vfe32_axi_restart,
 		},
 		.core_ops = {
 			.reg_update = msm_vfe32_reg_update,
@@ -1469,13 +1605,13 @@
 			.release_hw = msm_vfe32_release_hardware,
 			.get_platform_data = msm_vfe32_get_platform_data,
 			.get_error_mask = msm_vfe32_get_error_mask,
-			.process_error_status = msm_vfe32_process_error_status,
 			.get_overflow_mask = msm_vfe32_get_overflow_mask,
-			.is_module_cfg_lock_needed =
-				msm_vfe32_is_module_cfg_lock_needed,
-			.ahb_clk_cfg = NULL,
-			.set_bus_err_ign_mask = NULL,
-			.get_bus_err_mask = NULL,
+			.get_rdi_wm_mask = msm_vfe32_get_rdi_wm_mask,
+			.get_irq_mask = msm_vfe32_get_irq_mask,
+			.restore_irq_mask = msm_vfe32_restore_irq_mask,
+			.get_halt_restart_mask =
+				msm_vfe32_get_halt_restart_mask,
+			.process_error_status = msm_vfe32_process_error_status,
 		},
 		.stats_ops = {
 			.get_stats_idx = msm_vfe32_get_stats_idx,
@@ -1493,47 +1629,12 @@
 			.get_wm_mask = msm_vfe32_stats_get_wm_mask,
 			.get_frame_id = msm_vfe32_stats_get_frame_id,
 			.get_pingpong_status = msm_vfe32_get_pingpong_status,
-			.enable_stats_wm = NULL,
 		},
 	},
 	.dmi_reg_offset = 0x5A0,
 	.axi_hw_info = &msm_vfe32_axi_hw_info,
 	.stats_hw_info = &msm_vfe32_stats_hw_info,
+	.subdev_ops = &msm_vfe32_subdev_ops,
+	.subdev_internal_ops = &msm_vfe32_internal_ops,
 };
 EXPORT_SYMBOL(vfe32_hw_info);
-
-static const struct of_device_id msm_vfe32_dt_match[] = {
-	{
-		.compatible = "qcom,vfe32",
-		.data = &vfe32_hw_info,
-	},
-	{}
-};
-
-MODULE_DEVICE_TABLE(of, msm_vfe32_dt_match);
-
-static struct platform_driver vfe32_driver = {
-	.probe = vfe_hw_probe,
-	.driver = {
-		.name = "msm_vfe32",
-		.owner = THIS_MODULE,
-		.of_match_table = msm_vfe32_dt_match,
-	},
-	.id_table = msm_vfe32_dev_id,
-};
-
-static int __init msm_vfe32_init_module(void)
-{
-	return platform_driver_register(&vfe32_driver);
-}
-
-static void __exit msm_vfe32_exit_module(void)
-{
-	platform_driver_unregister(&vfe32_driver);
-}
-
-module_init(msm_vfe32_init_module);
-module_exit(msm_vfe32_exit_module);
-MODULE_DESCRIPTION("MSM VFE32 driver");
-MODULE_LICENSE("GPL v2");
-
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index 19b1aac..72ab3ba 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -889,8 +889,6 @@
 		msm_camera_io_w(temp | (framedrop_period - 1) << 2,
 		vfe_base + VFE40_WM_BASE(stream_info->wm[vfe_idx][i]) + 0xC);
 	}
-
-	msm_camera_io_w_mb(0x1, vfe_base + 0x378);
 }
 
 static void msm_vfe40_clear_framedrop(struct vfe_device *vfe_dev,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_32.c
new file mode 100644
index 0000000..eada5fa
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_32.c
@@ -0,0 +1,557 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/of_device.h>
+#include <linux/sched_clock.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+
+#include "msm_isp_32.h"
+#include "msm_isp_util_32.h"
+#include "msm_isp_axi_util_32.h"
+#include "msm_isp_stats_util_32.h"
+#include "msm_sd.h"
+#include "msm_isp32.h"
+
+static struct msm_sd_req_vb2_q vfe_vb2_ops;
+
+static const struct of_device_id msm_vfe_dt_match[] = {
+	{
+		.compatible = "qcom,vfe32",
+		.data = &vfe32_hw_info,
+	},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, msm_vfe_dt_match);
+
+static const struct platform_device_id msm_vfe_dev_id[] = {
+	{"msm_vfe32", (kernel_ulong_t) &vfe32_hw_info},
+	{}
+};
+#define MAX_OVERFLOW_COUNTERS  29
+#define OVERFLOW_LENGTH 1024
+#define OVERFLOW_BUFFER_LENGTH 64
+static char stat_line[OVERFLOW_LENGTH];
+
+static struct msm_isp_buf_mgr vfe_buf_mgr;
+static int msm_isp_enable_debugfs(struct vfe_device *vfe_dev,
+				  struct msm_isp_bw_req_info *isp_req_hist);
+static char *stats_str[MAX_OVERFLOW_COUNTERS] = {
+	"imgmaster0_overflow_cnt",
+	"imgmaster1_overflow_cnt",
+	"imgmaster2_overflow_cnt",
+	"imgmaster3_overflow_cnt",
+	"imgmaster4_overflow_cnt",
+	"imgmaster5_overflow_cnt",
+	"imgmaster6_overflow_cnt",
+	"be_overflow_cnt",
+	"bg_overflow_cnt",
+	"bf_overflow_cnt",
+	"awb_overflow_cnt",
+	"rs_overflow_cnt",
+	"cs_overflow_cnt",
+	"ihist_overflow_cnt",
+	"skinbhist_overflow_cnt",
+	"bfscale_overflow_cnt",
+	"ISP_VFE0_client_info.active",
+	"ISP_VFE0_client_info.ab",
+	"ISP_VFE0_client_info.ib",
+	"ISP_VFE1_client_info.active",
+	"ISP_VFE1_client_info.ab",
+	"ISP_VFE1_client_info.ib",
+	"ISP_CPP_client_info.active",
+	"ISP_CPP_client_info.ab",
+	"ISP_CPP_client_info.ib",
+	"ISP_last_overflow.ab",
+	"ISP_last_overflow.ib",
+	"ISP_VFE_CLK_RATE",
+	"ISP_CPP_CLK_RATE",
+};
+
+#define MAX_DEPTH_BW_REQ_HISTORY 25
+#define MAX_BW_HISTORY_BUFF_LEN  6144
+#define MAX_BW_HISTORY_LINE_BUFF_LEN 512
+
+#define MAX_UB_INFO_BUFF_LEN  1024
+#define MAX_UB_INFO_LINE_BUFF_LEN 256
+
+static struct msm_isp_bw_req_info
+		msm_isp_bw_request_history[MAX_DEPTH_BW_REQ_HISTORY];
+static int msm_isp_bw_request_history_idx;
+static char bw_request_history_buff[MAX_BW_HISTORY_BUFF_LEN];
+static char ub_info_buffer[MAX_UB_INFO_BUFF_LEN];
+static spinlock_t req_history_lock;
+static int vfe_debugfs_statistics_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t vfe_debugfs_statistics_read(struct file *t_file,
+	char __user *t_char, size_t t_size_t, loff_t *t_loff_t)
+{
+	int i;
+	uint64_t *ptr;
+	char buffer[OVERFLOW_BUFFER_LENGTH] = {0};
+	struct vfe_device *vfe_dev = (struct vfe_device *)
+		t_file->private_data;
+	struct msm_isp_statistics *stats = vfe_dev->stats;
+
+	memset(stat_line, 0, sizeof(stat_line));
+	msm_isp_util_get_bandwidth_stats(vfe_dev, stats);
+	ptr = (uint64_t *)(stats);
+	for (i = 0; i < MAX_OVERFLOW_COUNTERS; i++) {
+		strlcat(stat_line, stats_str[i], sizeof(stat_line));
+		strlcat(stat_line, "     ", sizeof(stat_line));
+		snprintf(buffer, sizeof(buffer), "%llu", ptr[i]);
+		strlcat(stat_line, buffer, sizeof(stat_line));
+		strlcat(stat_line, "\r\n", sizeof(stat_line));
+	}
+	return simple_read_from_buffer(t_char, t_size_t,
+		t_loff_t, stat_line, strlen(stat_line));
+}
+
+static ssize_t vfe_debugfs_statistics_write(struct file *t_file,
+	const char __user *t_char, size_t t_size_t, loff_t *t_loff_t)
+{
+	struct vfe_device *vfe_dev = (struct vfe_device *)
+		t_file->private_data;
+	struct msm_isp_statistics *stats = vfe_dev->stats;
+
+	memset(stats, 0, sizeof(struct msm_isp_statistics));
+
+	return sizeof(struct msm_isp_statistics);
+}
+
+static int bw_history_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t bw_history_read(struct file *t_file, char __user *t_char,
+	size_t t_size_t, loff_t *t_loff_t)
+{
+	int i;
+	char *out_buffer = bw_request_history_buff;
+	char line_buffer[MAX_BW_HISTORY_LINE_BUFF_LEN] = {0};
+	struct msm_isp_bw_req_info *isp_req_hist =
+		(struct msm_isp_bw_req_info *) t_file->private_data;
+
+	memset(out_buffer, 0, MAX_BW_HISTORY_BUFF_LEN);
+
+	snprintf(line_buffer, sizeof(line_buffer),
+		"Bus bandwidth request history in chronological order:\n");
+	strlcat(out_buffer, line_buffer, sizeof(bw_request_history_buff));
+
+	snprintf(line_buffer, sizeof(line_buffer),
+		"MSM_ISP_MIN_AB = %u, MSM_ISP_MIN_IB = %u\n\n",
+		MSM_ISP_MIN_AB, MSM_ISP_MIN_IB);
+	strlcat(out_buffer, line_buffer, sizeof(bw_request_history_buff));
+
+	for (i = 0; i < MAX_DEPTH_BW_REQ_HISTORY; i++) {
+		snprintf(line_buffer, sizeof(line_buffer),
+		 "idx = %d, client = %u, timestamp = %llu, ab = %llu, ib = %llu\n"
+		 "ISP0.active = %x, ISP0.ab = %llu, ISP0.ib = %llu\n"
+		 "ISP1.active = %x, ISP1.ab = %llu, ISP1.ib = %llu\n"
+		 "CPP.active = %x, CPP.ab = %llu, CPP.ib = %llu\n\n",
+		 i, isp_req_hist[i].client, isp_req_hist[i].timestamp,
+		 isp_req_hist[i].total_ab, isp_req_hist[i].total_ib,
+		 isp_req_hist[i].client_info[0].active,
+		 isp_req_hist[i].client_info[0].ab,
+		 isp_req_hist[i].client_info[0].ib,
+		 isp_req_hist[i].client_info[1].active,
+		 isp_req_hist[i].client_info[1].ab,
+		 isp_req_hist[i].client_info[1].ib,
+		 isp_req_hist[i].client_info[2].active,
+		 isp_req_hist[i].client_info[2].ab,
+		 isp_req_hist[i].client_info[2].ib);
+		strlcat(out_buffer, line_buffer,
+		sizeof(bw_request_history_buff));
+	}
+	return simple_read_from_buffer(t_char, t_size_t,
+		t_loff_t, out_buffer, strlen(out_buffer));
+}
+
+static ssize_t bw_history_write(struct file *t_file,
+	const char __user *t_char, size_t t_size_t, loff_t *t_loff_t)
+{
+	struct msm_isp_bw_req_info *isp_req_hist =
+		(struct msm_isp_bw_req_info *) t_file->private_data;
+
+	memset(isp_req_hist, 0, sizeof(msm_isp_bw_request_history));
+	msm_isp_bw_request_history_idx = 0;
+	return sizeof(msm_isp_bw_request_history);
+}
+
+static int ub_info_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t ub_info_read(struct file *t_file, char __user *t_char,
+	size_t t_size_t, loff_t *t_loff_t)
+{
+	int i;
+	char *out_buffer = ub_info_buffer;
+	char line_buffer[MAX_UB_INFO_LINE_BUFF_LEN] = {0};
+	struct vfe_device *vfe_dev =
+		(struct vfe_device *) t_file->private_data;
+	struct msm_isp_ub_info *ub_info = vfe_dev->ub_info;
+
+	memset(out_buffer, 0, MAX_UB_INFO_BUFF_LEN);
+	snprintf(line_buffer, sizeof(line_buffer),
+		"wm_ub_policy_type = %d\n"
+		"num_wm = %d\n"
+		"wm_ub = %d\n",
+		ub_info->policy, ub_info->num_wm, ub_info->wm_ub);
+	strlcat(out_buffer, line_buffer,
+	    sizeof(ub_info_buffer));
+	for (i = 0; i < ub_info->num_wm; i++) {
+		snprintf(line_buffer, sizeof(line_buffer),
+			"data[%d] = 0x%x, addr[%d] = 0x%llx\n",
+			i, ub_info->data[i], i, ub_info->addr[i]);
+		strlcat(out_buffer, line_buffer,
+			sizeof(ub_info_buffer));
+	}
+
+	return simple_read_from_buffer(t_char, t_size_t,
+		t_loff_t, out_buffer, strlen(out_buffer));
+}
+
+static ssize_t ub_info_write(struct file *t_file,
+	const char __user *t_char, size_t t_size_t, loff_t *t_loff_t)
+{
+	struct vfe_device *vfe_dev =
+		(struct vfe_device *) t_file->private_data;
+	struct msm_isp_ub_info *ub_info = vfe_dev->ub_info;
+
+	memset(ub_info, 0, sizeof(struct msm_isp_ub_info));
+
+	return sizeof(struct msm_isp_ub_info);
+}
+
+static const struct file_operations vfe_debugfs_error = {
+	.open = vfe_debugfs_statistics_open,
+	.read = vfe_debugfs_statistics_read,
+	.write = vfe_debugfs_statistics_write,
+};
+
+static const struct file_operations bw_history_ops = {
+	.open = bw_history_open,
+	.read = bw_history_read,
+	.write = bw_history_write,
+};
+
+static const struct file_operations ub_info_ops = {
+	.open = ub_info_open,
+	.read = ub_info_read,
+	.write = ub_info_write,
+};
+
+static int msm_isp_enable_debugfs(struct vfe_device *vfe_dev,
+				  struct msm_isp_bw_req_info *isp_req_hist)
+{
+	struct dentry *debugfs_base;
+	char dirname[32] = {0};
+
+	snprintf(dirname, sizeof(dirname), "msm_isp%d", vfe_dev->pdev->id);
+	debugfs_base = debugfs_create_dir(dirname, NULL);
+	if (!debugfs_base)
+		return -ENOMEM;
+	if (!debugfs_create_file("stats", 0644, debugfs_base,
+		vfe_dev, &vfe_debugfs_error))
+		return -ENOMEM;
+
+	if (!debugfs_create_file("bw_req_history", 0644,
+		debugfs_base, isp_req_hist, &bw_history_ops))
+		return -ENOMEM;
+
+	if (!debugfs_create_file("ub_info", 0644,
+		debugfs_base, vfe_dev, &ub_info_ops))
+		return -ENOMEM;
+
+	return 0;
+}
+
+void msm_isp_update_req_history(uint32_t client, uint64_t ab,
+				 uint64_t ib,
+				 struct msm_isp_bandwidth_info *client_info,
+				 unsigned long long ts)
+{
+	int i;
+
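+	/* record the request in a circular history (exposed through the
+	 * bw_req_history debugfs file) under req_history_lock
+	 */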
+	spin_lock(&req_history_lock);
+	msm_isp_bw_request_history[msm_isp_bw_request_history_idx].client =
+		client;
+	msm_isp_bw_request_history[msm_isp_bw_request_history_idx].timestamp =
+		ts;
+	msm_isp_bw_request_history[msm_isp_bw_request_history_idx].total_ab =
+		ab;
+	msm_isp_bw_request_history[msm_isp_bw_request_history_idx].total_ib =
+		ib;
+
+	for (i = 0; i < MAX_ISP_CLIENT; i++) {
+		msm_isp_bw_request_history[msm_isp_bw_request_history_idx].
+			client_info[i].active = client_info[i].active;
+		msm_isp_bw_request_history[msm_isp_bw_request_history_idx].
+			client_info[i].ab = client_info[i].ab;
+		msm_isp_bw_request_history[msm_isp_bw_request_history_idx].
+			client_info[i].ib = client_info[i].ib;
+	}
+
+	msm_isp_bw_request_history_idx = (msm_isp_bw_request_history_idx + 1)
+			 % MAX_DEPTH_BW_REQ_HISTORY;
+	spin_unlock(&req_history_lock);
+}
+
+#ifdef CONFIG_COMPAT
+static long msm_isp_dqevent(struct file *file, struct v4l2_fh *vfh, void *arg)
+{
+	long rc;
+
+	if (is_compat_task()) {
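+		/* repack the 64-bit event payload into the 32-bit
+		 * msm_isp32_event_data32 layout expected by compat userspace
+		 */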
+		struct msm_isp32_event_data32 *event_data32;
+		struct msm_isp32_event_data  *event_data;
+		struct v4l2_event isp_event;
+		struct v4l2_event *isp_event_user;
+
+		memset(&isp_event, 0, sizeof(isp_event));
+		rc = v4l2_event_dequeue(vfh, &isp_event,
+				file->f_flags & O_NONBLOCK);
+		if (rc)
+			return rc;
+		event_data = (struct msm_isp32_event_data *)
+				isp_event.u.data;
+		isp_event_user = (struct v4l2_event *)arg;
+		memcpy(isp_event_user, &isp_event,
+				sizeof(*isp_event_user));
+		event_data32 = (struct msm_isp32_event_data32 *)
+			isp_event_user->u.data;
+		memset(event_data32, 0,
+				sizeof(struct msm_isp32_event_data32));
+		event_data32->timestamp.tv_sec =
+				event_data->timestamp.tv_sec;
+		event_data32->timestamp.tv_usec =
+				event_data->timestamp.tv_usec;
+		event_data32->mono_timestamp.tv_sec =
+				event_data->mono_timestamp.tv_sec;
+		event_data32->mono_timestamp.tv_usec =
+				event_data->mono_timestamp.tv_usec;
+		event_data32->input_intf = event_data->input_intf;
+		event_data32->frame_id = event_data->frame_id;
+		memcpy(&(event_data32->u), &(event_data->u),
+					sizeof(event_data32->u));
+	} else {
+		rc = v4l2_event_dequeue(vfh, arg,
+				file->f_flags & O_NONBLOCK);
+	}
+	return rc;
+}
+#else
+static long msm_isp_dqevent(struct file *file, struct v4l2_fh *vfh, void *arg)
+{
+	return v4l2_event_dequeue(vfh, arg,
+			file->f_flags & O_NONBLOCK);
+}
+#endif
+
+static long msm_isp_subdev_do_ioctl(
+	struct file *file, unsigned int cmd, void *arg)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+	struct v4l2_fh *vfh = file->private_data;
+
+	switch (cmd) {
+	case VIDIOC_DQEVENT: {
+		if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
+			return -ENOIOCTLCMD;
+		return msm_isp_dqevent(file, vfh, arg);
+	}
+	break;
+	case VIDIOC_SUBSCRIBE_EVENT:
+		return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);
+
+	case VIDIOC_UNSUBSCRIBE_EVENT:
+		return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);
+
+	default:
+		return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
+	}
+}
+
+static long msm_isp_subdev_fops_ioctl(struct file *file, unsigned int cmd,
+	unsigned long arg)
+{
+	return video_usercopy(file, cmd, arg, msm_isp_subdev_do_ioctl);
+}
+
+static struct v4l2_file_operations msm_isp_v4l2_subdev_fops = {
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = msm_isp_subdev_fops_ioctl,
+#endif
+	.unlocked_ioctl = msm_isp_subdev_fops_ioctl
+};
+
+static int vfe_probe(struct platform_device *pdev)
+{
+	struct vfe_device *vfe_dev;
+	/*struct msm_cam_subdev_info sd_info;*/
+	const struct of_device_id *match_dev;
+	int rc = 0;
+
+	vfe_dev = kzalloc(sizeof(struct vfe_device), GFP_KERNEL);
+	if (!vfe_dev) {
+		rc = -ENOMEM;
+		goto end;
+	}
+	vfe_dev->stats = kzalloc(sizeof(struct msm_isp_statistics), GFP_KERNEL);
+	if (!vfe_dev->stats) {
+		rc = -ENOMEM;
+		goto probe_fail1;
+	}
+
+	vfe_dev->ub_info = kzalloc(sizeof(struct msm_isp_ub_info), GFP_KERNEL);
+	if (!vfe_dev->ub_info) {
+		rc = -ENOMEM;
+		goto probe_fail2;
+	}
+	if (pdev->dev.of_node) {
+		of_property_read_u32((&pdev->dev)->of_node,
+			"cell-index", &pdev->id);
+		match_dev = of_match_device(msm_vfe_dt_match, &pdev->dev);
+		if (!match_dev) {
+			pr_err("%s: No vfe hardware info\n", __func__);
+			rc = -EINVAL;
+			goto probe_fail3;
+		}
+		vfe_dev->hw_info =
+			(struct msm_vfe_hardware_info *) match_dev->data;
+	} else {
+		vfe_dev->hw_info = (struct msm_vfe_hardware_info *)
+			platform_get_device_id(pdev)->driver_data;
+	}
+
+	if (!vfe_dev->hw_info) {
+		pr_err("%s: No vfe hardware info\n", __func__);
+		rc = -EINVAL;
+		goto probe_fail3;
+	}
+	ISP_DBG("%s: device id = %d\n", __func__, pdev->id);
+
+	vfe_dev->pdev = pdev;
+	rc = vfe_dev->hw_info->vfe_ops.core_ops.get_platform_data(vfe_dev);
+	if (rc < 0) {
+		pr_err("%s: failed to get platform resources\n", __func__);
+		rc = -ENOMEM;
+		goto probe_fail3;
+	}
+
+	INIT_LIST_HEAD(&vfe_dev->tasklet_q);
+	tasklet_init(&vfe_dev->vfe_tasklet,
+		msm_isp_do_tasklet, (unsigned long)vfe_dev);
+
+	v4l2_subdev_init(&vfe_dev->subdev.sd, vfe_dev->hw_info->subdev_ops);
+	vfe_dev->subdev.sd.internal_ops =
+		vfe_dev->hw_info->subdev_internal_ops;
+	snprintf(vfe_dev->subdev.sd.name,
+		ARRAY_SIZE(vfe_dev->subdev.sd.name),
+		"vfe");
+	vfe_dev->subdev.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+	vfe_dev->subdev.sd.flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
+	v4l2_set_subdevdata(&vfe_dev->subdev.sd, vfe_dev);
+	platform_set_drvdata(pdev, &vfe_dev->subdev.sd);
+	mutex_init(&vfe_dev->realtime_mutex);
+	mutex_init(&vfe_dev->core_mutex);
+	spin_lock_init(&vfe_dev->tasklet_lock);
+	spin_lock_init(&vfe_dev->shared_data_lock);
+	spin_lock_init(&req_history_lock);
+	media_entity_pads_init(&vfe_dev->subdev.sd.entity, 0, NULL);
+	vfe_dev->subdev.sd.entity.function = MSM_CAMERA_SUBDEV_VFE;
+	vfe_dev->subdev.sd.entity.name = pdev->name;
+	vfe_dev->subdev.close_seq = MSM_SD_CLOSE_1ST_CATEGORY | 0x2;
+	rc = msm_sd_register(&vfe_dev->subdev);
+	if (rc != 0) {
+		pr_err("%s: msm_sd_register error = %d\n", __func__, rc);
+		goto probe_fail3;
+	}
+
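+	/* clone the default subdev fops but route ioctls through
+	 * msm_isp_subdev_fops_ioctl so DQEVENT can be translated for
+	 * 32-bit callers
+	 */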
+	msm_isp_v4l2_subdev_fops.owner = v4l2_subdev_fops.owner;
+	msm_isp_v4l2_subdev_fops.open = v4l2_subdev_fops.open;
+	msm_isp_v4l2_subdev_fops.release = v4l2_subdev_fops.release;
+	msm_isp_v4l2_subdev_fops.poll = v4l2_subdev_fops.poll;
+
+	vfe_dev->subdev.sd.devnode->fops = &msm_isp_v4l2_subdev_fops;
+
+	vfe_dev->buf_mgr = &vfe_buf_mgr;
+	v4l2_subdev_notify(&vfe_dev->subdev.sd,
+		MSM_SD_NOTIFY_REQ_CB, &vfe_vb2_ops);
+	rc = msm_isp_create_isp_buf_mgr(vfe_dev->buf_mgr,
+		&vfe_vb2_ops, &pdev->dev,
+		vfe_dev->hw_info->axi_hw_info->scratch_buf_range);
+	if (rc < 0) {
+		pr_err("%s: Unable to create buffer manager\n", __func__);
+		rc = -EINVAL;
+		goto probe_fail3;
+	}
+	msm_isp_enable_debugfs(vfe_dev, msm_isp_bw_request_history);
+
+	vfe_dev->buf_mgr->init_done = 1;
+	vfe_dev->vfe_open_cnt = 0;
+	return rc;
+
+probe_fail3:
+	kfree(vfe_dev->ub_info);
+probe_fail2:
+	kfree(vfe_dev->stats);
+probe_fail1:
+	kfree(vfe_dev);
+end:
+	return rc;
+}
+
+static struct platform_driver vfe_driver = {
+	.probe = vfe_probe,
+	.driver = {
+		.name = "msm_vfe",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_vfe_dt_match,
+	},
+	.id_table = msm_vfe_dev_id,
+};
+
+static int __init msm_vfe_init_module(void)
+{
+	return platform_driver_register(&vfe_driver);
+}
+
+static void __exit msm_vfe_exit_module(void)
+{
+	platform_driver_unregister(&vfe_driver);
+}
+
+module_init(msm_vfe_init_module);
+module_exit(msm_vfe_exit_module);
+MODULE_DESCRIPTION("MSM VFE driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_32.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_32.h
new file mode 100644
index 0000000..cbe92fa
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_32.h
@@ -0,0 +1,599 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_VFE_H__
+#define __MSM_VFE_H__
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <media/v4l2-subdev.h>
+#include <media/msmb_isp.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+
+#include "msm_buf_mgr.h"
+
+#define VFE40_8974V1_VERSION 0x10000018
+#define VFE40_8974V2_VERSION 0x1001001A
+#define VFE40_8974V3_VERSION 0x1001001B
+#define VFE40_8x26_VERSION 0x20000013
+#define VFE40_8x26V2_VERSION 0x20010014
+#define VFE40_8916_VERSION 0x10030000
+#define VFE40_8939_VERSION 0x10040000
+#define VFE32_8909_VERSION 0x30600
+
+#define MAX_IOMMU_CTX 2
+#define MAX_NUM_WM 7
+#define MAX_NUM_RDI 3
+#define MAX_NUM_RDI_MASTER 3
+#define MAX_NUM_COMPOSITE_MASK 4
+#define MAX_NUM_STATS_COMP_MASK 2
+#define MAX_INIT_FRAME_DROP 31
+#define ISP_Q2 (1 << 2)
+#define ISP_Q10 (1 << 10)
+
+#define VFE_PING_FLAG 0xFFFFFFFF
+#define VFE_PONG_FLAG 0x0
+
+#define VFE_MAX_CFG_TIMEOUT 3000
+#define VFE_CLK_INFO_MAX 16
+#define STATS_COMP_BIT_MASK 0xFF0000
+
+#define MSM_ISP_MIN_AB 11000000
+#define MSM_ISP_MIN_IB 11000000
+
+struct vfe_device;
+struct msm_vfe_axi_stream;
+struct msm_vfe_stats_stream;
+
+struct vfe_subscribe_info {
+	struct v4l2_fh *vfh;
+	uint32_t active;
+};
+
+enum msm_isp_pack_fmt {
+	QCOM,
+	MIPI,
+	DPCM6,
+	DPCM8,
+	PLAIN8,
+	PLAIN16,
+	MAX_ISP_PACK_FMT,
+};
+
+enum msm_isp_camif_update_state {
+	NO_UPDATE,
+	ENABLE_CAMIF,
+	DISABLE_CAMIF,
+	DISABLE_CAMIF_IMMEDIATELY
+};
+
+struct msm_isp_timestamp {
+	/*Monotonic clock for v4l2 buffer*/
+	struct timeval buf_time;
+	/*Monotonic clock for VT */
+	struct timeval vt_time;
+	/*Wall clock for userspace event*/
+	struct timeval event_time;
+};
+
+struct msm_vfe_irq_ops {
+	void (*read_irq_status)(struct vfe_device *vfe_dev,
+		uint32_t *irq_status0, uint32_t *irq_status1);
+	void (*process_reg_update)(struct vfe_device *vfe_dev,
+		uint32_t irq_status0, uint32_t irq_status1,
+		struct msm_isp_timestamp *ts);
+	void (*process_epoch_irq)(struct vfe_device *vfe_dev,
+		uint32_t irq_status0, uint32_t irq_status1,
+		struct msm_isp_timestamp *ts);
+	void (*process_reset_irq)(struct vfe_device *vfe_dev,
+		uint32_t irq_status0, uint32_t irq_status1);
+	void (*process_halt_irq)(struct vfe_device *vfe_dev,
+		uint32_t irq_status0, uint32_t irq_status1);
+	void (*process_camif_irq)(struct vfe_device *vfe_dev,
+		uint32_t irq_status0, uint32_t irq_status1,
+		struct msm_isp_timestamp *ts);
+	void (*process_axi_irq)(struct vfe_device *vfe_dev,
+		uint32_t irq_status0, uint32_t irq_status1,
+		struct msm_isp_timestamp *ts);
+	void (*process_stats_irq)(struct vfe_device *vfe_dev,
+		uint32_t irq_status0, uint32_t irq_status1,
+		struct msm_isp_timestamp *ts);
+};
+
+struct msm_vfe_axi_ops {
+	void (*reload_wm)(struct vfe_device *vfe_dev,
+		uint32_t reload_mask);
+	void (*enable_wm)(struct vfe_device *vfe_dev,
+		uint8_t wm_idx, uint8_t enable);
+	int32_t (*cfg_io_format)(struct vfe_device *vfe_dev,
+		enum msm_vfe_axi_stream_src stream_src,
+		uint32_t io_format);
+	void (*cfg_framedrop)(struct vfe_device *vfe_dev,
+		struct msm_vfe_axi_stream *stream_info);
+	void (*clear_framedrop)(struct vfe_device *vfe_dev,
+		struct msm_vfe_axi_stream *stream_info);
+	void (*cfg_comp_mask)(struct vfe_device *vfe_dev,
+		struct msm_vfe_axi_stream *stream_info);
+	void (*clear_comp_mask)(struct vfe_device *vfe_dev,
+		struct msm_vfe_axi_stream *stream_info);
+	void (*cfg_wm_irq_mask)(struct vfe_device *vfe_dev,
+		struct msm_vfe_axi_stream *stream_info);
+	void (*clear_wm_irq_mask)(struct vfe_device *vfe_dev,
+		struct msm_vfe_axi_stream *stream_info);
+
+	void (*cfg_wm_reg)(struct vfe_device *vfe_dev,
+		struct msm_vfe_axi_stream *stream_info,
+		uint8_t plane_idx);
+	void (*clear_wm_reg)(struct vfe_device *vfe_dev,
+		struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx);
+
+	void (*cfg_wm_xbar_reg)(struct vfe_device *vfe_dev,
+		struct msm_vfe_axi_stream *stream_info,
+		uint8_t plane_idx);
+	void (*clear_wm_xbar_reg)(struct vfe_device *vfe_dev,
+		struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx);
+
+	void (*cfg_ub)(struct vfe_device *vfe_dev);
+
+	void (*update_ping_pong_addr)(struct vfe_device *vfe_dev,
+		uint8_t wm_idx, uint32_t pingpong_status, dma_addr_t paddr);
+
+	uint32_t (*get_wm_mask)(uint32_t irq_status0, uint32_t irq_status1);
+	uint32_t (*get_comp_mask)(uint32_t irq_status0, uint32_t irq_status1);
+	uint32_t (*get_pingpong_status)(struct vfe_device *vfe_dev);
+	int (*halt)(struct vfe_device *vfe_dev, uint32_t blocking);
+	int (*restart)(struct vfe_device *vfe_dev, uint32_t blocking,
+		uint32_t enable_camif);
+	void (*update_cgc_override)(struct vfe_device *vfe_dev,
+		uint8_t wm_idx, uint8_t cgc_override);
+};
+
+struct msm_vfe_core_ops {
+	void (*reg_update)(struct vfe_device *vfe_dev, uint32_t input_src);
+	long (*reset_hw)(struct vfe_device *vfe_dev, uint32_t first_start,
+		uint32_t blocking_call);
+	int (*init_hw)(struct vfe_device *vfe_dev);
+	void (*init_hw_reg)(struct vfe_device *vfe_dev);
+	void (*clear_status_reg)(struct vfe_device *vfe_dev);
+	void (*release_hw)(struct vfe_device *vfe_dev);
+	void (*cfg_input_mux)(struct vfe_device *vfe_dev,
+		struct msm_vfe_pix_cfg *pix_cfg);
+	int (*start_fetch_eng)(struct vfe_device *vfe_dev,
+		void *arg);
+	void (*update_camif_state)(struct vfe_device *vfe_dev,
+		enum msm_isp_camif_update_state update_state);
+	void (*cfg_rdi_reg)(struct vfe_device *vfe_dev,
+		struct msm_vfe_rdi_cfg *rdi_cfg,
+		enum msm_vfe_input_src input_src);
+	int (*get_platform_data)(struct vfe_device *vfe_dev);
+	void (*get_error_mask)(uint32_t *error_mask0, uint32_t *error_mask1);
+	void (*process_error_status)(struct vfe_device *vfe_dev);
+	void (*get_overflow_mask)(uint32_t *overflow_mask);
+	void (*get_irq_mask)(struct vfe_device *vfe_dev,
+		uint32_t *irq0_mask, uint32_t *irq1_mask);
+	void (*restore_irq_mask)(struct vfe_device *vfe_dev);
+	void (*get_halt_restart_mask)(uint32_t *irq0_mask,
+		uint32_t *irq1_mask);
+	void (*get_rdi_wm_mask)(struct vfe_device *vfe_dev,
+		uint32_t *rdi_wm_mask);
+};
+struct msm_vfe_stats_ops {
+	int (*get_stats_idx)(enum msm_isp_stats_type stats_type);
+	int (*check_streams)(struct msm_vfe_stats_stream *stream_info);
+	void (*cfg_framedrop)(struct vfe_device *vfe_dev,
+		struct msm_vfe_stats_stream *stream_info);
+	void (*clear_framedrop)(struct vfe_device *vfe_dev,
+		struct msm_vfe_stats_stream *stream_info);
+	void (*cfg_comp_mask)(struct vfe_device *vfe_dev,
+		uint32_t stats_mask, uint8_t enable);
+	void (*cfg_wm_irq_mask)(struct vfe_device *vfe_dev,
+		struct msm_vfe_stats_stream *stream_info);
+	void (*clear_wm_irq_mask)(struct vfe_device *vfe_dev,
+		struct msm_vfe_stats_stream *stream_info);
+
+	void (*cfg_wm_reg)(struct vfe_device *vfe_dev,
+		struct msm_vfe_stats_stream *stream_info);
+	void (*clear_wm_reg)(struct vfe_device *vfe_dev,
+		struct msm_vfe_stats_stream *stream_info);
+
+	void (*cfg_ub)(struct vfe_device *vfe_dev);
+
+	void (*enable_module)(struct vfe_device *vfe_dev,
+		uint32_t stats_mask, uint8_t enable);
+
+	void (*update_ping_pong_addr)(struct vfe_device *vfe_dev,
+		struct msm_vfe_stats_stream *stream_info,
+		uint32_t pingpong_status, dma_addr_t paddr);
+
+	uint32_t (*get_frame_id)(struct vfe_device *vfe_dev);
+	uint32_t (*get_wm_mask)(uint32_t irq_status0, uint32_t irq_status1);
+	uint32_t (*get_comp_mask)(uint32_t irq_status0, uint32_t irq_status1);
+	uint32_t (*get_pingpong_status)(struct vfe_device *vfe_dev);
+
+	void (*update_cgc_override)(struct vfe_device *vfe_dev,
+		uint32_t stats_mask, uint8_t enable);
+};
+
+struct msm_vfe_ops {
+	struct msm_vfe_irq_ops irq_ops;
+	struct msm_vfe_axi_ops axi_ops;
+	struct msm_vfe_core_ops core_ops;
+	struct msm_vfe_stats_ops stats_ops;
+};
+
+struct msm_vfe_hardware_info {
+	int num_iommu_ctx;
+	/* secure iommu ctx nums */
+	int num_iommu_secure_ctx;
+	int vfe_clk_idx;
+	struct msm_vfe_ops vfe_ops;
+	struct msm_vfe_axi_hardware_info *axi_hw_info;
+	struct msm_vfe_stats_hardware_info *stats_hw_info;
+	struct v4l2_subdev_internal_ops *subdev_internal_ops;
+	struct v4l2_subdev_ops *subdev_ops;
+	uint32_t dmi_reg_offset;
+};
+
+struct msm_vfe_axi_hardware_info {
+	uint8_t num_wm;
+	uint8_t num_rdi;
+	uint8_t num_rdi_master;
+	uint8_t num_comp_mask;
+	uint32_t min_wm_ub;
+	uint32_t scratch_buf_range;
+};
+
+enum msm_vfe_axi_state {
+	AVAILABLE,
+	INACTIVE,
+	ACTIVE,
+	PAUSED,
+	START_PENDING,
+	STOP_PENDING,
+	PAUSE_PENDING,
+	RESUME_PENDING,
+	STARTING,
+	STOPPING,
+	PAUSING,
+	RESUMING,
+};
+
+enum msm_vfe_axi_cfg_update_state {
+	NO_AXI_CFG_UPDATE,
+	APPLYING_UPDATE_RESUME,
+	UPDATE_REQUESTED,
+};
+
+#define VFE_NO_DROP	       0xFFFFFFFF
+#define VFE_DROP_EVERY_2FRAME  0x55555555
+#define VFE_DROP_EVERY_4FRAME  0x11111111
+#define VFE_DROP_EVERY_8FRAME  0x01010101
+#define VFE_DROP_EVERY_16FRAME 0x00010001
+#define VFE_DROP_EVERY_32FRAME 0x00000001
+
+enum msm_vfe_axi_stream_type {
+	CONTINUOUS_STREAM,
+	BURST_STREAM,
+};
+
+struct msm_vfe_axi_stream {
+	uint32_t frame_id;
+	enum msm_vfe_axi_state state;
+	enum msm_vfe_axi_stream_src stream_src;
+	uint8_t num_planes;
+	uint8_t wm[MAX_PLANES_PER_STREAM];
+	uint32_t output_format;/*Planar/RAW/Misc*/
+	struct msm_vfe_axi_plane_cfg plane_cfg[MAX_PLANES_PER_STREAM];
+	uint8_t comp_mask_index;
+	struct msm_isp_buffer *buf[2];
+	uint32_t session_id;
+	uint32_t stream_id;
+	uint32_t bufq_handle;
+	uint32_t bufq_scratch_handle;
+	uint32_t controllable_output;
+	uint32_t stream_handle;
+	uint32_t request_frm_num;
+	uint8_t buf_divert;
+	enum msm_vfe_axi_stream_type stream_type;
+	uint32_t frame_based;
+	enum msm_vfe_frame_skip_pattern frame_skip_pattern;
+	uint32_t framedrop_period;
+	uint32_t framedrop_pattern;
+	uint32_t num_burst_capture;/*number of frames to capture*/
+	uint32_t init_frame_drop;
+	uint32_t burst_frame_count;/*number of SOFs before burst stops*/
+	uint8_t framedrop_update;
+	spinlock_t lock;
+
+	/*Bandwidth calculation info*/
+	uint32_t max_width;
+	/*Based on format plane size in Q2, e.g. NV12 = 1.5*/
+	uint32_t format_factor;
+	uint32_t bandwidth;
+
+	/*Run time update variables*/
+	uint32_t runtime_init_frame_drop;
+	uint32_t runtime_burst_frame_count;/*number of SOFs before burst stops*/
+	uint32_t runtime_num_burst_capture;
+	uint8_t  runtime_framedrop_update;
+	uint8_t  runtime_framedrop_update_burst;
+	uint32_t runtime_output_format;
+	enum msm_stream_memory_input_t  memory_input;
+};
+
+struct msm_vfe_axi_composite_info {
+	uint32_t stream_handle;
+	uint32_t stream_composite_mask;
+};
+
+struct msm_vfe_src_info {
+	uint32_t frame_id;
+	uint8_t active;
+	uint8_t pix_stream_count;
+	uint8_t raw_stream_count;
+	enum msm_vfe_inputmux input_mux;
+	uint32_t width;
+	long pixel_clock;
+	uint32_t input_format;/*V4L2 pix format with bayer pattern*/
+	uint32_t last_updt_frm_id;
+};
+
+struct msm_vfe_fetch_engine_info {
+	uint32_t session_id;
+	uint32_t stream_id;
+	uint32_t bufq_handle;
+	uint32_t buf_idx;
+	uint8_t is_busy;
+};
+
+enum msm_wm_ub_cfg_type {
+	MSM_WM_UB_CFG_DEFAULT,
+	MSM_WM_UB_EQUAL_SLICING,
+	MSM_WM_UB_CFG_MAX_NUM
+};
+
+struct msm_vfe_axi_shared_data {
+	struct msm_vfe_axi_hardware_info *hw_info;
+	struct msm_vfe_axi_stream stream_info[VFE_AXI_SRC_MAX];
+	uint32_t free_wm[MAX_NUM_WM];
+	uint32_t wm_image_size[MAX_NUM_WM];
+	enum msm_wm_ub_cfg_type wm_ub_cfg_policy;
+	uint8_t num_used_wm;
+	uint8_t num_active_stream;
+	uint8_t num_rdi_stream;
+	uint8_t num_pix_stream;
+	uint32_t rdi_wm_mask;
+	struct msm_vfe_axi_composite_info
+	composite_info[MAX_NUM_COMPOSITE_MASK];
+	uint8_t num_used_composite_mask;
+	uint32_t stream_update;
+	atomic_t axi_cfg_update;
+	enum msm_isp_camif_update_state pipeline_update;
+	struct msm_vfe_src_info src_info[VFE_SRC_MAX];
+	uint16_t stream_handle_cnt;
+	uint32_t event_mask;
+};
+
+struct msm_vfe_stats_hardware_info {
+	uint32_t stats_capability_mask;
+	uint8_t *stats_ping_pong_offset;
+	uint8_t num_stats_type;
+	uint8_t num_stats_comp_mask;
+};
+
+enum msm_vfe_stats_state {
+	STATS_AVAILABLE,
+	STATS_INACTIVE,
+	STATS_ACTIVE,
+	STATS_START_PENDING,
+	STATS_STOP_PENDING,
+	STATS_STARTING,
+	STATS_STOPPING,
+};
+
+struct msm_vfe_stats_stream {
+	uint32_t session_id;
+	uint32_t stream_id;
+	uint32_t stream_handle;
+	uint32_t composite_flag;
+	enum msm_isp_stats_type stats_type;
+	enum msm_vfe_stats_state state;
+	uint32_t framedrop_pattern;
+	uint32_t framedrop_period;
+	uint32_t irq_subsample_pattern;
+	uint32_t init_stats_frame_drop;
+
+	uint32_t buffer_offset;
+	struct msm_isp_buffer *buf[2];
+	uint32_t bufq_handle;
+};
+
+struct msm_vfe_stats_shared_data {
+	struct msm_vfe_stats_stream stream_info[MSM_ISP_STATS_MAX];
+	uint8_t num_active_stream;
+	atomic_t stats_comp_mask[MAX_NUM_STATS_COMP_MASK];
+	uint32_t reg_mask;
+	uint16_t stream_handle_cnt;
+	atomic_t stats_update;
+};
+
+struct msm_vfe_tasklet_queue_cmd {
+	struct list_head list;
+	uint32_t vfeInterruptStatus0;
+	uint32_t vfeInterruptStatus1;
+	struct msm_isp_timestamp ts;
+	uint8_t cmd_used;
+};
+
+#define MSM_VFE_TASKLETQ_SIZE 200
+
+enum msm_vfe_overflow_state {
+	NO_OVERFLOW,
+	OVERFLOW_DETECTED,
+	HALT_REQUESTED,
+	RESTART_REQUESTED,
+};
+
+struct msm_vfe_error_info {
+	atomic_t overflow_state;
+	uint32_t overflow_recover_irq_mask0;
+	uint32_t overflow_recover_irq_mask1;
+	uint32_t error_mask0;
+	uint32_t error_mask1;
+	uint32_t violation_status;
+	uint32_t camif_status;
+	uint32_t stream_framedrop_count[MAX_NUM_STREAM];
+	uint32_t stats_framedrop_count[MSM_ISP_STATS_MAX];
+	uint32_t info_dump_frame_count;
+	uint32_t error_count;
+};
+
+struct msm_isp_statistics {
+	int64_t imagemaster0_overflow;
+	int64_t imagemaster1_overflow;
+	int64_t imagemaster2_overflow;
+	int64_t imagemaster3_overflow;
+	int64_t imagemaster4_overflow;
+	int64_t imagemaster5_overflow;
+	int64_t imagemaster6_overflow;
+	int64_t be_overflow;
+	int64_t bg_overflow;
+	int64_t bf_overflow;
+	int64_t awb_overflow;
+	int64_t rs_overflow;
+	int64_t cs_overflow;
+	int64_t ihist_overflow;
+	int64_t skinbhist_overflow;
+	int64_t bfscale_overflow;
+
+	int64_t isp_vfe0_active;
+	int64_t isp_vfe0_ab;
+	int64_t isp_vfe0_ib;
+
+	int64_t isp_vfe1_active;
+	int64_t isp_vfe1_ab;
+	int64_t isp_vfe1_ib;
+
+	int64_t isp_cpp_active;
+	int64_t isp_cpp_ab;
+	int64_t isp_cpp_ib;
+
+	int64_t last_overflow_ab;
+	int64_t last_overflow_ib;
+
+	int64_t vfe_clk_rate;
+	int64_t cpp_clk_rate;
+};
+
+enum msm_isp_hw_client {
+	ISP_VFE0,
+	ISP_VFE1,
+	ISP_CPP,
+	MAX_ISP_CLIENT,
+};
+
+struct msm_isp_bandwidth_info {
+	uint32_t active;
+	uint64_t ab;
+	uint64_t ib;
+};
+
+struct msm_isp_bw_req_info {
+	uint32_t client;
+	unsigned long long timestamp;
+	uint64_t total_ab;
+	uint64_t total_ib;
+	struct msm_isp_bandwidth_info client_info[MAX_ISP_CLIENT];
+};
+
+#define MSM_ISP_MAX_WM 7
+struct msm_isp_ub_info {
+	enum msm_wm_ub_cfg_type policy;
+	uint8_t num_wm;
+	uint32_t wm_ub;
+	uint32_t data[MSM_ISP_MAX_WM];
+	uint64_t addr[MSM_ISP_MAX_WM];
+};
+
+struct msm_vfe_hw_init_parms {
+	const char *entries;
+	const char *regs;
+	const char *settings;
+};
+
+struct vfe_device {
+	struct platform_device *pdev;
+	struct msm_sd_subdev subdev;
+	struct resource *vfe_irq;
+	struct resource *vfe_mem;
+	struct resource *vfe_vbif_mem;
+	struct resource *vfe_io;
+	struct resource *vfe_vbif_io;
+	void __iomem *vfe_base;
+	void __iomem *vfe_vbif_base;
+
+	struct device *iommu_ctx[MAX_IOMMU_CTX];
+	/*Add secure context banks*/
+	struct device *iommu_secure_ctx[MAX_IOMMU_CTX];
+
+	struct regulator *fs_vfe;
+	struct clk **vfe_clk;
+	uint32_t num_clk;
+
+	uint32_t bus_perf_client;
+
+	struct completion reset_complete;
+	struct completion halt_complete;
+	struct completion stream_config_complete;
+	struct completion stats_config_complete;
+	struct mutex realtime_mutex;
+	struct mutex core_mutex;
+
+	atomic_t irq_cnt;
+	uint8_t taskletq_idx;
+	spinlock_t  tasklet_lock;
+	spinlock_t  shared_data_lock;
+	struct list_head tasklet_q;
+	struct tasklet_struct vfe_tasklet;
+	struct msm_vfe_tasklet_queue_cmd
+		tasklet_queue_cmd[MSM_VFE_TASKLETQ_SIZE];
+
+	uint32_t vfe_hw_version;
+	struct msm_vfe_hardware_info *hw_info;
+	struct msm_vfe_axi_shared_data axi_data;
+	struct msm_vfe_stats_shared_data stats_data;
+	struct msm_vfe_error_info error_info;
+	struct msm_isp_buf_mgr *buf_mgr;
+	int dump_reg;
+	int vfe_clk_idx;
+	uint32_t vfe_open_cnt;
+	uint8_t vt_enable;
+	uint8_t ignore_error;
+	struct msm_isp_statistics *stats;
+	struct msm_vfe_fetch_engine_info fetch_engine_info;
+	uint64_t msm_isp_last_overflow_ab;
+	uint64_t msm_isp_last_overflow_ib;
+	uint64_t msm_isp_vfe_clk_rate;
+	struct msm_isp_ub_info *ub_info;
+	uint32_t vfe_ub_policy;
+	uint32_t isp_sof_debug;
+	uint8_t reset_pending;
+	uint32_t bus_util_factor;
+	uint8_t vfe_reset_timeout_processed;
+};
+
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index 0b84327..f50e257 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -3847,7 +3847,7 @@
 
 int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
 {
-	int rc = 0, i;
+	int rc = 0, i, j, k;
 	struct msm_vfe_axi_stream *stream_info;
 	struct msm_vfe_axi_stream_update_cmd *update_cmd = arg;
 	struct msm_vfe_axi_stream_cfg_update_info *update_info = NULL;
@@ -4126,8 +4126,16 @@
 			}
 			vfe_idx = msm_isp_get_vfe_idx_for_stream(
 				vfe_dev, stream_info);
-			msm_isp_stream_axi_cfg_update(vfe_dev, stream_info,
-				update_info);
+			for (j = 0; j < stream_info->num_planes; j++) {
+				stream_info->plane_cfg[vfe_idx][j] =
+					update_info->plane_cfg[j];
+				for (k = 0; k < stream_info->num_isp; k++) {
+					vfe_dev = stream_info->vfe_dev[k];
+					vfe_dev->hw_info->vfe_ops.axi_ops.
+						cfg_wm_reg(vfe_dev,
+						stream_info, j);
+				}
+			}
 		}
 		break;
 	}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util_32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util_32.c
new file mode 100644
index 0000000..21bac16
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util_32.c
@@ -0,0 +1,2092 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/io.h>
+#include <media/v4l2-subdev.h>
+#include <asm/div64.h>
+#include "msm_isp_util_32.h"
+#include "msm_isp_axi_util_32.h"
+
+#define SRC_TO_INTF(src) \
+	((src < RDI_INTF_0 || src == VFE_AXI_SRC_MAX) ? VFE_PIX_0 : \
+	(VFE_RAW_0 + src - RDI_INTF_0))
+
+#define HANDLE_TO_IDX(handle) (handle & 0xFF)
+
+int msm_isp_axi_create_stream(
+	struct msm_vfe_axi_shared_data *axi_data,
+	struct msm_vfe32_axi_stream_request_cmd *stream_cfg_cmd)
+{
+	uint32_t i = stream_cfg_cmd->stream_src;
+
+	if (i >= VFE_AXI_SRC_MAX) {
+		pr_err("%s:%d invalid stream_src %d\n", __func__, __LINE__,
+			stream_cfg_cmd->stream_src);
+		return -EINVAL;
+	}
+
+	if ((axi_data->stream_handle_cnt << 8) == 0)
+		axi_data->stream_handle_cnt++;
+
+	stream_cfg_cmd->axi_stream_handle =
+		(++axi_data->stream_handle_cnt) << 8 | i;
+
+	memset(&axi_data->stream_info[i], 0,
+		   sizeof(struct msm_vfe_axi_stream));
+	spin_lock_init(&axi_data->stream_info[i].lock);
+	axi_data->stream_info[i].session_id = stream_cfg_cmd->session_id;
+	axi_data->stream_info[i].stream_id = stream_cfg_cmd->stream_id;
+	axi_data->stream_info[i].buf_divert = stream_cfg_cmd->buf_divert;
+	axi_data->stream_info[i].state = INACTIVE;
+	axi_data->stream_info[i].stream_handle =
+		stream_cfg_cmd->axi_stream_handle;
+	axi_data->stream_info[i].controllable_output =
+		stream_cfg_cmd->controllable_output;
+	if (stream_cfg_cmd->controllable_output)
+		stream_cfg_cmd->frame_skip_pattern = SKIP_ALL;
+	return 0;
+}
+
+void msm_isp_axi_destroy_stream(
+	struct msm_vfe_axi_shared_data *axi_data, int stream_idx)
+{
+	if (axi_data->stream_info[stream_idx].state != AVAILABLE) {
+		axi_data->stream_info[stream_idx].state = AVAILABLE;
+		axi_data->stream_info[stream_idx].stream_handle = 0;
+	} else {
+		pr_err("%s: stream does not exist\n", __func__);
+	}
+}
+
+int msm_isp_validate_axi_request(struct msm_vfe_axi_shared_data *axi_data,
+	struct msm_vfe32_axi_stream_request_cmd *stream_cfg_cmd)
+{
+	int rc = -1, i;
+	struct msm_vfe_axi_stream *stream_info = NULL;
+
+	if (HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle) < MAX_NUM_STREAM) {
+		stream_info = &axi_data->stream_info[
+			HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
+	} else {
+		pr_err("%s: Invalid axi_stream_handle\n", __func__);
+		return rc;
+	}
+
+	if (!stream_info) {
+		pr_err("%s: Stream info is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (stream_cfg_cmd->output_format) {
+	case V4L2_PIX_FMT_YUYV:
+	case V4L2_PIX_FMT_YVYU:
+	case V4L2_PIX_FMT_UYVY:
+	case V4L2_PIX_FMT_VYUY:
+	case V4L2_PIX_FMT_SBGGR8:
+	case V4L2_PIX_FMT_SGBRG8:
+	case V4L2_PIX_FMT_SGRBG8:
+	case V4L2_PIX_FMT_SRGGB8:
+	case V4L2_PIX_FMT_SBGGR10:
+	case V4L2_PIX_FMT_SGBRG10:
+	case V4L2_PIX_FMT_SGRBG10:
+	case V4L2_PIX_FMT_SRGGB10:
+	case V4L2_PIX_FMT_SBGGR12:
+	case V4L2_PIX_FMT_SGBRG12:
+	case V4L2_PIX_FMT_SGRBG12:
+	case V4L2_PIX_FMT_SRGGB12:
+	case V4L2_PIX_FMT_SBGGR14:
+	case V4L2_PIX_FMT_SGBRG14:
+	case V4L2_PIX_FMT_SGRBG14:
+	case V4L2_PIX_FMT_SRGGB14:
+	case V4L2_PIX_FMT_QBGGR8:
+	case V4L2_PIX_FMT_QGBRG8:
+	case V4L2_PIX_FMT_QGRBG8:
+	case V4L2_PIX_FMT_QRGGB8:
+	case V4L2_PIX_FMT_QBGGR10:
+	case V4L2_PIX_FMT_QGBRG10:
+	case V4L2_PIX_FMT_QGRBG10:
+	case V4L2_PIX_FMT_QRGGB10:
+	case V4L2_PIX_FMT_QBGGR12:
+	case V4L2_PIX_FMT_QGBRG12:
+	case V4L2_PIX_FMT_QGRBG12:
+	case V4L2_PIX_FMT_QRGGB12:
+	case V4L2_PIX_FMT_QBGGR14:
+	case V4L2_PIX_FMT_QGBRG14:
+	case V4L2_PIX_FMT_QGRBG14:
+	case V4L2_PIX_FMT_QRGGB14:
+	case V4L2_PIX_FMT_P16BGGR10:
+	case V4L2_PIX_FMT_P16GBRG10:
+	case V4L2_PIX_FMT_P16GRBG10:
+	case V4L2_PIX_FMT_P16RGGB10:
+	case V4L2_PIX_FMT_JPEG:
+	case V4L2_PIX_FMT_META:
+		stream_info->num_planes = 1;
+		stream_info->format_factor = ISP_Q2;
+		break;
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_NV21:
+	case V4L2_PIX_FMT_NV14:
+	case V4L2_PIX_FMT_NV41:
+	case V4L2_PIX_FMT_NV16:
+	case V4L2_PIX_FMT_NV61:
+		stream_info->num_planes = 2;
+		stream_info->format_factor = 1.5 * ISP_Q2;
+		break;
+	/*TODO: Add more image formats*/
+	default:
+		msm_isp_print_fourcc_error(__func__,
+				stream_cfg_cmd->output_format);
+		return rc;
+	}
+
+	if (axi_data->hw_info->num_wm - axi_data->num_used_wm <
+		stream_info->num_planes) {
+		pr_err("%s: No free write masters\n", __func__);
+		return rc;
+	}
+
+	if ((stream_info->num_planes > 1) &&
+			(axi_data->hw_info->num_comp_mask -
+			axi_data->num_used_composite_mask < 1)) {
+		pr_err("%s: No free composite mask\n", __func__);
+		return rc;
+	}
+
+	if (stream_cfg_cmd->init_frame_drop >= MAX_INIT_FRAME_DROP) {
+		pr_err("%s: Invalid skip pattern\n", __func__);
+		return rc;
+	}
+
+	if (stream_cfg_cmd->frame_skip_pattern >= MAX_SKIP) {
+		pr_err("%s: Invalid skip pattern\n", __func__);
+		return rc;
+	}
+
+	for (i = 0; i < stream_info->num_planes; i++) {
+		stream_info->plane_cfg[i] = stream_cfg_cmd->plane_cfg[i];
+		stream_info->max_width = max(stream_info->max_width,
+			stream_cfg_cmd->plane_cfg[i].output_width);
+	}
+
+	stream_info->output_format = stream_cfg_cmd->output_format;
+	stream_info->runtime_output_format = stream_info->output_format;
+	stream_info->stream_src = stream_cfg_cmd->stream_src;
+	stream_info->frame_based = stream_cfg_cmd->frame_base;
+	return 0;
+}
+
+static uint32_t msm_isp_axi_get_plane_size(
+	struct msm_vfe_axi_stream *stream_info, int plane_idx)
+{
+	uint32_t size = 0;
+	struct msm_vfe_axi_plane_cfg *plane_cfg = stream_info->plane_cfg;
+
+	switch (stream_info->output_format) {
+	case V4L2_PIX_FMT_YUYV:
+	case V4L2_PIX_FMT_YVYU:
+	case V4L2_PIX_FMT_UYVY:
+	case V4L2_PIX_FMT_VYUY:
+	case V4L2_PIX_FMT_SBGGR8:
+	case V4L2_PIX_FMT_SGBRG8:
+	case V4L2_PIX_FMT_SGRBG8:
+	case V4L2_PIX_FMT_SRGGB8:
+	case V4L2_PIX_FMT_QBGGR8:
+	case V4L2_PIX_FMT_QGBRG8:
+	case V4L2_PIX_FMT_QGRBG8:
+	case V4L2_PIX_FMT_QRGGB8:
+	case V4L2_PIX_FMT_JPEG:
+	case V4L2_PIX_FMT_META:
+		size = plane_cfg[plane_idx].output_height *
+		plane_cfg[plane_idx].output_width;
+		break;
+	case V4L2_PIX_FMT_SBGGR10:
+	case V4L2_PIX_FMT_SGBRG10:
+	case V4L2_PIX_FMT_SGRBG10:
+	case V4L2_PIX_FMT_SRGGB10:
+	case V4L2_PIX_FMT_QBGGR10:
+	case V4L2_PIX_FMT_QGBRG10:
+	case V4L2_PIX_FMT_QGRBG10:
+	case V4L2_PIX_FMT_QRGGB10:
+		/* TODO: fix me */
+		size = plane_cfg[plane_idx].output_height *
+		plane_cfg[plane_idx].output_width;
+		break;
+	case V4L2_PIX_FMT_SBGGR12:
+	case V4L2_PIX_FMT_SGBRG12:
+	case V4L2_PIX_FMT_SGRBG12:
+	case V4L2_PIX_FMT_SRGGB12:
+	case V4L2_PIX_FMT_QBGGR12:
+	case V4L2_PIX_FMT_QGBRG12:
+	case V4L2_PIX_FMT_QGRBG12:
+	case V4L2_PIX_FMT_QRGGB12:
+	case V4L2_PIX_FMT_SBGGR14:
+	case V4L2_PIX_FMT_SGBRG14:
+	case V4L2_PIX_FMT_SGRBG14:
+	case V4L2_PIX_FMT_SRGGB14:
+	case V4L2_PIX_FMT_QBGGR14:
+	case V4L2_PIX_FMT_QGBRG14:
+	case V4L2_PIX_FMT_QGRBG14:
+	case V4L2_PIX_FMT_QRGGB14:
+		/* TODO: fix me */
+		size = plane_cfg[plane_idx].output_height *
+		plane_cfg[plane_idx].output_width;
+		break;
+	case V4L2_PIX_FMT_P16BGGR10:
+	case V4L2_PIX_FMT_P16GBRG10:
+	case V4L2_PIX_FMT_P16GRBG10:
+	case V4L2_PIX_FMT_P16RGGB10:
+		size = plane_cfg[plane_idx].output_height *
+		plane_cfg[plane_idx].output_width;
+		break;
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_NV21:
+		if (plane_cfg[plane_idx].output_plane_format == Y_PLANE)
+			size = plane_cfg[plane_idx].output_height *
+				plane_cfg[plane_idx].output_width;
+		else
+			size = plane_cfg[plane_idx].output_height *
+				plane_cfg[plane_idx].output_width;
+		break;
+	case V4L2_PIX_FMT_NV14:
+	case V4L2_PIX_FMT_NV41:
+		if (plane_cfg[plane_idx].output_plane_format == Y_PLANE)
+			size = plane_cfg[plane_idx].output_height *
+				plane_cfg[plane_idx].output_width;
+		else
+			size = plane_cfg[plane_idx].output_height *
+				plane_cfg[plane_idx].output_width;
+		break;
+	case V4L2_PIX_FMT_NV16:
+	case V4L2_PIX_FMT_NV61:
+		size = plane_cfg[plane_idx].output_height *
+			plane_cfg[plane_idx].output_width;
+		break;
+	/*TODO: Add more image formats*/
+	default:
+		msm_isp_print_fourcc_error(__func__,
+				stream_info->output_format);
+		break;
+	}
+	return size;
+}
+
+void msm_isp_axi_reserve_wm(struct msm_vfe_axi_shared_data *axi_data,
+	struct msm_vfe_axi_stream *stream_info)
+{
+	int i, j;
+
+	for (i = 0; i < stream_info->num_planes; i++) {
+		for (j = 0; j < axi_data->hw_info->num_wm; j++) {
+			if (!axi_data->free_wm[j]) {
+				axi_data->free_wm[j] =
+					stream_info->stream_handle;
+				axi_data->wm_image_size[j] =
+					msm_isp_axi_get_plane_size(
+						stream_info, i);
+				axi_data->num_used_wm++;
+				break;
+			}
+		}
+		stream_info->wm[i] = j;
+	}
+}
+
+void msm_isp_axi_free_wm(struct msm_vfe_axi_shared_data *axi_data,
+	struct msm_vfe_axi_stream *stream_info)
+{
+	int i;
+
+	for (i = 0; i < stream_info->num_planes; i++) {
+		axi_data->free_wm[stream_info->wm[i]] = 0;
+		axi_data->num_used_wm--;
+	}
+	if (stream_info->stream_src <= IDEAL_RAW)
+		axi_data->num_pix_stream++;
+	else if (stream_info->stream_src < VFE_AXI_SRC_MAX)
+		axi_data->num_rdi_stream++;
+}
+
+void msm_isp_axi_reserve_comp_mask(
+	struct msm_vfe_axi_shared_data *axi_data,
+	struct msm_vfe_axi_stream *stream_info)
+{
+	int i;
+	uint8_t comp_mask = 0;
+
+	for (i = 0; i < stream_info->num_planes; i++)
+		comp_mask |= 1 << stream_info->wm[i];
+
+	for (i = 0; i < axi_data->hw_info->num_comp_mask; i++) {
+		if (!axi_data->composite_info[i].stream_handle) {
+			axi_data->composite_info[i].stream_handle =
+				stream_info->stream_handle;
+			axi_data->composite_info[i].
+				stream_composite_mask = comp_mask;
+			axi_data->num_used_composite_mask++;
+			break;
+		}
+	}
+	stream_info->comp_mask_index = i;
+}
+
+static void msm_isp_axi_free_comp_mask(struct msm_vfe_axi_shared_data *axi_data,
+	struct msm_vfe_axi_stream *stream_info)
+{
+	axi_data->composite_info[stream_info->comp_mask_index].
+		stream_composite_mask = 0;
+	axi_data->composite_info[stream_info->comp_mask_index].
+		stream_handle = 0;
+	axi_data->num_used_composite_mask--;
+}
+
+static int msm_isp_axi_get_bufq_handles(
+		struct vfe_device *vfe_dev,
+		struct msm_vfe_axi_stream *stream_info)
+{
+	int rc = 0;
+
+	if (stream_info->stream_id & ISP_SCRATCH_BUF_BIT) {
+		stream_info->bufq_handle =
+			vfe_dev->buf_mgr->ops->get_bufq_handle(
+		vfe_dev->buf_mgr, stream_info->session_id,
+		stream_info->stream_id & ~ISP_SCRATCH_BUF_BIT);
+		if (stream_info->bufq_handle == 0) {
+			pr_err("%s: Stream 0x%x has no valid buffer queue\n",
+				__func__, (unsigned int)stream_info->stream_id);
+			rc = -EINVAL;
+			return rc;
+		}
+
+		stream_info->bufq_scratch_handle =
+			vfe_dev->buf_mgr->ops->get_bufq_handle(
+		vfe_dev->buf_mgr, stream_info->session_id,
+		stream_info->stream_id);
+		if (stream_info->bufq_scratch_handle == 0) {
+			pr_err("%s: Stream 0x%x has no valid buffer queue\n",
+				__func__, (unsigned int)stream_info->stream_id);
+			rc = -EINVAL;
+			return rc;
+		}
+	} else {
+		stream_info->bufq_handle =
+			vfe_dev->buf_mgr->ops->get_bufq_handle(
+		vfe_dev->buf_mgr, stream_info->session_id,
+		stream_info->stream_id);
+		if (stream_info->bufq_handle == 0) {
+			pr_err("%s: Stream 0x%x has no valid buffer queue\n",
+				__func__, (unsigned int)stream_info->stream_id);
+			rc = -EINVAL;
+			return rc;
+		}
+	}
+	return rc;
+}
+
+int msm_isp_axi_check_stream_state(
+	struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
+{
+	int rc = 0, i;
+	unsigned long flags;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+	struct msm_vfe_axi_stream *stream_info;
+	enum msm_vfe_axi_state valid_state =
+		(stream_cfg_cmd->cmd == START_STREAM) ? INACTIVE : ACTIVE;
+
+	if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
+		return -EINVAL;
+
+	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+		if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
+			MAX_NUM_STREAM) {
+			return -EINVAL;
+		}
+		stream_info = &axi_data->stream_info[
+			HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+		spin_lock_irqsave(&stream_info->lock, flags);
+		if (stream_info->state != valid_state) {
+			if ((stream_info->state == PAUSING ||
+				stream_info->state == PAUSED ||
+				stream_info->state == RESUME_PENDING ||
+				stream_info->state == RESUMING) &&
+				(stream_cfg_cmd->cmd == STOP_STREAM ||
+				stream_cfg_cmd->cmd == STOP_IMMEDIATELY)) {
+				stream_info->state = ACTIVE;
+			} else {
+				pr_err("%s: Invalid stream state: %d\n",
+					__func__, stream_info->state);
+				spin_unlock_irqrestore(
+					&stream_info->lock, flags);
+				rc = -EINVAL;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(&stream_info->lock, flags);
+
+		if (stream_cfg_cmd->cmd == START_STREAM) {
+			rc = msm_isp_axi_get_bufq_handles(vfe_dev, stream_info);
+			if (rc)
+				break;
+		}
+	}
+	return rc;
+}
+
+void msm_isp_update_framedrop_reg(struct vfe_device *vfe_dev,
+	uint8_t input_src)
+{
+	int i;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+	struct msm_vfe_axi_stream *stream_info;
+
+	for (i = 0; i < MAX_NUM_STREAM; i++) {
+		stream_info = &axi_data->stream_info[i];
+		if (stream_info->state != ACTIVE)
+			continue;
+
+		if (stream_info->runtime_framedrop_update) {
+			stream_info->runtime_init_frame_drop--;
+			if (stream_info->runtime_init_frame_drop == 0) {
+				stream_info->runtime_framedrop_update = 0;
+				vfe_dev->hw_info->vfe_ops.axi_ops.
+				cfg_framedrop(vfe_dev, stream_info);
+			}
+		}
+		if (stream_info->stream_type == BURST_STREAM &&
+			((1 << SRC_TO_INTF(stream_info->stream_src)) &
+			input_src)) {
+			if (stream_info->runtime_framedrop_update_burst) {
+				stream_info->runtime_framedrop_update_burst = 0;
+				stream_info->runtime_burst_frame_count =
+				    stream_info->runtime_init_frame_drop +
+				    (stream_info->runtime_num_burst_capture -
+					1) *
+				    (stream_info->framedrop_period + 1) + 1;
+				vfe_dev->hw_info->vfe_ops.axi_ops.
+					cfg_framedrop(vfe_dev, stream_info);
+			} else {
+				stream_info->runtime_burst_frame_count--;
+				if (stream_info->
+				    runtime_burst_frame_count == 0) {
+					vfe_dev->hw_info->vfe_ops.axi_ops.
+					cfg_framedrop(vfe_dev, stream_info);
+				}
+			}
+		}
+	}
+}
+
+void msm_isp_reset_framedrop(struct vfe_device *vfe_dev,
+			struct msm_vfe_axi_stream *stream_info)
+{
+	stream_info->runtime_init_frame_drop = stream_info->init_frame_drop;
+	stream_info->runtime_burst_frame_count =
+		stream_info->burst_frame_count;
+	stream_info->runtime_num_burst_capture =
+		stream_info->num_burst_capture;
+	stream_info->runtime_framedrop_update = stream_info->framedrop_update;
+	vfe_dev->hw_info->vfe_ops.axi_ops.cfg_framedrop(vfe_dev, stream_info);
+}
+
+void msm_isp_notify(struct vfe_device *vfe_dev, uint32_t event_type,
+	enum msm_vfe_input_src frame_src, struct msm_isp_timestamp *ts)
+{
+	struct msm_isp32_event_data event_data;
+
+	memset(&event_data, 0, sizeof(event_data));
+	switch (event_type) {
+	case ISP_EVENT_SOF:
+		if ((frame_src == VFE_PIX_0) && (vfe_dev->isp_sof_debug < 5)) {
+			pr_err("%s: PIX0 frame id: %u\n", __func__,
+				vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
+			vfe_dev->isp_sof_debug++;
+		}
+		vfe_dev->axi_data.src_info[frame_src].frame_id++;
+		if (vfe_dev->axi_data.src_info[frame_src].frame_id == 0)
+			vfe_dev->axi_data.src_info[frame_src].frame_id = 1;
+		ISP_DBG("%s: frame_src %d frame id: %u\n", __func__,
+			frame_src,
+			vfe_dev->axi_data.src_info[frame_src].frame_id);
+		break;
+	case ISP_EVENT_REG_UPDATE:
+		vfe_dev->axi_data.src_info[frame_src].last_updt_frm_id = 0;
+		break;
+	default:
+		break;
+	}
+
+	event_data.input_intf = frame_src;
+	event_data.frame_id = vfe_dev->axi_data.src_info[frame_src].frame_id;
+	event_data.timestamp = ts->event_time;
+	event_data.mono_timestamp = ts->buf_time;
+	msm_isp_send_event(vfe_dev, event_type | frame_src, &event_data);
+}
+
+void msm_isp_calculate_framedrop(
+	struct msm_vfe_axi_shared_data *axi_data,
+	struct msm_vfe32_axi_stream_request_cmd *stream_cfg_cmd)
+{
+	uint32_t framedrop_period = 0;
+	struct msm_vfe_axi_stream *stream_info = NULL;
+
+	if (HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle) < MAX_NUM_STREAM) {
+		stream_info = &axi_data->stream_info[
+			HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
+	} else {
+		pr_err("%s: Invalid stream handle", __func__);
+		return;
+	}
+	if (!stream_info) {
+		pr_err("%s: Stream info is NULL\n", __func__);
+		return;
+	}
+
+	framedrop_period = msm_isp_get_framedrop_period(
+	   stream_cfg_cmd->frame_skip_pattern);
+	stream_info->frame_skip_pattern =
+		stream_cfg_cmd->frame_skip_pattern;
+	if (stream_cfg_cmd->frame_skip_pattern == SKIP_ALL)
+		stream_info->framedrop_pattern = 0x0;
+	else
+		stream_info->framedrop_pattern = 0x1;
+	stream_info->framedrop_period = framedrop_period - 1;
+
+	if (stream_cfg_cmd->init_frame_drop < framedrop_period) {
+		stream_info->framedrop_pattern <<=
+			stream_cfg_cmd->init_frame_drop;
+		stream_info->init_frame_drop = 0;
+		stream_info->framedrop_update = 0;
+	} else {
+		stream_info->init_frame_drop = stream_cfg_cmd->init_frame_drop;
+		stream_info->framedrop_update = 1;
+	}
+
+	if (stream_cfg_cmd->burst_count > 0) {
+		stream_info->stream_type = BURST_STREAM;
+		stream_info->num_burst_capture =
+			stream_cfg_cmd->burst_count;
+		stream_info->burst_frame_count =
+		stream_cfg_cmd->init_frame_drop +
+			(stream_cfg_cmd->burst_count - 1) *
+			framedrop_period + 1;
+	} else {
+		stream_info->stream_type = CONTINUOUS_STREAM;
+		stream_info->burst_frame_count = 0;
+		stream_info->num_burst_capture = 0;
+	}
+}
+
+static void msm_isp_calculate_bandwidth(
+	struct msm_vfe_axi_shared_data *axi_data,
+	struct msm_vfe_axi_stream *stream_info)
+{
+	int bpp = 0;
+
+	if (stream_info->stream_src < RDI_INTF_0) {
+		stream_info->bandwidth =
+			(axi_data->src_info[VFE_PIX_0].pixel_clock /
+			axi_data->src_info[VFE_PIX_0].width) *
+			stream_info->max_width;
+		stream_info->bandwidth = (unsigned long)stream_info->bandwidth *
+			stream_info->format_factor / ISP_Q2;
+	} else {
+		int rdi = SRC_TO_INTF(stream_info->stream_src);
+
+		bpp = msm_isp_get_bit_per_pixel(stream_info->output_format);
+		if (rdi < VFE_SRC_MAX)
+			stream_info->bandwidth =
+				(axi_data->src_info[rdi].pixel_clock / 8) * bpp;
+		else
+			pr_err("%s: Invalid rdi interface\n", __func__);
+	}
+}
+
+#ifdef CONFIG_MSM_AVTIMER
+void msm_isp_start_avtimer(void)
+{
+	avcs_core_open();
+	avcs_core_disable_power_collapse(1);
+}
+
+static inline void msm_isp_get_avtimer_ts(
+		struct msm_isp_timestamp *time_stamp)
+{
+	int rc = 0;
+	uint32_t avtimer_usec = 0;
+	uint64_t avtimer_tick = 0;
+
+	rc = avcs_core_query_timer(&avtimer_tick);
+	if (rc < 0) {
+		pr_err("%s: Error: Invalid AVTimer Tick, rc=%d\n",
+			   __func__, rc);
+		/* In case of error, return a zero AVTimer tick value */
+		time_stamp->vt_time.tv_sec = 0;
+		time_stamp->vt_time.tv_usec = 0;
+	} else {
+		avtimer_usec = do_div(avtimer_tick, USEC_PER_SEC);
+		time_stamp->vt_time.tv_sec = (uint32_t)(avtimer_tick);
+		time_stamp->vt_time.tv_usec = avtimer_usec;
+		pr_debug("%s: AVTimer TS = %u:%u\n", __func__,
+			(uint32_t)(avtimer_tick), avtimer_usec);
+	}
+}
+#else
+void msm_isp_start_avtimer(void)
+{
+	pr_err("AV Timer is not supported\n");
+}
+
+inline void msm_isp_get_avtimer_ts(
+		struct msm_isp_timestamp *time_stamp)
+{
+	pr_err_ratelimited("%s: Error: AVTimer driver not available\n",
+		__func__);
+	time_stamp->vt_time.tv_sec = 0;
+	time_stamp->vt_time.tv_usec = 0;
+}
+#endif
+
+int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg)
+{
+	int rc = 0, i;
+	uint32_t io_format = 0;
+	struct msm_vfe32_axi_stream_request_cmd *stream_cfg_cmd = arg;
+	struct msm_vfe_axi_stream *stream_info;
+
+	rc = msm_isp_axi_create_stream(
+		&vfe_dev->axi_data, stream_cfg_cmd);
+	if (rc) {
+		pr_err("%s: create stream failed\n", __func__);
+		return rc;
+	}
+
+	rc = msm_isp_validate_axi_request(
+		&vfe_dev->axi_data, stream_cfg_cmd);
+	if (rc) {
+		pr_err("%s: Request validation failed\n", __func__);
+		if (HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle) <
+			MAX_NUM_STREAM)
+			msm_isp_axi_destroy_stream(&vfe_dev->axi_data,
+			      HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle));
+		return rc;
+	}
+	stream_info = &vfe_dev->axi_data.
+		stream_info[HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
+	if (!stream_info) {
+		pr_err("%s: can not find stream handle %x\n", __func__,
+			stream_cfg_cmd->axi_stream_handle);
+		return -EINVAL;
+	}
+
+	stream_info->memory_input = stream_cfg_cmd->memory_input;
+
+	msm_isp_axi_reserve_wm(&vfe_dev->axi_data, stream_info);
+
+	if (stream_info->stream_src < RDI_INTF_0) {
+		io_format = vfe_dev->axi_data.src_info[VFE_PIX_0].input_format;
+		if (stream_info->stream_src == CAMIF_RAW ||
+			stream_info->stream_src == IDEAL_RAW) {
+			if (stream_info->stream_src == CAMIF_RAW &&
+				io_format != stream_info->output_format)
+				pr_debug("%s: Overriding input format\n",
+					__func__);
+
+			io_format = stream_info->output_format;
+		}
+		rc = vfe_dev->hw_info->vfe_ops.axi_ops.cfg_io_format(
+			vfe_dev, stream_info->stream_src, io_format);
+		if (rc) {
+			pr_err("%s: cfg io format failed\n", __func__);
+			msm_isp_axi_free_wm(&vfe_dev->axi_data,
+				stream_info);
+			msm_isp_axi_destroy_stream(&vfe_dev->axi_data,
+				HANDLE_TO_IDX(
+				stream_cfg_cmd->axi_stream_handle));
+			return rc;
+		}
+	}
+
+	msm_isp_calculate_framedrop(&vfe_dev->axi_data, stream_cfg_cmd);
+	if (stream_cfg_cmd->vt_enable && !vfe_dev->vt_enable) {
+		vfe_dev->vt_enable = stream_cfg_cmd->vt_enable;
+		msm_isp_start_avtimer();
+	}
+	if (stream_info->num_planes > 1) {
+		msm_isp_axi_reserve_comp_mask(
+			&vfe_dev->axi_data, stream_info);
+		vfe_dev->hw_info->vfe_ops.axi_ops.
+		cfg_comp_mask(vfe_dev, stream_info);
+	} else {
+		vfe_dev->hw_info->vfe_ops.axi_ops.
+			cfg_wm_irq_mask(vfe_dev, stream_info);
+	}
+
+	for (i = 0; i < stream_info->num_planes; i++) {
+		vfe_dev->hw_info->vfe_ops.axi_ops.
+			cfg_wm_reg(vfe_dev, stream_info, i);
+
+		vfe_dev->hw_info->vfe_ops.axi_ops.
+			cfg_wm_xbar_reg(vfe_dev, stream_info, i);
+	}
+	return rc;
+}
+
+int msm_isp_release_axi_stream(struct vfe_device *vfe_dev, void *arg)
+{
+	int rc = 0, i;
+	struct msm_vfe_axi_stream_release_cmd *stream_release_cmd = arg;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+	struct msm_vfe_axi_stream *stream_info;
+	struct msm_vfe_axi_stream_cfg_cmd stream_cfg;
+
+
+	if (HANDLE_TO_IDX(stream_release_cmd->stream_handle) >=
+		MAX_NUM_STREAM) {
+		pr_err("%s: Invalid stream handle\n", __func__);
+		return -EINVAL;
+	}
+	stream_info = &axi_data->stream_info[
+		HANDLE_TO_IDX(stream_release_cmd->stream_handle)];
+	if (stream_info->state == AVAILABLE) {
+		pr_err("%s: Stream already released\n", __func__);
+		return -EINVAL;
+	} else if (stream_info->state != INACTIVE) {
+		stream_cfg.cmd = STOP_STREAM;
+		stream_cfg.num_streams = 1;
+		stream_cfg.stream_handle[0] = stream_release_cmd->stream_handle;
+		msm_isp_cfg_axi_stream(vfe_dev, (void *) &stream_cfg);
+	}
+
+	for (i = 0; i < stream_info->num_planes; i++) {
+		vfe_dev->hw_info->vfe_ops.axi_ops.
+			clear_wm_reg(vfe_dev, stream_info, i);
+
+		vfe_dev->hw_info->vfe_ops.axi_ops.
+		clear_wm_xbar_reg(vfe_dev, stream_info, i);
+	}
+
+	if (stream_info->num_planes > 1) {
+		vfe_dev->hw_info->vfe_ops.axi_ops.
+			clear_comp_mask(vfe_dev, stream_info);
+		msm_isp_axi_free_comp_mask(&vfe_dev->axi_data, stream_info);
+	} else {
+		vfe_dev->hw_info->vfe_ops.axi_ops.
+		clear_wm_irq_mask(vfe_dev, stream_info);
+	}
+
+	vfe_dev->hw_info->vfe_ops.axi_ops.clear_framedrop(vfe_dev, stream_info);
+	msm_isp_axi_free_wm(axi_data, stream_info);
+
+	msm_isp_axi_destroy_stream(&vfe_dev->axi_data,
+		HANDLE_TO_IDX(stream_release_cmd->stream_handle));
+
+	return rc;
+}
+
+static void msm_isp_axi_stream_enable_cfg(
+	struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_stream *stream_info)
+{
+	int i;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+
+	if (stream_info->state == INACTIVE)
+		return;
+	for (i = 0; i < stream_info->num_planes; i++) {
+		if (stream_info->state == START_PENDING ||
+			stream_info->state == RESUME_PENDING) {
+			vfe_dev->hw_info->vfe_ops.axi_ops.
+				enable_wm(vfe_dev, stream_info->wm[i], 1);
+		} else {
+			vfe_dev->hw_info->vfe_ops.axi_ops.
+				enable_wm(vfe_dev, stream_info->wm[i], 0);
+			/* Issue a reg update for the raw snapshot case
+			 * since we don't have a reg update ack
+			 */
+			if (stream_info->stream_src == CAMIF_RAW ||
+				stream_info->stream_src == IDEAL_RAW) {
+				vfe_dev->hw_info->vfe_ops.core_ops.
+				reg_update(vfe_dev, (1 << VFE_PIX_0));
+			}
+		}
+	}
+
+	if (stream_info->state == START_PENDING)
+		axi_data->num_active_stream++;
+	else if (stream_info->state == STOP_PENDING)
+		axi_data->num_active_stream--;
+}
+
+void msm_isp_axi_stream_update(struct vfe_device *vfe_dev, uint8_t input_src)
+{
+	int i;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+
+	for (i = 0; i < MAX_NUM_STREAM; i++) {
+		if (axi_data->stream_info[i].state == START_PENDING ||
+				axi_data->stream_info[i].state ==
+					STOP_PENDING) {
+			if ((1 <<
+				SRC_TO_INTF(axi_data->stream_info[i].
+				stream_src)) &
+				input_src) {
+				msm_isp_axi_stream_enable_cfg(
+				   vfe_dev, &axi_data->stream_info[i]);
+				axi_data->stream_info[i].state =
+					axi_data->stream_info[i].state ==
+					START_PENDING ? STARTING : STOPPING;
+			}
+		} else if (axi_data->stream_info[i].state == STARTING ||
+			axi_data->stream_info[i].state == STOPPING) {
+			if ((1 <<
+				SRC_TO_INTF(axi_data->stream_info[i].
+				stream_src)) &
+				input_src) {
+				axi_data->stream_info[i].state =
+				axi_data->stream_info[i].state == STARTING ?
+					ACTIVE : INACTIVE;
+				vfe_dev->axi_data.stream_update--;
+			}
+		}
+	}
+
+	if (vfe_dev->axi_data.pipeline_update == DISABLE_CAMIF ||
+		(vfe_dev->axi_data.pipeline_update ==
+		DISABLE_CAMIF_IMMEDIATELY)) {
+		vfe_dev->hw_info->vfe_ops.stats_ops.
+			enable_module(vfe_dev, 0xFF, 0);
+		vfe_dev->axi_data.pipeline_update = NO_UPDATE;
+	}
+
+	if (vfe_dev->axi_data.stream_update == 0)
+		complete(&vfe_dev->stream_config_complete);
+}
+
+static void msm_isp_reload_ping_pong_offset(struct vfe_device *vfe_dev,
+		struct msm_vfe_axi_stream *stream_info)
+{
+	int i, j;
+	uint32_t flag;
+	struct msm_isp_buffer *buf;
+
+	for (i = 0; i < 2; i++) {
+		buf = stream_info->buf[i];
+		if (!buf)
+			continue;
+		flag = i ? VFE_PONG_FLAG : VFE_PING_FLAG;
+		for (j = 0; j < stream_info->num_planes; j++) {
+			vfe_dev->hw_info->vfe_ops.axi_ops.update_ping_pong_addr(
+				vfe_dev, stream_info->wm[j], flag,
+				buf->mapped_info[j].paddr +
+				stream_info->plane_cfg[j].plane_addr_offset);
+		}
+	}
+}
+
+void msm_isp_axi_cfg_update(struct vfe_device *vfe_dev)
+{
+	int i, j;
+	uint32_t update_state;
+	unsigned long flags;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+	struct msm_vfe_axi_stream *stream_info;
+
+	for (i = 0; i < MAX_NUM_STREAM; i++) {
+		stream_info = &axi_data->stream_info[i];
+		if (stream_info->stream_type == BURST_STREAM ||
+			stream_info->state == AVAILABLE)
+			continue;
+		spin_lock_irqsave(&stream_info->lock, flags);
+		if (stream_info->state == PAUSING) {
+			/*AXI Stopped, apply update*/
+			stream_info->state = PAUSED;
+			msm_isp_reload_ping_pong_offset(vfe_dev, stream_info);
+			for (j = 0; j < stream_info->num_planes; j++)
+				vfe_dev->hw_info->vfe_ops.axi_ops.
+					cfg_wm_reg(vfe_dev, stream_info, j);
+			/*Resume AXI*/
+			stream_info->state = RESUME_PENDING;
+			msm_isp_axi_stream_enable_cfg(
+				vfe_dev, &axi_data->stream_info[i]);
+			stream_info->state = RESUMING;
+		} else if (stream_info->state == RESUMING) {
+			stream_info->runtime_output_format =
+				stream_info->output_format;
+			stream_info->state = ACTIVE;
+		}
+		spin_unlock_irqrestore(&stream_info->lock, flags);
+	}
+
+	update_state = atomic_dec_return(&axi_data->axi_cfg_update);
+}
+
+static void msm_isp_cfg_pong_address(struct vfe_device *vfe_dev,
+		struct msm_vfe_axi_stream *stream_info)
+{
+	int i;
+	struct msm_isp_buffer *buf = stream_info->buf[0];
+
+	for (i = 0; i < stream_info->num_planes; i++)
+		vfe_dev->hw_info->vfe_ops.axi_ops.update_ping_pong_addr(
+			vfe_dev, stream_info->wm[i],
+			VFE_PONG_FLAG, buf->mapped_info[i].paddr +
+			stream_info->plane_cfg[i].plane_addr_offset);
+	stream_info->buf[1] = buf;
+}
+
+static void msm_isp_get_done_buf(struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_stream *stream_info, uint32_t pingpong_status,
+	struct msm_isp_buffer **done_buf)
+{
+	uint32_t pingpong_bit = 0, i;
+
+	pingpong_bit = (~(pingpong_status >> stream_info->wm[0]) & 0x1);
+	for (i = 0; i < stream_info->num_planes; i++) {
+		if (pingpong_bit !=
+			(~(pingpong_status >> stream_info->wm[i]) & 0x1)) {
+			pr_debug("%s: Write master ping pong mismatch. Status: 0x%x\n",
+				__func__, pingpong_status);
+		}
+	}
+
+	*done_buf = stream_info->buf[pingpong_bit];
+
+	if (stream_info->controllable_output) {
+		stream_info->buf[pingpong_bit] = NULL;
+		stream_info->request_frm_num--;
+	}
+}
+
+static int msm_isp_cfg_ping_pong_address(struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_stream *stream_info, uint32_t pingpong_status,
+	uint32_t pingpong_bit)
+{
+	int i, rc = -1;
+	struct msm_isp_buffer *buf = NULL;
+	uint32_t bufq_handle = 0, frame_id = 0;
+	uint32_t stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
+
+	if (stream_idx >= MAX_NUM_STREAM) {
+		pr_err("%s: Invalid stream_idx", __func__);
+		return rc;
+	}
+
+	if (stream_info->controllable_output && !stream_info->request_frm_num)
+		return 0;
+
+	frame_id = vfe_dev->axi_data.
+		src_info[SRC_TO_INTF(stream_info->stream_src)].frame_id;
+	if (frame_id && stream_info->frame_id &&
+		stream_info->frame_id == frame_id) {
+		/* This could happen if reg update ack is delayed */
+		pr_err("%s: Duplicate frame streamId:%d stream_fid:%d frame_id:%d\n",
+			__func__, stream_info->stream_id, stream_info->frame_id,
+			frame_id);
+		vfe_dev->error_info.stream_framedrop_count[stream_idx]++;
+		return rc;
+	}
+
+	bufq_handle = stream_info->bufq_handle;
+	if (SRC_TO_INTF(stream_info->stream_src) < VFE_SRC_MAX)
+		rc = vfe_dev->buf_mgr->ops->get_buf(vfe_dev->buf_mgr,
+				vfe_dev->pdev->id, bufq_handle,
+				MSM_ISP_INVALID_BUF_INDEX, &buf);
+	else {
+		pr_err("%s: Invalid stream index\n", __func__);
+		rc = -1;
+	}
+
+	if (rc < 0) {
+		vfe_dev->error_info.stream_framedrop_count[stream_idx]++;
+		return rc;
+	}
+
+	if (buf->num_planes != stream_info->num_planes) {
+		pr_err("%s: Invalid buffer\n", __func__);
+		rc = -EINVAL;
+		goto buf_error;
+	}
+
+	for (i = 0; i < stream_info->num_planes; i++)
+		vfe_dev->hw_info->vfe_ops.axi_ops.update_ping_pong_addr(
+			vfe_dev, stream_info->wm[i],
+			pingpong_status, buf->mapped_info[i].paddr +
+			stream_info->plane_cfg[i].plane_addr_offset);
+
+	stream_info->buf[pingpong_bit] = buf;
+
+	return 0;
+buf_error:
+	vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
+		buf->bufq_handle, buf->buf_idx);
+	return rc;
+}
+
+static void msm_isp_process_done_buf(struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_stream *stream_info, struct msm_isp_buffer *buf,
+	struct msm_isp_timestamp *ts)
+{
+	int rc;
+	struct msm_isp32_event_data buf_event;
+	struct timeval *time_stamp;
+	uint32_t stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
+	uint32_t frame_id;
+	uint32_t buf_src;
+
+	memset(&buf_event, 0, sizeof(buf_event));
+
+	if (stream_idx >= MAX_NUM_STREAM) {
+		pr_err("%s: Invalid stream_idx", __func__);
+		return;
+	}
+
+	if (SRC_TO_INTF(stream_info->stream_src) < VFE_SRC_MAX)
+		frame_id = vfe_dev->axi_data.
+			src_info[SRC_TO_INTF(stream_info->stream_src)].frame_id;
+	else {
+		pr_err("%s: Invalid stream index, put buf back to vb2 queue\n",
+			__func__);
+		vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
+			buf->bufq_handle, buf->buf_idx);
+		return;
+	}
+
+	if (buf && ts) {
+		if (vfe_dev->vt_enable) {
+			msm_isp_get_avtimer_ts(ts);
+			time_stamp = &ts->vt_time;
+		} else
+			time_stamp = &ts->buf_time;
+
+		rc = vfe_dev->buf_mgr->ops->get_buf_src(vfe_dev->buf_mgr,
+						buf->bufq_handle, &buf_src);
+		if (stream_info->buf_divert && rc == 0 &&
+				buf_src != MSM_ISP_BUFFER_SRC_SCRATCH) {
+			rc = vfe_dev->buf_mgr->ops->buf_divert(vfe_dev->buf_mgr,
+				buf->bufq_handle, buf->buf_idx,
+				time_stamp, frame_id);
+			/* Buf divert return value represents whether the buf
+			 * can be diverted. A positive return value means
+			 * other ISP hardware is still processing the frame.
+			 */
+			if (rc == 0) {
+				buf_event.input_intf =
+					SRC_TO_INTF(stream_info->stream_src);
+				buf_event.frame_id = frame_id;
+				buf_event.timestamp = *time_stamp;
+				buf_event.u.buf_done.session_id =
+					stream_info->session_id;
+				buf_event.u.buf_done.stream_id =
+					stream_info->stream_id;
+				buf_event.u.buf_done.handle =
+					stream_info->bufq_handle;
+				buf_event.u.buf_done.buf_idx = buf->buf_idx;
+				buf_event.u.buf_done.output_format =
+					stream_info->runtime_output_format;
+				msm_isp_send_event(vfe_dev,
+					ISP_EVENT_BUF_DIVERT + stream_idx,
+					&buf_event);
+			}
+		} else {
+			buf_event.input_intf =
+				SRC_TO_INTF(stream_info->stream_src);
+			buf_event.frame_id = frame_id;
+			buf_event.timestamp = ts->buf_time;
+			buf_event.u.buf_done.session_id =
+				stream_info->session_id;
+			buf_event.u.buf_done.stream_id =
+				stream_info->stream_id;
+			buf_event.u.buf_done.output_format =
+				stream_info->runtime_output_format;
+			msm_isp_send_event(vfe_dev,
+				ISP_EVENT_BUF_DONE, &buf_event);
+			vfe_dev->buf_mgr->ops->buf_done(vfe_dev->buf_mgr,
+				buf->bufq_handle, buf->buf_idx,
+				time_stamp, frame_id,
+				stream_info->runtime_output_format);
+		}
+	}
+}
+
+static enum msm_isp_camif_update_state
+	msm_isp_get_camif_update_state(struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
+{
+	int i;
+	struct msm_vfe_axi_stream *stream_info;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+	uint8_t pix_stream_cnt = 0, cur_pix_stream_cnt;
+
+	cur_pix_stream_cnt =
+		axi_data->src_info[VFE_PIX_0].pix_stream_count +
+		axi_data->src_info[VFE_PIX_0].raw_stream_count;
+	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+		stream_info =
+			&axi_data->stream_info[
+			HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+		if (stream_info->stream_src  < RDI_INTF_0)
+			pix_stream_cnt++;
+	}
+
+	if ((pix_stream_cnt) &&
+	    (axi_data->src_info[VFE_PIX_0].input_mux != EXTERNAL_READ)) {
+
+		if (cur_pix_stream_cnt == 0 && pix_stream_cnt &&
+			stream_cfg_cmd->cmd == START_STREAM)
+			return ENABLE_CAMIF;
+		else if (cur_pix_stream_cnt &&
+			(cur_pix_stream_cnt - pix_stream_cnt) == 0 &&
+			stream_cfg_cmd->cmd == STOP_STREAM)
+			return DISABLE_CAMIF;
+		else if (cur_pix_stream_cnt &&
+			(cur_pix_stream_cnt - pix_stream_cnt) == 0 &&
+			stream_cfg_cmd->cmd == STOP_IMMEDIATELY)
+			return DISABLE_CAMIF_IMMEDIATELY;
+	}
+
+	return NO_UPDATE;
+}
+
+static void msm_isp_update_camif_output_count(
+	struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
+{
+	int i;
+	struct msm_vfe_axi_stream *stream_info;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+
+	if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
+		return;
+
+	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+		if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
+			MAX_NUM_STREAM) {
+			return;
+		}
+		stream_info =
+			&axi_data->stream_info[
+			HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+		if (stream_info->stream_src >= RDI_INTF_0)
+			continue;
+		if (stream_info->stream_src == PIX_ENCODER ||
+			stream_info->stream_src == PIX_VIEWFINDER ||
+			stream_info->stream_src == PIX_VIDEO ||
+			stream_info->stream_src == IDEAL_RAW) {
+			if (stream_cfg_cmd->cmd == START_STREAM)
+				vfe_dev->axi_data.src_info[VFE_PIX_0].
+					pix_stream_count++;
+			else
+				vfe_dev->axi_data.src_info[VFE_PIX_0].
+					pix_stream_count--;
+		} else if (stream_info->stream_src == CAMIF_RAW) {
+			if (stream_cfg_cmd->cmd == START_STREAM)
+				vfe_dev->axi_data.src_info[VFE_PIX_0].
+					raw_stream_count++;
+			else
+				vfe_dev->axi_data.src_info[VFE_PIX_0].
+					raw_stream_count--;
+		}
+	}
+}
+
+
+static void msm_isp_update_rdi_output_count(
+	  struct vfe_device *vfe_dev,
+	  struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
+{
+	int i;
+	struct msm_vfe_axi_stream *stream_info;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+
+	if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
+		return;
+
+	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+		if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])
+			> MAX_NUM_STREAM)
+			return;
+		stream_info =
+			&axi_data->stream_info[
+			HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+		if (stream_info->stream_src < RDI_INTF_0)
+			continue;
+		if (stream_info->stream_src == RDI_INTF_0) {
+			if (stream_cfg_cmd->cmd == START_STREAM)
+				vfe_dev->axi_data.src_info[VFE_RAW_0].
+					raw_stream_count++;
+			else
+				vfe_dev->axi_data.src_info[VFE_RAW_0].
+					raw_stream_count--;
+		} else if (stream_info->stream_src == RDI_INTF_1) {
+			if (stream_cfg_cmd->cmd == START_STREAM)
+				vfe_dev->axi_data.src_info[VFE_RAW_1].
+					raw_stream_count++;
+			else
+				vfe_dev->axi_data.src_info[VFE_RAW_1].
+					raw_stream_count--;
+		} else if (stream_info->stream_src == RDI_INTF_2) {
+			if (stream_cfg_cmd->cmd == START_STREAM)
+				vfe_dev->axi_data.src_info[VFE_RAW_2].
+					raw_stream_count++;
+			else
+				vfe_dev->axi_data.src_info[VFE_RAW_2].
+					raw_stream_count--;
+		}
+
+	}
+}
+
+static uint8_t msm_isp_get_curr_stream_cnt(
+	  struct vfe_device *vfe_dev)
+{
+	uint8_t curr_stream_cnt = 0;
+
+	curr_stream_cnt = vfe_dev->axi_data.src_info[VFE_RAW_0].
+					raw_stream_count +
+					vfe_dev->axi_data.src_info[VFE_RAW_1].
+					raw_stream_count +
+					vfe_dev->axi_data.src_info[VFE_RAW_2].
+					raw_stream_count +
+					vfe_dev->axi_data.src_info[VFE_PIX_0].
+					pix_stream_count +
+					vfe_dev->axi_data.src_info[VFE_PIX_0].
+					raw_stream_count;
+	return curr_stream_cnt;
+}
+
+/*Factor in Q2 format*/
+#define ISP_DEFAULT_FORMAT_FACTOR 6
+#define ISP_BUS_UTILIZATION_FACTOR 1536 /* 1.5 in Q10 format */
+static int msm_isp_update_stream_bandwidth(struct vfe_device *vfe_dev)
+{
+	int i, rc = 0;
+	struct msm_vfe_axi_stream *stream_info;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+	uint64_t total_pix_bandwidth = 0, total_rdi_bandwidth = 0;
+	uint32_t num_pix_streams = 0;
+	uint32_t num_rdi_streams = 0;
+	uint32_t total_streams   = 0;
+	uint64_t total_bandwidth = 0;
+	uint32_t bus_util_factor = ISP_BUS_UTILIZATION_FACTOR;
+
+	for (i = 0; i < MAX_NUM_STREAM; i++) {
+		stream_info = &axi_data->stream_info[i];
+		if (stream_info->state == ACTIVE ||
+			stream_info->state == START_PENDING) {
+			if (stream_info->stream_src < RDI_INTF_0) {
+				total_pix_bandwidth += stream_info->bandwidth;
+				num_pix_streams++;
+			} else {
+				total_rdi_bandwidth += stream_info->bandwidth;
+				num_rdi_streams++;
+			}
+		}
+	}
+	total_bandwidth = total_pix_bandwidth + total_rdi_bandwidth;
+	total_streams = num_pix_streams + num_rdi_streams;
+	if (vfe_dev->bus_util_factor)
+		bus_util_factor = vfe_dev->bus_util_factor;
+	ISP_DBG("%s: bus_util_factor = %u\n", __func__, bus_util_factor);
+
+	if (total_streams == 1)
+		rc = msm_isp_update_bandwidth(ISP_VFE0 + vfe_dev->pdev->id,
+		total_bandwidth,
+		(total_bandwidth * bus_util_factor / ISP_Q10));
+	else
+		rc = msm_isp_update_bandwidth(ISP_VFE0 + vfe_dev->pdev->id,
+		(total_bandwidth + MSM_ISP_MIN_AB),  (total_bandwidth *
+		bus_util_factor / ISP_Q10 + MSM_ISP_MIN_IB));
+	if (rc < 0)
+		pr_err("%s: update failed\n", __func__);
+
+	return rc;
+}
+
+static int msm_isp_axi_wait_for_cfg_done(struct vfe_device *vfe_dev,
+	enum msm_isp_camif_update_state camif_update)
+{
+	int rc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vfe_dev->shared_data_lock, flags);
+	init_completion(&vfe_dev->stream_config_complete);
+	vfe_dev->axi_data.pipeline_update = camif_update;
+	spin_unlock_irqrestore(&vfe_dev->shared_data_lock, flags);
+	rc = wait_for_completion_timeout(
+		&vfe_dev->stream_config_complete,
+		msecs_to_jiffies(VFE_MAX_CFG_TIMEOUT));
+	if (rc == 0) {
+		pr_err("%s: wait timeout\n", __func__);
+		rc = -1;
+	} else {
+		rc = 0;
+	}
+	return rc;
+}
+
+static int msm_isp_init_stream_ping_pong_reg(
+	struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_stream *stream_info)
+{
+	int rc = 0;
+	/* Set addresses for both the PING & PONG registers */
+	rc = msm_isp_cfg_ping_pong_address(vfe_dev,
+		stream_info, VFE_PING_FLAG, 0);
+	if (rc < 0) {
+		pr_err("%s: No free buffer for ping\n",
+			   __func__);
+		return rc;
+	}
+
+	/* For a burst stream of one capture, only one buffer
+	 * is allocated. Duplicate the ping buffer address to the
+	 * pong buffer to ensure the hardware writes to a valid address.
+	 */
+	if (stream_info->stream_type == BURST_STREAM &&
+		stream_info->runtime_num_burst_capture <= 1) {
+		msm_isp_cfg_pong_address(vfe_dev, stream_info);
+	} else {
+		rc = msm_isp_cfg_ping_pong_address(vfe_dev,
+			stream_info, VFE_PONG_FLAG, 1);
+		if (rc < 0) {
+			pr_err("%s: No free buffer for pong\n",
+				   __func__);
+			return rc;
+		}
+	}
+	return rc;
+}
+
+static void msm_isp_deinit_stream_ping_pong_reg(
+	struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_stream *stream_info)
+{
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		struct msm_isp_buffer *buf;
+
+		buf = stream_info->buf[i];
+		if (buf) {
+			vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
+				buf->bufq_handle, buf->buf_idx);
+		}
+	}
+}
+
+static void msm_isp_get_stream_wm_mask(
+	struct msm_vfe_axi_stream *stream_info,
+	uint32_t *wm_reload_mask)
+{
+	int i;
+
+	for (i = 0; i < stream_info->num_planes; i++)
+		*wm_reload_mask |= (1 << stream_info->wm[i]);
+}
+
+int msm_isp_axi_halt(struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_halt_cmd *halt_cmd)
+{
+	int rc = 0;
+
+	if (halt_cmd->overflow_detected) {
+		/*Store current IRQ mask*/
+		if (vfe_dev->error_info.overflow_recover_irq_mask0 == 0) {
+			vfe_dev->hw_info->vfe_ops.core_ops.get_irq_mask(vfe_dev,
+			&vfe_dev->error_info.overflow_recover_irq_mask0,
+			&vfe_dev->error_info.overflow_recover_irq_mask1);
+		}
+		atomic_set(&vfe_dev->error_info.overflow_state,
+			OVERFLOW_DETECTED);
+	}
+
+	rc = vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 1);
+
+	if (halt_cmd->stop_camif) {
+		vfe_dev->hw_info->vfe_ops.core_ops.
+			update_camif_state(vfe_dev, DISABLE_CAMIF_IMMEDIATELY);
+	}
+
+	return rc;
+}
+
+int msm_isp_axi_reset(struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_reset_cmd *reset_cmd)
+{
+	int rc = 0, i, j;
+	struct msm_vfe_axi_stream *stream_info;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+	struct msm_isp_bufq *bufq = NULL;
+	struct msm_isp_timestamp timestamp;
+
+	if (!reset_cmd) {
+		pr_err("%s: NULL pointer reset cmd %pK\n", __func__, reset_cmd);
+		rc = -1;
+		return rc;
+	}
+
+	msm_isp_get_timestamp(&timestamp, vfe_dev);
+	rc = vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev,
+		0, reset_cmd->blocking);
+
+	for (i = 0, j = 0; j < axi_data->num_active_stream &&
+		i < MAX_NUM_STREAM; i++, j++) {
+		stream_info = &axi_data->stream_info[i];
+		if (stream_info->stream_src >= VFE_AXI_SRC_MAX) {
+			rc = -1;
+			pr_err("%s invalid  stream src = %d\n", __func__,
+				stream_info->stream_src);
+			break;
+		}
+		if (stream_info->state != ACTIVE) {
+			j--;
+			continue;
+		}
+
+		bufq = vfe_dev->buf_mgr->ops->get_bufq(vfe_dev->buf_mgr,
+			stream_info->bufq_handle);
+		if (!bufq) {
+			pr_err("%s: bufq null %pK by handle %x\n", __func__,
+				bufq, stream_info->bufq_handle);
+			continue;
+		}
+
+		if (bufq->buf_type != ISP_SHARE_BUF) {
+			msm_isp_deinit_stream_ping_pong_reg(vfe_dev,
+				stream_info);
+		} else {
+			vfe_dev->buf_mgr->ops->flush_buf(vfe_dev->buf_mgr,
+				stream_info->bufq_handle,
+				MSM_ISP_BUFFER_FLUSH_ALL,
+				&timestamp.buf_time,
+				reset_cmd->frame_id);
+		}
+		axi_data->src_info[SRC_TO_INTF(stream_info->stream_src)].
+			frame_id = reset_cmd->frame_id;
+		msm_isp_reset_burst_count_and_frame_drop(vfe_dev, stream_info);
+	}
+
+	if (rc < 0)
+		pr_err("%s Error! reset hw Timed out\n", __func__);
+
+	return rc;
+}
+
+int msm_isp_axi_restart(struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_restart_cmd *restart_cmd)
+{
+	int rc = 0, i, j;
+	struct msm_vfe_axi_stream *stream_info;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+	uint32_t wm_reload_mask = 0x0;
+
+	for (i = 0, j = 0; j < axi_data->num_active_stream &&
+		i < MAX_NUM_STREAM; i++, j++) {
+		stream_info = &axi_data->stream_info[i];
+		if (stream_info->state != ACTIVE) {
+			j--;
+			continue;
+		}
+		msm_isp_get_stream_wm_mask(stream_info, &wm_reload_mask);
+		msm_isp_init_stream_ping_pong_reg(vfe_dev, stream_info);
+	}
+
+	vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev, wm_reload_mask);
+	rc = vfe_dev->hw_info->vfe_ops.axi_ops.restart(vfe_dev, 0,
+		restart_cmd->enable_camif);
+	if (rc < 0)
+		pr_err("%s Error restarting HW\n", __func__);
+
+	return rc;
+}
+
+static int msm_isp_axi_update_cgc_override(struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd,
+	uint8_t cgc_override)
+{
+	int i = 0, j = 0;
+	struct msm_vfe_axi_stream *stream_info;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+
+	if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
+		return -EINVAL;
+
+	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+		if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
+				MAX_NUM_STREAM) {
+			return -EINVAL;
+		}
+		stream_info = &axi_data->stream_info[
+			HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+		for (j = 0; j < stream_info->num_planes; j++) {
+			if (vfe_dev->hw_info->vfe_ops.axi_ops.
+				update_cgc_override)
+				vfe_dev->hw_info->vfe_ops.axi_ops.
+					update_cgc_override(vfe_dev,
+					stream_info->wm[j], cgc_override);
+		}
+	}
+	return 0;
+}
+
+static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev,
+			struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd,
+			enum msm_isp_camif_update_state camif_update)
+{
+	int i, rc = 0;
+	uint8_t src_state, wait_for_complete = 0;
+	uint32_t wm_reload_mask = 0x0;
+	struct msm_vfe_axi_stream *stream_info;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+	uint8_t init_frm_drop = 0;
+
+	if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
+		return -EINVAL;
+
+	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+		if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
+			MAX_NUM_STREAM) {
+			return -EINVAL;
+		}
+		stream_info = &axi_data->stream_info[
+			HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+		if (SRC_TO_INTF(stream_info->stream_src) < VFE_SRC_MAX)
+			src_state = axi_data->src_info[
+				SRC_TO_INTF(stream_info->stream_src)].active;
+		else {
+			pr_err("%s: invalid src info index\n", __func__);
+			return -EINVAL;
+		}
+
+		msm_isp_calculate_bandwidth(axi_data, stream_info);
+		msm_isp_reset_framedrop(vfe_dev, stream_info);
+		init_frm_drop = stream_info->init_frame_drop;
+		msm_isp_get_stream_wm_mask(stream_info, &wm_reload_mask);
+		rc = msm_isp_init_stream_ping_pong_reg(vfe_dev, stream_info);
+		if (rc < 0) {
+			pr_err("%s: No buffer for stream %d\n",
+					__func__,
+				HANDLE_TO_IDX(
+				stream_cfg_cmd->stream_handle[i]));
+			return rc;
+		}
+
+		stream_info->state = START_PENDING;
+		if (src_state) {
+			wait_for_complete = 1;
+		} else {
+			if (vfe_dev->dump_reg)
+				msm_camera_io_dump_2(vfe_dev->vfe_base, 0x900);
+
+			/*Configure AXI start bits to start immediately*/
+			msm_isp_axi_stream_enable_cfg(vfe_dev, stream_info);
+			stream_info->state = ACTIVE;
+		}
+		if (SRC_TO_INTF(stream_info->stream_src) != VFE_PIX_0 &&
+			stream_info->stream_src < VFE_AXI_SRC_MAX) {
+			vfe_dev->axi_data.src_info[SRC_TO_INTF(
+				stream_info->stream_src)].frame_id =
+						init_frm_drop;
+		}
+	}
+	msm_isp_update_stream_bandwidth(vfe_dev);
+	vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev, wm_reload_mask);
+	vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev, 0xF);
+	msm_isp_update_camif_output_count(vfe_dev, stream_cfg_cmd);
+	msm_isp_update_rdi_output_count(vfe_dev, stream_cfg_cmd);
+	/* Configure UB */
+	vfe_dev->hw_info->vfe_ops.axi_ops.cfg_ub(vfe_dev);
+	if (camif_update == ENABLE_CAMIF) {
+		atomic_set(&vfe_dev->error_info.overflow_state,
+				NO_OVERFLOW);
+		vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id = 0;
+		vfe_dev->hw_info->vfe_ops.core_ops.
+			update_camif_state(vfe_dev, camif_update);
+	}
+
+	if (wait_for_complete) {
+		vfe_dev->axi_data.stream_update = stream_cfg_cmd->num_streams;
+		rc = msm_isp_axi_wait_for_cfg_done(vfe_dev, camif_update);
+	}
+
+	return rc;
+}
+
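+/*
+ * Stop streams: mark each stream STOP_PENDING and decide per stream
+ * whether its write masters can be disabled immediately or must wait for
+ * a reg update. CAMIF is then disabled as requested and, once no stream
+ * is active, the AXI bridge is halted and the HW reset/reinitialized.
+ */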
+static int msm_isp_stop_axi_stream(struct vfe_device *vfe_dev,
+			struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd,
+			enum msm_isp_camif_update_state camif_update)
+{
+	int i, rc = 0;
+	uint8_t wait_for_complete_for_this_stream = 0, cur_stream_cnt = 0;
+	uint8_t wait_for_complete = 0;
+	struct msm_vfe_axi_stream *stream_info = NULL;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+	int    ext_read =
+		axi_data->src_info[VFE_PIX_0].input_mux == EXTERNAL_READ;
+
+	if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM ||
+		stream_cfg_cmd->num_streams == 0)
+		return -EINVAL;
+
+	msm_isp_update_camif_output_count(vfe_dev, stream_cfg_cmd);
+	msm_isp_update_rdi_output_count(vfe_dev, stream_cfg_cmd);
+	cur_stream_cnt = msm_isp_get_curr_stream_cnt(vfe_dev);
+
+	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+		if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
+			MAX_NUM_STREAM) {
+			return -EINVAL;
+		}
+		stream_info = &axi_data->stream_info[
+			HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+		wait_for_complete_for_this_stream = 0;
+		stream_info->state = STOP_PENDING;
+		if (stream_info->stream_src == CAMIF_RAW ||
+			stream_info->stream_src == IDEAL_RAW) {
+			/* We don't get a reg update IRQ for raw snapshot,
+			 * so frame skip can't be configured
+			 */
+			if ((camif_update != DISABLE_CAMIF_IMMEDIATELY) &&
+					(!ext_read))
+				wait_for_complete_for_this_stream = 1;
+		} else if (stream_info->stream_type == BURST_STREAM &&
+				stream_info->runtime_num_burst_capture == 0) {
+			/* Configure AXI write masters to stop immediately,
+			 * since in the burst case the write masters already
+			 * skip all frames.
+			 */
+			if (stream_info->stream_src == RDI_INTF_0 ||
+				stream_info->stream_src == RDI_INTF_1 ||
+				stream_info->stream_src == RDI_INTF_2)
+				wait_for_complete_for_this_stream = 1;
+		} else {
+			if  ((camif_update != DISABLE_CAMIF_IMMEDIATELY) &&
+				(!ext_read) &&
+				!(stream_info->stream_src == RDI_INTF_0 ||
+				stream_info->stream_src == RDI_INTF_1 ||
+				stream_info->stream_src == RDI_INTF_2))
+				wait_for_complete_for_this_stream = 1;
+		}
+		if (!wait_for_complete_for_this_stream) {
+			msm_isp_axi_stream_enable_cfg(vfe_dev, stream_info);
+			stream_info->state = INACTIVE;
+			vfe_dev->hw_info->vfe_ops.core_ops.
+				reg_update(vfe_dev, 0xF);
+		}
+		wait_for_complete |= wait_for_complete_for_this_stream;
+	}
+	if (wait_for_complete) {
+		vfe_dev->axi_data.stream_update = stream_cfg_cmd->num_streams;
+		vfe_dev->hw_info->vfe_ops.core_ops.
+				reg_update(vfe_dev, 0xF);
+		rc = msm_isp_axi_wait_for_cfg_done(vfe_dev, camif_update);
+		if (rc < 0) {
+			pr_err("%s: wait for config done failed\n", __func__);
+			for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+				stream_info = &axi_data->stream_info[
+				HANDLE_TO_IDX(
+					stream_cfg_cmd->stream_handle[i])];
+				stream_info->state = STOP_PENDING;
+				vfe_dev->hw_info->vfe_ops.core_ops.
+					reg_update(vfe_dev, 0xF);
+				msm_isp_axi_stream_enable_cfg(
+					vfe_dev, stream_info);
+				stream_info->state = INACTIVE;
+			}
+		}
+	}
+	if (camif_update == DISABLE_CAMIF) {
+		vfe_dev->hw_info->vfe_ops.core_ops.
+			update_camif_state(vfe_dev, DISABLE_CAMIF);
+	} else if ((camif_update == DISABLE_CAMIF_IMMEDIATELY) ||
+					(ext_read)) {
+		/* For immediate stop, stop output first, then input */
+		if (!ext_read)
+			vfe_dev->hw_info->vfe_ops.core_ops.
+				update_camif_state(vfe_dev,
+						DISABLE_CAMIF_IMMEDIATELY);
+	}
+	if (cur_stream_cnt == 0) {
+		vfe_dev->ignore_error = 1;
+		vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 1);
+		vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 1, 1);
+		vfe_dev->hw_info->vfe_ops.core_ops.init_hw_reg(vfe_dev);
+		vfe_dev->ignore_error = 0;
+	}
+	msm_isp_update_stream_bandwidth(vfe_dev);
+
+	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+		stream_info = &axi_data->stream_info[
+			HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+		msm_isp_deinit_stream_ping_pong_reg(vfe_dev, stream_info);
+	}
+
+	return rc;
+}
+
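+/*
+ * Top-level START/STOP handler: validate stream states, work out the
+ * CAMIF transition and dispatch to the start or stop helper, raising the
+ * CGC override before start and dropping it after stop.
+ */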
+int msm_isp_cfg_axi_stream(struct vfe_device *vfe_dev, void *arg)
+{
+	int rc = 0;
+	struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd = arg;
+	enum msm_isp_camif_update_state camif_update;
+
+	rc = msm_isp_axi_check_stream_state(vfe_dev, stream_cfg_cmd);
+	if (rc < 0) {
+		pr_err("%s: Invalid stream state\n", __func__);
+		return rc;
+	}
+
+	camif_update = msm_isp_get_camif_update_state(vfe_dev, stream_cfg_cmd);
+
+	if (stream_cfg_cmd->cmd == START_STREAM) {
+		msm_isp_axi_update_cgc_override(vfe_dev, stream_cfg_cmd, 1);
+
+		rc = msm_isp_start_axi_stream(
+		   vfe_dev, stream_cfg_cmd, camif_update);
+	} else {
+		rc = msm_isp_stop_axi_stream(
+		   vfe_dev, stream_cfg_cmd, camif_update);
+
+		msm_isp_axi_update_cgc_override(vfe_dev, stream_cfg_cmd, 0);
+	}
+
+	if (rc < 0)
+		pr_err("%s: start/stop stream failed\n", __func__);
+	return rc;
+}
+
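+/*
+ * Queue frames on a controllable-output stream: add the request to the
+ * outstanding total, rebuild the burst/framedrop config from it and
+ * program ping/pong addresses for whichever buffer slots are still empty.
+ */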
+static int msm_isp_request_frame(struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_stream *stream_info, uint32_t request_frm_num)
+{
+	struct msm_vfe32_axi_stream_request_cmd stream_cfg_cmd;
+	int rc = 0;
+	uint32_t pingpong_status, pingpong_bit, wm_reload_mask = 0x0;
+
+	if (!stream_info->controllable_output)
+		return 0;
+
+	if (!request_frm_num) {
+		pr_err("%s: Invalid frame request.\n", __func__);
+		return -EINVAL;
+	}
+
+	stream_info->request_frm_num += request_frm_num;
+
+	stream_cfg_cmd.axi_stream_handle = stream_info->stream_handle;
+	stream_cfg_cmd.frame_skip_pattern = NO_SKIP;
+	stream_cfg_cmd.init_frame_drop = 0;
+	stream_cfg_cmd.burst_count = stream_info->request_frm_num;
+	msm_isp_calculate_framedrop(&vfe_dev->axi_data, &stream_cfg_cmd);
+	msm_isp_reset_framedrop(vfe_dev, stream_info);
+
+	if (stream_info->request_frm_num != request_frm_num) {
+		pingpong_status =
+			vfe_dev->hw_info->vfe_ops.axi_ops.get_pingpong_status(
+				vfe_dev);
+		pingpong_bit = (~(pingpong_status >> stream_info->wm[0]) & 0x1);
+
+		if (!stream_info->buf[pingpong_bit]) {
+			rc = msm_isp_cfg_ping_pong_address(vfe_dev, stream_info,
+				pingpong_status, pingpong_bit);
+			if (rc) {
+				pr_err("%s:%d failed to set ping pong address\n",
+					__func__, __LINE__);
+				return rc;
+			}
+		}
+
+		if (!stream_info->buf[!pingpong_bit] && request_frm_num > 1) {
+			rc = msm_isp_cfg_ping_pong_address(vfe_dev, stream_info,
+				~pingpong_status, !pingpong_bit);
+			if (rc) {
+				pr_err("%s:%d failed to set ping pong address\n",
+					__func__, __LINE__);
+				return rc;
+			}
+		}
+	} else {
+		if (!stream_info->buf[0]) {
+			rc = msm_isp_cfg_ping_pong_address(vfe_dev, stream_info,
+				VFE_PING_FLAG, 0);
+			if (rc) {
+				pr_err("%s:%d failed to set ping pong address\n",
+					__func__, __LINE__);
+				return rc;
+			}
+		}
+
+		if (!stream_info->buf[1] && request_frm_num > 1) {
+			rc = msm_isp_cfg_ping_pong_address(vfe_dev, stream_info,
+				VFE_PONG_FLAG, 1);
+			if (rc) {
+				pr_err("%s:%d failed to set ping pong address\n",
+					__func__, __LINE__);
+				return rc;
+			}
+		}
+
+		msm_isp_get_stream_wm_mask(stream_info, &wm_reload_mask);
+		vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
+			wm_reload_mask);
+	}
+
+	return rc;
+}
+
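+/*
+ * Runtime stream update: validate every handle first, then apply the
+ * requested change (buffer divert on/off, framedrop pattern, AXI plane
+ * config or frame request) to each stream in the command.
+ */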
+int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
+{
+	int rc = 0, i, j;
+	struct msm_vfe_axi_stream *stream_info;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+	struct msm_vfe_axi_stream_update_cmd *update_cmd = arg;
+	struct msm_vfe_axi_stream_cfg_update_info *update_info;
+	uint32_t frame_id;
+	struct msm_isp_timestamp timestamp;
+
+	if (update_cmd->update_type == UPDATE_STREAM_AXI_CONFIG &&
+		atomic_read(&axi_data->axi_cfg_update)) {
+		pr_err("%s: AXI stream config updating\n", __func__);
+		return -EBUSY;
+	}
+
+	/* num_streams is u32; update_info[] is bounded by MAX_NUM_STREAM */
+	if (update_cmd->num_streams > MAX_NUM_STREAM)
+		return -EINVAL;
+
+	for (i = 0; i < update_cmd->num_streams; i++) {
+		update_info = &update_cmd->update_info[i];
+		/* Check array reference bounds */
+		if (HANDLE_TO_IDX(update_info->stream_handle) >=
+			MAX_NUM_STREAM) {
+			return -EINVAL;
+		}
+		stream_info = &axi_data->stream_info[
+				HANDLE_TO_IDX(update_info->stream_handle)];
+		if (stream_info->state != ACTIVE &&
+			stream_info->state != INACTIVE) {
+			pr_err("%s: Invalid stream state\n", __func__);
+			return -EINVAL;
+		}
+	}
+
+	for (i = 0; i < update_cmd->num_streams; i++) {
+		update_info = &update_cmd->update_info[i];
+		stream_info = &axi_data->stream_info[
+				HANDLE_TO_IDX(update_info->stream_handle)];
+
+		switch (update_cmd->update_type) {
+		case ENABLE_STREAM_BUF_DIVERT:
+			stream_info->buf_divert = 1;
+			break;
+		case DISABLE_STREAM_BUF_DIVERT:
+			msm_isp_get_timestamp(&timestamp, vfe_dev);
+			stream_info->buf_divert = 0;
+			frame_id = vfe_dev->axi_data.
+					src_info[SRC_TO_INTF(
+					stream_info->stream_src)].
+					frame_id;
+			vfe_dev->buf_mgr->ops->flush_buf(vfe_dev->buf_mgr,
+					stream_info->bufq_handle,
+					MSM_ISP_BUFFER_FLUSH_DIVERTED,
+					&timestamp.buf_time, frame_id);
+			break;
+		case UPDATE_STREAM_FRAMEDROP_PATTERN: {
+			uint32_t framedrop_period =
+				msm_isp_get_framedrop_period(
+				   update_info->skip_pattern);
+			if (update_info->skip_pattern == SKIP_ALL)
+				stream_info->framedrop_pattern = 0x0;
+			else
+				stream_info->framedrop_pattern = 0x1;
+			stream_info->framedrop_period = framedrop_period - 1;
+			if (stream_info->stream_type == BURST_STREAM) {
+				stream_info->runtime_framedrop_update_burst = 1;
+			} else {
+				stream_info->runtime_init_frame_drop = 0;
+				vfe_dev->hw_info->vfe_ops.axi_ops.
+					cfg_framedrop(vfe_dev, stream_info);
+			}
+			break;
+		}
+		case UPDATE_STREAM_AXI_CONFIG: {
+			for (j = 0; j < stream_info->num_planes; j++) {
+				stream_info->plane_cfg[j] =
+					update_info->plane_cfg[j];
+			}
+			stream_info->output_format = update_info->output_format;
+			if (stream_info->state == ACTIVE) {
+				stream_info->state = PAUSE_PENDING;
+				msm_isp_axi_stream_enable_cfg(
+					vfe_dev, stream_info);
+				stream_info->state = PAUSING;
+				atomic_set(&axi_data->axi_cfg_update,
+					UPDATE_REQUESTED);
+			} else {
+				for (j = 0; j < stream_info->num_planes; j++)
+					vfe_dev->hw_info->vfe_ops.axi_ops.
+					cfg_wm_reg(vfe_dev, stream_info, j);
+				stream_info->runtime_output_format =
+					stream_info->output_format;
+			}
+			break;
+		}
+		case UPDATE_STREAM_REQUEST_FRAMES: {
+			rc = msm_isp_request_frame(vfe_dev, stream_info,
+				update_info->frame_id);
+			if (rc)
+				pr_err("%s failed to request frame!\n",
+					__func__);
+			break;
+		}
+		default:
+			pr_err("%s: Invalid update type\n", __func__);
+			return -EINVAL;
+		}
+	}
+	return rc;
+}
+
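+/*
+ * AXI write-master IRQ handler: walk the composite-done and per-WM done
+ * masks, fetch the completed buffer, program the next ping/pong address
+ * for continuous (or still running burst) streams and pass finished
+ * buffers to msm_isp_process_done_buf().
+ */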
+void msm_isp_process_axi_irq(struct vfe_device *vfe_dev,
+	uint32_t irq_status0, uint32_t irq_status1,
+	struct msm_isp_timestamp *ts)
+{
+	int i, rc = 0;
+	struct msm_isp_buffer *done_buf = NULL;
+	uint32_t comp_mask = 0, wm_mask = 0;
+	uint32_t pingpong_status, stream_idx;
+	struct msm_vfe_axi_stream *stream_info;
+	struct msm_vfe_axi_composite_info *comp_info;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+	uint32_t pingpong_bit = 0, frame_id = 0;
+
+	comp_mask = vfe_dev->hw_info->vfe_ops.axi_ops.
+		get_comp_mask(irq_status0, irq_status1);
+	wm_mask = vfe_dev->hw_info->vfe_ops.axi_ops.
+		get_wm_mask(irq_status0, irq_status1);
+	if (!(comp_mask || wm_mask))
+		return;
+
+	ISP_DBG("%s: status: 0x%x\n", __func__, irq_status0);
+	pingpong_status =
+		vfe_dev->hw_info->vfe_ops.axi_ops.get_pingpong_status(vfe_dev);
+	for (i = 0; i < axi_data->hw_info->num_comp_mask; i++) {
+		rc = 0;
+		comp_info = &axi_data->composite_info[i];
+		if (comp_mask & (1 << i)) {
+			stream_idx = HANDLE_TO_IDX(comp_info->stream_handle);
+			if ((!comp_info->stream_handle) ||
+				(stream_idx >= MAX_NUM_STREAM)) {
+				pr_err("%s: Invalid handle for composite irq\n",
+					__func__);
+			} else {
+				stream_idx =
+					HANDLE_TO_IDX(comp_info->stream_handle);
+				stream_info =
+					&axi_data->stream_info[stream_idx];
+
+				pingpong_bit = (~(pingpong_status >>
+					stream_info->wm[0]) & 0x1);
+
+				if (stream_info->stream_type == BURST_STREAM)
+					stream_info->
+						runtime_num_burst_capture--;
+
+				msm_isp_get_done_buf(vfe_dev, stream_info,
+					pingpong_status, &done_buf);
+				if (stream_info->stream_type ==
+					CONTINUOUS_STREAM ||
+					stream_info->
+					runtime_num_burst_capture > 1) {
+					rc = msm_isp_cfg_ping_pong_address(
+							vfe_dev, stream_info,
+							pingpong_status,
+							pingpong_bit);
+				}
+				frame_id = vfe_dev->axi_data.
+					src_info[SRC_TO_INTF(
+					stream_info->stream_src)].
+					frame_id;
+				stream_info->frame_id = frame_id;
+				ISP_DBG("%s: stream id:%d frame id:%d\n",
+					__func__, stream_info->stream_id,
+					stream_info->frame_id);
+				if (done_buf && !rc)
+					msm_isp_process_done_buf(vfe_dev,
+					stream_info, done_buf, ts);
+			}
+		}
+		wm_mask &= ~(comp_info->stream_composite_mask);
+	}
+
+	for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+		if (wm_mask & (1 << i)) {
+			stream_idx = HANDLE_TO_IDX(axi_data->free_wm[i]);
+			if ((!axi_data->free_wm[i]) ||
+				(stream_idx >= MAX_NUM_STREAM)) {
+				pr_err("%s: Invalid handle for wm irq\n",
+					__func__);
+				continue;
+			}
+			stream_info = &axi_data->stream_info[stream_idx];
+
+			pingpong_bit = (~(pingpong_status >>
+				stream_info->wm[0]) & 0x1);
+
+			if (stream_info->stream_type == BURST_STREAM)
+				stream_info->runtime_num_burst_capture--;
+
+			msm_isp_get_done_buf(vfe_dev, stream_info,
+						pingpong_status, &done_buf);
+			if (stream_info->stream_type == CONTINUOUS_STREAM ||
+				stream_info->runtime_num_burst_capture > 1) {
+				rc = msm_isp_cfg_ping_pong_address(vfe_dev,
+					stream_info, pingpong_status,
+					pingpong_bit);
+			}
+			stream_info->frame_id = frame_id;
+			ISP_DBG("%s: stream id:%d frame id:%d\n",
+				__func__, stream_info->stream_id,
+				stream_info->frame_id);
+			if (done_buf && !rc)
+				msm_isp_process_done_buf(vfe_dev,
+				stream_info, done_buf, ts);
+		}
+	}
+}
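+
+/*
+ * Forward a userspace buf-done request to the buffer manager using the
+ * identifiers carried in the event payload.
+ */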
+int msm_isp_user_buf_done(struct vfe_device *vfe_dev,
+	struct msm_isp32_event_data *buf_cmd)
+{
+	int rc = 0;
+	struct msm_isp32_event_data buf_event;
+
+	memset(&buf_event, 0, sizeof(buf_event));
+	buf_event.input_intf = buf_cmd->input_intf;
+	buf_event.frame_id = buf_cmd->frame_id;
+	buf_event.timestamp = buf_cmd->timestamp;
+	buf_event.u.buf_done.session_id =
+	  buf_cmd->u.buf_done.session_id;
+	buf_event.u.buf_done.stream_id =
+	  buf_cmd->u.buf_done.stream_id;
+	buf_event.u.buf_done.output_format =
+	  buf_cmd->u.buf_done.output_format;
+	buf_event.u.buf_done.buf_idx =
+	  buf_cmd->u.buf_done.buf_idx;
+	buf_event.u.buf_done.handle =
+	   buf_cmd->u.buf_done.handle;
+
+	vfe_dev->buf_mgr->ops->buf_done(vfe_dev->buf_mgr,
+			buf_event.u.buf_done.handle,
+			buf_event.u.buf_done.buf_idx,
+			&buf_event.timestamp, buf_event.frame_id,
+			buf_event.u.buf_done.output_format);
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util_32.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util_32.h
new file mode 100644
index 0000000..5d89a84
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util_32.h
@@ -0,0 +1,72 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MSM_ISP_AXI_UTIL_H__
+#define __MSM_ISP_AXI_UTIL_H__
+
+#include "msm_isp_32.h"
+
+int msm_isp_axi_create_stream(
+	struct msm_vfe_axi_shared_data *axi_data,
+	struct msm_vfe32_axi_stream_request_cmd *stream_cfg_cmd);
+
+void msm_isp_axi_destroy_stream(
+	struct msm_vfe_axi_shared_data *axi_data, int stream_idx);
+
+int msm_isp_validate_axi_request(
+	struct msm_vfe_axi_shared_data *axi_data,
+	struct msm_vfe32_axi_stream_request_cmd *stream_cfg_cmd);
+
+void msm_isp_axi_reserve_wm(
+	struct msm_vfe_axi_shared_data *axi_data,
+	struct msm_vfe_axi_stream *stream_info);
+
+void msm_isp_axi_reserve_comp_mask(
+	struct msm_vfe_axi_shared_data *axi_data,
+	struct msm_vfe_axi_stream *stream_info);
+
+int msm_isp_axi_check_stream_state(
+	struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd);
+
+void msm_isp_calculate_framedrop(
+	struct msm_vfe_axi_shared_data *axi_data,
+	struct msm_vfe32_axi_stream_request_cmd *stream_cfg_cmd);
+void msm_isp_reset_framedrop(struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_stream *stream_info);
+
+void msm_isp_start_avtimer(void);
+int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_cfg_axi_stream(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_release_axi_stream(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg);
+void msm_isp_axi_cfg_update(struct vfe_device *vfe_dev);
+int msm_isp_axi_halt(struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_halt_cmd *halt_cmd);
+int msm_isp_axi_reset(struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_reset_cmd *reset_cmd);
+int msm_isp_axi_restart(struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_restart_cmd *restart_cmd);
+int msm_isp_user_buf_done(struct vfe_device *vfe_dev,
+	struct msm_isp32_event_data *buf_cmd);
+void msm_isp_axi_stream_update(struct vfe_device *vfe_dev,
+	uint8_t input_src);
+
+void msm_isp_update_framedrop_reg(struct vfe_device *vfe_dev,
+	uint8_t input_src);
+void msm_isp_notify(struct vfe_device *vfe_dev, uint32_t event_type,
+	enum msm_vfe_input_src frame_src, struct msm_isp_timestamp *ts);
+void msm_isp_process_axi_irq(struct vfe_device *vfe_dev,
+	uint32_t irq_status0, uint32_t irq_status1,
+	struct msm_isp_timestamp *ts);
+void msm_isp_axi_free_wm(struct msm_vfe_axi_shared_data *axi_data,
+	struct msm_vfe_axi_stream *stream_info);
+#endif /* __MSM_ISP_AXI_UTIL_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util_32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util_32.c
new file mode 100644
index 0000000..8273298
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util_32.c
@@ -0,0 +1,709 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/io.h>
+#include <linux/atomic.h>
+#include <media/v4l2-subdev.h>
+#include <media/msmb_isp.h>
+#include "msm_isp_util_32.h"
+#include "msm_isp_stats_util_32.h"
+
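+/*
+ * Pick the idle ping/pong slot for a stats stream, fetch a free buffer
+ * from the buffer manager, program its address into the HW and return
+ * the buffer that just completed (if any) through done_buf.
+ */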
+static int msm_isp_stats_cfg_ping_pong_address(struct vfe_device *vfe_dev,
+	struct msm_vfe_stats_stream *stream_info, uint32_t pingpong_status,
+	struct msm_isp_buffer **done_buf)
+{
+	int rc = -1;
+	struct msm_isp_buffer *buf;
+	uint32_t pingpong_bit = 0;
+	uint32_t bufq_handle = stream_info->bufq_handle;
+	uint32_t stats_pingpong_offset;
+	uint32_t stats_idx = STATS_IDX(stream_info->stream_handle);
+
+	if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type ||
+		stats_idx >= MSM_ISP_STATS_MAX) {
+		pr_err("%s Invalid stats index %d", __func__, stats_idx);
+		return -EINVAL;
+	}
+
+	stats_pingpong_offset =
+		vfe_dev->hw_info->stats_hw_info->stats_ping_pong_offset[
+		stats_idx];
+
+	pingpong_bit = (~(pingpong_status >> stats_pingpong_offset) & 0x1);
+	rc = vfe_dev->buf_mgr->ops->get_buf(vfe_dev->buf_mgr,
+			vfe_dev->pdev->id, bufq_handle,
+			MSM_ISP_INVALID_BUF_INDEX, &buf);
+	if (rc < 0) {
+		vfe_dev->error_info.stats_framedrop_count[stats_idx]++;
+		return rc;
+	}
+
+	if (buf->num_planes != 1) {
+		pr_err("%s: Invalid buffer\n", __func__);
+		rc = -EINVAL;
+		goto buf_error;
+	}
+
+	vfe_dev->hw_info->vfe_ops.stats_ops.update_ping_pong_addr(
+		vfe_dev, stream_info,
+		pingpong_status, buf->mapped_info[0].paddr +
+		stream_info->buffer_offset);
+
+	if (stream_info->buf[pingpong_bit] && done_buf)
+		*done_buf = stream_info->buf[pingpong_bit];
+
+	stream_info->buf[pingpong_bit] = buf;
+	return 0;
+buf_error:
+	vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
+		buf->bufq_handle, buf->buf_idx);
+	return rc;
+}
+
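+/*
+ * Stats IRQ handler: merge the composite and per-WM stats masks, rotate
+ * the ping/pong buffer for every signalled stats type and send either
+ * individual or composite stats-notify events to userspace.
+ */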
+void msm_isp_process_stats_irq(struct vfe_device *vfe_dev,
+	uint32_t irq_status0, uint32_t irq_status1,
+	struct msm_isp_timestamp *ts)
+{
+	int i, j, rc;
+	struct msm_isp32_event_data buf_event;
+	struct msm_isp_stats_event *stats_event = &buf_event.u.stats;
+	struct msm_isp_buffer *done_buf;
+	struct msm_vfe_stats_stream *stream_info = NULL;
+	uint32_t pingpong_status;
+	uint32_t comp_stats_type_mask = 0, atomic_stats_mask = 0;
+	uint32_t stats_comp_mask = 0, stats_irq_mask = 0;
+	uint32_t num_stats_comp_mask =
+		vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask;
+	stats_comp_mask = vfe_dev->hw_info->vfe_ops.stats_ops.
+		get_comp_mask(irq_status0, irq_status1);
+	stats_irq_mask = vfe_dev->hw_info->vfe_ops.stats_ops.
+		get_wm_mask(irq_status0, irq_status1);
+	if (!(stats_comp_mask || stats_irq_mask))
+		return;
+	ISP_DBG("%s: status: 0x%x\n", __func__, irq_status0);
+
+	/*
+	 * If any composite mask is set, clear its irq bits from the mask;
+	 * they will be restored from the composite mask below.
+	 */
+	if (stats_comp_mask) {
+		for (j = 0; j < num_stats_comp_mask; j++) {
+			stats_irq_mask &= ~atomic_read(
+				&vfe_dev->stats_data.stats_comp_mask[j]);
+		}
+	}
+
+	for (j = 0; j < num_stats_comp_mask; j++) {
+		atomic_stats_mask = atomic_read(
+			&vfe_dev->stats_data.stats_comp_mask[j]);
+		if (!stats_comp_mask) {
+			stats_irq_mask &= ~atomic_stats_mask;
+		} else {
+			/* restore irq bits from composite mask */
+			if (stats_comp_mask & (1 << j))
+				stats_irq_mask |= atomic_stats_mask;
+		}
+		/* If no irq bits are set for this composite mask, continue */
+		if (!stats_irq_mask)
+			continue;
+		memset(&buf_event, 0, sizeof(struct msm_isp32_event_data));
+		buf_event.timestamp = ts->event_time;
+		buf_event.frame_id =
+			vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+		buf_event.input_intf = VFE_PIX_0;
+		pingpong_status = vfe_dev->hw_info->
+			vfe_ops.stats_ops.get_pingpong_status(vfe_dev);
+
+		for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type;
+			i++) {
+			if (!(stats_irq_mask & (1 << i)))
+				continue;
+
+			stats_irq_mask &= ~(1 << i);
+			stream_info = &vfe_dev->stats_data.stream_info[i];
+			done_buf = NULL;
+			msm_isp_stats_cfg_ping_pong_address(vfe_dev,
+				stream_info, pingpong_status, &done_buf);
+			if (done_buf) {
+				rc = vfe_dev->buf_mgr->ops->buf_divert(
+					vfe_dev->buf_mgr, done_buf->bufq_handle,
+					done_buf->buf_idx, &ts->buf_time,
+					vfe_dev->axi_data.
+					src_info[VFE_PIX_0].frame_id);
+				if (rc != 0)
+					continue;
+
+				stats_event->stats_buf_idxs
+					[stream_info->stats_type] =
+					done_buf->buf_idx;
+				if (!stream_info->composite_flag) {
+					stats_event->stats_mask =
+						1 << stream_info->stats_type;
+					ISP_DBG("%s: stats frameid: 0x%x %d\n",
+						__func__, buf_event.frame_id,
+						stream_info->stats_type);
+					msm_isp_send_event(vfe_dev,
+						ISP_EVENT_STATS_NOTIFY +
+						stream_info->stats_type,
+						&buf_event);
+				} else {
+					comp_stats_type_mask |=
+						1 << stream_info->stats_type;
+				}
+			}
+		}
+
+		if (comp_stats_type_mask) {
+			ISP_DBG("%s: comp_stats frameid: 0x%x, 0x%x\n",
+				__func__, buf_event.frame_id,
+				comp_stats_type_mask);
+			stats_event->stats_mask = comp_stats_type_mask;
+			msm_isp_send_event(vfe_dev,
+				ISP_EVENT_COMP_STATS_NOTIFY, &buf_event);
+			comp_stats_type_mask = 0;
+		}
+	}
+}
+
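+/*
+ * Reserve the stats stream slot for the requested stats type, copy the
+ * userspace parameters into it and build the returned handle from the
+ * rolling handle counter (upper bits) and the stats index (low byte).
+ */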
+int msm_isp_stats_create_stream(struct vfe_device *vfe_dev,
+	struct msm_vfe_stats_stream_request_cmd *stream_req_cmd)
+{
+	int rc = -1;
+	struct msm_vfe_stats_stream *stream_info = NULL;
+	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+	uint32_t stats_idx;
+
+	if (!(vfe_dev->hw_info->stats_hw_info->stats_capability_mask &
+		(1 << stream_req_cmd->stats_type))) {
+		pr_err("%s: Stats type not supported\n", __func__);
+		return rc;
+	}
+
+	stats_idx = vfe_dev->hw_info->vfe_ops.stats_ops.
+		get_stats_idx(stream_req_cmd->stats_type);
+
+	if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+		pr_err("%s Invalid stats index %d", __func__, stats_idx);
+		return -EINVAL;
+	}
+
+	stream_info = &stats_data->stream_info[stats_idx];
+	if (stream_info->state != STATS_AVAILABLE) {
+		pr_err("%s: Stats already requested\n", __func__);
+		return rc;
+	}
+
+	if (stream_req_cmd->framedrop_pattern >= MAX_SKIP) {
+		pr_err("%s: Invalid framedrop pattern\n", __func__);
+		return rc;
+	}
+
+	if (stream_req_cmd->irq_subsample_pattern >= MAX_SKIP) {
+		pr_err("%s: Invalid irq subsample pattern\n", __func__);
+		return rc;
+	}
+
+	stream_info->session_id = stream_req_cmd->session_id;
+	stream_info->stream_id = stream_req_cmd->stream_id;
+	stream_info->composite_flag = stream_req_cmd->composite_flag;
+	stream_info->stats_type = stream_req_cmd->stats_type;
+	stream_info->buffer_offset = stream_req_cmd->buffer_offset;
+	stream_info->framedrop_pattern = stream_req_cmd->framedrop_pattern;
+	stream_info->init_stats_frame_drop = stream_req_cmd->init_frame_drop;
+	stream_info->irq_subsample_pattern =
+		stream_req_cmd->irq_subsample_pattern;
+	stream_info->state = STATS_INACTIVE;
+
+	if ((vfe_dev->stats_data.stream_handle_cnt << 8) == 0)
+		vfe_dev->stats_data.stream_handle_cnt++;
+
+	stream_req_cmd->stream_handle =
+		(++vfe_dev->stats_data.stream_handle_cnt) << 8 | stats_idx;
+
+	stream_info->stream_handle = stream_req_cmd->stream_handle;
+	return 0;
+}
+
+int msm_isp_request_stats_stream(struct vfe_device *vfe_dev, void *arg)
+{
+	int rc = -1;
+	struct msm_vfe_stats_stream_request_cmd *stream_req_cmd = arg;
+	struct msm_vfe_stats_stream *stream_info = NULL;
+	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+	uint32_t framedrop_period;
+	uint32_t stats_idx;
+
+	rc = msm_isp_stats_create_stream(vfe_dev, stream_req_cmd);
+	if (rc < 0) {
+		pr_err("%s: create stream failed\n", __func__);
+		return rc;
+	}
+
+	stats_idx = STATS_IDX(stream_req_cmd->stream_handle);
+
+	if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+		pr_err("%s Invalid stats index %d", __func__, stats_idx);
+		return -EINVAL;
+	}
+
+	stream_info = &stats_data->stream_info[stats_idx];
+
+	framedrop_period = msm_isp_get_framedrop_period(
+	   stream_req_cmd->framedrop_pattern);
+
+	if (stream_req_cmd->framedrop_pattern == SKIP_ALL)
+		stream_info->framedrop_pattern = 0x0;
+	else
+		stream_info->framedrop_pattern = 0x1;
+	stream_info->framedrop_period = framedrop_period - 1;
+
+	if (!stream_info->composite_flag)
+		vfe_dev->hw_info->vfe_ops.stats_ops.
+			cfg_wm_irq_mask(vfe_dev, stream_info);
+
+	if (stream_info->init_stats_frame_drop == 0)
+		vfe_dev->hw_info->vfe_ops.stats_ops.cfg_wm_reg(vfe_dev,
+			stream_info);
+
+	return rc;
+}
+
+int msm_isp_release_stats_stream(struct vfe_device *vfe_dev, void *arg)
+{
+	int rc = -1;
+	struct msm_vfe_stats_stream_cfg_cmd stream_cfg_cmd;
+	struct msm_vfe_stats_stream_release_cmd *stream_release_cmd = arg;
+	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+	int stats_idx = STATS_IDX(stream_release_cmd->stream_handle);
+	struct msm_vfe_stats_stream *stream_info = NULL;
+
+	if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+		pr_err("%s Invalid stats index %d", __func__, stats_idx);
+		return -EINVAL;
+	}
+
+	stream_info = &stats_data->stream_info[stats_idx];
+	if (stream_info->state == STATS_AVAILABLE) {
+		pr_err("%s: stream already released\n", __func__);
+		return rc;
+	} else if (stream_info->state != STATS_INACTIVE) {
+		stream_cfg_cmd.enable = 0;
+		stream_cfg_cmd.num_streams = 1;
+		stream_cfg_cmd.stream_handle[0] =
+			stream_release_cmd->stream_handle;
+		rc = msm_isp_cfg_stats_stream(vfe_dev, &stream_cfg_cmd);
+	}
+
+	if (!stream_info->composite_flag)
+		vfe_dev->hw_info->vfe_ops.stats_ops.
+			clear_wm_irq_mask(vfe_dev, stream_info);
+
+	vfe_dev->hw_info->vfe_ops.stats_ops.clear_wm_reg(vfe_dev, stream_info);
+	memset(stream_info, 0, sizeof(struct msm_vfe_stats_stream));
+	return 0;
+}
+
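+/*
+ * Resolve the buffer queue for this stats stream and prime both the ping
+ * and pong address registers with free buffers.
+ */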
+static int msm_isp_init_stats_ping_pong_reg(
+	struct vfe_device *vfe_dev,
+	struct msm_vfe_stats_stream *stream_info)
+{
+	int rc = 0;
+
+	stream_info->bufq_handle =
+		vfe_dev->buf_mgr->ops->get_bufq_handle(
+		vfe_dev->buf_mgr, stream_info->session_id,
+		stream_info->stream_id);
+	if (stream_info->bufq_handle == 0) {
+		pr_err("%s: no buf configured for stream: 0x%x\n",
+			__func__, stream_info->stream_handle);
+		return -EINVAL;
+	}
+
+	rc = msm_isp_stats_cfg_ping_pong_address(vfe_dev,
+		stream_info, VFE_PING_FLAG, NULL);
+	if (rc < 0) {
+		pr_err("%s: No free buffer for ping\n", __func__);
+		return rc;
+	}
+	rc = msm_isp_stats_cfg_ping_pong_address(vfe_dev,
+		stream_info, VFE_PONG_FLAG, NULL);
+	if (rc < 0) {
+		pr_err("%s: No free buffer for pong\n", __func__);
+		return rc;
+	}
+	return rc;
+}
+
+static void msm_isp_deinit_stats_ping_pong_reg(
+	struct vfe_device *vfe_dev,
+	struct msm_vfe_stats_stream *stream_info)
+{
+	int i;
+	struct msm_isp_buffer *buf;
+
+	for (i = 0; i < 2; i++) {
+		buf = stream_info->buf[i];
+		if (buf)
+			vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
+				buf->bufq_handle, buf->buf_idx);
+	}
+}
+
+void msm_isp_update_stats_framedrop_reg(struct vfe_device *vfe_dev)
+{
+	int i;
+	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+	struct msm_vfe_stats_stream *stream_info = NULL;
+
+	for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type; i++) {
+		stream_info = &stats_data->stream_info[i];
+		if (stream_info->state != STATS_ACTIVE)
+			continue;
+
+		if (stream_info->init_stats_frame_drop) {
+			stream_info->init_stats_frame_drop--;
+			if (stream_info->init_stats_frame_drop == 0) {
+				vfe_dev->hw_info->vfe_ops.stats_ops.cfg_wm_reg(
+					vfe_dev, stream_info);
+			}
+		}
+	}
+}
+
+void msm_isp_stats_stream_update(struct vfe_device *vfe_dev)
+{
+	int i;
+	uint32_t stats_mask = 0, comp_stats_mask = 0;
+	uint32_t enable = 0;
+	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+
+	for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type; i++) {
+		if (stats_data->stream_info[i].state == STATS_START_PENDING ||
+				stats_data->stream_info[i].state ==
+					STATS_STOP_PENDING) {
+			stats_mask |= i;
+			enable = stats_data->stream_info[i].state ==
+				STATS_START_PENDING ? 1 : 0;
+			stats_data->stream_info[i].state =
+				stats_data->stream_info[i].state ==
+				STATS_START_PENDING ?
+				STATS_STARTING : STATS_STOPPING;
+			vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
+				vfe_dev, BIT(i), enable);
+			vfe_dev->hw_info->vfe_ops.stats_ops.cfg_comp_mask(
+			   vfe_dev, BIT(i), enable);
+		} else if (stats_data->stream_info[i].state == STATS_STARTING ||
+			stats_data->stream_info[i].state == STATS_STOPPING) {
+			if (stats_data->stream_info[i].composite_flag)
+				comp_stats_mask |= i;
+			stats_data->stream_info[i].state =
+				stats_data->stream_info[i].state ==
+				STATS_STARTING ? STATS_ACTIVE : STATS_INACTIVE;
+		}
+	}
+	atomic_sub(1, &stats_data->stats_update);
+	if (!atomic_read(&stats_data->stats_update))
+		complete(&vfe_dev->stats_config_complete);
+}
+
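+/*
+ * Arm the stats_update counter for two reg-update passes and block until
+ * the stats stream state machine completes, or time out.
+ */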
+static int msm_isp_stats_wait_for_cfg_done(struct vfe_device *vfe_dev)
+{
+	int rc;
+
+	init_completion(&vfe_dev->stats_config_complete);
+	atomic_set(&vfe_dev->stats_data.stats_update, 2);
+	rc = wait_for_completion_timeout(
+		&vfe_dev->stats_config_complete,
+		msecs_to_jiffies(VFE_MAX_CFG_TIMEOUT));
+	if (rc == 0) {
+		pr_err("%s: wait timeout\n", __func__);
+		rc = -1;
+	} else {
+		rc = 0;
+	}
+	return rc;
+}
+
+static int msm_isp_stats_update_cgc_override(struct vfe_device *vfe_dev,
+	struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd)
+{
+	int i;
+	uint32_t stats_mask = 0, idx;
+
+	if (stream_cfg_cmd->num_streams > MSM_ISP_STATS_MAX) {
+		pr_err("%s invalid num_streams %d\n", __func__,
+			stream_cfg_cmd->num_streams);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+		idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
+
+		if (idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+			pr_err("%s Invalid stats index %d", __func__, idx);
+			return -EINVAL;
+		}
+		stats_mask |= 1 << idx;
+	}
+
+	if (vfe_dev->hw_info->vfe_ops.stats_ops.update_cgc_override) {
+		vfe_dev->hw_info->vfe_ops.stats_ops.update_cgc_override(
+			vfe_dev, stats_mask, stream_cfg_cmd->enable);
+	}
+	return 0;
+}
+
+static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev,
+	struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd)
+{
+	int i, rc = 0;
+	uint32_t stats_mask = 0, idx;
+	uint32_t comp_stats_mask[MAX_NUM_STATS_COMP_MASK] = {0};
+	uint32_t num_stats_comp_mask = 0;
+	struct msm_vfe_stats_stream *stream_info;
+	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+
+	if (stream_cfg_cmd->num_streams > MSM_ISP_STATS_MAX) {
+		pr_err("%s invalid num_streams %d\n", __func__,
+			stream_cfg_cmd->num_streams);
+		return -EINVAL;
+	}
+
+	num_stats_comp_mask =
+		vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask;
+	rc = vfe_dev->hw_info->vfe_ops.stats_ops.check_streams(
+		stats_data->stream_info);
+	if (rc < 0)
+		return rc;
+	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+		idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
+
+		if (idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+			pr_err("%s Invalid stats index %d", __func__, idx);
+			return -EINVAL;
+		}
+
+		stream_info = &stats_data->stream_info[idx];
+		if (stream_info->stream_handle !=
+				stream_cfg_cmd->stream_handle[i]) {
+			pr_err("%s: Invalid stream handle: 0x%x received\n",
+				__func__, stream_cfg_cmd->stream_handle[i]);
+			continue;
+		}
+
+		if (stream_info->composite_flag > num_stats_comp_mask) {
+			pr_err("%s: comp grp %d exceed max %d\n",
+				__func__, stream_info->composite_flag,
+				num_stats_comp_mask);
+			return -EINVAL;
+		}
+		rc = msm_isp_init_stats_ping_pong_reg(vfe_dev, stream_info);
+		if (rc < 0) {
+			pr_err("%s: No buffer for stream%d type:%d stmID:0x%x\n",
+				__func__, idx, stream_info->stats_type,
+				stream_info->stream_id);
+			return rc;
+		}
+
+		if (vfe_dev->axi_data.src_info[VFE_PIX_0].active)
+			stream_info->state = STATS_START_PENDING;
+		else
+			stream_info->state = STATS_ACTIVE;
+
+		stats_data->num_active_stream++;
+		stats_mask |= 1 << idx;
+
+		if (stream_info->composite_flag > 0)
+			comp_stats_mask[stream_info->composite_flag-1] |=
+				1 << idx;
+
+		ISP_DBG("%s: stats_mask %x %x active streams %d\n",
+			__func__, comp_stats_mask[0],
+			comp_stats_mask[1],
+			stats_data->num_active_stream);
+
+	}
+
+	if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
+		rc = msm_isp_stats_wait_for_cfg_done(vfe_dev);
+	} else {
+		vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
+			vfe_dev, stats_mask, stream_cfg_cmd->enable);
+		for (i = 0; i < num_stats_comp_mask; i++) {
+			vfe_dev->hw_info->vfe_ops.stats_ops.cfg_comp_mask(
+			 vfe_dev, comp_stats_mask[i], 1);
+		}
+	}
+	return rc;
+}
+
+static int msm_isp_stop_stats_stream(struct vfe_device *vfe_dev,
+	struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd)
+{
+	int i, rc = 0;
+	uint32_t stats_mask = 0, idx;
+	uint32_t comp_stats_mask[MAX_NUM_STATS_COMP_MASK] = {0};
+	uint32_t num_stats_comp_mask = 0;
+	struct msm_vfe_stats_stream *stream_info;
+	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+
+	num_stats_comp_mask =
+		vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask;
+
+	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+
+		idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
+
+		if (idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+			pr_err("%s Invalid stats index %d", __func__, idx);
+			return -EINVAL;
+		}
+
+		stream_info = &stats_data->stream_info[idx];
+		if (stream_info->stream_handle !=
+				stream_cfg_cmd->stream_handle[i]) {
+			pr_err("%s: Invalid stream handle: 0x%x received\n",
+				__func__, stream_cfg_cmd->stream_handle[i]);
+			continue;
+		}
+
+		if (stream_info->composite_flag > num_stats_comp_mask) {
+			pr_err("%s: comp grp %d exceed max %d\n",
+				__func__, stream_info->composite_flag,
+				num_stats_comp_mask);
+			return -EINVAL;
+		}
+
+		if (vfe_dev->axi_data.src_info[VFE_PIX_0].active)
+			stream_info->state = STATS_STOP_PENDING;
+		else
+			stream_info->state = STATS_INACTIVE;
+
+		stats_data->num_active_stream--;
+		stats_mask |= 1 << idx;
+
+		if (stream_info->composite_flag > 0)
+			comp_stats_mask[stream_info->composite_flag-1] |=
+				1 << idx;
+
+		ISP_DBG("%s: stats_mask %x %x active streams %d\n",
+			__func__, comp_stats_mask[0],
+			comp_stats_mask[1],
+			stats_data->num_active_stream);
+	}
+
+	if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
+		rc = msm_isp_stats_wait_for_cfg_done(vfe_dev);
+	} else {
+		vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
+			vfe_dev, stats_mask, stream_cfg_cmd->enable);
+		for (i = 0; i < num_stats_comp_mask; i++) {
+			vfe_dev->hw_info->vfe_ops.stats_ops.cfg_comp_mask(
+			   vfe_dev, comp_stats_mask[i], 0);
+		}
+	}
+
+	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+		idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
+
+		if (idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+			pr_err("%s Invalid stats index %d", __func__, idx);
+			return -EINVAL;
+		}
+
+		stream_info = &stats_data->stream_info[idx];
+		msm_isp_deinit_stats_ping_pong_reg(vfe_dev, stream_info);
+	}
+	return rc;
+}
+
+int msm_isp_cfg_stats_stream(struct vfe_device *vfe_dev, void *arg)
+{
+	int rc = 0;
+	struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd = arg;
+
+	if (vfe_dev->stats_data.num_active_stream == 0)
+		vfe_dev->hw_info->vfe_ops.stats_ops.cfg_ub(vfe_dev);
+
+	if (stream_cfg_cmd->num_streams > MSM_ISP_STATS_MAX) {
+		pr_err("%s invalid num_streams %d\n", __func__,
+			stream_cfg_cmd->num_streams);
+		return -EINVAL;
+	}
+
+	if (stream_cfg_cmd->enable) {
+		msm_isp_stats_update_cgc_override(vfe_dev, stream_cfg_cmd);
+
+		rc = msm_isp_start_stats_stream(vfe_dev, stream_cfg_cmd);
+	} else {
+		rc = msm_isp_stop_stats_stream(vfe_dev, stream_cfg_cmd);
+
+		msm_isp_stats_update_cgc_override(vfe_dev, stream_cfg_cmd);
+	}
+
+	return rc;
+}
+
+int msm_isp_update_stats_stream(struct vfe_device *vfe_dev, void *arg)
+{
+	int rc = 0, i;
+	struct msm_vfe_stats_stream *stream_info;
+	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+	struct msm_vfe_axi_stream_update_cmd *update_cmd = arg;
+	struct msm_vfe_axi_stream_cfg_update_info *update_info = NULL;
+
+	/* Validate request */
+	for (i = 0; i < update_cmd->num_streams; i++) {
+		update_info = &update_cmd->update_info[i];
+		/* Check array reference bounds */
+		if (STATS_IDX(update_info->stream_handle)
+			> vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+			pr_err("%s: stats idx %d out of bound!", __func__,
+				STATS_IDX(update_info->stream_handle));
+			return -EINVAL;
+		}
+	}
+
+	for (i = 0; i < update_cmd->num_streams; i++) {
+		update_info = &update_cmd->update_info[i];
+		stream_info = &stats_data->stream_info[
+				STATS_IDX(update_info->stream_handle)];
+		if (stream_info->stream_handle !=
+			update_info->stream_handle) {
+			pr_err("%s: stats stream handle %x %x mismatch!\n",
+				__func__, stream_info->stream_handle,
+				update_info->stream_handle);
+			continue;
+		}
+
+		switch (update_cmd->update_type) {
+		case UPDATE_STREAM_STATS_FRAMEDROP_PATTERN: {
+			uint32_t framedrop_period =
+				msm_isp_get_framedrop_period(
+				   update_info->skip_pattern);
+			if (update_info->skip_pattern == SKIP_ALL)
+				stream_info->framedrop_pattern = 0x0;
+			else
+				stream_info->framedrop_pattern = 0x1;
+			stream_info->framedrop_period = framedrop_period - 1;
+			if (stream_info->init_stats_frame_drop == 0)
+				vfe_dev->hw_info->vfe_ops.stats_ops.cfg_wm_reg(
+					vfe_dev, stream_info);
+			break;
+		}
+
+		default:
+			pr_err("%s: Invalid update type\n", __func__);
+			return -EINVAL;
+		}
+	}
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util_32.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util_32.h
new file mode 100644
index 0000000..11c0c28
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util_32.h
@@ -0,0 +1,29 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MSM_ISP_STATS_UTIL_H__
+#define __MSM_ISP_STATS_UTIL_H__
+
+#include "msm_isp_32.h"
+#define STATS_IDX(idx) (idx & 0xFF)
+
+void msm_isp_process_stats_irq(struct vfe_device *vfe_dev,
+	uint32_t irq_status0, uint32_t irq_status1,
+	struct msm_isp_timestamp *ts);
+int msm_isp_stats_create_stream(struct vfe_device *vfe_dev,
+	struct msm_vfe_stats_stream_request_cmd *stream_req_cmd);
+void msm_isp_stats_stream_update(struct vfe_device *vfe_dev);
+int msm_isp_cfg_stats_stream(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_update_stats_stream(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_release_stats_stream(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_request_stats_stream(struct vfe_device *vfe_dev, void *arg);
+void msm_isp_update_stats_framedrop_reg(struct vfe_device *vfe_dev);
+#endif /* __MSM_ISP_STATS_UTIL_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
index b589afa..e671122 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
@@ -410,8 +410,12 @@
 			pr_err("%s: Fetch engine config failed\n", __func__);
 			return -EINVAL;
 		}
-		for (i = 0; i < stream_info->num_planes; i++)
+		for (i = 0; i < stream_info->num_planes; i++) {
+			vfe_dev->hw_info->vfe_ops.axi_ops.enable_wm(
+				vfe_dev->vfe_base,
+				stream_info->wm[vfe_idx][i], 1);
 			wm_reload_mask |= (1 << stream_info->wm[vfe_idx][i]);
+		}
 		vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev,
 			VFE_SRC_MAX);
 		vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util_32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util_32.c
new file mode 100644
index 0000000..f6ca41c
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util_32.c
@@ -0,0 +1,1939 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <media/v4l2-subdev.h>
+#include <linux/ratelimit.h>
+
+#include "msm.h"
+#include "msm_isp_util_32.h"
+#include "msm_isp_axi_util_32.h"
+#include "msm_isp_stats_util_32.h"
+#include "msm_camera_io_util.h"
+#include "cam_smmu_api.h"
+
+#ifndef UINT16_MAX
+#define UINT16_MAX             (65535U)
+#endif
+
+#define MAX_ISP_V4l2_EVENTS 100
+#define MAX_ISP_REG_LIST 100
+static DEFINE_MUTEX(bandwidth_mgr_mutex);
+static struct msm_isp_bandwidth_mgr isp_bandwidth_mgr;
+
+static uint64_t msm_isp_cpp_clk_rate;
+
+#define VFE40_8974V2_VERSION 0x1001001A
+static struct msm_bus_vectors msm_isp_init_vectors[] = {
+	{
+		.src = MSM_BUS_MASTER_VFE,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 0,
+		.ib  = 0,
+	},
+};
+
+static struct msm_bus_vectors msm_isp_ping_vectors[] = {
+	{
+		.src = MSM_BUS_MASTER_VFE,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = MSM_ISP_MIN_AB,
+		.ib  = MSM_ISP_MIN_IB,
+	},
+};
+
+static struct msm_bus_vectors msm_isp_pong_vectors[] = {
+	{
+		.src = MSM_BUS_MASTER_VFE,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = MSM_ISP_MIN_AB,
+		.ib  = MSM_ISP_MIN_IB,
+	},
+};
+
+static struct msm_bus_paths msm_isp_bus_client_config[] = {
+	{
+		ARRAY_SIZE(msm_isp_init_vectors),
+		msm_isp_init_vectors,
+	},
+	{
+		ARRAY_SIZE(msm_isp_ping_vectors),
+		msm_isp_ping_vectors,
+	},
+	{
+		ARRAY_SIZE(msm_isp_pong_vectors),
+		msm_isp_pong_vectors,
+	},
+};
+
+static struct msm_bus_scale_pdata msm_isp_bus_client_pdata = {
+	msm_isp_bus_client_config,
+	NULL,
+	ARRAY_SIZE(msm_isp_bus_client_config),
+	.name = "msm_camera_isp",
+	0,
+};
+
+
+void msm_camera_io_dump_2(void __iomem *addr, int size)
+{
+	char line_str[128], *p_str;
+	int i;
+	u32 __iomem *p = (u32 __iomem *) addr;
+	u32 data;
+
+	pr_err("%s: %pK %d\n", __func__, addr, size);
+	line_str[0] = '\0';
+	p_str = line_str;
+	for (i = 0; i < size/4; i++) {
+		if (i % 4 == 0) {
+#ifdef CONFIG_COMPAT
+			snprintf(p_str, 20, "%016lx: ", (unsigned long) p);
+			p_str += 18;
+#else
+			snprintf(p_str, 12, "%08lx: ", (unsigned long) p);
+			p_str += 10;
+#endif
+		}
+		data = readl_relaxed(p++);
+		snprintf(p_str, 12, "%08x ", data);
+		p_str += 9;
+		if ((i + 1) % 4 == 0) {
+			pr_err("%s\n", line_str);
+			line_str[0] = '\0';
+			p_str = line_str;
+		}
+	}
+	if (line_str[0] != '\0')
+		pr_err("%s\n", line_str);
+}
+
+void msm_isp_print_fourcc_error(const char *origin, uint32_t fourcc_format)
+{
+	int i;
+	char text[5];
+
+	text[4] = '\0';
+	for (i = 0; i < 4; i++) {
+		text[i] = (char)(((fourcc_format) >> (i * 8)) & 0xFF);
+		if ((text[i] < '0') || (text[i] > 'z')) {
+			pr_err("%s: Invalid output format %d (unprintable)\n",
+				origin, fourcc_format);
+			return;
+		}
+	}
+	pr_err("%s: Invalid output format %s\n",
+		origin, text);
+}
+
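+/*
+ * Reference-counted bus bandwidth setup: the first caller registers the
+ * bus scale client and votes for the minimal active vector; later callers
+ * only mark their client slot active.
+ */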
+int msm_isp_init_bandwidth_mgr(enum msm_isp_hw_client client)
+{
+	int rc = 0;
+
+	mutex_lock(&bandwidth_mgr_mutex);
+	isp_bandwidth_mgr.client_info[client].active = 1;
+	if (isp_bandwidth_mgr.use_count++) {
+		mutex_unlock(&bandwidth_mgr_mutex);
+		return rc;
+	}
+	isp_bandwidth_mgr.bus_client =
+		msm_bus_scale_register_client(&msm_isp_bus_client_pdata);
+	if (!isp_bandwidth_mgr.bus_client) {
+		pr_err("%s: client register failed\n", __func__);
+		mutex_unlock(&bandwidth_mgr_mutex);
+		return -EINVAL;
+	}
+
+	isp_bandwidth_mgr.bus_vector_active_idx = 1;
+	msm_bus_scale_client_update_request(
+	   isp_bandwidth_mgr.bus_client,
+	   isp_bandwidth_mgr.bus_vector_active_idx);
+
+	mutex_unlock(&bandwidth_mgr_mutex);
+	return 0;
+}
+
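+/*
+ * Re-vote ISP bus bandwidth: sum the ab/ib of every active client into
+ * the alternate bus vector, issue the update request and record it in
+ * the request history.
+ */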
+int msm_isp_update_bandwidth(enum msm_isp_hw_client client,
+	uint64_t ab, uint64_t ib)
+{
+	int i;
+	struct msm_bus_paths *path;
+
+	mutex_lock(&bandwidth_mgr_mutex);
+	if (!isp_bandwidth_mgr.use_count ||
+		!isp_bandwidth_mgr.bus_client) {
+		pr_err("%s:error bandwidth manager inactive use_cnt:%d bus_clnt:%d\n",
+			__func__, isp_bandwidth_mgr.use_count,
+			isp_bandwidth_mgr.bus_client);
+		mutex_unlock(&bandwidth_mgr_mutex);
+		return -EINVAL;
+	}
+
+	isp_bandwidth_mgr.client_info[client].ab = ab;
+	isp_bandwidth_mgr.client_info[client].ib = ib;
+	ALT_VECTOR_IDX(isp_bandwidth_mgr.bus_vector_active_idx);
+	path =
+		&(msm_isp_bus_client_pdata.usecase[
+		  isp_bandwidth_mgr.bus_vector_active_idx]);
+	path->vectors[0].ab = 0;
+	path->vectors[0].ib = 0;
+	for (i = 0; i < MAX_ISP_CLIENT; i++) {
+		if (isp_bandwidth_mgr.client_info[i].active) {
+			path->vectors[0].ab +=
+				isp_bandwidth_mgr.client_info[i].ab;
+			path->vectors[0].ib +=
+				isp_bandwidth_mgr.client_info[i].ib;
+		}
+	}
+	ISP_DBG("%s: Total AB = %llu IB = %llu\n", __func__,
+			path->vectors[0].ab, path->vectors[0].ib);
+	msm_bus_scale_client_update_request(isp_bandwidth_mgr.bus_client,
+		isp_bandwidth_mgr.bus_vector_active_idx);
+	/* Insert into circular buffer */
+	msm_isp_update_req_history(isp_bandwidth_mgr.bus_client,
+		path->vectors[0].ab,
+		path->vectors[0].ib,
+		isp_bandwidth_mgr.client_info,
+		sched_clock());
+	mutex_unlock(&bandwidth_mgr_mutex);
+	return 0;
+}
+
+void msm_isp_deinit_bandwidth_mgr(enum msm_isp_hw_client client)
+{
+	if (client >= MAX_ISP_CLIENT) {
+		pr_err("invalid Client id %d", client);
+		return;
+	}
+	mutex_lock(&bandwidth_mgr_mutex);
+	memset(&isp_bandwidth_mgr.client_info[client], 0,
+		   sizeof(struct msm_isp_bandwidth_info));
+	if (--isp_bandwidth_mgr.use_count) {
+		mutex_unlock(&bandwidth_mgr_mutex);
+		return;
+	}
+
+	if (!isp_bandwidth_mgr.bus_client) {
+		pr_err("%s:%d error: bus client invalid\n", __func__, __LINE__);
+		mutex_unlock(&bandwidth_mgr_mutex);
+		return;
+	}
+
+	msm_bus_scale_client_update_request(
+	   isp_bandwidth_mgr.bus_client, 0);
+	msm_bus_scale_unregister_client(isp_bandwidth_mgr.bus_client);
+	isp_bandwidth_mgr.bus_client = 0;
+	mutex_unlock(&bandwidth_mgr_mutex);
+}
+
+void msm_isp_util_get_bandwidth_stats(struct vfe_device *vfe_dev,
+				      struct msm_isp_statistics *stats)
+{
+	stats->isp_vfe0_active = isp_bandwidth_mgr.client_info[ISP_VFE0].active;
+	stats->isp_vfe0_ab = isp_bandwidth_mgr.client_info[ISP_VFE0].ab;
+	stats->isp_vfe0_ib = isp_bandwidth_mgr.client_info[ISP_VFE0].ib;
+
+	stats->isp_vfe1_active = isp_bandwidth_mgr.client_info[ISP_VFE1].active;
+	stats->isp_vfe1_ab = isp_bandwidth_mgr.client_info[ISP_VFE1].ab;
+	stats->isp_vfe1_ib = isp_bandwidth_mgr.client_info[ISP_VFE1].ib;
+
+	stats->isp_cpp_active = isp_bandwidth_mgr.client_info[ISP_CPP].active;
+	stats->isp_cpp_ab = isp_bandwidth_mgr.client_info[ISP_CPP].ab;
+	stats->isp_cpp_ib = isp_bandwidth_mgr.client_info[ISP_CPP].ib;
+	stats->last_overflow_ab = vfe_dev->msm_isp_last_overflow_ab;
+	stats->last_overflow_ib = vfe_dev->msm_isp_last_overflow_ib;
+	stats->vfe_clk_rate = vfe_dev->msm_isp_vfe_clk_rate;
+	stats->cpp_clk_rate = msm_isp_cpp_clk_rate;
+}
+
+void msm_isp_util_update_last_overflow_ab_ib(struct vfe_device *vfe_dev)
+{
+	struct msm_bus_paths *path;
+
+	path = &(msm_isp_bus_client_pdata.usecase[
+		  isp_bandwidth_mgr.bus_vector_active_idx]);
+	vfe_dev->msm_isp_last_overflow_ab = path->vectors[0].ab;
+	vfe_dev->msm_isp_last_overflow_ib = path->vectors[0].ib;
+}
+
+void msm_isp_util_update_clk_rate(long clock_rate)
+{
+	msm_isp_cpp_clk_rate = clock_rate;
+}
+
+uint32_t msm_isp_get_framedrop_period(
+	enum msm_vfe_frame_skip_pattern frame_skip_pattern)
+{
+	switch (frame_skip_pattern) {
+	case NO_SKIP:
+	case EVERY_2FRAME:
+	case EVERY_3FRAME:
+	case EVERY_4FRAME:
+	case EVERY_5FRAME:
+	case EVERY_6FRAME:
+	case EVERY_7FRAME:
+	case EVERY_8FRAME:
+		return frame_skip_pattern + 1;
+	case EVERY_16FRAME:
+		return 16;
+	case EVERY_32FRAME:
+		return 32;
+	case SKIP_ALL:
+		return 1;
+	default:
+		return 1;
+	}
+	return 1;
+}
+
+int msm_isp_get_clk_info(struct vfe_device *vfe_dev,
+	struct platform_device *pdev, struct msm_cam_clk_info *vfe_clk_info)
+{
+	int i, count, rc;
+	uint32_t rates[VFE_CLK_INFO_MAX];
+
+	struct device_node *of_node;
+
+	of_node = pdev->dev.of_node;
+
+	count = of_property_count_strings(of_node, "clock-names");
+
+	ISP_DBG("count = %d\n", count);
+	if (count <= 0) {
+		pr_err("no clocks found in device tree, count=%d", count);
+		return 0;
+	}
+
+	if (count > VFE_CLK_INFO_MAX) {
+		pr_err("invalid count=%d, max is %d\n", count,
+			VFE_CLK_INFO_MAX);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node, "clock-names",
+				i, &(vfe_clk_info[i].clk_name));
+		ISP_DBG("clock-names[%d] = %s\n", i, vfe_clk_info[i].clk_name);
+		if (rc < 0) {
+			pr_err("%s failed %d\n", __func__, __LINE__);
+			return rc;
+		}
+	}
+	rc = of_property_read_u32_array(of_node, "qcom,clock-rates",
+		rates, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		return rc;
+	}
+	for (i = 0; i < count; i++) {
+		vfe_clk_info[i].clk_rate =
+			(rates[i] == 0) ? (long)-1 : rates[i];
+		ISP_DBG("clk_rate[%d] = %ld\n", i, vfe_clk_info[i].clk_rate);
+	}
+	vfe_dev->num_clk = count;
+	return 0;
+}
+
+void msm_isp_get_timestamp(struct msm_isp_timestamp *time_stamp,
+	struct vfe_device *vfe_dev)
+{
+	struct timespec ts;
+
+	do_gettimeofday(&(time_stamp->event_time));
+	if (vfe_dev->vt_enable) {
+		msm_isp_get_avtimer_ts(time_stamp);
+		time_stamp->buf_time.tv_sec    = time_stamp->vt_time.tv_sec;
+		time_stamp->buf_time.tv_usec   = time_stamp->vt_time.tv_usec;
+	} else {
+		get_monotonic_boottime(&ts);
+		time_stamp->buf_time.tv_sec    = ts.tv_sec;
+		time_stamp->buf_time.tv_usec   = ts.tv_nsec/1000;
+	}
+}
+
+int msm_isp_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+	struct v4l2_event_subscription *sub)
+{
+	struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
+	int rc = 0;
+
+	rc = v4l2_event_subscribe(fh, sub, MAX_ISP_V4l2_EVENTS, NULL);
+	if (rc == 0) {
+		if (sub->type == V4L2_EVENT_ALL) {
+			int i;
+
+			vfe_dev->axi_data.event_mask = 0;
+			for (i = 0; i < ISP_EVENT_MAX; i++)
+				vfe_dev->axi_data.event_mask |= (1 << i);
+		} else {
+			int event_idx = sub->type - ISP_EVENT_BASE;
+
+			vfe_dev->axi_data.event_mask |= (1 << event_idx);
+		}
+	}
+	return rc;
+}
+
+int msm_isp_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+	struct v4l2_event_subscription *sub)
+{
+	struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
+	int rc = 0;
+
+	rc = v4l2_event_unsubscribe(fh, sub);
+	if (sub->type == V4L2_EVENT_ALL) {
+		vfe_dev->axi_data.event_mask = 0;
+	} else {
+		int event_idx = sub->type - ISP_EVENT_BASE;
+
+		vfe_dev->axi_data.event_mask &= ~(1 << event_idx);
+	}
+	return rc;
+}
+
+static int msm_isp_get_max_clk_rate(struct vfe_device *vfe_dev, long *rate)
+{
+	int           clk_idx = 0;
+	unsigned long max_value = ~0;
+	long          round_rate = 0;
+
+	if (!vfe_dev || !rate) {
+		pr_err("%s:%d failed: vfe_dev %pK rate %pK\n",
+			__func__, __LINE__,	vfe_dev, rate);
+		return -EINVAL;
+	}
+
+	*rate = 0;
+	if (!vfe_dev->hw_info) {
+		pr_err("%s:%d failed: vfe_dev->hw_info %pK\n", __func__,
+			__LINE__, vfe_dev->hw_info);
+		return -EINVAL;
+	}
+
+	clk_idx = vfe_dev->hw_info->vfe_clk_idx;
+	if (clk_idx >= vfe_dev->num_clk) {
+		pr_err("%s:%d failed: clk_idx %d max array size %d\n",
+			__func__, __LINE__, clk_idx,
+			vfe_dev->num_clk);
+		return -EINVAL;
+	}
+
+	round_rate = clk_round_rate(vfe_dev->vfe_clk[clk_idx], max_value);
+	if (round_rate < 0) {
+		pr_err("%s: Invalid vfe clock rate\n", __func__);
+		return -EINVAL;
+	}
+
+	*rate = round_rate;
+	return 0;
+}
+
+static int msm_isp_set_clk_rate(struct vfe_device *vfe_dev, long *rate)
+{
+	int rc = 0;
+	int clk_idx = vfe_dev->hw_info->vfe_clk_idx;
+	long round_rate =
+		clk_round_rate(vfe_dev->vfe_clk[clk_idx], *rate);
+	if (round_rate < 0) {
+		pr_err("%s: Invalid vfe clock rate\n", __func__);
+		return round_rate;
+	}
+
+	rc = clk_set_rate(vfe_dev->vfe_clk[clk_idx], round_rate);
+	if (rc < 0) {
+		pr_err("%s: Vfe set rate error\n", __func__);
+		return rc;
+	}
+	*rate = round_rate;
+	vfe_dev->msm_isp_vfe_clk_rate = round_rate;
+	return 0;
+}
+
+void msm_isp_fetch_engine_done_notify(struct vfe_device *vfe_dev,
+	struct msm_vfe_fetch_engine_info *fetch_engine_info)
+{
+	struct msm_isp32_event_data fe_rd_done_event;
+
+	if (!fetch_engine_info->is_busy)
+		return;
+	memset(&fe_rd_done_event, 0, sizeof(struct msm_isp32_event_data));
+	fe_rd_done_event.frame_id =
+		vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+	fe_rd_done_event.u.buf_done.session_id = fetch_engine_info->session_id;
+	fe_rd_done_event.u.buf_done.stream_id = fetch_engine_info->stream_id;
+	fe_rd_done_event.u.buf_done.handle = fetch_engine_info->bufq_handle;
+	fe_rd_done_event.u.buf_done.buf_idx = fetch_engine_info->buf_idx;
+	ISP_DBG("%s:VFE%d ISP_EVENT_FE_READ_DONE buf_idx %d\n",
+		__func__, vfe_dev->pdev->id, fetch_engine_info->buf_idx);
+	fetch_engine_info->is_busy = 0;
+	msm_isp_send_event(vfe_dev, ISP_EVENT_FE_READ_DONE, &fe_rd_done_event);
+}
+
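+/*
+ * Configure the PIX input path: record the pixel clock, input mux and
+ * input format, scale the VFE clock accordingly, then program the
+ * input mux (CAMIF or fetch engine) in hardware.
+ */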
+static int msm_isp_cfg_pix(struct vfe_device *vfe_dev,
+	struct msm_vfe_input_cfg *input_cfg)
+{
+	int rc = 0;
+
+	if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
+		pr_err("%s: src %d path is active\n", __func__, VFE_PIX_0);
+		return -EINVAL;
+	}
+
+	vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_clock =
+		input_cfg->input_pix_clk;
+	vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux =
+		input_cfg->d.pix_cfg.input_mux;
+	vfe_dev->axi_data.src_info[VFE_PIX_0].input_format =
+		input_cfg->d.pix_cfg.input_format;
+
+	rc = msm_isp_set_clk_rate(vfe_dev,
+		&vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_clock);
+	if (rc < 0) {
+		pr_err("%s: clock set rate failed\n", __func__);
+		return rc;
+	}
+
+	ISP_DBG("%s: input mux is %d CAMIF %d io_format 0x%x\n", __func__,
+		input_cfg->d.pix_cfg.input_mux, CAMIF,
+		input_cfg->d.pix_cfg.input_format);
+
+	if (input_cfg->d.pix_cfg.input_mux == CAMIF) {
+		vfe_dev->axi_data.src_info[VFE_PIX_0].width =
+			input_cfg->d.pix_cfg.camif_cfg.pixels_per_line;
+	} else if (input_cfg->d.pix_cfg.input_mux == EXTERNAL_READ) {
+		vfe_dev->axi_data.src_info[VFE_PIX_0].width =
+			input_cfg->d.pix_cfg.fetch_engine_cfg.buf_stride;
+	}
+	vfe_dev->hw_info->vfe_ops.core_ops.cfg_input_mux(
+			vfe_dev, &input_cfg->d.pix_cfg);
+	return rc;
+}
+
+static int msm_isp_cfg_rdi(struct vfe_device *vfe_dev,
+	struct msm_vfe_input_cfg *input_cfg)
+{
+	int rc = 0;
+
+	if (vfe_dev->axi_data.src_info[input_cfg->input_src].active) {
+		pr_err("%s: RAW%d path is active\n", __func__,
+			   input_cfg->input_src - VFE_RAW_0);
+		return -EINVAL;
+	}
+
+	vfe_dev->axi_data.src_info[input_cfg->input_src].pixel_clock =
+		input_cfg->input_pix_clk;
+	vfe_dev->hw_info->vfe_ops.core_ops.cfg_rdi_reg(
+		vfe_dev, &input_cfg->d.rdi_cfg, input_cfg->input_src);
+	return rc;
+}
+
+int msm_isp_cfg_input(struct vfe_device *vfe_dev, void *arg)
+{
+	int rc = 0;
+	struct msm_vfe_input_cfg *input_cfg = arg;
+
+	switch (input_cfg->input_src) {
+	case VFE_PIX_0:
+		rc = msm_isp_cfg_pix(vfe_dev, input_cfg);
+		break;
+	case VFE_RAW_0:
+	case VFE_RAW_1:
+	case VFE_RAW_2:
+		rc = msm_isp_cfg_rdi(vfe_dev, input_cfg);
+		break;
+	default:
+		pr_err("%s: Invalid input source\n", __func__);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
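+/*
+ * Process a user-supplied chain of register configuration commands.
+ * Each node is copied from userspace and handed to msm_isp_proc_cmd();
+ * the chain length is capped at MAX_ISP_REG_LIST nodes.
+ */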
+static int msm_isp_proc_cmd_list_unlocked(struct vfe_device *vfe_dev, void *arg)
+{
+	int rc = 0;
+	uint32_t count = 0;
+	struct msm_vfe_cfg_cmd_list *proc_cmd =
+		(struct msm_vfe_cfg_cmd_list *)arg;
+	struct msm_vfe_cfg_cmd_list cmd, cmd_next;
+	struct msm_vfe_cfg_cmd2 cfg_cmd;
+
+	if (!vfe_dev || !arg) {
+		pr_err("%s:%d failed: vfe_dev %pK arg %pK", __func__, __LINE__,
+			vfe_dev, arg);
+		return -EINVAL;
+	}
+
+	rc = msm_isp_proc_cmd(vfe_dev, &proc_cmd->cfg_cmd);
+	if (rc < 0)
+		pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
+
+	cmd = *proc_cmd;
+
+	while (cmd.next) {
+		if (cmd.next_size != sizeof(struct msm_vfe_cfg_cmd_list)) {
+			pr_err("%s:%d failed: next size %u != expected %zu\n",
+				__func__, __LINE__, cmd.next_size,
+				sizeof(struct msm_vfe_cfg_cmd_list));
+			break;
+		}
+		if (++count >= MAX_ISP_REG_LIST) {
+			pr_err("%s:%d Error exceeding the max register count:%u\n",
+				__func__, __LINE__, count);
+			rc = -EFAULT;
+			break;
+		}
+		if (copy_from_user(&cmd_next, (void __user *)cmd.next,
+			sizeof(struct msm_vfe_cfg_cmd_list))) {
+			rc = -EFAULT;
+			break;
+		}
+
+		cfg_cmd = cmd_next.cfg_cmd;
+
+		rc = msm_isp_proc_cmd(vfe_dev, &cfg_cmd);
+		if (rc < 0)
+			pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
+
+		cmd = cmd_next;
+	}
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
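+/*
+ * 32-bit compat layer: mirror the cfg command structures with
+ * compat_caddr_t pointers so 32-bit userspace can issue the same
+ * register configuration ioctls.
+ */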
+struct msm_vfe_cfg_cmd2_32 {
+	uint16_t num_cfg;
+	uint16_t cmd_len;
+	compat_caddr_t cfg_data;
+	compat_caddr_t cfg_cmd;
+};
+
+struct msm_vfe_cfg_cmd_list_32 {
+	struct msm_vfe_cfg_cmd2_32   cfg_cmd;
+	compat_caddr_t               next;
+	uint32_t                     next_size;
+};
+
+#define VIDIOC_MSM_VFE_REG_CFG_COMPAT \
+	_IOWR('V', BASE_VIDIOC_PRIVATE, struct msm_vfe_cfg_cmd2_32)
+#define VIDIOC_MSM_VFE_REG_LIST_CFG_COMPAT \
+	_IOWR('V', BASE_VIDIOC_PRIVATE+14, struct msm_vfe_cfg_cmd_list_32)
+
+static void msm_isp_compat_to_proc_cmd(struct msm_vfe_cfg_cmd2 *proc_cmd,
+	struct msm_vfe_cfg_cmd2_32 *proc_cmd_ptr32)
+{
+	proc_cmd->num_cfg = proc_cmd_ptr32->num_cfg;
+	proc_cmd->cmd_len = proc_cmd_ptr32->cmd_len;
+	proc_cmd->cfg_data = compat_ptr(proc_cmd_ptr32->cfg_data);
+	proc_cmd->cfg_cmd = compat_ptr(proc_cmd_ptr32->cfg_cmd);
+}
+
+static int msm_isp_proc_cmd_list_compat(struct vfe_device *vfe_dev, void *arg)
+{
+	int rc = 0;
+	uint32_t count = 0;
+	struct msm_vfe_cfg_cmd_list_32 *proc_cmd =
+		(struct msm_vfe_cfg_cmd_list_32 *)arg;
+	struct msm_vfe_cfg_cmd_list_32 cmd, cmd_next;
+	struct msm_vfe_cfg_cmd2 current_cmd;
+
+	if (!vfe_dev || !arg) {
+		pr_err("%s:%d failed: vfe_dev %pK arg %pK", __func__, __LINE__,
+			vfe_dev, arg);
+		return -EINVAL;
+	}
+	msm_isp_compat_to_proc_cmd(&current_cmd, &proc_cmd->cfg_cmd);
+	rc = msm_isp_proc_cmd(vfe_dev, &current_cmd);
+	if (rc < 0)
+		pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
+
+	cmd = *proc_cmd;
+
+	while (compat_ptr(cmd.next) != NULL) {
+		if (cmd.next_size != sizeof(struct msm_vfe_cfg_cmd_list_32)) {
+			pr_err("%s:%d failed: next size %u != expected %zu\n",
+				__func__, __LINE__, cmd.next_size,
+				sizeof(struct msm_vfe_cfg_cmd_list_32));
+			break;
+		}
+		if (++count >= MAX_ISP_REG_LIST) {
+			pr_err("%s:%d Error exceeding the max register count:%u\n",
+				__func__, __LINE__, count);
+			rc = -EFAULT;
+			break;
+		}
+		if (copy_from_user(&cmd_next, compat_ptr(cmd.next),
+			sizeof(struct msm_vfe_cfg_cmd_list_32))) {
+			rc = -EFAULT;
+			break;
+		}
+
+		msm_isp_compat_to_proc_cmd(&current_cmd, &cmd_next.cfg_cmd);
+		rc = msm_isp_proc_cmd(vfe_dev, &current_cmd);
+		if (rc < 0)
+			pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
+
+		cmd = cmd_next;
+	}
+	return rc;
+}
+
+static int msm_isp_proc_cmd_list(struct vfe_device *vfe_dev, void *arg)
+{
+	if (is_compat_task())
+		return msm_isp_proc_cmd_list_compat(vfe_dev, arg);
+	else
+		return msm_isp_proc_cmd_list_unlocked(vfe_dev, arg);
+}
+#else /* CONFIG_COMPAT */
+static int msm_isp_proc_cmd_list(struct vfe_device *vfe_dev, void *arg)
+{
+	return msm_isp_proc_cmd_list_unlocked(vfe_dev, arg);
+}
+#endif /* CONFIG_COMPAT */
+
+
+static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	long rc = 0;
+	struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
+
+	if (!vfe_dev || !vfe_dev->vfe_base) {
+		pr_err("%s:%d failed: invalid params %pK\n",
+			__func__, __LINE__, vfe_dev);
+		if (vfe_dev)
+			pr_err("%s:%d failed %pK\n", __func__,
+				__LINE__, vfe_dev->vfe_base);
+		return -EINVAL;
+	}
+
+	/* Use the real-time mutex for hard real-time ioctls such as
+	 * buffer operations and register updates.
+	 * Use the core mutex for ioctls that can take longer to
+	 * complete, such as starting/stopping ISP streams, which
+	 * block until the hardware starts/stops streaming.
+	 */
+	ISP_DBG("%s cmd: %d\n", __func__, _IOC_TYPE(cmd));
+	switch (cmd) {
+	case VIDIOC_MSM_VFE_REG_CFG: {
+		mutex_lock(&vfe_dev->realtime_mutex);
+		rc = msm_isp_proc_cmd(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->realtime_mutex);
+		break;
+	}
+	case VIDIOC_MSM_VFE_REG_LIST_CFG: {
+		mutex_lock(&vfe_dev->realtime_mutex);
+		rc = msm_isp_proc_cmd_list(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->realtime_mutex);
+		break;
+	}
+	case VIDIOC_MSM_ISP_REQUEST_BUF:
+	case VIDIOC_MSM_ISP_ENQUEUE_BUF:
+	case VIDIOC_MSM_ISP_RELEASE_BUF: {
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_proc_buf_cmd(vfe_dev->buf_mgr, cmd, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	}
+	case VIDIOC_MSM_ISP32_REQUEST_STREAM:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_request_axi_stream(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case VIDIOC_MSM_ISP_RELEASE_STREAM:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_release_axi_stream(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case VIDIOC_MSM_ISP_CFG_STREAM:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_cfg_axi_stream(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case VIDIOC_MSM_ISP_AXI_HALT:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_axi_halt(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case VIDIOC_MSM_ISP_AXI_RESET:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_axi_reset(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case VIDIOC_MSM_ISP_AXI_RESTART:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_axi_restart(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case VIDIOC_MSM_ISP_INPUT_CFG:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_cfg_input(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case VIDIOC_MSM_ISP_FETCH_ENG_START:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = vfe_dev->hw_info->vfe_ops.core_ops.
+			start_fetch_eng(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case VIDIOC_MSM_ISP_REG_UPDATE_CMD:
+		if (arg) {
+			enum msm_vfe_input_src frame_src =
+				*((enum msm_vfe_input_src *)arg);
+			vfe_dev->hw_info->vfe_ops.core_ops.
+				reg_update(vfe_dev, (1 << frame_src));
+			vfe_dev->axi_data.src_info[frame_src].last_updt_frm_id =
+			  vfe_dev->axi_data.src_info[frame_src].frame_id;
+		}
+		break;
+	case VIDIOC_MSM_ISP_SET_SRC_STATE:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_set_src_state(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case VIDIOC_MSM_ISP_REQUEST_STATS_STREAM:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_request_stats_stream(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case VIDIOC_MSM_ISP_RELEASE_STATS_STREAM:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_release_stats_stream(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case VIDIOC_MSM_ISP_CFG_STATS_STREAM:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_cfg_stats_stream(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case VIDIOC_MSM_ISP_UPDATE_STATS_STREAM:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_update_stats_stream(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case VIDIOC_MSM_ISP_UPDATE_STREAM:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_update_axi_stream(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case VIDIOC_MSM_ISP_SMMU_ATTACH:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_smmu_attach(vfe_dev->buf_mgr, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case MSM_SD_NOTIFY_FREEZE:
+		vfe_dev->isp_sof_debug = 0;
+		break;
+	case VIDIOC_MSM_ISP_BUF_DONE:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_user_buf_done(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case MSM_SD_SHUTDOWN:
+		while (vfe_dev->vfe_open_cnt != 0)
+			msm_isp_close_node(sd, NULL);
+		break;
+
+	default:
+		pr_err_ratelimited("%s: Invalid ISP command\n", __func__);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+
+#ifdef CONFIG_COMPAT
+static long msm_isp_ioctl_compat(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
+	long rc = 0;
+
+	if (!vfe_dev || !vfe_dev->vfe_base) {
+		pr_err("%s:%d failed: invalid params %pK\n",
+			__func__, __LINE__, vfe_dev);
+		if (vfe_dev)
+			pr_err("%s:%d failed %pK\n", __func__,
+				__LINE__, vfe_dev->vfe_base);
+		return -EINVAL;
+	}
+
+	switch (cmd) {
+	case VIDIOC_MSM_VFE_REG_CFG_COMPAT: {
+		struct msm_vfe_cfg_cmd2 proc_cmd;
+
+		mutex_lock(&vfe_dev->realtime_mutex);
+		msm_isp_compat_to_proc_cmd(&proc_cmd,
+			(struct msm_vfe_cfg_cmd2_32 *) arg);
+		rc = msm_isp_proc_cmd(vfe_dev, &proc_cmd);
+		mutex_unlock(&vfe_dev->realtime_mutex);
+		break;
+	}
+	case VIDIOC_MSM_VFE_REG_LIST_CFG_COMPAT: {
+		mutex_lock(&vfe_dev->realtime_mutex);
+		rc = msm_isp_proc_cmd_list(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->realtime_mutex);
+		break;
+	}
+	default:
+		return msm_isp_ioctl_unlocked(sd, cmd, arg);
+	}
+
+	return rc;
+}
+
+long msm_isp_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	return msm_isp_ioctl_compat(sd, cmd, arg);
+}
+#else /* CONFIG_COMPAT */
+long msm_isp_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	return msm_isp_ioctl_unlocked(sd, cmd, arg);
+}
+#endif /* CONFIG_COMPAT */
+
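+/*
+ * Validate a single register configuration command (offset, length and
+ * alignment checks against the VFE register space and the user buffer)
+ * and then execute it: register read/write, masked write, DMI table
+ * read/write, or query commands such as GET_MAX_CLK_RATE.
+ */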
+static int msm_isp_send_hw_cmd(struct vfe_device *vfe_dev,
+	struct msm_vfe_reg_cfg_cmd *reg_cfg_cmd,
+	uint32_t *cfg_data, uint32_t cmd_len)
+{
+	if (!vfe_dev || !reg_cfg_cmd) {
+		pr_err("%s:%d failed: vfe_dev %pK reg_cfg_cmd %pK\n", __func__,
+			__LINE__, vfe_dev, reg_cfg_cmd);
+		return -EINVAL;
+	}
+	if ((reg_cfg_cmd->cmd_type != VFE_CFG_MASK) &&
+		(!cfg_data || !cmd_len)) {
+		pr_err("%s:%d failed: cmd type %d cfg_data %pK cmd_len %d\n",
+			__func__, __LINE__, reg_cfg_cmd->cmd_type, cfg_data,
+			cmd_len);
+		return -EINVAL;
+	}
+
+	/* Validate input parameters */
+	switch (reg_cfg_cmd->cmd_type) {
+	case VFE_WRITE:
+	case VFE_READ:
+	case VFE_WRITE_MB: {
+		if ((reg_cfg_cmd->u.rw_info.reg_offset >
+			(UINT_MAX - reg_cfg_cmd->u.rw_info.len)) ||
+			((reg_cfg_cmd->u.rw_info.reg_offset +
+			reg_cfg_cmd->u.rw_info.len) >
+			resource_size(vfe_dev->vfe_mem)) ||
+			(reg_cfg_cmd->u.rw_info.reg_offset & 0x3)) {
+			pr_err("%s:%d reg_offset %d len %d res %d\n",
+				__func__, __LINE__,
+				reg_cfg_cmd->u.rw_info.reg_offset,
+				reg_cfg_cmd->u.rw_info.len,
+				(uint32_t)resource_size(vfe_dev->vfe_mem));
+			return -EINVAL;
+		}
+
+		if ((reg_cfg_cmd->u.rw_info.cmd_data_offset >
+			(UINT_MAX - reg_cfg_cmd->u.rw_info.len)) ||
+			((reg_cfg_cmd->u.rw_info.cmd_data_offset +
+			reg_cfg_cmd->u.rw_info.len) > cmd_len)) {
+			pr_err("%s:%d cmd_data_offset %d len %d cmd_len %d\n",
+				__func__, __LINE__,
+				reg_cfg_cmd->u.rw_info.cmd_data_offset,
+				reg_cfg_cmd->u.rw_info.len, cmd_len);
+			return -EINVAL;
+		}
+		break;
+	}
+
+	case VFE_WRITE_DMI_16BIT:
+	case VFE_WRITE_DMI_32BIT:
+	case VFE_WRITE_DMI_64BIT:
+	case VFE_READ_DMI_16BIT:
+	case VFE_READ_DMI_32BIT:
+	case VFE_READ_DMI_64BIT: {
+		if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT ||
+			reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
+			if ((reg_cfg_cmd->u.dmi_info.hi_tbl_offset <=
+				reg_cfg_cmd->u.dmi_info.lo_tbl_offset) ||
+				(reg_cfg_cmd->u.dmi_info.hi_tbl_offset -
+				reg_cfg_cmd->u.dmi_info.lo_tbl_offset !=
+				(sizeof(uint32_t)))) {
+				pr_err("%s:%d hi %d lo %d\n",
+					__func__, __LINE__,
+					reg_cfg_cmd->u.dmi_info.hi_tbl_offset,
+					reg_cfg_cmd->u.dmi_info.lo_tbl_offset);
+				return -EINVAL;
+			}
+			if (reg_cfg_cmd->u.dmi_info.len <= sizeof(uint32_t)) {
+				pr_err("%s:%d len %d\n",
+					__func__, __LINE__,
+					reg_cfg_cmd->u.dmi_info.len);
+				return -EINVAL;
+			}
+			if (((UINT_MAX -
+				reg_cfg_cmd->u.dmi_info.hi_tbl_offset) <
+				(reg_cfg_cmd->u.dmi_info.len -
+				sizeof(uint32_t))) ||
+				((reg_cfg_cmd->u.dmi_info.hi_tbl_offset +
+				reg_cfg_cmd->u.dmi_info.len -
+				sizeof(uint32_t)) > cmd_len)) {
+				pr_err("%s:%d hi_tbl_offset %d len %d cmd %d\n",
+					__func__, __LINE__,
+					reg_cfg_cmd->u.dmi_info.hi_tbl_offset,
+					reg_cfg_cmd->u.dmi_info.len, cmd_len);
+				return -EINVAL;
+			}
+		}
+		if ((reg_cfg_cmd->u.dmi_info.lo_tbl_offset >
+			(UINT_MAX - reg_cfg_cmd->u.dmi_info.len)) ||
+			((reg_cfg_cmd->u.dmi_info.lo_tbl_offset +
+			reg_cfg_cmd->u.dmi_info.len) > cmd_len)) {
+			pr_err("%s:%d lo_tbl_offset %d len %d cmd_len %d\n",
+				__func__, __LINE__,
+				reg_cfg_cmd->u.dmi_info.lo_tbl_offset,
+				reg_cfg_cmd->u.dmi_info.len, cmd_len);
+			return -EINVAL;
+		}
+		break;
+	}
+
+	default:
+		break;
+	}
+
+	switch (reg_cfg_cmd->cmd_type) {
+	case VFE_WRITE: {
+		msm_camera_io_memcpy(vfe_dev->vfe_base +
+			reg_cfg_cmd->u.rw_info.reg_offset,
+			cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset/4,
+			reg_cfg_cmd->u.rw_info.len);
+		break;
+	}
+	case VFE_WRITE_MB: {
+		msm_camera_io_memcpy_mb(vfe_dev->vfe_base +
+			reg_cfg_cmd->u.rw_info.reg_offset,
+			cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset/4,
+			reg_cfg_cmd->u.rw_info.len);
+		break;
+	}
+	case VFE_CFG_MASK: {
+		uint32_t temp;
+
+		if ((UINT_MAX - sizeof(temp) <
+			reg_cfg_cmd->u.mask_info.reg_offset) ||
+			(resource_size(vfe_dev->vfe_mem) <
+			reg_cfg_cmd->u.mask_info.reg_offset +
+			sizeof(temp)) ||
+			(reg_cfg_cmd->u.mask_info.reg_offset & 0x3)) {
+			pr_err("%s: VFE_CFG_MASK: Invalid length\n", __func__);
+			return -EINVAL;
+		}
+		temp = msm_camera_io_r(vfe_dev->vfe_base +
+			reg_cfg_cmd->u.mask_info.reg_offset);
+
+		temp &= ~reg_cfg_cmd->u.mask_info.mask;
+		temp |= reg_cfg_cmd->u.mask_info.val;
+		msm_camera_io_w(temp, vfe_dev->vfe_base +
+			reg_cfg_cmd->u.mask_info.reg_offset);
+		break;
+	}
+	case VFE_WRITE_DMI_16BIT:
+	case VFE_WRITE_DMI_32BIT:
+	case VFE_WRITE_DMI_64BIT: {
+		int i;
+		uint32_t *hi_tbl_ptr = NULL, *lo_tbl_ptr = NULL;
+		uint32_t hi_val, lo_val, lo_val1;
+
+		if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT) {
+			hi_tbl_ptr = cfg_data +
+				reg_cfg_cmd->u.dmi_info.hi_tbl_offset/4;
+		}
+		lo_tbl_ptr = cfg_data +
+			reg_cfg_cmd->u.dmi_info.lo_tbl_offset/4;
+		if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT)
+			reg_cfg_cmd->u.dmi_info.len =
+				reg_cfg_cmd->u.dmi_info.len / 2;
+		for (i = 0; i < reg_cfg_cmd->u.dmi_info.len/4; i++) {
+			lo_val = *lo_tbl_ptr++;
+			if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_16BIT) {
+				lo_val1 = lo_val & 0x0000FFFF;
+				lo_val = (lo_val & 0xFFFF0000)>>16;
+				msm_camera_io_w(lo_val1, vfe_dev->vfe_base +
+					vfe_dev->hw_info->dmi_reg_offset + 0x4);
+			} else if (reg_cfg_cmd->cmd_type ==
+					   VFE_WRITE_DMI_64BIT) {
+				lo_tbl_ptr++;
+				hi_val = *hi_tbl_ptr;
+				hi_tbl_ptr = hi_tbl_ptr + 2;
+				msm_camera_io_w(hi_val, vfe_dev->vfe_base +
+					vfe_dev->hw_info->dmi_reg_offset);
+			}
+			msm_camera_io_w(lo_val, vfe_dev->vfe_base +
+				vfe_dev->hw_info->dmi_reg_offset + 0x4);
+		}
+		break;
+	}
+	case VFE_READ_DMI_16BIT:
+	case VFE_READ_DMI_32BIT:
+	case VFE_READ_DMI_64BIT: {
+		int i;
+		uint32_t *hi_tbl_ptr = NULL, *lo_tbl_ptr = NULL;
+		uint32_t hi_val, lo_val, lo_val1;
+
+		if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
+			hi_tbl_ptr = cfg_data +
+				reg_cfg_cmd->u.dmi_info.hi_tbl_offset/4;
+		}
+
+		lo_tbl_ptr = cfg_data +
+			reg_cfg_cmd->u.dmi_info.lo_tbl_offset/4;
+
+		if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT)
+			reg_cfg_cmd->u.dmi_info.len =
+				reg_cfg_cmd->u.dmi_info.len / 2;
+
+		for (i = 0; i < reg_cfg_cmd->u.dmi_info.len/4; i++) {
+			lo_val = msm_camera_io_r(vfe_dev->vfe_base +
+					vfe_dev->hw_info->dmi_reg_offset + 0x4);
+
+			if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_16BIT) {
+				lo_val1 = msm_camera_io_r(vfe_dev->vfe_base +
+					vfe_dev->hw_info->dmi_reg_offset + 0x4);
+				lo_val |= lo_val1 << 16;
+			}
+			*lo_tbl_ptr++ = lo_val;
+			if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
+				hi_val = msm_camera_io_r(vfe_dev->vfe_base +
+					vfe_dev->hw_info->dmi_reg_offset);
+				*hi_tbl_ptr = hi_val;
+				hi_tbl_ptr += 2;
+				lo_tbl_ptr++;
+			}
+		}
+		break;
+	}
+	case VFE_HW_UPDATE_LOCK: {
+		uint32_t update_id =
+			vfe_dev->axi_data.src_info[VFE_PIX_0].last_updt_frm_id;
+		if (update_id) {
+			ISP_DBG("%s hw_update_lock fail cur_id %u,last_id %u\n",
+				__func__,
+				vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id,
+				update_id);
+			return -EINVAL;
+		}
+		break;
+	}
+	case VFE_HW_UPDATE_UNLOCK: {
+		if (vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id
+			!= *cfg_data) {
+			ISP_DBG("hw_updt over frm bound,strt_id %u end_id %d\n",
+				*cfg_data,
+				vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
+		}
+		vfe_dev->axi_data.src_info[VFE_PIX_0].last_updt_frm_id =
+			vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+		break;
+	}
+	case VFE_READ: {
+		int i;
+		uint32_t *data_ptr = cfg_data +
+			reg_cfg_cmd->u.rw_info.cmd_data_offset/4;
+		for (i = 0; i < reg_cfg_cmd->u.rw_info.len/4; i++) {
+			if ((data_ptr < cfg_data) ||
+				(UINT_MAX / sizeof(*data_ptr) <
+				 (data_ptr - cfg_data)) ||
+				(sizeof(*data_ptr) * (data_ptr - cfg_data) >=
+				 cmd_len))
+				return -EINVAL;
+			*data_ptr++ = msm_camera_io_r(vfe_dev->vfe_base +
+				reg_cfg_cmd->u.rw_info.reg_offset);
+			reg_cfg_cmd->u.rw_info.reg_offset += 4;
+		}
+		break;
+	}
+	case GET_MAX_CLK_RATE: {
+		int rc = 0;
+		long rate;
+
+		if (cmd_len != sizeof(__u32)) {
+			pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
+				__func__, __LINE__, cmd_len,
+				sizeof(__u32));
+			return -EINVAL;
+		}
+		rc = msm_isp_get_max_clk_rate(vfe_dev, &rate);
+		if (rc < 0) {
+			pr_err("%s:%d failed: rc %d\n", __func__, __LINE__, rc);
+			return -EINVAL;
+		}
+
+		*(__u32 *)cfg_data = (__u32)rate;
+
+		break;
+	}
+	case GET_ISP_ID: {
+		uint32_t *isp_id = NULL;
+
+		if (cmd_len < sizeof(uint32_t)) {
+			pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
+				__func__, __LINE__, cmd_len,
+				sizeof(uint32_t));
+			return -EINVAL;
+		}
+
+		isp_id = (uint32_t *)cfg_data;
+		*isp_id = vfe_dev->pdev->id;
+		break;
+	}
+	case SET_WM_UB_SIZE:
+		break;
+	case SET_UB_POLICY: {
+
+		if (cmd_len < sizeof(vfe_dev->vfe_ub_policy)) {
+			pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
+				__func__, __LINE__, cmd_len,
+				sizeof(vfe_dev->vfe_ub_policy));
+			return -EINVAL;
+		}
+		vfe_dev->vfe_ub_policy = *cfg_data;
+		break;
+	}
+	default:
+		break;
+	}
+	return 0;
+}
+
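+/*
+ * Copy the register configuration commands and their payload from
+ * userspace, run each command through msm_isp_send_hw_cmd(), and copy
+ * any read-back data to the caller.
+ */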
+int msm_isp_proc_cmd(struct vfe_device *vfe_dev, void *arg)
+{
+	int rc = 0, i;
+	struct msm_vfe_cfg_cmd2 *proc_cmd = arg;
+	struct msm_vfe_reg_cfg_cmd *reg_cfg_cmd;
+	uint32_t *cfg_data = NULL;
+
+	if (!proc_cmd->num_cfg) {
+		pr_err("%s: Passed num_cfg as 0\n", __func__);
+		return -EINVAL;
+	}
+
+	reg_cfg_cmd = kzalloc(sizeof(struct msm_vfe_reg_cfg_cmd)*
+		proc_cmd->num_cfg, GFP_KERNEL);
+	if (!reg_cfg_cmd) {
+		rc = -ENOMEM;
+		goto reg_cfg_failed;
+	}
+
+	if (copy_from_user(reg_cfg_cmd,
+		(void __user *)(proc_cmd->cfg_cmd),
+		sizeof(struct msm_vfe_reg_cfg_cmd) * proc_cmd->num_cfg)) {
+		rc = -EFAULT;
+		goto copy_cmd_failed;
+	}
+
+	if (proc_cmd->cmd_len > 0 &&
+		proc_cmd->cmd_len < UINT16_MAX) {
+		cfg_data = kzalloc(proc_cmd->cmd_len, GFP_KERNEL);
+		if (!cfg_data) {
+			pr_err("%s: cfg_data alloc failed\n", __func__);
+			rc = -ENOMEM;
+			goto cfg_data_failed;
+		}
+
+		if (copy_from_user(cfg_data,
+			(void __user *)(proc_cmd->cfg_data),
+			proc_cmd->cmd_len)) {
+			rc = -EFAULT;
+			goto copy_cmd_failed;
+		}
+	}
+
+	for (i = 0; i < proc_cmd->num_cfg; i++)
+		rc = msm_isp_send_hw_cmd(vfe_dev, &reg_cfg_cmd[i],
+			cfg_data, proc_cmd->cmd_len);
+
+	if (copy_to_user(proc_cmd->cfg_data,
+			cfg_data, proc_cmd->cmd_len)) {
+		rc = -EFAULT;
+		goto copy_cmd_failed;
+	}
+
+copy_cmd_failed:
+	kfree(cfg_data);
+cfg_data_failed:
+	kfree(reg_cfg_cmd);
+reg_cfg_failed:
+	return rc;
+}
+
+int msm_isp_send_event(struct vfe_device *vfe_dev,
+	uint32_t event_type,
+	struct msm_isp32_event_data *event_data)
+{
+	struct v4l2_event isp_event;
+
+	memset(&isp_event, 0, sizeof(struct v4l2_event));
+	isp_event.id = 0;
+	isp_event.type = event_type;
+
+	memcpy(&isp_event.u.data[0], event_data,
+		sizeof(struct msm_isp32_event_data));
+	v4l2_event_queue(vfe_dev->subdev.sd.devnode, &isp_event);
+	return 0;
+}
+
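+/* CAL_WORD(width, M, N): words per line = ceil(width * M / N). */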
+#define CAL_WORD(width, M, N) ((width * M + N - 1) / N)
+
+int msm_isp_cal_word_per_line(uint32_t output_format,
+	uint32_t pixel_per_line)
+{
+	int val = -1;
+
+	switch (output_format) {
+	case V4L2_PIX_FMT_SBGGR8:
+	case V4L2_PIX_FMT_SGBRG8:
+	case V4L2_PIX_FMT_SGRBG8:
+	case V4L2_PIX_FMT_SRGGB8:
+	case V4L2_PIX_FMT_QBGGR8:
+	case V4L2_PIX_FMT_QGBRG8:
+	case V4L2_PIX_FMT_QGRBG8:
+	case V4L2_PIX_FMT_QRGGB8:
+	case V4L2_PIX_FMT_JPEG:
+	case V4L2_PIX_FMT_META:
+		val = CAL_WORD(pixel_per_line, 1, 8);
+		break;
+	case V4L2_PIX_FMT_SBGGR10:
+	case V4L2_PIX_FMT_SGBRG10:
+	case V4L2_PIX_FMT_SGRBG10:
+	case V4L2_PIX_FMT_SRGGB10:
+		val = CAL_WORD(pixel_per_line, 5, 32);
+		break;
+	case V4L2_PIX_FMT_SBGGR12:
+	case V4L2_PIX_FMT_SGBRG12:
+	case V4L2_PIX_FMT_SGRBG12:
+	case V4L2_PIX_FMT_SRGGB12:
+		val = CAL_WORD(pixel_per_line, 3, 16);
+		break;
+	case V4L2_PIX_FMT_SBGGR14:
+	case V4L2_PIX_FMT_SGBRG14:
+	case V4L2_PIX_FMT_SGRBG14:
+	case V4L2_PIX_FMT_SRGGB14:
+		val = CAL_WORD(pixel_per_line, 7, 32);
+		break;
+	case V4L2_PIX_FMT_QBGGR10:
+	case V4L2_PIX_FMT_QGBRG10:
+	case V4L2_PIX_FMT_QGRBG10:
+	case V4L2_PIX_FMT_QRGGB10:
+		val = CAL_WORD(pixel_per_line, 1, 6);
+		break;
+	case V4L2_PIX_FMT_QBGGR12:
+	case V4L2_PIX_FMT_QGBRG12:
+	case V4L2_PIX_FMT_QGRBG12:
+	case V4L2_PIX_FMT_QRGGB12:
+		val = CAL_WORD(pixel_per_line, 1, 5);
+		break;
+	case V4L2_PIX_FMT_QBGGR14:
+	case V4L2_PIX_FMT_QGBRG14:
+	case V4L2_PIX_FMT_QGRBG14:
+	case V4L2_PIX_FMT_QRGGB14:
+		val = CAL_WORD(pixel_per_line, 1, 4);
+		break;
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_NV21:
+	case V4L2_PIX_FMT_NV14:
+	case V4L2_PIX_FMT_NV41:
+	case V4L2_PIX_FMT_NV16:
+	case V4L2_PIX_FMT_NV61:
+		val = CAL_WORD(pixel_per_line, 1, 8);
+		break;
+	case V4L2_PIX_FMT_YUYV:
+	case V4L2_PIX_FMT_YVYU:
+	case V4L2_PIX_FMT_UYVY:
+	case V4L2_PIX_FMT_VYUY:
+		val = CAL_WORD(pixel_per_line, 2, 8);
+		break;
+	case V4L2_PIX_FMT_P16BGGR10:
+	case V4L2_PIX_FMT_P16GBRG10:
+	case V4L2_PIX_FMT_P16GRBG10:
+	case V4L2_PIX_FMT_P16RGGB10:
+		val = CAL_WORD(pixel_per_line, 1, 4);
+		break;
+		/* TODO: Add more image formats */
+	default:
+		msm_isp_print_fourcc_error(__func__, output_format);
+		break;
+	}
+	return val;
+}
+
+enum msm_isp_pack_fmt msm_isp_get_pack_format(uint32_t output_format)
+{
+	switch (output_format) {
+	case V4L2_PIX_FMT_SBGGR8:
+	case V4L2_PIX_FMT_SGBRG8:
+	case V4L2_PIX_FMT_SGRBG8:
+	case V4L2_PIX_FMT_SRGGB8:
+	case V4L2_PIX_FMT_SBGGR10:
+	case V4L2_PIX_FMT_SGBRG10:
+	case V4L2_PIX_FMT_SGRBG10:
+	case V4L2_PIX_FMT_SRGGB10:
+	case V4L2_PIX_FMT_SBGGR12:
+	case V4L2_PIX_FMT_SGBRG12:
+	case V4L2_PIX_FMT_SGRBG12:
+	case V4L2_PIX_FMT_SRGGB12:
+	case V4L2_PIX_FMT_SBGGR14:
+	case V4L2_PIX_FMT_SGBRG14:
+	case V4L2_PIX_FMT_SGRBG14:
+	case V4L2_PIX_FMT_SRGGB14:
+		return MIPI;
+	case V4L2_PIX_FMT_QBGGR8:
+	case V4L2_PIX_FMT_QGBRG8:
+	case V4L2_PIX_FMT_QGRBG8:
+	case V4L2_PIX_FMT_QRGGB8:
+	case V4L2_PIX_FMT_QBGGR10:
+	case V4L2_PIX_FMT_QGBRG10:
+	case V4L2_PIX_FMT_QGRBG10:
+	case V4L2_PIX_FMT_QRGGB10:
+	case V4L2_PIX_FMT_QBGGR12:
+	case V4L2_PIX_FMT_QGBRG12:
+	case V4L2_PIX_FMT_QGRBG12:
+	case V4L2_PIX_FMT_QRGGB12:
+	case V4L2_PIX_FMT_QBGGR14:
+	case V4L2_PIX_FMT_QGBRG14:
+	case V4L2_PIX_FMT_QGRBG14:
+	case V4L2_PIX_FMT_QRGGB14:
+		return QCOM;
+	case V4L2_PIX_FMT_P16BGGR10:
+	case V4L2_PIX_FMT_P16GBRG10:
+	case V4L2_PIX_FMT_P16GRBG10:
+	case V4L2_PIX_FMT_P16RGGB10:
+		return PLAIN16;
+	default:
+		msm_isp_print_fourcc_error(__func__, output_format);
+		break;
+	}
+	return -EINVAL;
+}
+
+int msm_isp_get_bit_per_pixel(uint32_t output_format)
+{
+	switch (output_format) {
+	case V4L2_PIX_FMT_Y4:
+		return 4;
+	case V4L2_PIX_FMT_Y6:
+		return 6;
+	case V4L2_PIX_FMT_SBGGR8:
+	case V4L2_PIX_FMT_SGBRG8:
+	case V4L2_PIX_FMT_SGRBG8:
+	case V4L2_PIX_FMT_SRGGB8:
+	case V4L2_PIX_FMT_QBGGR8:
+	case V4L2_PIX_FMT_QGBRG8:
+	case V4L2_PIX_FMT_QGRBG8:
+	case V4L2_PIX_FMT_QRGGB8:
+	case V4L2_PIX_FMT_JPEG:
+	case V4L2_PIX_FMT_META:
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_NV21:
+	case V4L2_PIX_FMT_NV14:
+	case V4L2_PIX_FMT_NV41:
+	case V4L2_PIX_FMT_YVU410:
+	case V4L2_PIX_FMT_YVU420:
+	case V4L2_PIX_FMT_YUYV:
+	case V4L2_PIX_FMT_YYUV:
+	case V4L2_PIX_FMT_YVYU:
+	case V4L2_PIX_FMT_UYVY:
+	case V4L2_PIX_FMT_VYUY:
+	case V4L2_PIX_FMT_YUV422P:
+	case V4L2_PIX_FMT_YUV411P:
+	case V4L2_PIX_FMT_Y41P:
+	case V4L2_PIX_FMT_YUV444:
+	case V4L2_PIX_FMT_YUV555:
+	case V4L2_PIX_FMT_YUV565:
+	case V4L2_PIX_FMT_YUV32:
+	case V4L2_PIX_FMT_YUV410:
+	case V4L2_PIX_FMT_YUV420:
+	case V4L2_PIX_FMT_GREY:
+	case V4L2_PIX_FMT_PAL8:
+	case V4L2_PIX_FMT_UV8:
+	case MSM_V4L2_PIX_FMT_META:
+		return 8;
+	case V4L2_PIX_FMT_SBGGR10:
+	case V4L2_PIX_FMT_SGBRG10:
+	case V4L2_PIX_FMT_SGRBG10:
+	case V4L2_PIX_FMT_SRGGB10:
+	case V4L2_PIX_FMT_QBGGR10:
+	case V4L2_PIX_FMT_QGBRG10:
+	case V4L2_PIX_FMT_QGRBG10:
+	case V4L2_PIX_FMT_QRGGB10:
+	case V4L2_PIX_FMT_Y10:
+	case V4L2_PIX_FMT_Y10BPACK:
+	case V4L2_PIX_FMT_P16BGGR10:
+	case V4L2_PIX_FMT_P16GBRG10:
+	case V4L2_PIX_FMT_P16GRBG10:
+	case V4L2_PIX_FMT_P16RGGB10:
+		return 10;
+	case V4L2_PIX_FMT_SBGGR12:
+	case V4L2_PIX_FMT_SGBRG12:
+	case V4L2_PIX_FMT_SGRBG12:
+	case V4L2_PIX_FMT_SRGGB12:
+	case V4L2_PIX_FMT_QBGGR12:
+	case V4L2_PIX_FMT_QGBRG12:
+	case V4L2_PIX_FMT_QGRBG12:
+	case V4L2_PIX_FMT_QRGGB12:
+	case V4L2_PIX_FMT_Y12:
+		return 12;
+	case V4L2_PIX_FMT_SBGGR14:
+	case V4L2_PIX_FMT_SGBRG14:
+	case V4L2_PIX_FMT_SGRBG14:
+	case V4L2_PIX_FMT_SRGGB14:
+	case V4L2_PIX_FMT_QBGGR14:
+	case V4L2_PIX_FMT_QGBRG14:
+	case V4L2_PIX_FMT_QGRBG14:
+	case V4L2_PIX_FMT_QRGGB14:
+		return 14;
+	case V4L2_PIX_FMT_NV16:
+	case V4L2_PIX_FMT_NV61:
+	case V4L2_PIX_FMT_Y16:
+		return 16;
+		/* TODO: Add more image formats */
+	default:
+		msm_isp_print_fourcc_error(__func__, output_format);
+		pr_err("%s: Invalid output format %x\n",
+			__func__, output_format);
+		return -EINVAL;
+	}
+}
+
+void msm_isp_update_error_frame_count(struct vfe_device *vfe_dev)
+{
+	struct msm_vfe_error_info *error_info = &vfe_dev->error_info;
+
+	error_info->info_dump_frame_count++;
+}
+
+void msm_isp_process_error_info(struct vfe_device *vfe_dev)
+{
+	int i;
+	uint8_t num_stats_type =
+		vfe_dev->hw_info->stats_hw_info->num_stats_type;
+	struct msm_vfe_error_info *error_info = &vfe_dev->error_info;
+	static DEFINE_RATELIMIT_STATE(rs,
+		DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
+	static DEFINE_RATELIMIT_STATE(rs_stats,
+		DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
+
+	if (error_info->error_count == 1 ||
+		!(error_info->info_dump_frame_count % 100)) {
+		vfe_dev->hw_info->vfe_ops.core_ops.
+			process_error_status(vfe_dev);
+		error_info->error_mask0 = 0;
+		error_info->error_mask1 = 0;
+		error_info->camif_status = 0;
+		error_info->violation_status = 0;
+		for (i = 0; i < MAX_NUM_STREAM; i++) {
+			if (error_info->stream_framedrop_count[i] != 0 &&
+				__ratelimit(&rs)) {
+				pr_err("%s: Stream[%d]: dropped %d frames\n",
+					__func__, i,
+					error_info->stream_framedrop_count[i]);
+				error_info->stream_framedrop_count[i] = 0;
+			}
+		}
+		for (i = 0; i < num_stats_type; i++) {
+			if (error_info->stats_framedrop_count[i] != 0 &&
+				__ratelimit(&rs_stats)) {
+				pr_err("%s: Stats stream[%d]: dropped %d frames\n",
+					__func__, i,
+					error_info->stats_framedrop_count[i]);
+				error_info->stats_framedrop_count[i] = 0;
+			}
+		}
+	}
+}
+
+static inline void msm_isp_update_error_info(struct vfe_device *vfe_dev,
+	uint32_t error_mask0, uint32_t error_mask1)
+{
+	vfe_dev->error_info.error_mask0 |= error_mask0;
+	vfe_dev->error_info.error_mask1 |= error_mask1;
+	vfe_dev->error_info.error_count++;
+}
+
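+/*
+ * Bus overflow handling: once recovery is in progress only the
+ * halt/restart interrupts are let through.  On a newly detected
+ * overflow the current IRQ masks are saved, the AXI bridge is halted,
+ * CAMIF is stopped immediately and a bus-overflow event is sent to
+ * userspace.
+ */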
+static void msm_isp_process_overflow_irq(
+	struct vfe_device *vfe_dev,
+	uint32_t *irq_status0, uint32_t *irq_status1)
+{
+	uint32_t overflow_mask;
+
+	/* if there are no active streams - do not start recovery */
+	if (!vfe_dev->axi_data.num_active_stream)
+		return;
+
+	/*Mask out all other irqs if recovery is started*/
+	if (atomic_read(&vfe_dev->error_info.overflow_state) != NO_OVERFLOW) {
+		uint32_t halt_restart_mask0, halt_restart_mask1;
+
+		vfe_dev->hw_info->vfe_ops.core_ops.
+		get_halt_restart_mask(&halt_restart_mask0,
+			&halt_restart_mask1);
+		*irq_status0 &= halt_restart_mask0;
+		*irq_status1 &= halt_restart_mask1;
+
+		return;
+	}
+
+	/*Check if any overflow bit is set*/
+	vfe_dev->hw_info->vfe_ops.core_ops.
+		get_overflow_mask(&overflow_mask);
+	overflow_mask &= *irq_status1;
+
+	if (overflow_mask) {
+		struct msm_isp32_event_data error_event;
+
+		if (vfe_dev->reset_pending == 1) {
+			pr_err("%s:%d failed: overflow %x during reset\n",
+				__func__, __LINE__, overflow_mask);
+			/* Clear overflow bits since reset is pending */
+			*irq_status1 &= ~overflow_mask;
+			return;
+		}
+
+		ISP_DBG("%s: Bus overflow detected: 0x%x, start recovery!\n",
+				__func__, overflow_mask);
+		atomic_set(&vfe_dev->error_info.overflow_state,
+				OVERFLOW_DETECTED);
+		/*Store current IRQ mask*/
+		vfe_dev->hw_info->vfe_ops.core_ops.get_irq_mask(vfe_dev,
+			&vfe_dev->error_info.overflow_recover_irq_mask0,
+			&vfe_dev->error_info.overflow_recover_irq_mask1);
+
+		/*Halt the hardware & Clear all other IRQ mask*/
+		vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 0);
+
+		/*Stop CAMIF Immediately*/
+		vfe_dev->hw_info->vfe_ops.core_ops.
+			update_camif_state(vfe_dev, DISABLE_CAMIF_IMMEDIATELY);
+
+		/*Update overflow state*/
+		*irq_status0 = 0;
+		*irq_status1 = 0;
+
+		memset(&error_event, 0, sizeof(error_event));
+		error_event.frame_id =
+			vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+		error_event.u.error_info.error_mask = 1 << ISP_WM_BUS_OVERFLOW;
+		msm_isp_send_event(vfe_dev,
+			ISP_EVENT_WM_BUS_OVERFLOW, &error_event);
+	}
+}
+
+void msm_isp_reset_burst_count_and_frame_drop(
+	struct vfe_device *vfe_dev, struct msm_vfe_axi_stream *stream_info)
+{
+	uint32_t framedrop_period = 0;
+
+	if (stream_info->state != ACTIVE ||
+		stream_info->stream_type != BURST_STREAM) {
+		return;
+	}
+	if (stream_info->num_burst_capture != 0) {
+		framedrop_period = msm_isp_get_framedrop_period(
+		   stream_info->frame_skip_pattern);
+		stream_info->burst_frame_count =
+			stream_info->init_frame_drop +
+			(stream_info->num_burst_capture - 1) *
+			framedrop_period + 1;
+		msm_isp_reset_framedrop(vfe_dev, stream_info);
+	}
+}
+
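+/*
+ * Top-half IRQ handler: read and classify the IRQ status, start
+ * overflow recovery if needed, then queue the status words and a
+ * timestamp for the tasklet to process.
+ */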
+irqreturn_t msm_isp_process_irq(int irq_num, void *data)
+{
+	unsigned long flags;
+	struct msm_vfe_tasklet_queue_cmd *queue_cmd;
+	struct vfe_device *vfe_dev = (struct vfe_device *) data;
+	uint32_t irq_status0, irq_status1;
+	uint32_t error_mask0, error_mask1;
+
+	vfe_dev->hw_info->vfe_ops.irq_ops.
+		read_irq_status(vfe_dev, &irq_status0, &irq_status1);
+
+	if ((irq_status0 == 0) && (irq_status1 == 0)) {
+		pr_err_ratelimited("%s:VFE%d irq_status0 & 1 are both 0\n",
+			__func__, vfe_dev->pdev->id);
+		return IRQ_HANDLED;
+	}
+
+	msm_isp_process_overflow_irq(vfe_dev,
+		&irq_status0, &irq_status1);
+
+	vfe_dev->hw_info->vfe_ops.core_ops.
+		get_error_mask(&error_mask0, &error_mask1);
+	error_mask0 &= irq_status0;
+	error_mask1 &= irq_status1;
+	irq_status0 &= ~error_mask0;
+	irq_status1 &= ~error_mask1;
+	if (!vfe_dev->ignore_error &&
+		((error_mask0 != 0) || (error_mask1 != 0)))
+		msm_isp_update_error_info(vfe_dev, error_mask0, error_mask1);
+
+	if ((irq_status0 == 0) && (irq_status1 == 0) &&
+		(!(((error_mask0 != 0) || (error_mask1 != 0)) &&
+		 vfe_dev->error_info.error_count == 1))) {
+		ISP_DBG("%s: irq statuses clear, nothing to schedule\n",
+			__func__);
+		return IRQ_HANDLED;
+	}
+
+	spin_lock_irqsave(&vfe_dev->tasklet_lock, flags);
+	queue_cmd = &vfe_dev->tasklet_queue_cmd[vfe_dev->taskletq_idx];
+	if (queue_cmd->cmd_used) {
+		pr_err_ratelimited("%s: Tasklet queue overflow: %d\n",
+			__func__, vfe_dev->pdev->id);
+		list_del(&queue_cmd->list);
+	} else {
+		atomic_add(1, &vfe_dev->irq_cnt);
+	}
+	queue_cmd->vfeInterruptStatus0 = irq_status0;
+	queue_cmd->vfeInterruptStatus1 = irq_status1;
+	msm_isp_get_timestamp(&queue_cmd->ts, vfe_dev);
+	queue_cmd->cmd_used = 1;
+	vfe_dev->taskletq_idx =
+		(vfe_dev->taskletq_idx + 1) % MSM_VFE_TASKLETQ_SIZE;
+	list_add_tail(&queue_cmd->list, &vfe_dev->tasklet_q);
+	spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
+	tasklet_schedule(&vfe_dev->vfe_tasklet);
+	return IRQ_HANDLED;
+}
+
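+/*
+ * Bottom-half: drain the queued IRQ status entries and dispatch them
+ * to the reset, halt, CAMIF, AXI, stats, reg-update and epoch
+ * handlers in that order.
+ */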
+void msm_isp_do_tasklet(unsigned long data)
+{
+	unsigned long flags;
+	struct vfe_device *vfe_dev = (struct vfe_device *) data;
+	struct msm_vfe_irq_ops *irq_ops = &vfe_dev->hw_info->vfe_ops.irq_ops;
+	struct msm_vfe_tasklet_queue_cmd *queue_cmd;
+	struct msm_isp_timestamp ts;
+	uint32_t irq_status0, irq_status1;
+
+	while (atomic_read(&vfe_dev->irq_cnt)) {
+		spin_lock_irqsave(&vfe_dev->tasklet_lock, flags);
+		queue_cmd = list_first_entry(&vfe_dev->tasklet_q,
+		struct msm_vfe_tasklet_queue_cmd, list);
+
+		if (!queue_cmd) {
+			atomic_set(&vfe_dev->irq_cnt, 0);
+			spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
+			return;
+		}
+		atomic_sub(1, &vfe_dev->irq_cnt);
+		list_del(&queue_cmd->list);
+		queue_cmd->cmd_used = 0;
+		irq_status0 = queue_cmd->vfeInterruptStatus0;
+		irq_status1 = queue_cmd->vfeInterruptStatus1;
+		ts = queue_cmd->ts;
+		spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
+		ISP_DBG("%s: status0: 0x%x status1: 0x%x\n",
+			__func__, irq_status0, irq_status1);
+		irq_ops->process_reset_irq(vfe_dev,
+			irq_status0, irq_status1);
+		irq_ops->process_halt_irq(vfe_dev,
+			irq_status0, irq_status1);
+		if (atomic_read(&vfe_dev->error_info.overflow_state)
+			!= NO_OVERFLOW) {
+			pr_err("%s: Recovery in progress, ignoring IRQs\n",
+				__func__);
+			continue;
+		}
+		msm_isp_process_error_info(vfe_dev);
+		irq_ops->process_camif_irq(vfe_dev,
+			irq_status0, irq_status1, &ts);
+		irq_ops->process_axi_irq(vfe_dev,
+			irq_status0, irq_status1, &ts);
+		irq_ops->process_stats_irq(vfe_dev,
+			irq_status0, irq_status1, &ts);
+		irq_ops->process_reg_update(vfe_dev,
+			irq_status0, irq_status1, &ts);
+		irq_ops->process_epoch_irq(vfe_dev,
+			irq_status0, irq_status1, &ts);
+	}
+}
+
+int msm_isp_set_src_state(struct vfe_device *vfe_dev, void *arg)
+{
+	struct msm_vfe_axi_src_state *src_state = arg;
+
+	if (src_state->input_src >= VFE_SRC_MAX)
+		return -EINVAL;
+	vfe_dev->axi_data.src_info[src_state->input_src].active =
+	src_state->src_active;
+	vfe_dev->axi_data.src_info[src_state->input_src].frame_id =
+	src_state->src_frame_id;
+	return 0;
+}
+
+static void msm_vfe_iommu_fault_handler(struct iommu_domain *domain,
+	struct device *dev, unsigned long iova, int flags, void *token)
+{
+	struct vfe_device *vfe_dev = NULL;
+
+	if (token) {
+		vfe_dev = (struct vfe_device *)token;
+		if (!vfe_dev->buf_mgr || !vfe_dev->buf_mgr->ops) {
+			pr_err("%s:%d] buf_mgr %pK\n", __func__,
+				__LINE__, vfe_dev->buf_mgr);
+			goto end;
+		}
+		if (!vfe_dev->buf_mgr->pagefault_debug_disable) {
+			pr_err("%s:%d] vfe_dev %pK id %d\n", __func__,
+				__LINE__, vfe_dev, vfe_dev->pdev->id);
+			vfe_dev->buf_mgr->ops->buf_mgr_debug(vfe_dev->buf_mgr,
+								iova);
+		}
+	} else {
+		ISP_DBG("%s:%d] no token received: %pK\n",
+			__func__, __LINE__, token);
+		goto end;
+	}
+end:
+	return;
+}
+
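+/*
+ * First open powers up and resets the VFE, initializes the buffer
+ * manager and per-device state, and registers the IOMMU page fault
+ * handler; subsequent opens only bump the open count.
+ */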
+int msm_isp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+	struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
+	long rc = 0;
+
+	ISP_DBG("%s\n", __func__);
+
+	mutex_lock(&vfe_dev->realtime_mutex);
+	mutex_lock(&vfe_dev->core_mutex);
+
+	if (vfe_dev->vfe_open_cnt++ && vfe_dev->vfe_base) {
+		mutex_unlock(&vfe_dev->core_mutex);
+		mutex_unlock(&vfe_dev->realtime_mutex);
+		return 0;
+	}
+
+	if (vfe_dev->vfe_base) {
+		pr_err("%s:%d invalid params cnt %d base %pK\n", __func__,
+			__LINE__, vfe_dev->vfe_open_cnt, vfe_dev->vfe_base);
+		vfe_dev->vfe_base = NULL;
+	}
+
+	vfe_dev->reset_pending = 0;
+	vfe_dev->isp_sof_debug = 0;
+
+	if (vfe_dev->hw_info->vfe_ops.core_ops.init_hw(vfe_dev) < 0) {
+		pr_err("%s: init hardware failed\n", __func__);
+		vfe_dev->vfe_open_cnt--;
+		mutex_unlock(&vfe_dev->core_mutex);
+		mutex_unlock(&vfe_dev->realtime_mutex);
+		return -EBUSY;
+	}
+
+	memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
+	atomic_set(&vfe_dev->error_info.overflow_state, NO_OVERFLOW);
+
+	vfe_dev->hw_info->vfe_ops.core_ops.clear_status_reg(vfe_dev);
+
+	rc = vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 1, 1);
+	if (rc <= 0) {
+		pr_err("%s: reset timeout\n", __func__);
+		vfe_dev->hw_info->vfe_ops.core_ops.release_hw(vfe_dev);
+		vfe_dev->vfe_open_cnt--;
+		mutex_unlock(&vfe_dev->core_mutex);
+		mutex_unlock(&vfe_dev->realtime_mutex);
+		return -EINVAL;
+	}
+	vfe_dev->vfe_hw_version = msm_camera_io_r(vfe_dev->vfe_base);
+	ISP_DBG("%s: HW Version: 0x%x\n", __func__, vfe_dev->vfe_hw_version);
+
+	vfe_dev->hw_info->vfe_ops.core_ops.init_hw_reg(vfe_dev);
+
+	vfe_dev->buf_mgr->ops->buf_mgr_init(vfe_dev->buf_mgr, "msm_isp");
+
+	memset(&vfe_dev->axi_data, 0, sizeof(struct msm_vfe_axi_shared_data));
+	memset(&vfe_dev->stats_data, 0,
+		sizeof(struct msm_vfe_stats_shared_data));
+	memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
+	memset(&vfe_dev->fetch_engine_info, 0,
+		sizeof(vfe_dev->fetch_engine_info));
+	vfe_dev->axi_data.hw_info = vfe_dev->hw_info->axi_hw_info;
+	vfe_dev->taskletq_idx = 0;
+	vfe_dev->vt_enable = 0;
+	vfe_dev->bus_util_factor = 0;
+	rc = of_property_read_u32(vfe_dev->pdev->dev.of_node,
+			"bus-util-factor", &vfe_dev->bus_util_factor);
+	if (rc < 0)
+		ISP_DBG("%s: Use default bus utilization factor\n", __func__);
+
+	cam_smmu_reg_client_page_fault_handler(
+			vfe_dev->buf_mgr->iommu_hdl,
+			msm_vfe_iommu_fault_handler,
+			NULL,
+			vfe_dev);
+
+	mutex_unlock(&vfe_dev->core_mutex);
+	mutex_unlock(&vfe_dev->realtime_mutex);
+	return 0;
+}
+
+#ifdef CONFIG_MSM_AVTIMER
+static void msm_isp_end_avtimer(void)
+{
+	avcs_core_disable_power_collapse(0);
+}
+#else
+static void msm_isp_end_avtimer(void)
+{
+	pr_err("AV Timer is not supported\n");
+}
+#endif
+
+int msm_isp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+	long rc = 0;
+	struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
+
+	ISP_DBG("%s E\n", __func__);
+	mutex_lock(&vfe_dev->realtime_mutex);
+	mutex_lock(&vfe_dev->core_mutex);
+
+	if (!vfe_dev->vfe_open_cnt) {
+		pr_err("%s invalid state open cnt %d\n", __func__,
+			vfe_dev->vfe_open_cnt);
+		mutex_unlock(&vfe_dev->core_mutex);
+		mutex_unlock(&vfe_dev->realtime_mutex);
+		return -EINVAL;
+	}
+
+	if (--vfe_dev->vfe_open_cnt) {
+		mutex_unlock(&vfe_dev->core_mutex);
+		mutex_unlock(&vfe_dev->realtime_mutex);
+		return 0;
+	}
+
+	/* Unregister page fault handler */
+	cam_smmu_reg_client_page_fault_handler(
+		vfe_dev->buf_mgr->iommu_hdl,
+		NULL, NULL, vfe_dev);
+
+	rc = vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 1);
+	if (rc < 0)
+		pr_err("%s: halt timeout rc=%ld\n", __func__, rc);
+
+	vfe_dev->buf_mgr->ops->buf_mgr_deinit(vfe_dev->buf_mgr);
+	vfe_dev->hw_info->vfe_ops.core_ops.release_hw(vfe_dev);
+	if (vfe_dev->vt_enable) {
+		msm_isp_end_avtimer();
+		vfe_dev->vt_enable = 0;
+	}
+	mutex_unlock(&vfe_dev->core_mutex);
+	mutex_unlock(&vfe_dev->realtime_mutex);
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util_32.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util_32.h
new file mode 100644
index 0000000..f2268c3
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util_32.h
@@ -0,0 +1,84 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MSM_ISP_UTIL_H__
+#define __MSM_ISP_UTIL_H__
+
+#include "msm_isp_32.h"
+#include <soc/qcom/camera2.h>
+
+/* #define CONFIG_MSM_ISP_DBG 1 */
+
+#ifdef CONFIG_MSM_ISP_DBG
+#define ISP_DBG(fmt, args...) printk(fmt, ##args)
+#else
+#define ISP_DBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+#define ALT_VECTOR_IDX(x) {x = 3 - x; }
+
+struct msm_isp_bandwidth_mgr {
+	uint32_t bus_client;
+	uint32_t bus_vector_active_idx;
+	uint32_t use_count;
+	struct msm_isp_bandwidth_info client_info[MAX_ISP_CLIENT];
+};
+
+uint32_t msm_isp_get_framedrop_period(
+	enum msm_vfe_frame_skip_pattern frame_skip_pattern);
+void msm_isp_reset_burst_count_and_frame_drop(
+	struct vfe_device *vfe_dev, struct msm_vfe_axi_stream *stream_info);
+
+int msm_isp_init_bandwidth_mgr(enum msm_isp_hw_client client);
+int msm_isp_update_bandwidth(enum msm_isp_hw_client client,
+	uint64_t ab, uint64_t ib);
+void msm_isp_util_get_bandwidth_stats(struct vfe_device *vfe_dev,
+				      struct msm_isp_statistics *stats);
+void msm_isp_util_update_last_overflow_ab_ib(struct vfe_device *vfe_dev);
+void msm_isp_util_update_clk_rate(long clock_rate);
+void msm_isp_update_req_history(uint32_t client, uint64_t ab,
+				uint64_t ib,
+				struct msm_isp_bandwidth_info *client_info,
+				unsigned long long ts);
+void msm_isp_deinit_bandwidth_mgr(enum msm_isp_hw_client client);
+
+int msm_isp_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+	struct v4l2_event_subscription *sub);
+
+int msm_isp_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+	struct v4l2_event_subscription *sub);
+
+int msm_isp_proc_cmd(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_send_event(struct vfe_device *vfe_dev,
+	uint32_t type, struct msm_isp32_event_data *event_data);
+int msm_isp_cal_word_per_line(uint32_t output_format,
+	uint32_t pixel_per_line);
+int msm_isp_get_bit_per_pixel(uint32_t output_format);
+enum msm_isp_pack_fmt msm_isp_get_pack_format(uint32_t output_format);
+irqreturn_t msm_isp_process_irq(int irq_num, void *data);
+int msm_isp_set_src_state(struct vfe_device *vfe_dev, void *arg);
+void msm_isp_do_tasklet(unsigned long data);
+void msm_isp_update_error_frame_count(struct vfe_device *vfe_dev);
+void msm_isp_process_error_info(struct vfe_device *vfe_dev);
+int msm_isp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh);
+int msm_isp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh);
+long msm_isp_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg);
+int msm_isp_get_clk_info(struct vfe_device *vfe_dev,
+	struct platform_device *pdev, struct msm_cam_clk_info *vfe_clk_info);
+void msm_isp_fetch_engine_done_notify(struct vfe_device *vfe_dev,
+	struct msm_vfe_fetch_engine_info *fetch_engine_info);
+void msm_camera_io_dump_2(void __iomem *addr, int size);
+void msm_isp_print_fourcc_error(const char *origin, uint32_t fourcc_format);
+void msm_isp_get_timestamp(struct msm_isp_timestamp *time_stamp,
+	struct vfe_device *vfe_dev);
+void msm_isp_get_avtimer_ts(struct msm_isp_timestamp *time_stamp);
+int msm_isp_cfg_input(struct vfe_device *vfe_dev, void *arg);
+#endif /* __MSM_ISP_UTIL_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/ispif/Makefile b/drivers/media/platform/msm/camera_v2/ispif/Makefile
index 236ec73..d56332d 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/Makefile
+++ b/drivers/media/platform/msm/camera_v2/ispif/Makefile
@@ -1,4 +1,8 @@
 ccflags-y += -Idrivers/media/platform/msm/camera_v2
 ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
 ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+ifeq ($(CONFIG_MSM_ISP_V1),y)
+obj-$(CONFIG_MSM_CSID) += msm_ispif_32.o
+else
 obj-$(CONFIG_MSM_CSID) += msm_ispif.o
+endif
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_32.c b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_32.c
new file mode 100644
index 0000000..e9b2a1d
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_32.c
@@ -0,0 +1,1581 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/of.h>
+#include <linux/videodev2.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/iopoll.h>
+#include <linux/compat.h>
+#include <media/msmb_isp.h>
+
+#include "msm_ispif_32.h"
+#include "msm.h"
+#include "msm_sd.h"
+#include "msm_camera_io_util.h"
+
+#ifdef CONFIG_MSM_ISPIF_V1
+#include "msm_ispif_hwreg_v1.h"
+#else
+#include "msm_ispif_hwreg_v2.h"
+#endif
+
+#define V4L2_IDENT_ISPIF                      50001
+#define MSM_ISPIF_DRV_NAME                    "msm_ispif"
+
+#define ISPIF_INTF_CMD_DISABLE_FRAME_BOUNDARY 0x00
+#define ISPIF_INTF_CMD_ENABLE_FRAME_BOUNDARY  0x01
+#define ISPIF_INTF_CMD_DISABLE_IMMEDIATELY    0x02
+
+#define ISPIF_TIMEOUT_SLEEP_US                1000
+#define ISPIF_TIMEOUT_ALL_US               1000000
+
+#undef CDBG
+#ifdef CONFIG_MSMB_CAMERA_DEBUG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#else
+#define CDBG(fmt, args...) do { } while (0)
+#endif
+
+static void msm_ispif_io_dump_reg(struct ispif_device *ispif)
+{
+	if (!ispif->enb_dump_reg)
+		return;
+
+	if (!ispif->base) {
+		pr_err("%s: null pointer for the ispif base\n", __func__);
+		return;
+	}
+
+	msm_camera_io_dump(ispif->base, 0x250, 1);
+}
+
+
+static inline int msm_ispif_is_intf_valid(uint32_t csid_version,
+	uint8_t intf_type)
+{
+	return (((csid_version <= CSID_VERSION_V22
+					&& intf_type != VFE0) ||
+					(intf_type >= VFE_MAX))
+				? false : true);
+}
+
+static struct msm_cam_clk_info ispif_8626_reset_clk_info[] = {
+	{"ispif_ahb_clk", NO_SET_RATE},
+	{"csi0_src_clk", NO_SET_RATE},
+	{"csi0_clk", NO_SET_RATE},
+	{"csi0_pix_clk", NO_SET_RATE},
+	{"csi0_rdi_clk", NO_SET_RATE},
+	{"csi1_src_clk", NO_SET_RATE},
+	{"csi1_clk", NO_SET_RATE},
+	{"csi1_pix_clk", NO_SET_RATE},
+	{"csi1_rdi_clk", NO_SET_RATE},
+	{"camss_vfe_vfe0_clk", NO_SET_RATE},
+	{"camss_csi_vfe0_clk", NO_SET_RATE},
+};
+
+static struct msm_cam_clk_info ispif_8974_ahb_clk_info[ISPIF_CLK_INFO_MAX];
+
+static struct msm_cam_clk_info ispif_8974_reset_clk_info[] = {
+	{"csi0_src_clk", INIT_RATE},
+	{"csi0_clk", NO_SET_RATE},
+	{"csi0_pix_clk", NO_SET_RATE},
+	{"csi0_rdi_clk", NO_SET_RATE},
+	{"csi1_src_clk", INIT_RATE},
+	{"csi1_clk", NO_SET_RATE},
+	{"csi1_pix_clk", NO_SET_RATE},
+	{"csi1_rdi_clk", NO_SET_RATE},
+	{"csi2_src_clk", INIT_RATE},
+	{"csi2_clk", NO_SET_RATE},
+	{"csi2_pix_clk", NO_SET_RATE},
+	{"csi2_rdi_clk", NO_SET_RATE},
+	{"csi3_src_clk", INIT_RATE},
+	{"csi3_clk", NO_SET_RATE},
+	{"csi3_pix_clk", NO_SET_RATE},
+	{"csi3_rdi_clk", NO_SET_RATE},
+	{"vfe0_clk_src", INIT_RATE},
+	{"camss_vfe_vfe0_clk", NO_SET_RATE},
+	{"camss_csi_vfe0_clk", NO_SET_RATE},
+	{"vfe1_clk_src", INIT_RATE},
+	{"camss_vfe_vfe1_clk", NO_SET_RATE},
+	{"camss_csi_vfe1_clk", NO_SET_RATE},
+};
+
+static int msm_ispif_reset_hw(struct ispif_device *ispif)
+{
+	int rc = 0;
+	long timeout = 0;
+	struct clk *reset_clk[ARRAY_SIZE(ispif_8974_reset_clk_info)];
+	struct clk *reset_clk1[ARRAY_SIZE(ispif_8626_reset_clk_info)];
+
+	ispif->clk_idx = 0;
+
+	rc = msm_cam_clk_enable(&ispif->pdev->dev,
+		ispif_8974_reset_clk_info, reset_clk,
+		ARRAY_SIZE(ispif_8974_reset_clk_info), 1);
+	if (rc < 0) {
+		rc = msm_cam_clk_enable(&ispif->pdev->dev,
+			ispif_8626_reset_clk_info, reset_clk1,
+			ARRAY_SIZE(ispif_8626_reset_clk_info), 1);
+		if (rc < 0) {
+			pr_err("%s: cannot enable clock, error = %d",
+				__func__, rc);
+		} else {
+			/* This is set when device is 8x26 */
+			ispif->clk_idx = 2;
+		}
+	} else {
+		/* This is set when device is 8974 */
+		ispif->clk_idx = 1;
+	}
+
+	init_completion(&ispif->reset_complete[VFE0]);
+	if (ispif->hw_num_isps > 1)
+		init_completion(&ispif->reset_complete[VFE1]);
+
+	/* initiate reset of ISPIF */
+	msm_camera_io_w(ISPIF_RST_CMD_MASK,
+				ispif->base + ISPIF_RST_CMD_ADDR);
+	if (ispif->hw_num_isps > 1)
+		msm_camera_io_w(ISPIF_RST_CMD_1_MASK,
+					ispif->base + ISPIF_RST_CMD_1_ADDR);
+
+	timeout = wait_for_completion_timeout(
+			&ispif->reset_complete[VFE0], msecs_to_jiffies(500));
+	CDBG("%s: VFE0 done\n", __func__);
+
+	if (timeout <= 0) {
+		pr_err("%s: VFE0 reset wait timeout\n", __func__);
+		rc = msm_cam_clk_enable(&ispif->pdev->dev,
+			ispif_8974_reset_clk_info, reset_clk,
+			ARRAY_SIZE(ispif_8974_reset_clk_info), 0);
+		if (rc < 0) {
+			rc = msm_cam_clk_enable(&ispif->pdev->dev,
+				ispif_8626_reset_clk_info, reset_clk1,
+				ARRAY_SIZE(ispif_8626_reset_clk_info), 0);
+			if (rc < 0)
+				pr_err("%s: cannot disable clock, error = %d\n",
+					__func__, rc);
+		}
+		return -ETIMEDOUT;
+	}
+
+	if (ispif->hw_num_isps > 1) {
+		timeout = wait_for_completion_timeout(
+				&ispif->reset_complete[VFE1],
+				msecs_to_jiffies(500));
+		CDBG("%s: VFE1 done\n", __func__);
+		if (timeout <= 0) {
+			pr_err("%s: VFE1 reset wait timeout\n", __func__);
+			msm_cam_clk_enable(&ispif->pdev->dev,
+				ispif_8974_reset_clk_info, reset_clk,
+				ARRAY_SIZE(ispif_8974_reset_clk_info), 0);
+			return -ETIMEDOUT;
+		}
+	}
+
+	if (ispif->clk_idx == 1) {
+		rc = msm_cam_clk_enable(&ispif->pdev->dev,
+			ispif_8974_reset_clk_info, reset_clk,
+			ARRAY_SIZE(ispif_8974_reset_clk_info), 0);
+		if (rc < 0) {
+			pr_err("%s: cannot disable clock, error = %d",
+				__func__, rc);
+		}
+	}
+
+	if (ispif->clk_idx == 2) {
+		rc = msm_cam_clk_enable(&ispif->pdev->dev,
+			ispif_8626_reset_clk_info, reset_clk1,
+			ARRAY_SIZE(ispif_8626_reset_clk_info), 0);
+		if (rc < 0) {
+			pr_err("%s: cannot disable clock, error = %d",
+				__func__, rc);
+		}
+	}
+
+	return rc;
+}
+
+static int msm_ispif_get_ahb_clk_info(struct ispif_device *ispif_dev,
+	struct platform_device *pdev,
+	struct msm_cam_clk_info *ahb_clk_info)
+{
+	uint32_t num_ahb_clk = 0;
+	int i, count, rc;
+	uint32_t rates[ISPIF_CLK_INFO_MAX];
+
+	struct device_node *of_node;
+
+	of_node = pdev->dev.of_node;
+
+	count = of_property_count_strings(of_node, "clock-names");
+
+	CDBG("count = %d\n", count);
+	if (count <= 0) {
+		pr_err("no clocks found in device tree, count=%d\n", count);
+		return 0;
+	}
+
+	if (count > ISPIF_CLK_INFO_MAX) {
+		pr_err("invalid count=%d, max is %d\n", count,
+			ISPIF_CLK_INFO_MAX);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,clock-rates",
+		rates, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		return rc;
+	}
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node, "clock-names",
+				i, &(ahb_clk_info[num_ahb_clk].clk_name));
+		CDBG("clock-names[%d] = %s\n",
+			 i, ahb_clk_info[num_ahb_clk].clk_name);
+		if (rc < 0) {
+			pr_err("%s failed %d\n", __func__, __LINE__);
+			return rc;
+		}
+		if (strnstr(ahb_clk_info[num_ahb_clk].clk_name, "ahb",
+			sizeof(ahb_clk_info[num_ahb_clk].clk_name))) {
+			ahb_clk_info[num_ahb_clk].clk_rate =
+				(rates[i] == 0) ? (long)-1 : rates[i];
+			CDBG("clk_rate[%d] = %ld\n", i,
+				ahb_clk_info[num_ahb_clk].clk_rate);
+			num_ahb_clk++;
+		}
+	}
+	ispif_dev->num_ahb_clk = num_ahb_clk;
+	return 0;
+}
+
+static int msm_ispif_clk_ahb_enable(struct ispif_device *ispif, int enable)
+{
+	int rc = 0;
+
+	if (ispif->csid_version < CSID_VERSION_V30) {
+		/* Older ISPIF versions don't need the AHB clock */
+		return 0;
+	}
+
+	rc = msm_ispif_get_ahb_clk_info(ispif, ispif->pdev,
+		ispif_8974_ahb_clk_info);
+	if (rc < 0) {
+		pr_err("%s: msm_ispif_get_ahb_clk_info() failed\n", __func__);
+		return -EFAULT;
+	}
+
+	rc = msm_cam_clk_enable(&ispif->pdev->dev,
+		ispif_8974_ahb_clk_info, ispif->ahb_clk,
+		ispif->num_ahb_clk, enable);
+	if (rc < 0) {
+		pr_err("%s: cannot enable clock, error = %d",
+			__func__, rc);
+	}
+
+	return rc;
+}
+
+static int msm_ispif_reset(struct ispif_device *ispif)
+{
+	int rc = 0;
+	int i;
+
+	if (WARN_ON(!ispif))
+		return -EINVAL;
+
+	memset(ispif->sof_count, 0, sizeof(ispif->sof_count));
+	for (i = 0; i < ispif->vfe_info.num_vfe; i++) {
+
+		msm_camera_io_w(1 << PIX0_LINE_BUF_EN_BIT,
+			ispif->base + ISPIF_VFE_m_CTRL_0(i));
+		msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_IRQ_MASK_0(i));
+		msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_IRQ_MASK_1(i));
+		msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_IRQ_MASK_2(i));
+		msm_camera_io_w(0xFFFFFFFF, ispif->base +
+			ISPIF_VFE_m_IRQ_CLEAR_0(i));
+		msm_camera_io_w(0xFFFFFFFF, ispif->base +
+			ISPIF_VFE_m_IRQ_CLEAR_1(i));
+		msm_camera_io_w(0xFFFFFFFF, ispif->base +
+			ISPIF_VFE_m_IRQ_CLEAR_2(i));
+
+		msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_INPUT_SEL(i));
+
+		msm_camera_io_w(ISPIF_STOP_INTF_IMMEDIATELY,
+			ispif->base + ISPIF_VFE_m_INTF_CMD_0(i));
+		msm_camera_io_w(ISPIF_STOP_INTF_IMMEDIATELY,
+			ispif->base + ISPIF_VFE_m_INTF_CMD_1(i));
+		pr_debug("%s: base %pK\n", __func__, ispif->base);
+		msm_camera_io_w(0, ispif->base +
+			ISPIF_VFE_m_PIX_INTF_n_CID_MASK(i, 0));
+		msm_camera_io_w(0, ispif->base +
+			ISPIF_VFE_m_PIX_INTF_n_CID_MASK(i, 1));
+		msm_camera_io_w(0, ispif->base +
+			ISPIF_VFE_m_RDI_INTF_n_CID_MASK(i, 0));
+		msm_camera_io_w(0, ispif->base +
+			ISPIF_VFE_m_RDI_INTF_n_CID_MASK(i, 1));
+		msm_camera_io_w(0, ispif->base +
+			ISPIF_VFE_m_RDI_INTF_n_CID_MASK(i, 2));
+
+		msm_camera_io_w(0, ispif->base +
+			ISPIF_VFE_m_PIX_INTF_n_CROP(i, 0));
+		msm_camera_io_w(0, ispif->base +
+			ISPIF_VFE_m_PIX_INTF_n_CROP(i, 1));
+	}
+
+	msm_camera_io_w_mb(ISPIF_IRQ_GLOBAL_CLEAR_CMD, ispif->base +
+		ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+
+	return rc;
+}
+
+static void msm_ispif_sel_csid_core(struct ispif_device *ispif,
+	uint8_t intftype, uint8_t csid, uint8_t vfe_intf)
+{
+	uint32_t data;
+
+	if (WARN_ON(!ispif))
+		return;
+
+	if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) {
+		pr_err("%s: invalid interface type\n", __func__);
+		return;
+	}
+
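+	/*
+	 * CSID select fields in VFE_m_INPUT_SEL: PIX0 uses bits [1:0],
+	 * RDI0 [5:4], PIX1 [9:8], RDI1 [13:12] and RDI2 [21:20].
+	 */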
+	data = msm_camera_io_r(ispif->base + ISPIF_VFE_m_INPUT_SEL(vfe_intf));
+	switch (intftype) {
+	case PIX0:
+		data &= ~(BIT(1) | BIT(0));
+		data |= (uint32_t)csid;
+		break;
+	case RDI0:
+		data &= ~(BIT(5) | BIT(4));
+		data |= (uint32_t)(csid << 4);
+		break;
+	case PIX1:
+		data &= ~(BIT(9) | BIT(8));
+		data |= (uint32_t)(csid << 8);
+		break;
+	case RDI1:
+		data &= ~(BIT(13) | BIT(12));
+		data |= (uint32_t)(csid << 12);
+		break;
+	case RDI2:
+		data &= ~(BIT(21) | BIT(20));
+		data |= (uint32_t)(csid << 20);
+		break;
+	}
+
+	msm_camera_io_w_mb(data, ispif->base +
+		ISPIF_VFE_m_INPUT_SEL(vfe_intf));
+}
+
+static void msm_ispif_enable_crop(struct ispif_device *ispif,
+	uint8_t intftype, uint8_t vfe_intf, uint16_t start_pixel,
+	uint16_t end_pixel)
+{
+	uint32_t data;
+
+	if (WARN_ON(!ispif))
+		return;
+
+	if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) {
+		pr_err("%s: invalid interface type\n", __func__);
+		return;
+	}
+
+	data = msm_camera_io_r(ispif->base + ISPIF_VFE_m_CTRL_0(vfe_intf));
+	data |= (1 << (intftype + 7));
+	if (intftype == PIX0)
+		data |= 1 << PIX0_LINE_BUF_EN_BIT;
+	msm_camera_io_w(data,
+		ispif->base + ISPIF_VFE_m_CTRL_0(vfe_intf));
+
+	if (intftype == PIX0)
+		msm_camera_io_w_mb(start_pixel | (end_pixel << 16),
+			ispif->base + ISPIF_VFE_m_PIX_INTF_n_CROP(vfe_intf, 0));
+	else if (intftype == PIX1)
+		msm_camera_io_w_mb(start_pixel | (end_pixel << 16),
+			ispif->base + ISPIF_VFE_m_PIX_INTF_n_CROP(vfe_intf, 1));
+	else {
+		pr_err("%s: invalid intftype=%d\n", __func__, intftype);
+		WARN_ON(1);
+		return;
+	}
+}
+
+static void msm_ispif_enable_intf_cids(struct ispif_device *ispif,
+	uint8_t intftype, uint16_t cid_mask, uint8_t vfe_intf, uint8_t enable)
+{
+	uint32_t intf_addr, data;
+
+	if (WARN_ON((!ispif)))
+		return;
+
+	if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) {
+		pr_err("%s: invalid interface type\n", __func__);
+		return;
+	}
+
+	switch (intftype) {
+	case PIX0:
+		intf_addr = ISPIF_VFE_m_PIX_INTF_n_CID_MASK(vfe_intf, 0);
+		break;
+	case RDI0:
+		intf_addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe_intf, 0);
+		break;
+	case PIX1:
+		intf_addr = ISPIF_VFE_m_PIX_INTF_n_CID_MASK(vfe_intf, 1);
+		break;
+	case RDI1:
+		intf_addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe_intf, 1);
+		break;
+	case RDI2:
+		intf_addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe_intf, 2);
+		break;
+	default:
+		pr_err("%s: invalid intftype=%d\n", __func__, intftype);
+		WARN_ON(1);
+		return;
+	}
+
+	data = msm_camera_io_r(ispif->base + intf_addr);
+	if (enable)
+		data |= (uint32_t)cid_mask;
+	else
+		data &= ~((uint32_t)cid_mask);
+	msm_camera_io_w_mb(data, ispif->base + intf_addr);
+}
+
+static int msm_ispif_validate_intf_status(struct ispif_device *ispif,
+	uint8_t intftype, uint8_t vfe_intf)
+{
+	int rc = 0;
+	uint32_t data = 0;
+
+	if (WARN_ON((!ispif)))
+		return -ENODEV;
+
+	if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) {
+		pr_err("%s: invalid interface type\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (intftype) {
+	case PIX0:
+		data = msm_camera_io_r(ispif->base +
+			ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 0));
+		break;
+	case RDI0:
+		data = msm_camera_io_r(ispif->base +
+			ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 0));
+		break;
+	case PIX1:
+		data = msm_camera_io_r(ispif->base +
+			ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 1));
+		break;
+	case RDI1:
+		data = msm_camera_io_r(ispif->base +
+			ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 1));
+		break;
+	case RDI2:
+		data = msm_camera_io_r(ispif->base +
+			ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 2));
+		break;
+	}
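+	/* the interface is treated as busy unless the low status nibble is 0xF */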
+	if ((data & 0xf) != 0xf)
+		rc = -EBUSY;
+	return rc;
+}
+
+static void msm_ispif_select_clk_mux(struct ispif_device *ispif,
+	uint8_t intftype, uint8_t csid, uint8_t vfe_intf)
+{
+	uint32_t data = 0;
+
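+	/*
+	 * PIX interfaces use 4-bit CSID select fields in the base clk mux
+	 * register (PIX0 at bit 8*vfe, PIX1 at bit 8*vfe+4); RDI interfaces
+	 * use 4-bit fields in the RDI clk mux register at bits 12*vfe,
+	 * 12*vfe+4 and 12*vfe+8 for RDI0/1/2.
+	 */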
+	switch (intftype) {
+	case PIX0:
+		data = msm_camera_io_r(ispif->clk_mux_base);
+		data &= ~(0xf << (vfe_intf * 8));
+		data |= (csid << (vfe_intf * 8));
+		msm_camera_io_w(data, ispif->clk_mux_base);
+		break;
+
+	case RDI0:
+		data = msm_camera_io_r(ispif->clk_mux_base +
+			ISPIF_RDI_CLK_MUX_SEL_ADDR);
+		data &= ~(0xf << (vfe_intf * 12));
+		data |= (csid << (vfe_intf * 12));
+		msm_camera_io_w(data, ispif->clk_mux_base +
+			ISPIF_RDI_CLK_MUX_SEL_ADDR);
+		break;
+
+	case PIX1:
+		data = msm_camera_io_r(ispif->clk_mux_base);
+		data &= ~(0xf0 << (vfe_intf * 8));
+		data |= (csid << (4 + (vfe_intf * 8)));
+		msm_camera_io_w(data, ispif->clk_mux_base);
+		break;
+
+	case RDI1:
+		data = msm_camera_io_r(ispif->clk_mux_base +
+			ISPIF_RDI_CLK_MUX_SEL_ADDR);
+		data &= ~(0xf << (4 + (vfe_intf * 12)));
+		data |= (csid << (4 + (vfe_intf * 12)));
+		msm_camera_io_w(data, ispif->clk_mux_base +
+			ISPIF_RDI_CLK_MUX_SEL_ADDR);
+		break;
+
+	case RDI2:
+		data = msm_camera_io_r(ispif->clk_mux_base +
+			ISPIF_RDI_CLK_MUX_SEL_ADDR);
+		data &= ~(0xf << (8 + (vfe_intf * 12)));
+		data |= (csid << (8 + (vfe_intf * 12)));
+		msm_camera_io_w(data, ispif->clk_mux_base +
+			ISPIF_RDI_CLK_MUX_SEL_ADDR);
+		break;
+	}
+	CDBG("%s intftype %d data %x\n", __func__, intftype, data);
+	/* make sure the clk mux register writes have completed */
+	mb();
+}
+
+static uint16_t msm_ispif_get_cids_mask_from_cfg(
+	struct msm_ispif_params_entry *entry)
+{
+	int i;
+	uint16_t cids_mask = 0;
+
+	if (WARN_ON(!entry)) {
+		pr_err("%s: invalid entry", __func__);
+		return cids_mask;
+	}
+
+	for (i = 0; i < entry->num_cids && i < MAX_CID_CH_PARAM_ENTRY; i++)
+		cids_mask |= (1 << entry->cids[i]);
+
+	return cids_mask;
+}
+
+static int msm_ispif_config(struct ispif_device *ispif,
+	struct msm_ispif_param_data *params)
+{
+	int rc = 0, i = 0;
+	uint16_t cid_mask;
+	enum msm_ispif_intftype intftype;
+	enum msm_ispif_vfe_intf vfe_intf;
+
+	if (WARN_ON(!ispif) || WARN_ON(!params))
+		return -EINVAL;
+
+	if (ispif->ispif_state != ISPIF_POWER_UP) {
+		pr_err("%s: ispif invalid state %d\n", __func__,
+			ispif->ispif_state);
+		rc = -EPERM;
+		return rc;
+	}
+	if (params->num > MAX_PARAM_ENTRIES) {
+		pr_err("%s: invalid param entries %d\n", __func__,
+			params->num);
+		rc = -EINVAL;
+		return rc;
+	}
+
+	for (i = 0; i < params->num; i++) {
+		vfe_intf = params->entries[i].vfe_intf;
+		if (vfe_intf >= VFE_MAX) {
+			pr_err("%s: %d invalid i %d vfe_intf %d\n", __func__,
+				__LINE__, i, vfe_intf);
+			return -EINVAL;
+		}
+		if (!msm_ispif_is_intf_valid(ispif->csid_version,
+				vfe_intf)) {
+			pr_err("%s: invalid interface type\n", __func__);
+			return -EINVAL;
+		}
+		msm_camera_io_w(0x0, ispif->base +
+			ISPIF_VFE_m_IRQ_MASK_0(vfe_intf));
+		msm_camera_io_w(0x0, ispif->base +
+			ISPIF_VFE_m_IRQ_MASK_1(vfe_intf));
+		msm_camera_io_w_mb(0x0, ispif->base +
+			ISPIF_VFE_m_IRQ_MASK_2(vfe_intf));
+	}
+
+	for (i = 0; i < params->num; i++) {
+		intftype = params->entries[i].intftype;
+
+		vfe_intf = params->entries[i].vfe_intf;
+
+		CDBG("%s intftype %x, vfe_intf %d, csid %d\n", __func__,
+			intftype, vfe_intf, params->entries[i].csid);
+
+		if ((intftype >= INTF_MAX) ||
+			(vfe_intf >= ispif->vfe_info.num_vfe) ||
+			(ispif->csid_version <= CSID_VERSION_V22 &&
+			(vfe_intf > VFE0))) {
+			pr_err("%s: VFEID %d and CSID version %d mismatch\n",
+				__func__, vfe_intf, ispif->csid_version);
+			return -EINVAL;
+		}
+
+		if (ispif->csid_version >= CSID_VERSION_V30)
+			msm_ispif_select_clk_mux(ispif, intftype,
+				params->entries[i].csid, vfe_intf);
+
+		rc = msm_ispif_validate_intf_status(ispif, intftype, vfe_intf);
+		if (rc) {
+			pr_err("%s:validate_intf_status failed, rc = %d\n",
+				__func__, rc);
+			return rc;
+		}
+
+		msm_ispif_sel_csid_core(ispif, intftype,
+			params->entries[i].csid, vfe_intf);
+		cid_mask = msm_ispif_get_cids_mask_from_cfg(
+				&params->entries[i]);
+		msm_ispif_enable_intf_cids(ispif, intftype,
+			cid_mask, vfe_intf, 1);
+		if (params->entries[i].crop_enable)
+			msm_ispif_enable_crop(ispif, intftype, vfe_intf,
+				params->entries[i].crop_start_pixel,
+				params->entries[i].crop_end_pixel);
+	}
+
+	for (vfe_intf = 0; vfe_intf < 2; vfe_intf++) {
+		msm_camera_io_w(ISPIF_IRQ_STATUS_MASK, ispif->base +
+			ISPIF_VFE_m_IRQ_MASK_0(vfe_intf));
+
+		msm_camera_io_w(ISPIF_IRQ_STATUS_MASK, ispif->base +
+			ISPIF_VFE_m_IRQ_CLEAR_0(vfe_intf));
+
+		msm_camera_io_w(ISPIF_IRQ_STATUS_1_MASK, ispif->base +
+			ISPIF_VFE_m_IRQ_MASK_1(vfe_intf));
+
+		msm_camera_io_w(ISPIF_IRQ_STATUS_1_MASK, ispif->base +
+			ISPIF_VFE_m_IRQ_CLEAR_1(vfe_intf));
+
+		msm_camera_io_w(ISPIF_IRQ_STATUS_2_MASK, ispif->base +
+			ISPIF_VFE_m_IRQ_MASK_2(vfe_intf));
+
+		msm_camera_io_w(ISPIF_IRQ_STATUS_2_MASK, ispif->base +
+			ISPIF_VFE_m_IRQ_CLEAR_2(vfe_intf));
+	}
+
+	msm_camera_io_w_mb(ISPIF_IRQ_GLOBAL_CLEAR_CMD, ispif->base +
+		ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+
+	return rc;
+}
+
+static int msm_ispif_intf_cmd(struct ispif_device *ispif, uint32_t cmd_bits,
+	struct msm_ispif_param_data *params)
+{
+	uint8_t vc;
+	int i, k;
+	enum msm_ispif_intftype intf_type;
+	enum msm_ispif_cid cid;
+	enum msm_ispif_vfe_intf vfe_intf;
+
+	if (WARN_ON(!ispif) || WARN_ON(!params))
+		return -EINVAL;
+
+	for (i = 0; i < params->num; i++) {
+		vfe_intf = params->entries[i].vfe_intf;
+		if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) {
+			pr_err("%s: invalid interface type\n", __func__);
+			return -EINVAL;
+		}
+		if (params->entries[i].num_cids > MAX_CID_CH_PARAM_ENTRY) {
+			pr_err("%s: out of range of cid_num %d\n",
+				__func__, params->entries[i].num_cids);
+			return -EINVAL;
+		}
+	}
+
+	for (i = 0; i < params->num; i++) {
+		intf_type = params->entries[i].intftype;
+		vfe_intf = params->entries[i].vfe_intf;
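+		/*
+		 * Each virtual channel (cid / 4) owns a 2-bit command field:
+		 * PIX0/RDI0/PIX1/RDI1 fields sit in INTF_CMD_0 at offset
+		 * intf_type * 8, RDI2 fields sit in INTF_CMD_1 at offset 8.
+		 */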
+		for (k = 0; k < params->entries[i].num_cids; k++) {
+			cid = params->entries[i].cids[k];
+			vc = cid / 4;
+			if (intf_type == RDI2) {
+				/* zero out two bits */
+				ispif->applied_intf_cmd[vfe_intf].intf_cmd1 &=
+					~(0x3 << (vc * 2 + 8));
+				/* set cmd bits */
+				ispif->applied_intf_cmd[vfe_intf].intf_cmd1 |=
+					(cmd_bits << (vc * 2 + 8));
+			} else {
+				/* zero 2 bits */
+				ispif->applied_intf_cmd[vfe_intf].intf_cmd &=
+					~(0x3 << (vc * 2 + intf_type * 8));
+				/* set cmd bits */
+				ispif->applied_intf_cmd[vfe_intf].intf_cmd |=
+					(cmd_bits << (vc * 2 + intf_type * 8));
+			}
+		}
+		/* cmd for PIX0, PIX1, RDI0, RDI1 */
+		if (ispif->applied_intf_cmd[vfe_intf].intf_cmd != 0xFFFFFFFF)
+			msm_camera_io_w_mb(
+				ispif->applied_intf_cmd[vfe_intf].intf_cmd,
+				ispif->base + ISPIF_VFE_m_INTF_CMD_0(vfe_intf));
+
+		/* cmd for RDI2 */
+		if (ispif->applied_intf_cmd[vfe_intf].intf_cmd1 != 0xFFFFFFFF)
+			msm_camera_io_w_mb(
+				ispif->applied_intf_cmd[vfe_intf].intf_cmd1,
+				ispif->base + ISPIF_VFE_m_INTF_CMD_1(vfe_intf));
+	}
+	return 0;
+}
+
+static int msm_ispif_stop_immediately(struct ispif_device *ispif,
+	struct msm_ispif_param_data *params)
+{
+	int i, rc = 0;
+	uint16_t cid_mask = 0;
+
+	if (WARN_ON(!ispif) || WARN_ON(!params))
+		return -EINVAL;
+
+	if (ispif->ispif_state != ISPIF_POWER_UP) {
+		pr_err("%s: ispif invalid state %d\n", __func__,
+			ispif->ispif_state);
+		rc = -EPERM;
+		return rc;
+	}
+
+	if (params->num > MAX_PARAM_ENTRIES) {
+		pr_err("%s: invalid param entries %d\n", __func__,
+			params->num);
+		rc = -EINVAL;
+		return rc;
+	}
+	msm_ispif_intf_cmd(ispif, ISPIF_INTF_CMD_DISABLE_IMMEDIATELY, params);
+
+	/* after stopping the interfaces, clear the CID enable bits */
+	for (i = 0; i < params->num; i++) {
+		cid_mask = msm_ispif_get_cids_mask_from_cfg(
+			&params->entries[i]);
+		msm_ispif_enable_intf_cids(ispif, params->entries[i].intftype,
+			cid_mask, params->entries[i].vfe_intf, 0);
+	}
+
+	return rc;
+}
+
+static int msm_ispif_start_frame_boundary(struct ispif_device *ispif,
+	struct msm_ispif_param_data *params)
+{
+	int rc = 0;
+
+	if (ispif->ispif_state != ISPIF_POWER_UP) {
+		pr_err("%s: ispif invalid state %d\n", __func__,
+			ispif->ispif_state);
+		rc = -EPERM;
+		return rc;
+	}
+	if (params->num > MAX_PARAM_ENTRIES) {
+		pr_err("%s: invalid param entries %d\n", __func__,
+			params->num);
+		rc = -EINVAL;
+		return rc;
+	}
+
+	msm_ispif_intf_cmd(ispif, ISPIF_INTF_CMD_ENABLE_FRAME_BOUNDARY, params);
+
+	return rc;
+}
+
+static int msm_ispif_restart_frame_boundary(struct ispif_device *ispif,
+	struct msm_ispif_param_data *params)
+{
+	int rc = 0, i;
+	long timeout = 0;
+	uint16_t cid_mask;
+	enum msm_ispif_intftype intftype;
+	enum msm_ispif_vfe_intf vfe_intf;
+	uint32_t vfe_mask = 0;
+	uint32_t intf_addr;
+	struct clk *reset_clk[ARRAY_SIZE(ispif_8974_reset_clk_info)];
+	struct clk *reset_clk1[ARRAY_SIZE(ispif_8626_reset_clk_info)];
+
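+	/* clk_idx: 0 = no reset clocks held, 1 = 8974 set, 2 = 8x26 set */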
+	ispif->clk_idx = 0;
+
+	if (ispif->ispif_state != ISPIF_POWER_UP) {
+		pr_err("%s: ispif invalid state %d\n", __func__,
+			ispif->ispif_state);
+		rc = -EPERM;
+		return rc;
+	}
+	if (params->num > MAX_PARAM_ENTRIES) {
+		pr_err("%s: invalid param entries %d\n", __func__,
+			params->num);
+		rc = -EINVAL;
+		return rc;
+	}
+
+	for (i = 0; i < params->num; i++) {
+		vfe_intf = params->entries[i].vfe_intf;
+		if (vfe_intf >= VFE_MAX) {
+			pr_err("%s: %d invalid i %d vfe_intf %d\n", __func__,
+				__LINE__, i, vfe_intf);
+			return -EINVAL;
+		}
+		vfe_mask |= (1 << vfe_intf);
+	}
+
+	rc = msm_cam_clk_enable(&ispif->pdev->dev,
+		ispif_8974_reset_clk_info, reset_clk,
+		ARRAY_SIZE(ispif_8974_reset_clk_info), 1);
+	if (rc < 0) {
+		rc = msm_cam_clk_enable(&ispif->pdev->dev,
+			ispif_8626_reset_clk_info, reset_clk1,
+			ARRAY_SIZE(ispif_8626_reset_clk_info), 1);
+		if (rc < 0) {
+			pr_err("%s: cannot enable clock, error = %d",
+				__func__, rc);
+		} else {
+			/* This is set when device is 8x26 */
+			ispif->clk_idx = 2;
+		}
+	} else {
+		/* This is set when device is 8974 */
+		ispif->clk_idx = 1;
+	}
+
+	if (vfe_mask & (1 << VFE0)) {
+		init_completion(&ispif->reset_complete[VFE0]);
+		pr_err("%s Init completion VFE0\n", __func__);
+		/* initiate reset of the ISPIF */
+		msm_camera_io_w(0x00001FF9,
+				ispif->base + ISPIF_RST_CMD_ADDR);
+	}
+	if (ispif->hw_num_isps > 1 && (vfe_mask & (1 << VFE1))) {
+		init_completion(&ispif->reset_complete[VFE1]);
+		pr_err("%s Init completion VFE1\n", __func__);
+		msm_camera_io_w(0x00001FF9,
+			ispif->base + ISPIF_RST_CMD_1_ADDR);
+	}
+
+	if (vfe_mask & (1 << VFE0)) {
+		timeout = wait_for_completion_timeout(
+			&ispif->reset_complete[VFE0], msecs_to_jiffies(500));
+		if (timeout <= 0) {
+			pr_err("%s: VFE0 reset wait timeout\n", __func__);
+			rc = -ETIMEDOUT;
+			goto disable_clk;
+		}
+	}
+
+	if (ispif->hw_num_isps > 1 && (vfe_mask & (1 << VFE1))) {
+		timeout = wait_for_completion_timeout(
+				&ispif->reset_complete[VFE1],
+				msecs_to_jiffies(500));
+		if (timeout <= 0) {
+			pr_err("%s: VFE1 reset wait timeout\n", __func__);
+			rc = -ETIMEDOUT;
+			goto disable_clk;
+		}
+	}
+
+	pr_info("%s: ISPIF reset hw done", __func__);
+
+	if (ispif->clk_idx == 1) {
+		rc = msm_cam_clk_enable(&ispif->pdev->dev,
+			ispif_8974_reset_clk_info, reset_clk,
+			ARRAY_SIZE(ispif_8974_reset_clk_info), 0);
+		if (rc < 0) {
+			pr_err("%s: cannot disable clock, error = %d",
+				__func__, rc);
+			goto end;
+		}
+	}
+
+	if (ispif->clk_idx == 2) {
+		rc = msm_cam_clk_enable(&ispif->pdev->dev,
+			ispif_8626_reset_clk_info, reset_clk1,
+			ARRAY_SIZE(ispif_8626_reset_clk_info), 0);
+		if (rc < 0) {
+			pr_err("%s: cannot disable clock, error = %d",
+				__func__, rc);
+			goto end;
+		}
+	}
+
+	for (i = 0; i < params->num; i++) {
+		intftype = params->entries[i].intftype;
+		vfe_intf = params->entries[i].vfe_intf;
+
+		switch (intftype) {
+		case PIX0:
+			intf_addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 0);
+			break;
+		case RDI0:
+			intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 0);
+			break;
+		case PIX1:
+			intf_addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 1);
+			break;
+		case RDI1:
+			intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 1);
+			break;
+		case RDI2:
+			intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 2);
+			break;
+		default:
+			pr_err("%s: invalid intftype=%d\n", __func__,
+			params->entries[i].intftype);
+			rc = -EPERM;
+			goto end;
+		}
+
+		msm_ispif_intf_cmd(ispif,
+			ISPIF_INTF_CMD_ENABLE_FRAME_BOUNDARY, params);
+	}
+
+	for (i = 0; i < params->num; i++) {
+		intftype = params->entries[i].intftype;
+
+		vfe_intf = params->entries[i].vfe_intf;
+
+		cid_mask = msm_ispif_get_cids_mask_from_cfg(
+			&params->entries[i]);
+
+		msm_ispif_enable_intf_cids(ispif, intftype,
+			cid_mask, vfe_intf, 1);
+	}
+
+end:
+	return rc;
+disable_clk:
+	rc = msm_cam_clk_enable(&ispif->pdev->dev,
+		ispif_8974_reset_clk_info, reset_clk,
+		ARRAY_SIZE(ispif_8974_reset_clk_info), 0);
+	if (rc < 0) {
+		rc = msm_cam_clk_enable(&ispif->pdev->dev,
+			ispif_8626_reset_clk_info, reset_clk1,
+			ARRAY_SIZE(ispif_8626_reset_clk_info), 0);
+		if (rc < 0)
+			pr_err("%s: cannot disable clock, error = %d",
+				__func__, rc);
+	}
+
+	return -ETIMEDOUT;
+}
+
+static int msm_ispif_stop_frame_boundary(struct ispif_device *ispif,
+	struct msm_ispif_param_data *params)
+{
+	int i, rc = 0;
+	uint16_t cid_mask = 0;
+	uint32_t intf_addr;
+	enum msm_ispif_vfe_intf vfe_intf;
+	uint32_t stop_flag = 0;
+
+	if (WARN_ON(!ispif) || WARN_ON(!params))
+		return -EINVAL;
+
+	if (ispif->ispif_state != ISPIF_POWER_UP) {
+		pr_err("%s: ispif invalid state %d\n", __func__,
+			ispif->ispif_state);
+		rc = -EPERM;
+		return rc;
+	}
+
+	if (params->num > MAX_PARAM_ENTRIES) {
+		pr_err("%s: invalid param entries %d\n", __func__,
+			params->num);
+		rc = -EINVAL;
+		return rc;
+	}
+
+	for (i = 0; i < params->num; i++) {
+		if (!msm_ispif_is_intf_valid(ispif->csid_version,
+				params->entries[i].vfe_intf)) {
+			pr_err("%s: invalid interface type\n", __func__);
+			rc = -EINVAL;
+			goto end;
+		}
+	}
+
+	msm_ispif_intf_cmd(ispif,
+		ISPIF_INTF_CMD_DISABLE_FRAME_BOUNDARY, params);
+
+	for (i = 0; i < params->num; i++) {
+		cid_mask =
+			msm_ispif_get_cids_mask_from_cfg(&params->entries[i]);
+		vfe_intf = params->entries[i].vfe_intf;
+
+		switch (params->entries[i].intftype) {
+		case PIX0:
+			intf_addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 0);
+			break;
+		case RDI0:
+			intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 0);
+			break;
+		case PIX1:
+			intf_addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 1);
+			break;
+		case RDI1:
+			intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 1);
+			break;
+		case RDI2:
+			intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 2);
+			break;
+		default:
+			pr_err("%s: invalid intftype=%d\n", __func__,
+				params->entries[i].intftype);
+			rc = -EPERM;
+			goto end;
+		}
+
+		rc = readl_poll_timeout(ispif->base + intf_addr, stop_flag,
+					(stop_flag & 0xF) == 0xF,
+					ISPIF_TIMEOUT_SLEEP_US,
+					ISPIF_TIMEOUT_ALL_US);
+		if (rc < 0)
+			goto end;
+
+		/* disable CIDs in CID_MASK register */
+		msm_ispif_enable_intf_cids(ispif, params->entries[i].intftype,
+			cid_mask, vfe_intf, 0);
+	}
+
+end:
+	return rc;
+}
+
+static void ispif_process_irq(struct ispif_device *ispif,
+	struct ispif_irq_status *out, enum msm_ispif_vfe_intf vfe_id)
+{
+	if (WARN_ON(!ispif) || WARN_ON(!out))
+		return;
+
+	if (out[vfe_id].ispifIrqStatus0 &
+			ISPIF_IRQ_STATUS_PIX_SOF_MASK) {
+		if (ispif->ispif_sof_debug < 5)
+			pr_err("%s: PIX0 frame id: %u\n", __func__,
+				ispif->sof_count[vfe_id].sof_cnt[PIX0]);
+		ispif->sof_count[vfe_id].sof_cnt[PIX0]++;
+		ispif->ispif_sof_debug++;
+	}
+	if (out[vfe_id].ispifIrqStatus0 &
+			ISPIF_IRQ_STATUS_RDI0_SOF_MASK) {
+		ispif->sof_count[vfe_id].sof_cnt[RDI0]++;
+	}
+	if (out[vfe_id].ispifIrqStatus1 &
+			ISPIF_IRQ_STATUS_RDI1_SOF_MASK) {
+		ispif->sof_count[vfe_id].sof_cnt[RDI1]++;
+	}
+	if (out[vfe_id].ispifIrqStatus2 &
+			ISPIF_IRQ_STATUS_RDI2_SOF_MASK) {
+		ispif->sof_count[vfe_id].sof_cnt[RDI2]++;
+	}
+}
+
+static inline void msm_ispif_read_irq_status(struct ispif_irq_status *out,
+	void *data)
+{
+	struct ispif_device *ispif = (struct ispif_device *)data;
+
+	if (WARN_ON(!ispif) || WARN_ON(!out))
+		return;
+
+	out[VFE0].ispifIrqStatus0 = msm_camera_io_r(ispif->base +
+		ISPIF_VFE_m_IRQ_STATUS_0(VFE0));
+	msm_camera_io_w(out[VFE0].ispifIrqStatus0,
+		ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(VFE0));
+
+	out[VFE0].ispifIrqStatus1 = msm_camera_io_r(ispif->base +
+		ISPIF_VFE_m_IRQ_STATUS_1(VFE0));
+	msm_camera_io_w(out[VFE0].ispifIrqStatus1,
+		ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(VFE0));
+
+	out[VFE0].ispifIrqStatus2 = msm_camera_io_r(ispif->base +
+		ISPIF_VFE_m_IRQ_STATUS_2(VFE0));
+	msm_camera_io_w_mb(out[VFE0].ispifIrqStatus2,
+		ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(VFE0));
+
+	if (ispif->vfe_info.num_vfe > 1) {
+		out[VFE1].ispifIrqStatus0 = msm_camera_io_r(ispif->base +
+			ISPIF_VFE_m_IRQ_STATUS_0(VFE1));
+		msm_camera_io_w(out[VFE1].ispifIrqStatus0,
+			ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(VFE1));
+
+		out[VFE1].ispifIrqStatus1 = msm_camera_io_r(ispif->base +
+			ISPIF_VFE_m_IRQ_STATUS_1(VFE1));
+		msm_camera_io_w(out[VFE1].ispifIrqStatus1,
+				ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(VFE1));
+
+		out[VFE1].ispifIrqStatus2 = msm_camera_io_r(ispif->base +
+			ISPIF_VFE_m_IRQ_STATUS_2(VFE1));
+		msm_camera_io_w_mb(out[VFE1].ispifIrqStatus2,
+			ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(VFE1));
+	}
+	msm_camera_io_w_mb(ISPIF_IRQ_GLOBAL_CLEAR_CMD, ispif->base +
+		ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+
+	if (out[VFE0].ispifIrqStatus0 & ISPIF_IRQ_STATUS_MASK) {
+		if (out[VFE0].ispifIrqStatus0 & RESET_DONE_IRQ)
+			complete(&ispif->reset_complete[VFE0]);
+
+		if (out[VFE0].ispifIrqStatus0 & PIX_INTF_0_OVERFLOW_IRQ)
+			pr_err("%s: VFE0 pix0 overflow.\n", __func__);
+
+		if (out[VFE0].ispifIrqStatus0 & RAW_INTF_0_OVERFLOW_IRQ)
+			pr_err("%s: VFE0 rdi0 overflow.\n", __func__);
+
+		if (out[VFE0].ispifIrqStatus1 & RAW_INTF_1_OVERFLOW_IRQ)
+			pr_err("%s: VFE0 rdi1 overflow.\n", __func__);
+
+		if (out[VFE0].ispifIrqStatus2 & RAW_INTF_2_OVERFLOW_IRQ)
+			pr_err("%s: VFE0 rdi2 overflow.\n", __func__);
+
+		ispif_process_irq(ispif, out, VFE0);
+	}
+	if (ispif->hw_num_isps > 1) {
+		if (out[VFE1].ispifIrqStatus0 & RESET_DONE_IRQ)
+			complete(&ispif->reset_complete[VFE1]);
+
+		if (out[VFE1].ispifIrqStatus0 & PIX_INTF_0_OVERFLOW_IRQ)
+			pr_err("%s: VFE1 pix0 overflow.\n", __func__);
+
+		if (out[VFE1].ispifIrqStatus0 & RAW_INTF_0_OVERFLOW_IRQ)
+			pr_err("%s: VFE1 rdi0 overflow.\n", __func__);
+
+		if (out[VFE1].ispifIrqStatus1 & RAW_INTF_1_OVERFLOW_IRQ)
+			pr_err("%s: VFE1 rdi1 overflow.\n", __func__);
+
+		if (out[VFE1].ispifIrqStatus2 & RAW_INTF_2_OVERFLOW_IRQ)
+			pr_err("%s: VFE1 rdi2 overflow.\n", __func__);
+
+		ispif_process_irq(ispif, out, VFE1);
+	}
+}
+
+static irqreturn_t msm_io_ispif_irq(int irq_num, void *data)
+{
+	struct ispif_irq_status irq[VFE_MAX];
+
+	msm_ispif_read_irq_status(irq, data);
+	return IRQ_HANDLED;
+}
+
+static int msm_ispif_set_vfe_info(struct ispif_device *ispif,
+	struct msm_ispif_vfe_info *vfe_info)
+{
+	if (!vfe_info || (vfe_info->num_vfe <= 0) ||
+		((uint32_t)(vfe_info->num_vfe) > ispif->hw_num_isps)) {
+		pr_err("Invalid VFE info: %pK %d\n", vfe_info,
+			(vfe_info ? vfe_info->num_vfe : 0));
+		return -EINVAL;
+	}
+
+	memcpy(&ispif->vfe_info, vfe_info, sizeof(struct msm_ispif_vfe_info));
+
+	return 0;
+}
+
+static int msm_ispif_init(struct ispif_device *ispif,
+	uint32_t csid_version)
+{
+	int rc = 0;
+
+	if (WARN_ON(!ispif)) {
+		pr_err("%s: invalid ispif params", __func__);
+		return -EINVAL;
+	}
+
+	if (ispif->ispif_state == ISPIF_POWER_UP) {
+		pr_err("%s: ispif already initialized, state = %d\n", __func__,
+			ispif->ispif_state);
+		rc = -EPERM;
+		return rc;
+	}
+
+	/* can we set to zero? */
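+	/*
+	 * 0xFFFFFFFF marks every per-VC command field as "not applied";
+	 * msm_ispif_intf_cmd() only writes INTF_CMD_0/1 once a field has
+	 * actually been programmed.
+	 */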
+	ispif->applied_intf_cmd[VFE0].intf_cmd  = 0xFFFFFFFF;
+	ispif->applied_intf_cmd[VFE0].intf_cmd1 = 0xFFFFFFFF;
+	ispif->applied_intf_cmd[VFE1].intf_cmd  = 0xFFFFFFFF;
+	ispif->applied_intf_cmd[VFE1].intf_cmd1 = 0xFFFFFFFF;
+	memset(ispif->sof_count, 0, sizeof(ispif->sof_count));
+
+	ispif->csid_version = csid_version;
+
+	if (ispif->csid_version >= CSID_VERSION_V30) {
+		if (!ispif->clk_mux_mem || !ispif->clk_mux_io) {
+			pr_err("%s csi clk mux mem %pK io %pK\n", __func__,
+				ispif->clk_mux_mem, ispif->clk_mux_io);
+			rc = -ENOMEM;
+			return rc;
+		}
+		ispif->clk_mux_base = ioremap(ispif->clk_mux_mem->start,
+			resource_size(ispif->clk_mux_mem));
+		if (!ispif->clk_mux_base) {
+			pr_err("%s: clk_mux_mem ioremap failed\n", __func__);
+			rc = -ENOMEM;
+			return rc;
+		}
+	}
+
+	ispif->base = ioremap(ispif->mem->start,
+		resource_size(ispif->mem));
+	if (!ispif->base) {
+		rc = -ENOMEM;
+		pr_err("%s: nomem\n", __func__);
+		goto end;
+	}
+	rc = request_irq(ispif->irq->start, msm_io_ispif_irq,
+		IRQF_TRIGGER_RISING, "ispif", ispif);
+	if (rc) {
+		pr_err("%s: request_irq error = %d\n", __func__, rc);
+		goto error_irq;
+	}
+
+	rc = msm_ispif_clk_ahb_enable(ispif, 1);
+	if (rc) {
+		pr_err("%s: ahb_clk enable failed", __func__);
+		goto error_ahb;
+	}
+
+	msm_ispif_reset_hw(ispif);
+
+	rc = msm_ispif_reset(ispif);
+	if (rc == 0) {
+		ispif->ispif_state = ISPIF_POWER_UP;
+		CDBG("%s: power up done\n", __func__);
+		goto end;
+	}
+
+error_ahb:
+	free_irq(ispif->irq->start, ispif);
+error_irq:
+	iounmap(ispif->base);
+
+end:
+	return rc;
+}
+
+static void msm_ispif_release(struct ispif_device *ispif)
+{
+	if (WARN_ON(!ispif)) {
+		pr_err("%s: invalid ispif params", __func__);
+		return;
+	}
+
+	if (!ispif->base) {
+		pr_err("%s: ispif base is NULL\n", __func__);
+		return;
+	}
+
+	if (ispif->ispif_state != ISPIF_POWER_UP) {
+		pr_err("%s: ispif invalid state %d\n", __func__,
+			ispif->ispif_state);
+		return;
+	}
+
+	/* make sure no streaming going on */
+	msm_ispif_reset(ispif);
+
+	msm_ispif_clk_ahb_enable(ispif, 0);
+
+	free_irq(ispif->irq->start, ispif);
+
+	iounmap(ispif->base);
+
+	iounmap(ispif->clk_mux_base);
+
+	ispif->ispif_state = ISPIF_POWER_DOWN;
+}
+
+static long msm_ispif_cmd(struct v4l2_subdev *sd, void *arg)
+{
+	long rc = 0;
+	struct ispif_cfg_data *pcdata = (struct ispif_cfg_data *)arg;
+	struct ispif_device *ispif =
+		(struct ispif_device *)v4l2_get_subdevdata(sd);
+
+	if (WARN_ON(!sd) || WARN_ON(!pcdata))
+		return -EINVAL;
+
+	mutex_lock(&ispif->mutex);
+	switch (pcdata->cfg_type) {
+	case ISPIF_ENABLE_REG_DUMP:
+		ispif->enb_dump_reg = pcdata->reg_dump; /* save dump config */
+		break;
+	case ISPIF_INIT:
+		rc = msm_ispif_init(ispif, pcdata->csid_version);
+		msm_ispif_io_dump_reg(ispif);
+		break;
+	case ISPIF_CFG:
+		rc = msm_ispif_config(ispif, &pcdata->params);
+		msm_ispif_io_dump_reg(ispif);
+		break;
+	case ISPIF_START_FRAME_BOUNDARY:
+		rc = msm_ispif_start_frame_boundary(ispif, &pcdata->params);
+		msm_ispif_io_dump_reg(ispif);
+		break;
+	case ISPIF_RESTART_FRAME_BOUNDARY:
+		rc = msm_ispif_restart_frame_boundary(ispif, &pcdata->params);
+		msm_ispif_io_dump_reg(ispif);
+		break;
+
+	case ISPIF_STOP_FRAME_BOUNDARY:
+		rc = msm_ispif_stop_frame_boundary(ispif, &pcdata->params);
+		msm_ispif_io_dump_reg(ispif);
+		break;
+	case ISPIF_STOP_IMMEDIATELY:
+		rc = msm_ispif_stop_immediately(ispif, &pcdata->params);
+		msm_ispif_io_dump_reg(ispif);
+		break;
+	case ISPIF_RELEASE:
+		msm_ispif_release(ispif);
+		break;
+	case ISPIF_SET_VFE_INFO:
+		rc = msm_ispif_set_vfe_info(ispif, &pcdata->vfe_info);
+		break;
+	default:
+		pr_err("%s: invalid cfg_type\n", __func__);
+		rc = -EINVAL;
+		break;
+	}
+	mutex_unlock(&ispif->mutex);
+	return rc;
+}
+
+static struct v4l2_file_operations msm_ispif_v4l2_subdev_fops;
+
+static long msm_ispif_subdev_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	struct ispif_device *ispif =
+		(struct ispif_device *)v4l2_get_subdevdata(sd);
+
+	switch (cmd) {
+	case VIDIOC_MSM_ISPIF_CFG:
+		return msm_ispif_cmd(sd, arg);
+	case MSM_SD_NOTIFY_FREEZE: {
+		ispif->ispif_sof_debug = 0;
+		return 0;
+	}
+	case MSM_SD_SHUTDOWN: {
+		if (ispif && ispif->base) {
+			mutex_lock(&ispif->mutex);
+			msm_ispif_release(ispif);
+			mutex_unlock(&ispif->mutex);
+		}
+		return 0;
+	}
+	default:
+		pr_err_ratelimited("%s: invalid cmd 0x%x received\n",
+			__func__, cmd);
+		return -ENOIOCTLCMD;
+	}
+}
+
+static long msm_ispif_subdev_do_ioctl(
+	struct file *file, unsigned int cmd, void *arg)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+
+	return msm_ispif_subdev_ioctl(sd, cmd, arg);
+}
+
+static long msm_ispif_subdev_fops_ioctl(struct file *file, unsigned int cmd,
+	unsigned long arg)
+{
+	return video_usercopy(file, cmd, arg, msm_ispif_subdev_do_ioctl);
+}
+
+static int ispif_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+	struct ispif_device *ispif = v4l2_get_subdevdata(sd);
+
+	mutex_lock(&ispif->mutex);
+	/* mem remap is done in init when the clock is on */
+	ispif->open_cnt++;
+	mutex_unlock(&ispif->mutex);
+	return 0;
+}
+
+static int ispif_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+	int rc = 0;
+	struct ispif_device *ispif = v4l2_get_subdevdata(sd);
+
+	if (!ispif) {
+		pr_err("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ispif->mutex);
+	if (ispif->open_cnt == 0) {
+		pr_err("%s: Invalid close\n", __func__);
+		rc = -ENODEV;
+		goto end;
+	}
+	ispif->open_cnt--;
+	if (ispif->open_cnt == 0)
+		msm_ispif_release(ispif);
+end:
+	mutex_unlock(&ispif->mutex);
+	return rc;
+}
+
+static struct v4l2_subdev_core_ops msm_ispif_subdev_core_ops = {
+	/* .g_chip_ident = &msm_ispif_subdev_g_chip_ident, */
+	.ioctl = &msm_ispif_subdev_ioctl,
+};
+
+static const struct v4l2_subdev_ops msm_ispif_subdev_ops = {
+	.core = &msm_ispif_subdev_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops msm_ispif_internal_ops = {
+	.open = ispif_open_node,
+	.close = ispif_close_node,
+};
+
+static int ispif_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct ispif_device *ispif;
+
+	ispif = kzalloc(sizeof(struct ispif_device), GFP_KERNEL);
+	if (!ispif)
+		return -ENOMEM;
+
+	if (pdev->dev.of_node) {
+		of_property_read_u32((&pdev->dev)->of_node,
+			"cell-index", &pdev->id);
+		rc = of_property_read_u32((&pdev->dev)->of_node,
+			"qcom,num-isps", &ispif->hw_num_isps);
+		if (rc)
+			/* backward compatibility */
+			ispif->hw_num_isps = 1;
+		/* not an error condition */
+		rc = 0;
+	}
+
+	mutex_init(&ispif->mutex);
+	ispif->mem = platform_get_resource_byname(pdev,
+		IORESOURCE_MEM, "ispif");
+	if (!ispif->mem) {
+		pr_err("%s: no mem resource?\n", __func__);
+		rc = -ENODEV;
+		goto error;
+	}
+	ispif->irq = platform_get_resource_byname(pdev,
+		IORESOURCE_IRQ, "ispif");
+	if (!ispif->irq) {
+		pr_err("%s: no irq resource?\n", __func__);
+		rc = -ENODEV;
+		goto error;
+	}
+	ispif->io = request_mem_region(ispif->mem->start,
+		resource_size(ispif->mem), pdev->name);
+	if (!ispif->io) {
+		pr_err("%s: no valid mem region\n", __func__);
+		rc = -EBUSY;
+		goto error;
+	}
+	ispif->clk_mux_mem = platform_get_resource_byname(pdev,
+		IORESOURCE_MEM, "csi_clk_mux");
+	if (ispif->clk_mux_mem) {
+		ispif->clk_mux_io = request_mem_region(
+			ispif->clk_mux_mem->start,
+			resource_size(ispif->clk_mux_mem),
+			ispif->clk_mux_mem->name);
+		if (!ispif->clk_mux_io)
+			pr_err("%s: no valid csi_mux region\n", __func__);
+	}
+
+	v4l2_subdev_init(&ispif->msm_sd.sd, &msm_ispif_subdev_ops);
+	ispif->msm_sd.sd.internal_ops = &msm_ispif_internal_ops;
+	ispif->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+	snprintf(ispif->msm_sd.sd.name,
+		ARRAY_SIZE(ispif->msm_sd.sd.name), MSM_ISPIF_DRV_NAME);
+	v4l2_set_subdevdata(&ispif->msm_sd.sd, ispif);
+
+	platform_set_drvdata(pdev, &ispif->msm_sd.sd);
+
+	media_entity_pads_init(&ispif->msm_sd.sd.entity, 0, NULL);
+	ispif->msm_sd.sd.entity.function = MSM_CAMERA_SUBDEV_ISPIF;
+	ispif->msm_sd.sd.entity.name = pdev->name;
+	ispif->msm_sd.close_seq = MSM_SD_CLOSE_1ST_CATEGORY | 0x1;
+	rc = msm_sd_register(&ispif->msm_sd);
+	if (rc) {
+		pr_err("%s: msm_sd_register error = %d\n", __func__, rc);
+		goto error;
+	}
+	msm_ispif_v4l2_subdev_fops.owner = v4l2_subdev_fops.owner;
+	msm_ispif_v4l2_subdev_fops.open = v4l2_subdev_fops.open;
+	msm_ispif_v4l2_subdev_fops.unlocked_ioctl = msm_ispif_subdev_fops_ioctl;
+	msm_ispif_v4l2_subdev_fops.release = v4l2_subdev_fops.release;
+	msm_ispif_v4l2_subdev_fops.poll = v4l2_subdev_fops.poll;
+#ifdef CONFIG_COMPAT
+	msm_ispif_v4l2_subdev_fops.compat_ioctl32 = msm_ispif_subdev_fops_ioctl;
+#endif
+	ispif->msm_sd.sd.devnode->fops = &msm_ispif_v4l2_subdev_fops;
+	ispif->pdev = pdev;
+	ispif->ispif_state = ISPIF_POWER_DOWN;
+	ispif->open_cnt = 0;
+	return 0;
+
+error:
+	mutex_destroy(&ispif->mutex);
+	kfree(ispif);
+	return rc;
+}
+
+static const struct of_device_id msm_ispif_dt_match[] = {
+	{.compatible = "qcom,ispif"},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, msm_ispif_dt_match);
+
+static struct platform_driver ispif_driver = {
+	.probe = ispif_probe,
+	.driver = {
+		.name = MSM_ISPIF_DRV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = msm_ispif_dt_match,
+	},
+};
+
+static int __init msm_ispif_init_module(void)
+{
+	return platform_driver_register(&ispif_driver);
+}
+
+static void __exit msm_ispif_exit_module(void)
+{
+	platform_driver_unregister(&ispif_driver);
+}
+
+module_init(msm_ispif_init_module);
+module_exit(msm_ispif_exit_module);
+MODULE_DESCRIPTION("MSM ISP Interface driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_32.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_32.h
new file mode 100644
index 0000000..6217fba
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_32.h
@@ -0,0 +1,69 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_ISPIF_H
+#define MSM_ISPIF_H
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <media/v4l2-subdev.h>
+#include <media/msmb_ispif.h>
+#include "msm_sd.h"
+
+#define ISPIF_CLK_INFO_MAX 24
+
+struct ispif_irq_status {
+	uint32_t ispifIrqStatus0;
+	uint32_t ispifIrqStatus1;
+	uint32_t ispifIrqStatus2;
+};
+
+enum msm_ispif_state_t {
+	ISPIF_POWER_UP,
+	ISPIF_POWER_DOWN,
+};
+struct ispif_sof_count {
+	uint32_t sof_cnt[INTF_MAX];
+};
+
+struct ispif_intf_cmd {
+	uint32_t intf_cmd;
+	uint32_t intf_cmd1;
+};
+
+struct ispif_device {
+	struct platform_device *pdev;
+	struct msm_sd_subdev msm_sd;
+	struct resource *mem;
+	struct resource *clk_mux_mem;
+	struct resource *irq;
+	struct resource *io;
+	struct resource *clk_mux_io;
+	void __iomem *base;
+	void __iomem *clk_mux_base;
+	struct mutex mutex;
+	uint8_t start_ack_pending;
+	uint32_t csid_version;
+	int enb_dump_reg;
+	uint32_t open_cnt;
+	struct ispif_sof_count sof_count[VFE_MAX];
+	struct ispif_intf_cmd applied_intf_cmd[VFE_MAX];
+	enum msm_ispif_state_t ispif_state;
+	struct msm_ispif_vfe_info vfe_info;
+	struct clk *ahb_clk[ISPIF_CLK_INFO_MAX];
+	struct completion reset_complete[VFE_MAX];
+	uint32_t hw_num_isps;
+	uint32_t num_ahb_clk;
+	uint32_t clk_idx;
+	uint32_t ispif_sof_debug;
+};
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
index 625a0db..c045eda 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
@@ -2077,14 +2077,8 @@
 
 static int __init msm_actuator_init_module(void)
 {
-	int32_t rc = 0;
-
 	CDBG("Enter\n");
-	rc = platform_driver_register(&msm_actuator_platform_driver);
-	if (!rc)
-		return rc;
-
-	CDBG("%s:%d rc %d\n", __func__, __LINE__, rc);
+	platform_driver_register(&msm_actuator_platform_driver);
 	return i2c_add_driver(&msm_actuator_i2c_driver);
 }
 
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_4_3_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_4_3_hwreg.h
new file mode 100644
index 0000000..ac0a989
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_4_3_hwreg.h
@@ -0,0 +1,63 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSID_3_4_3_HWREG_H
+#define MSM_CSID_3_4_3_HWREG_H
+
+#include <sensor/csid/msm_csid.h>
+
+static uint8_t csid_lane_assign_v3_4_3[PHY_LANE_MAX] = {0, 4, 1, 2, 3};
+static struct csid_reg_parms_t csid_v3_4_3 = {
+	/* MIPI	CSID registers */
+	0x0,
+	0x4,
+	0x8,
+	0xC,
+	0x10,
+	0x14,
+	0x18,
+	0x1C,
+	0x20,
+	0x60,
+	0x64,
+	0x68,
+	0x6C,
+	0x70,
+	0x74,
+	0x78,
+	0x7C,
+	0x80,
+	0x84,
+	0x88,
+	0x8C,
+	0x90,
+	0x94,
+	0x98,
+	0xA0,
+	0xA4,
+	0xAC,
+	0xB0,
+	0xB4,
+	11,
+	0x7FFF,
+	0x4,
+	17,
+	0x30040003,
+	0xFFFFFFFF,
+	0xFFFFFFFF,
+	0xFFFFFFFF,
+	0x7f010800,
+	20,
+	0xFFFFFFFF,
+	0xFFFFFFFF,
+};
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
index ba32526..9d3184e 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
@@ -26,6 +26,7 @@
 #include "include/msm_csid_3_5_hwreg.h"
 #include "include/msm_csid_3_4_1_hwreg.h"
 #include "include/msm_csid_3_4_2_hwreg.h"
+#include "include/msm_csid_3_4_3_hwreg.h"
 #include "include/msm_csid_3_6_0_hwreg.h"
 #include "include/msm_csid_3_5_1_hwreg.h"
 #include "cam_hw_ops.h"
@@ -42,6 +43,7 @@
 #define CSID_VERSION_V34                      0x30040000
 #define CSID_VERSION_V34_1                    0x30040001
 #define CSID_VERSION_V34_2                    0x30040002
+#define CSID_VERSION_V34_3                    0x30040003
 #define CSID_VERSION_V36                      0x30060000
 #define CSID_VERSION_V37                      0x30070000
 #define CSID_VERSION_V35                      0x30050000
@@ -229,7 +231,9 @@
 static int msm_csid_reset(struct csid_device *csid_dev)
 {
 	int32_t rc = 0;
+	uint32_t irq = 0, irq_bitshift;
 
+	irq_bitshift = csid_dev->ctrl_reg->csid_reg.csid_rst_done_irq_bitshift;
 	msm_camera_io_w(csid_dev->ctrl_reg->csid_reg.csid_rst_stb_all,
 		csid_dev->base +
 		csid_dev->ctrl_reg->csid_reg.csid_rst_cmd_addr);
@@ -238,8 +242,23 @@
 	if (rc < 0) {
 		pr_err("wait_for_completion in msm_csid_reset fail rc = %d\n",
 			rc);
+	} else if (rc == 0) {
+		irq = msm_camera_io_r(csid_dev->base +
+			csid_dev->ctrl_reg->csid_reg.csid_irq_status_addr);
+		pr_err_ratelimited("%s CSID%d_IRQ_STATUS_ADDR = 0x%x\n",
+			__func__, csid_dev->pdev->id, irq);
+		if (irq & (0x1 << irq_bitshift)) {
+			rc = 1;
+			CDBG("%s succeeded", __func__);
+		} else {
+			rc = 0;
+			pr_err("%s reset csid_irq_status failed = 0x%x\n",
+				__func__, irq);
+		}
 		if (rc == 0)
 			rc = -ETIMEDOUT;
+	} else {
+		CDBG("%s succeeded", __func__);
 	}
 	return rc;
 }
@@ -306,8 +325,7 @@
 	if (!msm_csid_find_max_clk_rate(csid_dev))
 		pr_err("msm_csid_find_max_clk_rate failed\n");
 
-	clk_rate = (csid_params->csi_clk > 0) ?
-				(csid_params->csi_clk) : csid_dev->csid_max_clk;
+	clk_rate = csid_dev->csid_max_clk;
 
 	clk_rate = msm_camera_clk_set_rate(&csid_dev->pdev->dev,
 		csid_dev->csid_clk[csid_dev->csid_clk_index], clk_rate);
@@ -1163,6 +1181,12 @@
 		new_csid_dev->ctrl_reg->csid_lane_assign =
 			csid_lane_assign_v3_4_2;
 	} else if (of_device_is_compatible(new_csid_dev->pdev->dev.of_node,
+		"qcom,csid-v3.4.3")) {
+		new_csid_dev->ctrl_reg->csid_reg = csid_v3_4_3;
+		new_csid_dev->hw_dts_version = CSID_VERSION_V34_3;
+		new_csid_dev->ctrl_reg->csid_lane_assign =
+			csid_lane_assign_v3_4_3;
+	} else if (of_device_is_compatible(new_csid_dev->pdev->dev.of_node,
 		"qcom,csid-v3.6.0")) {
 		new_csid_dev->ctrl_reg->csid_reg = csid_v3_6_0;
 		new_csid_dev->hw_dts_version = CSID_VERSION_V36;
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
index 6fc8e1e..35d5984 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
@@ -29,7 +29,6 @@
 #include "include/msm_csiphy_5_0_hwreg.h"
 #include "include/msm_csiphy_5_0_1_hwreg.h"
 #include "include/msm_csiphy_10_0_0_hwreg.h"
-
 #include "cam_hw_ops.h"
 
 #define DBG_CSIPHY 0
@@ -218,6 +217,8 @@
 		}
 	}
 
+	csiphy_dev->snps_programmed_data_rate = csiphy_params->data_rate;
+
 	if (mode == TWO_LANE_PHY_A) {
 		msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_snps_reg.
 			mipi_csiphy_sys_ctrl.data,
@@ -322,8 +323,11 @@
 		mode = AGGREGATE_MODE;
 		num_lanes = 4;
 		if (csiphy_dev->snps_state != NOT_CONFIGURED) {
-			pr_err("%s: invalid request\n", __func__);
-			return -EINVAL;
+			if (csiphy_dev->snps_programmed_data_rate !=
+				csiphy_params->data_rate)
+				pr_err("reconfiguring snps phy");
+			else
+				return 0;
 		}
 		csiphy_dev->snps_state = CONFIGURED_AGGREGATE_MODE;
 		clk_mux_reg &= ~0xff;
@@ -332,34 +336,38 @@
 	} else if (lane_mask == LANE_MASK_PHY_A) { /* PHY A */
 		/* 2 lane config */
 		num_lanes = 2;
+		mode = TWO_LANE_PHY_A;
 		if (csiphy_dev->snps_state == NOT_CONFIGURED) {
-			mode = TWO_LANE_PHY_A;
 			csiphy_dev->snps_state = CONFIGURED_TWO_LANE_PHY_A;
 		} else if (csiphy_dev->snps_state ==
 			CONFIGURED_TWO_LANE_PHY_B) {
 			/* 2 lane + 2 lane config */
-			mode = TWO_LANE_PHY_A;
 			csiphy_dev->snps_state = CONFIGURED_COMBO_MODE;
 		} else {
-			pr_err("%s: invalid request\n", __func__);
-			return -EINVAL;
+			if (csiphy_dev->snps_programmed_data_rate !=
+				csiphy_params->data_rate)
+				pr_err("reconfiguring snps phy");
+			else
+				return 0;
 		}
 		clk_mux_reg &= ~0xf;
 		clk_mux_reg |= (uint32_t)csiphy_params->csid_core;
 	} else if (lane_mask == LANE_MASK_PHY_B) { /* PHY B */
 		/* 2 lane config */
 		num_lanes = 2;
+		mode = TWO_LANE_PHY_B;
 		if (csiphy_dev->snps_state == NOT_CONFIGURED) {
-			mode = TWO_LANE_PHY_B;
 			csiphy_dev->snps_state = CONFIGURED_TWO_LANE_PHY_B;
 		} else if (csiphy_dev->snps_state ==
 			CONFIGURED_TWO_LANE_PHY_A) {
 			/* 2 lane + 2 lane config */
-			mode = TWO_LANE_PHY_B;
 			csiphy_dev->snps_state = CONFIGURED_COMBO_MODE;
 		} else {
-			pr_err("%s: invalid request\n", __func__);
-			return -EINVAL;
+			if (csiphy_dev->snps_programmed_data_rate !=
+				csiphy_params->data_rate)
+				pr_err("reconfiguring snps phy");
+			else
+				return 0;
 		}
 		clk_mux_reg &= ~0xf0;
 		clk_mux_reg |= csiphy_params->csid_core << 4;
@@ -1239,9 +1247,7 @@
 		return rc;
 	}
 
-	clk_rate = (csiphy_params->csiphy_clk > 0)
-			? csiphy_params->csiphy_clk :
-			csiphy_dev->csiphy_max_clk;
+	clk_rate = csiphy_dev->csiphy_max_clk;
 	clk_rate = msm_camera_clk_set_rate(&csiphy_dev->pdev->dev,
 		csiphy_dev->csiphy_clk[csiphy_dev->csiphy_clk_index],
 		clk_rate);
@@ -1660,6 +1666,7 @@
 		csiphy_dev->hw_version);
 	csiphy_dev->csiphy_state = CSIPHY_POWER_UP;
 	csiphy_dev->snps_state = NOT_CONFIGURED;
+	csiphy_dev->snps_programmed_data_rate = 0;
 	return 0;
 
 csiphy_enable_clk_fail:
@@ -1699,10 +1706,8 @@
 
 	CDBG("%s:%d called\n", __func__, __LINE__);
 	if (csiphy_dev->csiphy_state == CSIPHY_POWER_UP) {
-		pr_err("%s: csiphy invalid state %d\n", __func__,
+		pr_err("%s: csiphy current state %d\n", __func__,
 			csiphy_dev->csiphy_state);
-		rc = -EINVAL;
-		return rc;
 	}
 
 	CDBG("%s:%d called\n", __func__, __LINE__);
@@ -1766,6 +1771,7 @@
 		csiphy_dev->hw_version);
 	csiphy_dev->csiphy_state = CSIPHY_POWER_UP;
 	csiphy_dev->snps_state = NOT_CONFIGURED;
+	csiphy_dev->snps_programmed_data_rate = 0;
 	return 0;
 
 csiphy_enable_clk_fail:
@@ -1915,6 +1921,7 @@
 
 	csiphy_dev->csiphy_state = CSIPHY_POWER_DOWN;
 	csiphy_dev->snps_state = NOT_CONFIGURED;
+	csiphy_dev->snps_programmed_data_rate = 0;
 
 	if (cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSIPHY,
 		 CAM_AHB_SUSPEND_VOTE) < 0)
@@ -2047,6 +2054,7 @@
 
 	csiphy_dev->csiphy_state = CSIPHY_POWER_DOWN;
 	csiphy_dev->snps_state = NOT_CONFIGURED;
+	csiphy_dev->snps_programmed_data_rate = 0;
 
 	if (cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSIPHY,
 		 CAM_AHB_SUSPEND_VOTE) < 0)
@@ -2079,7 +2087,10 @@
 			rc = -EFAULT;
 			break;
 		}
-		csiphy_dev->csiphy_sof_debug = SOF_DEBUG_DISABLE;
+		if (csiphy_dev->csiphy_sof_debug == SOF_DEBUG_ENABLE) {
+			csiphy_dev->csiphy_sof_debug = SOF_DEBUG_DISABLE;
+			rc = msm_camera_enable_irq(csiphy_dev->irq, false);
+		}
 		rc = msm_csiphy_lane_config(csiphy_dev, &csiphy_params);
 		break;
 	case CSIPHY_RELEASE:
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
index 79baf3c..41d2034 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
@@ -250,6 +250,7 @@
 	uint8_t is_snps_phy;
 	enum snps_csiphy_state snps_state;
 	uint8_t num_clk_irq_registers;
+	uint64_t snps_programmed_data_rate;
 };
 
 #define VIDIOC_MSM_CSIPHY_RELEASE \
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_tz_i2c.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_tz_i2c.c
index df22d84..3590e15 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_tz_i2c.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_tz_i2c.c
@@ -20,7 +20,6 @@
 #include "msm_sensor.h"
 
 #undef CDBG
-#define MSM_CAMERA_TZ_I2C_VERBOSE
 
 #ifdef CONFIG_MSM_SEC_CCI_DEBUG
 	#define TZ_I2C_FN_RETURN(ret, i2c_fn, ...) \
diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c
index c0c83e5..6f39956 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c
@@ -82,9 +82,13 @@
 
 int32_t msm_sensor_free_sensor_data(struct msm_sensor_ctrl_t *s_ctrl)
 {
+	struct msm_camera_sensor_slave_info *slave_info = NULL;
+
 	if (!s_ctrl->pdev && !s_ctrl->sensor_i2c_client->client)
 		return 0;
 	kfree(s_ctrl->sensordata->slave_info);
+	slave_info = s_ctrl->sensordata->cam_slave_info;
+	kfree(slave_info->sensor_id_info.setting.reg_setting);
 	kfree(s_ctrl->sensordata->cam_slave_info);
 	kfree(s_ctrl->sensordata->actuator_info);
 	kfree(s_ctrl->sensordata->power_info.gpio_conf->gpio_num_info);
@@ -261,6 +265,17 @@
 		return -EINVAL;
 	}
 
+	if (slave_info->setting && slave_info->setting->size > 0) {
+		rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->
+			i2c_write_table(s_ctrl->sensor_i2c_client,
+			slave_info->setting);
+		if (rc < 0)
+			pr_err("Write array failed prior to probe\n");
+
+	} else {
+		CDBG("No writes needed for this sensor before probe\n");
+	}
+
 	rc = sensor_i2c_client->i2c_func_tbl->i2c_read(
 		sensor_i2c_client, slave_info->sensor_id_reg_addr,
 		&chipid, MSM_CAMERA_I2C_WORD_DATA);
diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
index 0b0f98a..7832181 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
@@ -10,7 +10,7 @@
  * GNU General Public License for more details.
  */
 
-#define SENSOR_DRIVER_I2C "i2c_camera"
+#define SENSOR_DRIVER_I2C "camera"
 /* Header file declaration */
 #include "msm_sensor.h"
 #include "msm_sd.h"
@@ -757,6 +757,8 @@
 
 	unsigned long                        mount_pos = 0;
 	uint32_t                             is_yuv;
+	struct msm_camera_i2c_reg_array     *reg_setting = NULL;
+	struct msm_sensor_id_info_t         *id_info = NULL;
 
 	/* Validate input parameters */
 	if (!setting) {
@@ -805,7 +807,54 @@
 		slave_info->camera_id = slave_info32->camera_id;
 
 		slave_info->i2c_freq_mode = slave_info32->i2c_freq_mode;
-		slave_info->sensor_id_info = slave_info32->sensor_id_info;
+		slave_info->sensor_id_info.sensor_id_reg_addr =
+			slave_info32->sensor_id_info.sensor_id_reg_addr;
+		slave_info->sensor_id_info.sensor_id_mask =
+			slave_info32->sensor_id_info.sensor_id_mask;
+		slave_info->sensor_id_info.sensor_id =
+			slave_info32->sensor_id_info.sensor_id;
+
+		slave_info->sensor_id_info.setting.addr_type =
+			slave_info32->sensor_id_info.setting.addr_type;
+		slave_info->sensor_id_info.setting.data_type =
+			slave_info32->sensor_id_info.setting.data_type;
+		slave_info->sensor_id_info.setting.delay =
+			slave_info32->sensor_id_info.setting.delay;
+		slave_info->sensor_id_info.setting.size =
+			slave_info32->sensor_id_info.setting.size;
+
+		if (!slave_info->sensor_id_info.setting.size ||
+			(slave_info->sensor_id_info.setting.size >
+				I2C_REG_DATA_MAX)) {
+			CDBG("%s:No writes needed to probe\n", __func__);
+			slave_info->sensor_id_info.setting.reg_setting = NULL;
+		} else {
+			id_info = &(slave_info->sensor_id_info);
+			reg_setting =
+				kzalloc(id_info->setting.size *
+					(sizeof
+					(struct msm_camera_i2c_reg_array)),
+					GFP_KERNEL);
+			if (!reg_setting) {
+				rc = -ENOMEM;
+				goto free_slave_info;
+			}
+			if (copy_from_user(reg_setting,
+				(void __user *)
+				compat_ptr(slave_info32->sensor_id_info.
+				setting.reg_setting),
+				slave_info->sensor_id_info.setting.size *
+				sizeof(struct msm_camera_i2c_reg_array))) {
+				pr_err("%s:%d: sensor id info copy failed\n",
+					__func__, __LINE__);
+				kfree(reg_setting);
+				rc = -EFAULT;
+				goto free_slave_info;
+			}
+
+			slave_info->sensor_id_info.setting.reg_setting =
+				reg_setting;
+		}
 
 		slave_info->slave_addr = slave_info32->slave_addr;
 		slave_info->power_setting_array.size =
@@ -841,6 +890,37 @@
 			rc = -EFAULT;
 			goto free_slave_info;
 		}
+		if (!slave_info->sensor_id_info.setting.size ||
+			slave_info->sensor_id_info.setting.size >
+			I2C_REG_DATA_MAX) {
+			CDBG("%s:No writes needed to probe\n", __func__);
+			slave_info->sensor_id_info.setting.reg_setting = NULL;
+		} else {
+			id_info = &(slave_info->sensor_id_info);
+			reg_setting =
+				kzalloc(id_info->setting.size *
+					(sizeof
+					(struct msm_camera_i2c_reg_array)),
+					GFP_KERNEL);
+			if (!reg_setting) {
+				rc = -ENOMEM;
+				goto free_slave_info;
+			}
+			if (copy_from_user(reg_setting,
+				(void __user *)
+				slave_info->sensor_id_info.setting.reg_setting,
+				slave_info->sensor_id_info.setting.size *
+				sizeof(struct msm_camera_i2c_reg_array))) {
+				pr_err("%s:%d: sensor id info copy failed\n",
+					__func__, __LINE__);
+				kfree(reg_setting);
+				rc = -EFAULT;
+				goto free_slave_info;
+			}
+
+			slave_info->sensor_id_info.setting.reg_setting =
+				reg_setting;
+		}
 	}
 
 	if (strlen(slave_info->sensor_name) >= MAX_SENSOR_NAME ||
@@ -956,6 +1036,7 @@
 		slave_info->sensor_id_info.sensor_id_reg_addr;
 	camera_info->sensor_id = slave_info->sensor_id_info.sensor_id;
 	camera_info->sensor_id_mask = slave_info->sensor_id_info.sensor_id_mask;
+	camera_info->setting = &(slave_info->sensor_id_info.setting);
 
 	/* Fill CCI master, slave address and CCI default params */
 	if (!s_ctrl->sensor_i2c_client) {
@@ -1116,6 +1197,7 @@
 free_camera_info:
 	kfree(camera_info);
 free_slave_info:
+	kfree(slave_info->sensor_id_info.setting.reg_setting);
 	kfree(slave_info);
 	return rc;
 }
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index 48ef46c..c0566a3 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -3733,6 +3733,7 @@
 		.name = SDE_ROTATOR_DRV_NAME,
 		.of_match_table = sde_rotator_dt_match,
 		.pm = &sde_rotator_pm_ops,
+		.suppress_bind_attrs = true,
 	},
 };
 
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 9a7d272..e6a4ed30 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -738,7 +738,7 @@
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI) |
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_YUV_STATS)|
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_ROI_QP)|
-			(1UL << V4L2_MPEG_VIDC_EXTRADATA_ENC_FRAME_QP)
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO)
 			),
 		.qmenu = mpeg_video_vidc_extradata,
 	},
@@ -1548,10 +1548,6 @@
 	}
 	case V4L2_CID_MPEG_VIDC_IMG_GRID_DIMENSION:
 	{
-		int i = 0, j = 0;
-		u32 width = 0, height = 0;
-		u32 trows, tcols;
-
 		property_id = HAL_CONFIG_HEIC_GRID_ENABLE;
 		if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_HEVC) {
 			dprintk(VIDC_ERR, "Grid is supported only for HEVC\n");
@@ -1566,38 +1562,6 @@
 		grid_enable.grid_enable = ctrl->val;
 		inst->img_grid_dimension = ctrl->val;
 		pdata = &grid_enable;
-
-		/* Update tile info table */
-		width = inst->prop.width[OUTPUT_PORT];
-		height = inst->prop.height[OUTPUT_PORT];
-		tcols = (width + inst->img_grid_dimension - 1) /
-					inst->img_grid_dimension;
-		trows = (height + inst->img_grid_dimension - 1) /
-					inst->img_grid_dimension;
-		inst->tinfo.count = trows * tcols;
-		if (inst->tinfo.count > MAX_HEIC_TILES_COUNT) {
-			dprintk(VIDC_ERR, "Tiles count exceeds maximum\n");
-			rc = -ENOTSUPP;
-			break;
-		}
-
-		dprintk(VIDC_DBG,
-			"Grid dimension %d width %d height %d row %d col %d\n",
-			inst->img_grid_dimension, width, height,
-			trows, tcols);
-
-		for (j = 0; j < trows; ++j) {
-			for (i = 0; i < tcols; ++i) {
-				inst->tinfo.tile_rects[j*tcols+i].left =
-					(i * inst->img_grid_dimension);
-				inst->tinfo.tile_rects[j*tcols+i].top =
-					(j * inst->img_grid_dimension);
-				inst->tinfo.tile_rects[j*tcols+i].width =
-					inst->img_grid_dimension;
-				inst->tinfo.tile_rects[j*tcols+i].height =
-					inst->img_grid_dimension;
-			}
-		}
 		break;
 	}
 	case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
@@ -2275,6 +2239,7 @@
 			enable.enable = 0;
 		pdata = &enable;
 		inst->clk_data.low_latency_mode = (bool) enable.enable;
+		msm_dcvs_try_enable(inst);
 		break;
 	}
 	case V4L2_CID_MPEG_VIDC_VIDEO_H264_TRANSFORM_8x8:
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 8e32504..651d0a2 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -901,6 +901,51 @@
 	return rc;
 }
 
+static int msm_vidc_create_tile_info_table(struct msm_vidc_inst *inst)
+{
+	int i = 0, j = 0;
+	u32 width = 0, height = 0;
+	u32 trows = 0, tcols = 0;
+
+	/* Don't create table for non-HEIC formats*/
+	/* Don't create the tile info table for non-HEIC formats */
+		inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_HEVC)
+		return 0;
+
+	width = inst->prop.width[OUTPUT_PORT];
+	height = inst->prop.height[OUTPUT_PORT];
+	tcols = (width + inst->img_grid_dimension - 1) /
+		inst->img_grid_dimension;
+	trows = (height + inst->img_grid_dimension - 1) /
+		inst->img_grid_dimension;
+	inst->tinfo.count = trows * tcols;
+	if (inst->tinfo.count > MAX_HEIC_TILES_COUNT) {
+		dprintk(VIDC_ERR,
+			"Tiles count (%d) exceeds maximum\n",
+			inst->tinfo.count);
+		return -ENOTSUPP;
+	}
+
+	dprintk(VIDC_DBG,
+		"Grid dimension %d width %d height %d row %d col %d\n",
+		inst->img_grid_dimension, width, height,
+		trows, tcols);
+
+	for (j = 0; j < trows; ++j) {
+		for (i = 0; i < tcols; ++i) {
+			inst->tinfo.tile_rects[j*tcols+i].left =
+				(i * inst->img_grid_dimension);
+			inst->tinfo.tile_rects[j*tcols+i].top =
+				(j * inst->img_grid_dimension);
+			inst->tinfo.tile_rects[j*tcols+i].width =
+				inst->img_grid_dimension;
+			inst->tinfo.tile_rects[j*tcols+i].height =
+				inst->img_grid_dimension;
+		}
+	}
+	return 0;
+}
+
 static inline int start_streaming(struct msm_vidc_inst *inst)
 {
 	int rc = 0;
@@ -909,6 +954,13 @@
 
 	hdev = inst->core->device;
 
+	/* Create tile info table */
+	rc = msm_vidc_create_tile_info_table(inst);
+	if (rc) {
+		dprintk(VIDC_ERR, "Tile info table was not generated\n");
+		goto fail_start;
+	}
+
 	/* Check if current session is under HW capability */
 	rc = msm_vidc_check_session_supported(inst);
 	if (rc) {
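
For reference, the table built by msm_vidc_create_tile_info_table() above is plain ceiling-division arithmetic over the frame dimensions. A minimal standalone sketch of the same computation (plain C; the constant and struct names are illustrative, not the driver's):

/*
 * Standalone sketch of the tile-table math: split the frame into a grid of
 * img_grid_dimension-sized tiles with ceiling division and refuse the table
 * when the count exceeds a fixed maximum. MAX_TILES and struct tile_rect
 * are stand-ins, not the driver's definitions.
 */
#include <stdio.h>

#define MAX_TILES 64	/* stand-in for MAX_HEIC_TILES_COUNT */

struct tile_rect { unsigned int left, top, width, height; };

static int build_tile_table(unsigned int width, unsigned int height,
			    unsigned int dim, struct tile_rect *rects,
			    unsigned int *count)
{
	unsigned int tcols = (width + dim - 1) / dim;	/* ceil(width/dim) */
	unsigned int trows = (height + dim - 1) / dim;	/* ceil(height/dim) */
	unsigned int i, j;

	*count = trows * tcols;
	if (*count > MAX_TILES)
		return -1;

	for (j = 0; j < trows; j++)
		for (i = 0; i < tcols; i++)
			rects[j * tcols + i] = (struct tile_rect){
				.left = i * dim, .top = j * dim,
				.width = dim, .height = dim,
			};
	return 0;
}

int main(void)
{
	struct tile_rect rects[MAX_TILES];
	unsigned int count;

	if (!build_tile_table(4096, 3072, 512, rects, &count))
		printf("%u tiles, last at (%u,%u)\n", count,
		       rects[count - 1].left, rects[count - 1].top);
	return 0;
}
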
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index 4a0aa58..a665978 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -114,14 +114,18 @@
 {
 	struct recon_buf *binfo, *nextb;
 	struct vidc_input_cr_data *temp, *next;
-	u32 min_cf = 0, max_cf = 0;
-	u32 min_input_cr = 0, max_input_cr = 0, min_cr = 0, max_cr = 0;
+	u32 max_cr = 0, max_cf = 0, max_input_cr = 0;
+	u32 min_cr = MSM_VIDC_MAX_UBWC_COMPRESSION_RATIO;
+	u32 min_input_cr = MSM_VIDC_MAX_UBWC_COMPRESSION_RATIO;
+	u32 min_cf = MSM_VIDC_MAX_UBWC_COMPLEXITY_FACTOR;
 
 	mutex_lock(&inst->reconbufs.lock);
 	list_for_each_entry_safe(binfo, nextb, &inst->reconbufs.list, list) {
-		min_cr = min(min_cr, binfo->CR);
+		if (binfo->CR)
+			min_cr = min(min_cr, binfo->CR);
+		if (binfo->CF)
+			min_cf = min(min_cf, binfo->CF);
 		max_cr = max(max_cr, binfo->CR);
-		min_cf = min(min_cf, binfo->CF);
 		max_cf = max(max_cf, binfo->CF);
 	}
 	mutex_unlock(&inst->reconbufs.lock);
@@ -605,6 +609,7 @@
 	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
 	u64 rate = 0;
 	struct clock_data *dcvs = NULL;
+	u32 operating_rate, vsp_factor_num = 10, vsp_factor_den = 7;
 
 	core = inst->core;
 	dcvs = &inst->clk_data;
@@ -627,8 +632,19 @@
 
 		vsp_cycles = mbs_per_second * inst->clk_data.entry->vsp_cycles;
 
-		/* 10 / 7 is overhead factor */
-		vsp_cycles += (inst->clk_data.bitrate * 10) / 7;
+		operating_rate = inst->clk_data.operating_rate >> 16;
+		if (operating_rate > inst->prop.fps && inst->prop.fps) {
+			vsp_factor_num *= operating_rate;
+			vsp_factor_den *= inst->prop.fps;
+		}
+		// Adjust the factor for the dual-core case: the workload is not
+		// split evenly across the two cores, so use 0.65 instead of 0.5.
+		if (inst->clk_data.core_id == VIDC_CORE_ID_3) {
+			vsp_factor_num = vsp_factor_num * 13 / 10;
+			vsp_factor_den *= 2;
+		}
+		vsp_cycles += ((u64)inst->clk_data.bitrate * vsp_factor_num) /
+				vsp_factor_den;
 	} else if (inst->session_type == MSM_VIDC_DECODER) {
 		vpp_cycles = mbs_per_second * inst->clk_data.entry->vpp_cycles;
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index b392610..d77ea21 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -2035,6 +2035,7 @@
 	struct msm_vidc_cb_cmd_done *response = data;
 	struct msm_vidc_inst *inst;
 	struct v4l2_event flush_event = {0};
+	struct recon_buf *binfo;
 	u32 *ptr = NULL;
 	enum hal_flush flush_type;
 	int rc;
@@ -2089,6 +2090,13 @@
 		goto exit;
 	}
 
+	mutex_lock(&inst->reconbufs.lock);
+	list_for_each_entry(binfo, &inst->reconbufs.list, list) {
+		binfo->CR = 0;
+		binfo->CF = 0;
+	}
+	mutex_unlock(&inst->reconbufs.lock);
+
 	dprintk(VIDC_DBG,
 		"Notify flush complete, flush_type: %x\n", flush_type);
 	v4l2_event_queue_fh(&inst->event_handler, &flush_event);
@@ -2353,6 +2361,17 @@
 	return 0;
 }
 
+static bool is_heic_encode_session(struct msm_vidc_inst *inst)
+{
+	if (inst->session_type == MSM_VIDC_ENCODER &&
+		(get_hal_codec(inst->fmts[CAPTURE_PORT].fourcc) ==
+		HAL_VIDEO_CODEC_HEVC) &&
+		(inst->img_grid_dimension > 0))
+		return true;
+	else
+		return false;
+}
+
 static bool is_eos_buffer(struct msm_vidc_inst *inst, u32 device_addr)
 {
 	struct eos_buf *temp, *next;
@@ -2398,9 +2417,7 @@
 	}
 
 	empty_buf_done = (struct vidc_hal_ebd *)&response->input_done;
-	if ((get_hal_codec(inst->fmts[CAPTURE_PORT].fourcc) ==
-		HAL_VIDEO_CODEC_HEVC) &&
-		(inst->img_grid_dimension > 0) &&
+	if (is_heic_encode_session(inst) &&
 		(empty_buf_done->input_tag < inst->tinfo.count - 1)) {
 		dprintk(VIDC_DBG, "Wait for last tile. Current tile no: %d\n",
 		empty_buf_done->input_tag);
@@ -3145,10 +3162,11 @@
 static void msm_vidc_print_running_insts(struct msm_vidc_core *core)
 {
 	struct msm_vidc_inst *temp;
+	int op_rate = 0;
 
 	dprintk(VIDC_ERR, "Running instances:\n");
-	dprintk(VIDC_ERR, "%4s|%4s|%4s|%4s|%4s\n",
-			"type", "w", "h", "fps", "prop");
+	dprintk(VIDC_ERR, "%4s|%4s|%4s|%4s|%6s|%4s\n",
+			"type", "w", "h", "fps", "opr", "prop");
 
 	mutex_lock(&core->lock);
 	list_for_each_entry(temp, &core->instances, list) {
@@ -3162,13 +3180,21 @@
 			if (msm_comm_turbo_session(temp))
 				strlcat(properties, "T", sizeof(properties));
 
-			dprintk(VIDC_ERR, "%4d|%4d|%4d|%4d|%4s\n",
+			if (is_realtime_session(temp))
+				strlcat(properties, "R", sizeof(properties));
+
+			if (temp->clk_data.operating_rate)
+				op_rate = temp->clk_data.operating_rate >> 16;
+			else
+				op_rate = temp->prop.fps;
+
+			dprintk(VIDC_ERR, "%4d|%4d|%4d|%4d|%6d|%4s\n",
 					temp->session_type,
 					max(temp->prop.width[CAPTURE_PORT],
 						temp->prop.width[OUTPUT_PORT]),
 					max(temp->prop.height[CAPTURE_PORT],
 						temp->prop.height[OUTPUT_PORT]),
-					temp->prop.fps, properties);
+					temp->prop.fps, op_rate, properties);
 		}
 	}
 	mutex_unlock(&core->lock);
@@ -4416,9 +4442,7 @@
 		for (c = 0; c < etbs.count; ++c) {
 			struct vidc_frame_data *frame_data = &etbs.data[c];
 
-			if (get_hal_codec(inst->fmts[CAPTURE_PORT].fourcc) ==
-				HAL_VIDEO_CODEC_HEVC &&
-				(inst->img_grid_dimension > 0)) {
+			if (is_heic_encode_session(inst)) {
 				rc = msm_comm_qbuf_heic_tiles(inst, frame_data);
 				if (rc) {
 					dprintk(VIDC_ERR,
@@ -5496,6 +5520,11 @@
 	u32 input_height, input_width, output_height, output_width;
 	u32 rotation;
 
+	if (is_heic_encode_session(inst)) {
+		dprintk(VIDC_DBG, "Skip downscale check for HEIC\n");
+		return 0;
+	}
+
 	input_height = inst->prop.height[OUTPUT_PORT];
 	input_width = inst->prop.width[OUTPUT_PORT];
 	output_height = inst->prop.height[CAPTURE_PORT];
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index 530fe3a..1dc6723 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -25,6 +25,7 @@
 enum clock_properties {
 	CLOCK_PROP_HAS_SCALING = 1 << 0,
 	CLOCK_PROP_HAS_MEM_RETENTION    = 1 << 1,
+	CLOCK_PROP_DISABLE_MEMCORE_ONLY = 1 << 2,
 };
 
 #define PERF_GOV "performance"
@@ -666,6 +667,11 @@
 		else
 			vc->has_mem_retention = false;
 
+		if (clock_props[c] & CLOCK_PROP_DISABLE_MEMCORE_ONLY)
+			vc->disable_memcore_only = true;
+		else
+			vc->disable_memcore_only = false;
+
 		dprintk(VIDC_DBG, "Found clock %s: scale-able = %s\n", vc->name,
 			vc->count ? "yes" : "no");
 	}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
index 23e33fe..06d8fa5 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -90,6 +90,7 @@
 	u32 count;
 	bool has_scaling;
 	bool has_mem_retention;
+	bool disable_memcore_only;
 };
 
 struct clock_set {
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 7e7ed47..79ed798 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -60,6 +60,9 @@
 /* Poll interval in uS */
 #define POLL_INTERVAL_US 50
 
+#define VENUS_AXI_HALT_ACK_TIMEOUT_US		500000
+
+
 enum tzbsp_video_state {
 	TZBSP_VIDEO_STATE_SUSPEND = 0,
 	TZBSP_VIDEO_STATE_RESUME = 1,
@@ -1094,6 +1097,8 @@
 	int rc = 0;
 
 	venus_hfi_for_each_clock(device, cl) {
+		if (cl->disable_memcore_only)
+			continue;
 		if (cl->has_scaling) {/* has_scaling */
 			device->clk_freq = freq;
 			rc = clk_set_rate(cl->clk, freq);
@@ -2850,12 +2855,52 @@
 	mutex_unlock(&device->lock);
 }
 
+static int __halt_axi(struct venus_hfi_device *device)
+{
+	u32 reg;
+	int rc = 0;
+
+	if (!device) {
+		dprintk(VIDC_ERR, "Invalid input\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * The driver needs to make sure that clocks are enabled before reading
+	 * the Venus AXI registers; if they are not, skip the AXI halt.
+	 */
+	if (!device->power_enabled) {
+		dprintk(VIDC_WARN,
+			"Clocks are OFF, skipping AXI HALT\n");
+		return -EINVAL;
+	}
+
+	/* Halt AXI and AXI IMEM VBIF Access */
+	reg = __read_register(device, VENUS_WRAPPER_AXI_HALT);
+	reg |= BRIC_AXI_HALT;
+	__write_register(device, VENUS_WRAPPER_AXI_HALT, reg);
+
+	/* Wait for the AXI bus port halt to be acknowledged */
+	rc = readl_poll_timeout(device->hal_data->register_base
+			+ VENUS_WRAPPER_AXI_HALT_STATUS,
+			reg, reg & BRIC_AXI_HALT_ACK,
+			POLL_INTERVAL_US,
+			VENUS_AXI_HALT_ACK_TIMEOUT_US);
+	if (rc)
+		dprintk(VIDC_WARN, "AXI bus port halt timeout\n");
+
+	return rc;
+}
+
 static void __process_sys_error(struct venus_hfi_device *device)
 {
 	struct hfi_sfr_struct *vsfr = NULL;
 
 	__set_state(device, VENUS_STATE_DEINIT);
 
+	if (__halt_axi(device))
+		dprintk(VIDC_WARN, "Failed to halt AXI after SYS_ERROR\n");
+
 	vsfr = (struct hfi_sfr_struct *)device->sfr.align_virtual_addr;
 	if (vsfr) {
 		void *p = memchr(vsfr->rg_data, '\0', vsfr->bufSize);
@@ -3319,6 +3364,8 @@
 	}
 
 	venus_hfi_for_each_clock_reverse(device, cl) {
+		if (cl->disable_memcore_only)
+			continue;
 		dprintk(VIDC_DBG, "Clock: %s disable and unprepare\n",
 				cl->name);
 		rc = clk_set_flags(cl->clk, CLKFLAG_NORETAIN_PERIPH);
@@ -3348,6 +3395,11 @@
 	}
 
 	venus_hfi_for_each_clock(device, cl) {
+		/* MEM CORE is ON by default. Unset it for unused clocks */
+		if (cl->disable_memcore_only) {
+			clk_set_flags(cl->clk, CLKFLAG_NORETAIN_MEM);
+			continue;
+		}
 		/*
 		 * For the clocks we control, set the rate prior to preparing
 		 * them.  Since we don't really have a load at this point, scale
@@ -3385,6 +3437,8 @@
 
 fail_clk_enable:
 	venus_hfi_for_each_clock_reverse_continue(device, cl, c) {
+		if (cl->disable_memcore_only)
+			continue;
 		dprintk(VIDC_ERR, "Clock: %s disable and unprepare\n",
 			cl->name);
 		clk_disable_unprepare(cl->clk);
@@ -3478,7 +3532,6 @@
 		devfreq_suspend_device(bus->devfreq);
 	}
 
-	device->bus_vote = DEFAULT_BUS_VOTE;
 	return 0;
 
 err_add_dev:
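
The new __halt_axi() above follows the usual request/ack handshake: set the halt bit, then poll the status register for the ack with readl_poll_timeout(). A hedged sketch of that generic shape (the register offsets and bit names below are placeholders, not the Venus wrapper's):

/*
 * Hedged sketch of the request/ack halt handshake. halt_off/status_off and
 * the bit masks are placeholders; only the shape matches __halt_axi().
 */
#include <linux/io.h>
#include <linux/iopoll.h>

static int halt_bus(void __iomem *base, u32 halt_off, u32 status_off,
		    u32 req_bit, u32 ack_bit)
{
	u32 val;

	/* Request the halt */
	writel(readl(base + halt_off) | req_bit, base + halt_off);

	/* Poll for the ack every 50us, give up after 500ms */
	return readl_poll_timeout(base + status_off, val, val & ack_bit,
				  50, 500000);
}
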
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index f4ea86b..dd94a88 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -492,6 +492,7 @@
 
 struct hfi_heic_frame_quality {
 	u32 frame_quality;
+	u32 reserved[3];
 };
 
 struct hfi_heic_grid_enable {
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_io.h b/drivers/media/platform/msm/vidc/vidc_hfi_io.h
index 3433483..3ddd633 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_io.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_io.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -148,6 +148,12 @@
 #define VENUS_VBIF_AXI_HALT_CTRL1_HALT_ACK		BIT(0)
 #define VENUS_VBIF_AXI_HALT_ACK_TIMEOUT_US		500000
 
+#define VENUS_WRAPPER_AXI_HALT 0x000E2008
+#define VENUS_WRAPPER_AXI_HALT_STATUS 0x000E200C
+
+#define BRIC_AXI_HALT BIT(16)
+#define BRIC_AXI_HALT_ACK BIT(16)
+
 #define VIDC_VENUS0_WRAPPER_VBIF_REQ_PRIORITY \
 	(VIDC_WRAPPER_BASE_OFFS + 0x20)
 #define VIDC_VENUS0_WRAPPER_VBIF_PRIORITY_LEVEL \
diff --git a/drivers/media/platform/msm/vidc_3x/hfi_packetization.c b/drivers/media/platform/msm/vidc_3x/hfi_packetization.c
index b15baaa..1de5bd1 100644
--- a/drivers/media/platform/msm/vidc_3x/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc_3x/hfi_packetization.c
@@ -658,9 +658,12 @@
 	case HAL_EXTRADATA_STREAM_USERDATA:
 		ret = HFI_PROPERTY_PARAM_VDEC_STREAM_USERDATA_EXTRADATA;
 		break;
-	case HAL_EXTRADATA_FRAME_QP:
+	case HAL_EXTRADATA_DEC_FRAME_QP:
 		ret = HFI_PROPERTY_PARAM_VDEC_FRAME_QP_EXTRADATA;
 		break;
+	case HAL_EXTRADATA_ENC_FRAME_QP:
+		ret = HFI_PROPERTY_PARAM_VENC_FRAME_QP_EXTRADATA;
+		break;
 	case HAL_EXTRADATA_FRAME_BITS_INFO:
 		ret = HFI_PROPERTY_PARAM_VDEC_FRAME_BITS_INFO_EXTRADATA;
 		break;
diff --git a/drivers/media/platform/msm/vidc_3x/msm_venc.c b/drivers/media/platform/msm/vidc_3x/msm_venc.c
index d129dc2..ef6e360 100644
--- a/drivers/media/platform/msm/vidc_3x/msm_venc.c
+++ b/drivers/media/platform/msm/vidc_3x/msm_venc.c
@@ -823,7 +823,7 @@
 		.name = "Extradata Type",
 		.type = V4L2_CTRL_TYPE_MENU,
 		.minimum = V4L2_MPEG_VIDC_EXTRADATA_NONE,
-		.maximum = V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO,
+		.maximum = V4L2_MPEG_VIDC_EXTRADATA_ENC_FRAME_QP,
 		.default_value = V4L2_MPEG_VIDC_EXTRADATA_NONE,
 		.menu_skip_mask = ~(
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_NONE) |
@@ -846,7 +846,8 @@
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI) |
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_YUV_STATS)|
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_ROI_QP) |
-			(1 << V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO)
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO) |
+			(1ULL << V4L2_MPEG_VIDC_EXTRADATA_ENC_FRAME_QP)
 			),
 		.qmenu = mpeg_video_vidc_extradata,
 	},
@@ -1564,6 +1565,7 @@
 		case V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB:
 		case V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER:
 		case V4L2_MPEG_VIDC_EXTRADATA_LTR:
+		case V4L2_MPEG_VIDC_EXTRADATA_ENC_FRAME_QP:
 		case V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI:
 			inst->fmts[CAPTURE_PORT].num_planes = 2;
 		default:
diff --git a/drivers/media/platform/msm/vidc_3x/msm_vidc_common.c b/drivers/media/platform/msm/vidc_3x/msm_vidc_common.c
index bd58117..502a5c7 100644
--- a/drivers/media/platform/msm/vidc_3x/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc_3x/msm_vidc_common.c
@@ -77,9 +77,11 @@
 	"Extradata output crop",
 	"Extradata display colour SEI",
 	"Extradata light level SEI",
+	"Extradata PQ Info",
 	"Extradata display VUI",
 	"Extradata vpx color space",
-	"Extradata PQ Info",
+	"Extradata UBWC CR stats info",
+	"Extradata enc frame QP"
 };
 
 struct getprop_buf {
@@ -4727,7 +4729,10 @@
 		ret = HAL_EXTRADATA_STREAM_USERDATA;
 		break;
 	case V4L2_MPEG_VIDC_EXTRADATA_FRAME_QP:
-		ret = HAL_EXTRADATA_FRAME_QP;
+		ret = HAL_EXTRADATA_DEC_FRAME_QP;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_ENC_FRAME_QP:
+		ret = HAL_EXTRADATA_ENC_FRAME_QP;
 		break;
 	case V4L2_MPEG_VIDC_EXTRADATA_FRAME_BITS_INFO:
 		ret = HAL_EXTRADATA_FRAME_BITS_INFO;
diff --git a/drivers/media/platform/msm/vidc_3x/msm_vidc_internal.h b/drivers/media/platform/msm/vidc_3x/msm_vidc_internal.h
index 93368f6..c7eb5f1 100644
--- a/drivers/media/platform/msm/vidc_3x/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc_3x/msm_vidc_internal.h
@@ -316,7 +316,7 @@
 	s32 maximum;
 	s32 default_value;
 	u32 step;
-	u32 menu_skip_mask;
+	u64 menu_skip_mask;
 	u32 flags;
 	const char * const *qmenu;
 };
diff --git a/drivers/media/platform/msm/vidc_3x/vidc_hfi_api.h b/drivers/media/platform/msm/vidc_3x/vidc_hfi_api.h
index 1a25a58..875db09 100644
--- a/drivers/media/platform/msm/vidc_3x/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc_3x/vidc_hfi_api.h
@@ -112,7 +112,8 @@
 	HAL_EXTRADATA_ASPECT_RATIO,
 	HAL_EXTRADATA_MPEG2_SEQDISP,
 	HAL_EXTRADATA_STREAM_USERDATA,
-	HAL_EXTRADATA_FRAME_QP,
+	HAL_EXTRADATA_DEC_FRAME_QP,
+	HAL_EXTRADATA_ENC_FRAME_QP,
 	HAL_EXTRADATA_FRAME_BITS_INFO,
 	HAL_EXTRADATA_INPUT_CROP,
 	HAL_EXTRADATA_DIGITAL_ZOOM,
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index c12209c..390d708 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -2169,6 +2169,12 @@
 	pxa_dma_stop_channels(pcdev);
 
 	pxa_camera_destroy_formats(pcdev);
+
+	if (pcdev->mclk_clk) {
+		v4l2_clk_unregister(pcdev->mclk_clk);
+		pcdev->mclk_clk = NULL;
+	}
+
 	video_unregister_device(&pcdev->vdev);
 	pcdev->sensor = NULL;
 
@@ -2495,7 +2501,13 @@
 	dma_release_channel(pcdev->dma_chans[1]);
 	dma_release_channel(pcdev->dma_chans[2]);
 
-	v4l2_clk_unregister(pcdev->mclk_clk);
+	v4l2_async_notifier_unregister(&pcdev->notifier);
+
+	if (pcdev->mclk_clk) {
+		v4l2_clk_unregister(pcdev->mclk_clk);
+		pcdev->mclk_clk = NULL;
+	}
+
 	v4l2_device_unregister(&pcdev->v4l2_dev);
 
 	dev_info(&pdev->dev, "PXA Camera driver unloaded\n");
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index 0413a86..5c9db09 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -1256,16 +1256,17 @@
 {
 	const struct s3c_camif_variant *variant = camif->variant;
 	const struct vp_pix_limits *pix_lim;
-	int i = ARRAY_SIZE(camif_mbus_formats);
+	unsigned int i;
 
 	/* FIXME: constraints against codec or preview path ? */
 	pix_lim = &variant->vp_pix_limits[VP_CODEC];
 
-	while (i-- >= 0)
+	for (i = 0; i < ARRAY_SIZE(camif_mbus_formats); i++)
 		if (camif_mbus_formats[i] == mf->code)
 			break;
 
-	mf->code = camif_mbus_formats[i];
+	if (i == ARRAY_SIZE(camif_mbus_formats))
+		mf->code = camif_mbus_formats[0];
 
 	if (pad == CAMIF_SD_PAD_SINK) {
 		v4l_bound_align_image(&mf->width, 8, CAMIF_MAX_PIX_WIDTH,
diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
index aceb38d..b1c3725 100644
--- a/drivers/media/platform/vivid/vivid-ctrls.c
+++ b/drivers/media/platform/vivid/vivid-ctrls.c
@@ -1167,6 +1167,7 @@
 		v4l2_ctrl_activate(dev->radio_rx_rds_ta, dev->radio_rx_rds_controls);
 		v4l2_ctrl_activate(dev->radio_rx_rds_tp, dev->radio_rx_rds_controls);
 		v4l2_ctrl_activate(dev->radio_rx_rds_ms, dev->radio_rx_rds_controls);
+		dev->radio_rx_dev.device_caps = dev->radio_rx_caps;
 		break;
 	case V4L2_CID_RDS_RECEPTION:
 		dev->radio_rx_rds_enabled = ctrl->val;
@@ -1241,6 +1242,7 @@
 		dev->radio_tx_caps &= ~V4L2_CAP_READWRITE;
 		if (!dev->radio_tx_rds_controls)
 			dev->radio_tx_caps |= V4L2_CAP_READWRITE;
+		dev->radio_tx_dev.device_caps = dev->radio_tx_caps;
 		break;
 	case V4L2_CID_RDS_TX_PTY:
 		if (dev->radio_rx_rds_controls)
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index db525cd..d9f88a4 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -1381,8 +1381,13 @@
 		goto rc_dev_fail;
 
 	/* wire up inbound data handler */
-	usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp,
-				mceusb_dev_recv, ir, ep_in->bInterval);
+	if (usb_endpoint_xfer_int(ep_in))
+		usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp,
+				 mceusb_dev_recv, ir, ep_in->bInterval);
+	else
+		usb_fill_bulk_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp,
+				  mceusb_dev_recv, ir);
+
 	ir->urb_in->transfer_dma = ir->dma_in;
 	ir->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index d148463..6bf48a7 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -189,7 +189,7 @@
    USB 2.0 spec says bulk packet size is always 512 bytes
  */
 #define EM28XX_BULK_PACKET_MULTIPLIER 384
-#define EM28XX_DVB_BULK_PACKET_MULTIPLIER 384
+#define EM28XX_DVB_BULK_PACKET_MULTIPLIER 94
 
 #define EM28XX_INTERLACED_DEFAULT 1
 
diff --git a/drivers/media/usb/stkwebcam/stk-sensor.c b/drivers/media/usb/stkwebcam/stk-sensor.c
index e546b01..2dcc8d0 100644
--- a/drivers/media/usb/stkwebcam/stk-sensor.c
+++ b/drivers/media/usb/stkwebcam/stk-sensor.c
@@ -228,7 +228,7 @@
 static int stk_sensor_outb(struct stk_camera *dev, u8 reg, u8 val)
 {
 	int i = 0;
-	int tmpval = 0;
+	u8 tmpval = 0;
 
 	if (stk_camera_write_reg(dev, STK_IIC_TX_INDEX, reg))
 		return 1;
@@ -253,7 +253,7 @@
 static int stk_sensor_inb(struct stk_camera *dev, u8 reg, u8 *val)
 {
 	int i = 0;
-	int tmpval = 0;
+	u8 tmpval = 0;
 
 	if (stk_camera_write_reg(dev, STK_IIC_RX_INDEX, reg))
 		return 1;
@@ -274,7 +274,7 @@
 	if (stk_camera_read_reg(dev, STK_IIC_RX_VALUE, &tmpval))
 		return 1;
 
-	*val = (u8) tmpval;
+	*val = tmpval;
 	return 0;
 }
 
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index 22a9aae..1c48f2f 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -144,7 +144,7 @@
 		return 0;
 }
 
-int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value)
+int stk_camera_read_reg(struct stk_camera *dev, u16 index, u8 *value)
 {
 	struct usb_device *udev = dev->udev;
 	unsigned char *buf;
@@ -163,7 +163,7 @@
 			sizeof(u8),
 			500);
 	if (ret >= 0)
-		memcpy(value, buf, sizeof(u8));
+		*value = *buf;
 
 	kfree(buf);
 	return ret;
@@ -171,9 +171,10 @@
 
 static int stk_start_stream(struct stk_camera *dev)
 {
-	int value;
+	u8 value;
 	int i, ret;
-	int value_116, value_117;
+	u8 value_116, value_117;
+
 
 	if (!is_present(dev))
 		return -ENODEV;
@@ -213,7 +214,7 @@
 
 static int stk_stop_stream(struct stk_camera *dev)
 {
-	int value;
+	u8 value;
 	int i;
 	if (is_present(dev)) {
 		stk_camera_read_reg(dev, 0x0100, &value);
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.h b/drivers/media/usb/stkwebcam/stk-webcam.h
index 9bbfa3d..92bb48e 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.h
+++ b/drivers/media/usb/stkwebcam/stk-webcam.h
@@ -129,7 +129,7 @@
 #define vdev_to_camera(d) container_of(d, struct stk_camera, vdev)
 
 int stk_camera_write_reg(struct stk_camera *, u16, u8);
-int stk_camera_read_reg(struct stk_camera *, u16, int *);
+int stk_camera_read_reg(struct stk_camera *, u16, u8 *);
 
 int stk_sensor_init(struct stk_camera *);
 int stk_sensor_configure(struct stk_camera *);
diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c
index 0324633..e56a49a 100644
--- a/drivers/media/usb/usbtv/usbtv-core.c
+++ b/drivers/media/usb/usbtv/usbtv-core.c
@@ -109,6 +109,8 @@
 	return 0;
 
 usbtv_audio_fail:
+	/* we must not free at this point */
+	usb_get_dev(usbtv->udev);
 	usbtv_video_free(usbtv);
 
 usbtv_video_fail:
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 9eaab98..d4e93f1 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -101,7 +101,7 @@
 static int put_v4l2_window32(struct v4l2_window __user *kp,
 			     struct v4l2_window32 __user *up)
 {
-	struct v4l2_clip __user *kclips = kp->clips;
+	struct v4l2_clip __user *kclips;
 	struct v4l2_clip32 __user *uclips;
 	compat_caddr_t p;
 	u32 clipcount;
@@ -116,6 +116,8 @@
 	if (!clipcount)
 		return 0;
 
+	if (get_user(kclips, &kp->clips))
+		return -EFAULT;
 	if (get_user(p, &up->clips))
 		return -EFAULT;
 	uclips = compat_ptr(p);
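
The fix above accounts for kp being a user-space pointer: its clips field has to be fetched with get_user() rather than dereferenced directly. A hedged sketch of the pattern, with illustrative struct and field names:

/*
 * Hedged sketch of the user-pointer access pattern; struct example and its
 * ptr field are illustrative, not the v4l2 structures.
 */
#include <linux/errno.h>
#include <linux/uaccess.h>

struct example {
	void __user *ptr;
};

static int read_user_field(struct example __user *up, void __user **out)
{
	void __user *p;

	/* Copy the embedded pointer out of user memory; never deref 'up'. */
	if (get_user(p, &up->ptr))
		return -EFAULT;
	*out = p;
	return 0;
}
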
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 9ccf7f5..4299ce0 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -334,6 +334,10 @@
 	struct vb2_buffer *vb;
 	int ret;
 
+	/* Ensure that q->num_buffers + num_buffers does not exceed VB2_MAX_FRAME */
+	num_buffers = min_t(unsigned int, num_buffers,
+			    VB2_MAX_FRAME - q->num_buffers);
+
 	for (buffer = 0; buffer < num_buffers; ++buffer) {
 		/* Allocate videobuf buffer structures */
 		vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
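
The min_t() clamp above caps the request so the queue can never grow past VB2_MAX_FRAME. A standalone illustration of the same clamp (names are stand-ins, not videobuf2's):

/*
 * Standalone illustration of the clamp; MAX_FRAMES stands in for
 * VB2_MAX_FRAME and current_buffers is assumed to be <= MAX_FRAMES.
 */
#include <stdio.h>

#define MAX_FRAMES 32	/* stand-in for VB2_MAX_FRAME */

static unsigned int clamp_request(unsigned int current_buffers,
				  unsigned int requested)
{
	unsigned int room = MAX_FRAMES - current_buffers;

	return requested < room ? requested : room;
}

int main(void)
{
	printf("%u\n", clamp_request(30, 8));	/* prints 2 */
	return 0;
}
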
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index ab3227b..760cbf2 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -104,7 +104,7 @@
 			if (nums[i-1] + 1 != nums[i])
 				goto fail_map;
 		buf->vaddr = (__force void *)
-				ioremap_nocache(nums[0] << PAGE_SHIFT, size);
+			ioremap_nocache(__pfn_to_phys(nums[0]), size + offset);
 	} else {
 		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
 					PAGE_KERNEL);
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 02b5f69..14cf6df 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -2698,6 +2698,8 @@
 				__FILE__, __LINE__, iocnum);
 		return -ENODEV;
 	}
+	if (karg.hdr.id >= MPT_MAX_FC_DEVICES)
+		return -EINVAL;
 	dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n",
 	    ioc->name));
 
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 7ee1667..00dff9b 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1994,6 +1994,7 @@
 	.cmd_per_lun			= 7,
 	.use_clustering			= ENABLE_CLUSTERING,
 	.shost_attrs			= mptscsih_host_attrs,
+	.no_write_same			= 1,
 };
 
 static int mptsas_get_linkerrors(struct sas_phy *phy)
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index e3f4c39..a233173 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -825,4 +825,5 @@
 source "drivers/misc/genwqe/Kconfig"
 source "drivers/misc/echo/Kconfig"
 source "drivers/misc/cxl/Kconfig"
+source "drivers/misc/fpr_FingerprintCard/Kconfig"
 endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index f1c9467..8e5d0f6 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -57,6 +57,7 @@
 obj-$(CONFIG_CXL_BASE)		+= cxl/
 obj-$(CONFIG_PANEL)             += panel.o
 obj-$(CONFIG_QPNP_MISC) 	+= qpnp-misc.o
+obj-$(CONFIG_FPR_FPC) 	+= fpr_FingerprintCard/
 obj-$(CONFIG_MEMORY_STATE_TIME)	+= memory_state_time.o
 
 obj-$(CONFIG_UID_SYS_STATS)	+= uid_sys_stats.o
diff --git a/drivers/misc/cxl/flash.c b/drivers/misc/cxl/flash.c
index c63d61e..381a9a1 100644
--- a/drivers/misc/cxl/flash.c
+++ b/drivers/misc/cxl/flash.c
@@ -401,8 +401,10 @@
 	if (down_interruptible(&sem) != 0)
 		return -EPERM;
 
-	if (!(adapter = get_cxl_adapter(adapter_num)))
-		return -ENODEV;
+	if (!(adapter = get_cxl_adapter(adapter_num))) {
+		rc = -ENODEV;
+		goto err_unlock;
+	}
 
 	file->private_data = adapter;
 	continue_token = 0;
@@ -446,6 +448,8 @@
 		free_page((unsigned long) le);
 err:
 	put_device(&adapter->dev);
+err_unlock:
+	up(&sem);
 
 	return rc;
 }
diff --git a/drivers/misc/fpr_FingerprintCard/Kconfig b/drivers/misc/fpr_FingerprintCard/Kconfig
new file mode 100644
index 0000000..c9599e6
--- /dev/null
+++ b/drivers/misc/fpr_FingerprintCard/Kconfig
@@ -0,0 +1,10 @@
+#
+# FingerprintCard fingerprint driver
+#
+menu "FingerprintCard fingerprint driver"
+config FPR_FPC
+       default n
+       tristate "FPC_BTP fingerprint sensor support"
+       depends on SPI_MASTER
+
+endmenu
diff --git a/drivers/misc/fpr_FingerprintCard/Makefile b/drivers/misc/fpr_FingerprintCard/Makefile
new file mode 100644
index 0000000..96681eb
--- /dev/null
+++ b/drivers/misc/fpr_FingerprintCard/Makefile
@@ -0,0 +1,5 @@
+# Makefile for FingerprintCard fingerprint driver
+
+fpc1020-objs := fpc1020_platform_tee.o
+obj-$(CONFIG_FPR_FPC) += fpc1020.o
+
diff --git a/drivers/misc/fpr_FingerprintCard/fpc1020_platform_tee.c b/drivers/misc/fpr_FingerprintCard/fpc1020_platform_tee.c
new file mode 100644
index 0000000..a6ed374
--- /dev/null
+++ b/drivers/misc/fpr_FingerprintCard/fpc1020_platform_tee.c
@@ -0,0 +1,683 @@
+/*
+ * FPC1020 Fingerprint sensor device driver
+ *
+ * This driver controls the platform resources that the FPC fingerprint
+ * sensor needs to operate. Its main tasks are probing the sensor to verify
+ * that it is actually connected and reporting this to the kernel, enabling
+ * and disabling regulators, and controlling GPIOs such as the sensor reset
+ * and IRQ lines.
+ *
+ * The driver exposes most of its functionality in sysfs, which enables
+ * dynamic control of these features from, e.g., a user-space process.
+ *
+ * The sensor's IRQ events are pushed to the kernel's event handling system
+ * and exposed through the driver's event node.
+ *
+ * This driver will NOT send any commands to the sensor; it only controls
+ * the electrical parts.
+ *
+ *
+ * Copyright (c) 2015 Fingerprint Cards AB <tech@fingerprints.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License Version 2
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pinctrl/consumer.h>
+
+
+#define FPC_TTW_HOLD_TIME		1000
+#define RESET_LOW_SLEEP_MIN_US		5000
+#define RESET_LOW_SLEEP_MAX_US		(RESET_LOW_SLEEP_MIN_US + 100)
+#define RESET_HIGH_SLEEP1_MIN_US	100
+#define RESET_HIGH_SLEEP1_MAX_US	(RESET_HIGH_SLEEP1_MIN_US + 100)
+#define RESET_HIGH_SLEEP2_MIN_US	5000
+#define RESET_HIGH_SLEEP2_MAX_US	(RESET_HIGH_SLEEP2_MIN_US + 100)
+#define PWR_ON_SLEEP_MIN_US		100
+#define PWR_ON_SLEEP_MAX_US		(PWR_ON_SLEEP_MIN_US + 900)
+#define NUM_PARAMS_REG_ENABLE_SET	2
+
+#define RELEASE_WAKELOCK_W_V		"release_wakelock_with_verification"
+#define RELEASE_WAKELOCK		"release_wakelock"
+#define START_IRQS_RECEIVED_CNT		"start_irqs_received_counter"
+
+static const char * const pctl_names[] = {
+	"fpc1020_reset_reset",
+	"fpc1020_reset_active",
+	"fpc1020_irq_active",
+};
+
+struct vreg_config {
+	char *name;
+	unsigned long vmin;
+	unsigned long vmax;
+	int ua_load;
+};
+
+static const struct vreg_config vreg_conf[] = {
+	{ "vdd_ana", 1800000UL, 1800000UL, 6000, },
+	{ "vcc_spi", 1800000UL, 1800000UL, 10, },
+	{ "vdd_io", 1800000UL, 1800000UL, 6000, },
+};
+
+struct fpc1020_data {
+	struct device *dev;
+	struct pinctrl *fingerprint_pinctrl;
+	struct pinctrl_state *pinctrl_state[ARRAY_SIZE(pctl_names)];
+	struct regulator *vreg[ARRAY_SIZE(vreg_conf)];
+	struct wakeup_source ttw_wl;
+	struct mutex lock; /* To set/get exported values in sysfs */
+	int irq_gpio;
+	int rst_gpio;
+	int nbr_irqs_received;
+	int nbr_irqs_received_counter_start;
+	bool prepared;
+	atomic_t wakeup_enabled; /* Used both in ISR and non-ISR */
+};
+
+static int vreg_setup(struct fpc1020_data *fpc1020, const char *name,
+	bool enable)
+{
+	size_t i;
+	int rc;
+	struct regulator *vreg;
+	struct device *dev = fpc1020->dev;
+
+	for (i = 0; i < ARRAY_SIZE(vreg_conf); i++) {
+		const char *n = vreg_conf[i].name;
+
+		if (!memcmp(n, name, strlen(n)))
+			goto found;
+	}
+
+	dev_err(dev, "Regulator %s not found\n", name);
+
+	return -EINVAL;
+
+found:
+	vreg = fpc1020->vreg[i];
+	if (enable) {
+		if (!vreg) {
+			vreg = devm_regulator_get(dev, name);
+			if (IS_ERR_OR_NULL(vreg)) {
+				dev_err(dev, "Unable to get %s\n", name);
+				return PTR_ERR(vreg);
+			}
+		}
+
+		if (regulator_count_voltages(vreg) > 0) {
+			rc = regulator_set_voltage(vreg, vreg_conf[i].vmin,
+					vreg_conf[i].vmax);
+			if (rc)
+				dev_err(dev,
+					"Unable to set voltage on %s, %d\n",
+					name, rc);
+		}
+
+		rc = regulator_set_load(vreg, vreg_conf[i].ua_load);
+		if (rc < 0)
+			dev_err(dev, "Unable to set current on %s, %d\n",
+					name, rc);
+
+		rc = regulator_enable(vreg);
+		if (rc) {
+			dev_err(dev, "error enabling %s: %d\n", name, rc);
+			vreg = NULL;
+		}
+		fpc1020->vreg[i] = vreg;
+	} else {
+		if (vreg) {
+			if (regulator_is_enabled(vreg)) {
+				regulator_disable(vreg);
+				dev_dbg(dev, "disabled %s\n", name);
+			}
+			fpc1020->vreg[i] = NULL;
+		}
+		rc = 0;
+	}
+
+	return rc;
+}
+
+/*
+ * sysfs node for controlling clocks.
+ *
+ * This is disabled in the platform variant of this driver but kept for
+ * backwards compatibility. It only prints a debug message stating that the
+ * node is disabled.
+ */
+static ssize_t clk_enable_set(struct device *dev,
+	struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	dev_dbg(dev,
+		"clk_enable sysfs node not enabled in platform driver\n");
+
+	return count;
+}
+static DEVICE_ATTR(clk_enable, 0200, NULL, clk_enable_set);
+
+/*
+ * Tries to select the set of pins (GPIOs) defined in the pin control node of
+ * the device tree named @p name.
+ *
+ * The node can contain several entries, e.g. GPIOs, that are controlled when
+ * it is selected. The node may activate or deactivate the pins it contains;
+ * the action is defined in the device tree node itself and not here. The
+ * states used internally are fetched at probe time.
+ *
+ * @see pctl_names
+ * @see fpc1020_probe
+ */
+static int select_pin_ctl(struct fpc1020_data *fpc1020, const char *name)
+{
+	size_t i;
+	int rc;
+	struct device *dev = fpc1020->dev;
+
+	for (i = 0; i < ARRAY_SIZE(pctl_names); i++) {
+		const char *n = pctl_names[i];
+
+		if (!memcmp(n, name, strlen(n))) {
+			rc = pinctrl_select_state(fpc1020->fingerprint_pinctrl,
+					fpc1020->pinctrl_state[i]);
+			if (rc)
+				dev_err(dev, "cannot select '%s'\n", name);
+			else
+				dev_dbg(dev, "Selected '%s'\n", name);
+			goto exit;
+		}
+	}
+
+	rc = -EINVAL;
+	dev_err(dev, "%s:'%s' not found\n", __func__, name);
+
+exit:
+	return rc;
+}
+
+static ssize_t pinctl_set(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct fpc1020_data *fpc1020 = dev_get_drvdata(dev);
+	int rc;
+
+	mutex_lock(&fpc1020->lock);
+	rc = select_pin_ctl(fpc1020, buf);
+	mutex_unlock(&fpc1020->lock);
+
+	return rc ? rc : count;
+}
+static DEVICE_ATTR(pinctl_set, 0200, NULL, pinctl_set);
+
+static ssize_t regulator_enable_set(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct fpc1020_data *fpc1020 = dev_get_drvdata(dev);
+	char op;
+	char name[16];
+	int rc;
+	bool enable;
+
+	if (sscanf(buf, "%15[^,],%c", name, &op) != NUM_PARAMS_REG_ENABLE_SET)
+		return -EINVAL;
+	if (op == 'e')
+		enable = true;
+	else if (op == 'd')
+		enable = false;
+	else
+		return -EINVAL;
+
+	mutex_lock(&fpc1020->lock);
+	rc = vreg_setup(fpc1020, name, enable);
+	mutex_unlock(&fpc1020->lock);
+
+	return rc ? rc : count;
+}
+static DEVICE_ATTR(regulator_enable, 0200, NULL, regulator_enable_set);
+
+static int hw_reset(struct fpc1020_data *fpc1020)
+{
+	int irq_gpio;
+	int rc;
+
+	irq_gpio = gpio_get_value(fpc1020->irq_gpio);
+
+	rc = select_pin_ctl(fpc1020, "fpc1020_reset_active");
+
+	if (rc)
+		goto exit;
+
+	usleep_range(RESET_HIGH_SLEEP1_MIN_US, RESET_HIGH_SLEEP1_MAX_US);
+
+	rc = select_pin_ctl(fpc1020, "fpc1020_reset_reset");
+
+	if (rc)
+		goto exit;
+	usleep_range(RESET_LOW_SLEEP_MIN_US, RESET_LOW_SLEEP_MAX_US);
+
+	rc = select_pin_ctl(fpc1020, "fpc1020_reset_active");
+	if (rc)
+		goto exit;
+	usleep_range(RESET_HIGH_SLEEP2_MIN_US, RESET_HIGH_SLEEP2_MAX_US);
+
+	irq_gpio = gpio_get_value(fpc1020->irq_gpio);
+
+exit:
+	return rc;
+}
+
+static ssize_t hw_reset_set(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int rc = -EINVAL;
+	struct fpc1020_data *fpc1020 = dev_get_drvdata(dev);
+
+	if (!memcmp(buf, "reset", strlen("reset"))) {
+		mutex_lock(&fpc1020->lock);
+		rc = hw_reset(fpc1020);
+		mutex_unlock(&fpc1020->lock);
+	} else {
+		return rc;
+	}
+
+	return rc ? rc : count;
+}
+static DEVICE_ATTR(hw_reset, 0200, NULL, hw_reset_set);
+
+/*
+ * Sets up the GPIOs and regulators needed to correctly initialize the touch
+ * sensor so that it is ready for work.
+ *
+ * In the order required by the sensor spec, this function enables/disables
+ * the regulators and toggles the reset line, putting the sensor into the
+ * correct electrical power-on or power-off state.
+ *
+ * @see  device_prepare_set
+ * @note This function will not send any commands to the sensor it will only
+ *       control it "electrically".
+ */
+static int device_prepare(struct fpc1020_data *fpc1020, bool enable)
+{
+	int rc = 0;
+
+	mutex_lock(&fpc1020->lock);
+	if (enable && !fpc1020->prepared) {
+		fpc1020->prepared = true;
+		select_pin_ctl(fpc1020, "fpc1020_reset_reset");
+
+		rc = vreg_setup(fpc1020, "vcc_spi", true);
+		if (rc)
+			goto exit;
+
+		rc = vreg_setup(fpc1020, "vdd_io", true);
+		if (rc)
+			goto exit_1;
+
+		rc = vreg_setup(fpc1020, "vdd_ana", true);
+		if (rc)
+			goto exit_2;
+
+		usleep_range(PWR_ON_SLEEP_MIN_US, PWR_ON_SLEEP_MAX_US);
+
+		(void)select_pin_ctl(fpc1020, "fpc1020_reset_active");
+	} else if (!enable && fpc1020->prepared) {
+		rc = 0;
+		(void)select_pin_ctl(fpc1020, "fpc1020_reset_reset");
+
+		usleep_range(PWR_ON_SLEEP_MIN_US, PWR_ON_SLEEP_MAX_US);
+
+		(void)vreg_setup(fpc1020, "vdd_ana", false);
+exit_2:
+		(void)vreg_setup(fpc1020, "vdd_io", false);
+exit_1:
+		(void)vreg_setup(fpc1020, "vcc_spi", false);
+exit:
+		fpc1020->prepared = false;
+	}
+
+	mutex_unlock(&fpc1020->lock);
+
+	return rc;
+}
+
+/*
+ * sysfs node to enable/disable (power up/power down) the touch sensor
+ *
+ * @see device_prepare
+ */
+static ssize_t device_prepare_set(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int rc;
+	struct fpc1020_data *fpc1020 = dev_get_drvdata(dev);
+
+	if (!memcmp(buf, "enable", strlen("enable")))
+		rc = device_prepare(fpc1020, true);
+	else if (!memcmp(buf, "disable", strlen("disable")))
+		rc = device_prepare(fpc1020, false);
+	else
+		return -EINVAL;
+
+	return rc ? rc : count;
+}
+static DEVICE_ATTR(device_prepare, 0200, NULL, device_prepare_set);
+
+/**
+ * sysfs node for controlling whether the driver is allowed
+ * to wake up the platform on interrupt.
+ */
+static ssize_t wakeup_enable_set(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct fpc1020_data *fpc1020 = dev_get_drvdata(dev);
+	ssize_t ret = count;
+
+	mutex_lock(&fpc1020->lock);
+	if (!memcmp(buf, "enable", strlen("enable")))
+		atomic_set(&fpc1020->wakeup_enabled, 1);
+	else if (!memcmp(buf, "disable", strlen("disable")))
+		atomic_set(&fpc1020->wakeup_enabled, 0);
+	else
+		ret = -EINVAL;
+	mutex_unlock(&fpc1020->lock);
+
+	return ret;
+}
+static DEVICE_ATTR(wakeup_enable, 0200, NULL, wakeup_enable_set);
+
+
+/*
+ * sysfs node for controlling the wakelock.
+ */
+static ssize_t handle_wakelock_cmd(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct  fpc1020_data *fpc1020 = dev_get_drvdata(dev);
+	ssize_t ret = count;
+
+	mutex_lock(&fpc1020->lock);
+	if (!memcmp(buf, RELEASE_WAKELOCK_W_V,
+		min(count, strlen(RELEASE_WAKELOCK_W_V)))) {
+		if (fpc1020->nbr_irqs_received_counter_start ==
+				fpc1020->nbr_irqs_received) {
+			__pm_relax(&fpc1020->ttw_wl);
+		} else {
+			dev_dbg(dev, "Ignore releasing of wakelock %d != %d",
+				fpc1020->nbr_irqs_received_counter_start,
+				fpc1020->nbr_irqs_received);
+		}
+	} else if (!memcmp(buf, RELEASE_WAKELOCK, min(count,
+				strlen(RELEASE_WAKELOCK)))) {
+		__pm_relax(&fpc1020->ttw_wl);
+	} else if (!memcmp(buf, START_IRQS_RECEIVED_CNT,
+			min(count, strlen(START_IRQS_RECEIVED_CNT)))) {
+		fpc1020->nbr_irqs_received_counter_start =
+			fpc1020->nbr_irqs_received;
+	} else
+		ret = -EINVAL;
+	mutex_unlock(&fpc1020->lock);
+
+	return ret;
+}
+static DEVICE_ATTR(handle_wakelock, 0200, NULL, handle_wakelock_cmd);
+
+/*
+ * sysfs node to check the interrupt status of the sensor; the interrupt
+ * handler should call sysfs_notify() to allow userland to poll the node.
+ */
+static ssize_t irq_get(struct device *dev,
+	struct device_attribute *attr,
+	char *buf)
+{
+	struct fpc1020_data *fpc1020 = dev_get_drvdata(dev);
+	int irq = gpio_get_value(fpc1020->irq_gpio);
+
+	return scnprintf(buf, PAGE_SIZE, "%i\n", irq);
+}
+
+/*
+ * Writing to the irq node just emits a debug print and returns success;
+ * it is used for latency measurement.
+ */
+static ssize_t irq_ack(struct device *dev,
+	struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	struct fpc1020_data *fpc1020 = dev_get_drvdata(dev);
+
+	dev_dbg(fpc1020->dev, "%s\n", __func__);
+
+	return count;
+}
+static DEVICE_ATTR(irq, 0600 | 0200, irq_get, irq_ack);
+
+static struct attribute *attributes[] = {
+	&dev_attr_pinctl_set.attr,
+	&dev_attr_device_prepare.attr,
+	&dev_attr_regulator_enable.attr,
+	&dev_attr_hw_reset.attr,
+	&dev_attr_wakeup_enable.attr,
+	&dev_attr_handle_wakelock.attr,
+	&dev_attr_clk_enable.attr,
+	&dev_attr_irq.attr,
+	NULL
+};
+
+static const struct attribute_group attribute_group = {
+	.attrs = attributes,
+};
+
+static irqreturn_t fpc1020_irq_handler(int irq, void *handle)
+{
+	struct fpc1020_data *fpc1020 = handle;
+
+	pr_info("fpc1020 irq handler: %s\n", __func__);
+	mutex_lock(&fpc1020->lock);
+	if (atomic_read(&fpc1020->wakeup_enabled)) {
+		fpc1020->nbr_irqs_received++;
+		__pm_wakeup_event(&fpc1020->ttw_wl,
+					msecs_to_jiffies(FPC_TTW_HOLD_TIME));
+	}
+	mutex_unlock(&fpc1020->lock);
+
+	sysfs_notify(&fpc1020->dev->kobj, NULL, dev_attr_irq.attr.name);
+
+	return IRQ_HANDLED;
+}
+
+static int fpc1020_request_named_gpio(struct fpc1020_data *fpc1020,
+	const char *label, int *gpio)
+{
+	struct device *dev = fpc1020->dev;
+	struct device_node *np = dev->of_node;
+	int rc;
+
+	rc = of_get_named_gpio(np, label, 0);
+
+	if (rc < 0) {
+		dev_err(dev, "failed to get '%s'\n", label);
+		return rc;
+	}
+	*gpio = rc;
+
+	rc = devm_gpio_request(dev, *gpio, label);
+	if (rc) {
+		dev_err(dev, "failed to request gpio %d\n", *gpio);
+		return rc;
+	}
+	dev_dbg(dev, "%s %d\n", label, *gpio);
+
+	return 0;
+}
+
+static int fpc1020_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	int rc = 0;
+	size_t i;
+	int irqf;
+	struct fpc1020_data *fpc1020 = devm_kzalloc(dev, sizeof(*fpc1020),
+			GFP_KERNEL);
+	if (!fpc1020) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+
+	fpc1020->dev = dev;
+	platform_set_drvdata(pdev, fpc1020);
+
+	rc = fpc1020_request_named_gpio(fpc1020, "fpc,gpio_irq",
+			&fpc1020->irq_gpio);
+	if (rc)
+		goto exit;
+	rc = fpc1020_request_named_gpio(fpc1020, "fpc,gpio_rst",
+			&fpc1020->rst_gpio);
+	if (rc)
+		goto exit;
+
+	fpc1020->fingerprint_pinctrl = devm_pinctrl_get(dev);
+	if (IS_ERR(fpc1020->fingerprint_pinctrl)) {
+		if (PTR_ERR(fpc1020->fingerprint_pinctrl) == -EPROBE_DEFER) {
+			dev_info(dev, "pinctrl not ready\n");
+			rc = -EPROBE_DEFER;
+			goto exit;
+		}
+		dev_err(dev, "Target does not use pinctrl\n");
+		fpc1020->fingerprint_pinctrl = NULL;
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(pctl_names); i++) {
+		const char *n = pctl_names[i];
+		struct pinctrl_state *state =
+			pinctrl_lookup_state(fpc1020->fingerprint_pinctrl, n);
+		if (IS_ERR(state)) {
+			dev_err(dev, "cannot find '%s'\n", n);
+			rc = -EINVAL;
+			goto exit;
+		}
+		dev_info(dev, "found pin control %s\n", n);
+		fpc1020->pinctrl_state[i] = state;
+	}
+
+	rc = select_pin_ctl(fpc1020, "fpc1020_reset_reset");
+	if (rc)
+		goto exit;
+	rc = select_pin_ctl(fpc1020, "fpc1020_irq_active");
+	if (rc)
+		goto exit;
+
+	atomic_set(&fpc1020->wakeup_enabled, 0);
+
+	irqf = IRQF_TRIGGER_RISING | IRQF_ONESHOT;
+	if (of_property_read_bool(dev->of_node, "fpc,enable-wakeup")) {
+		irqf |= IRQF_NO_SUSPEND;
+		device_init_wakeup(dev, 1);
+	}
+
+	mutex_init(&fpc1020->lock);
+	rc = devm_request_threaded_irq(dev, gpio_to_irq(fpc1020->irq_gpio),
+			NULL, fpc1020_irq_handler, irqf,
+			dev_name(dev), fpc1020);
+	if (rc) {
+		dev_err(dev, "could not request irq %d\n",
+				gpio_to_irq(fpc1020->irq_gpio));
+		goto exit;
+	}
+
+	dev_info(dev, "requested irq %d\n", gpio_to_irq(fpc1020->irq_gpio));
+
+	/* Request that the interrupt should be wakeable */
+	enable_irq_wake(gpio_to_irq(fpc1020->irq_gpio));
+
+	wakeup_source_init(&fpc1020->ttw_wl, "fpc_ttw_wl");
+
+	rc = sysfs_create_group(&dev->kobj, &attribute_group);
+	if (rc) {
+		dev_err(dev, "could not create sysfs\n");
+		goto exit;
+	}
+
+	if (of_property_read_bool(dev->of_node, "fpc,enable-on-boot")) {
+		dev_info(dev, "Enabling hardware\n");
+		(void)device_prepare(fpc1020, true);
+	}
+
+	rc = hw_reset(fpc1020);
+
+	dev_info(dev, "%s: ok\n", __func__);
+
+exit:
+	return rc;
+}
+
+static int fpc1020_remove(struct platform_device *pdev)
+{
+	struct fpc1020_data *fpc1020 = platform_get_drvdata(pdev);
+
+	sysfs_remove_group(&pdev->dev.kobj, &attribute_group);
+	mutex_destroy(&fpc1020->lock);
+	wakeup_source_trash(&fpc1020->ttw_wl);
+	(void)vreg_setup(fpc1020, "vdd_ana", false);
+	(void)vreg_setup(fpc1020, "vdd_io", false);
+	(void)vreg_setup(fpc1020, "vcc_spi", false);
+	dev_info(&pdev->dev, "%s\n", __func__);
+
+	return 0;
+}
+
+static const struct of_device_id fpc1020_of_match[] = {
+	{ .compatible = "fpc,fpc1020", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, fpc1020_of_match);
+
+static struct platform_driver fpc1020_driver = {
+	.driver = {
+		.name	= "fpc1020",
+		.owner	= THIS_MODULE,
+		.of_match_table = fpc1020_of_match,
+	},
+	.probe	= fpc1020_probe,
+	.remove	= fpc1020_remove,
+};
+
+static int __init fpc1020_init(void)
+{
+	int rc = platform_driver_register(&fpc1020_driver);
+
+	if (!rc)
+		pr_info("%s OK\n", __func__);
+	else
+		pr_err("%s %d\n", __func__, rc);
+
+	return rc;
+}
+
+static void __exit fpc1020_exit(void)
+{
+	pr_info("%s\n", __func__);
+	platform_driver_unregister(&fpc1020_driver);
+}
+
+module_init(fpc1020_init);
+module_exit(fpc1020_exit);
+
+
+MODULE_DESCRIPTION("FPC1020 Fingerprint sensor device driver.");
+MODULE_LICENSE("GPL v2");
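
A user-space process would drive this driver purely through the sysfs nodes declared above: write to device_prepare to power the sensor, then block on the irq node, which the ISR wakes via sysfs_notify(). A hedged user-space sketch; the sysfs path is an assumption and depends on the platform device name:

/*
 * User-space sketch; FPC_SYSFS is an assumed path, and error handling is
 * reduced to the bare minimum.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define FPC_SYSFS "/sys/devices/platform/fpc1020"	/* assumed path */

int main(void)
{
	char buf[8] = "";
	int prep = open(FPC_SYSFS "/device_prepare", O_WRONLY);
	int irq = open(FPC_SYSFS "/irq", O_RDONLY);
	struct pollfd pfd = { .fd = irq, .events = POLLPRI | POLLERR };

	if (prep < 0 || irq < 0)
		return 1;

	write(prep, "enable", strlen("enable"));	/* power up the sensor */

	read(irq, buf, sizeof(buf) - 1);	/* dummy read arms the notify */
	poll(&pfd, 1, -1);			/* sleeps until sysfs_notify() */
	lseek(irq, 0, SEEK_SET);
	read(irq, buf, sizeof(buf) - 1);	/* current IRQ line level */
	printf("irq line: %s", buf);
	return 0;
}
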
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 41f3186..60f5a8d 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -551,7 +551,6 @@
 		break;
 
 	default:
-		dev_err(dev->dev, ": unsupported ioctl %d.\n", cmd);
 		rets = -ENOIOCTLCMD;
 	}
 
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 2b31ed3..07ae56f 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -4061,8 +4061,11 @@
 	int retry = 0;
 
 	do {
-		if (retry++)
+		if (retry++) {
+			mutex_unlock(&app_access_lock);
 			msleep(QSEECOM_TA_ION_ALLOCATE_DELAY);
+			mutex_lock(&app_access_lock);
+		}
 		ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
 			SZ_4K, ION_HEAP(ION_QSECOM_TA_HEAP_ID), 0);
 	} while (IS_ERR_OR_NULL(ihandle) &&
@@ -4212,8 +4215,12 @@
 	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
 			&resp, sizeof(resp));
 	if (ret) {
-		pr_err("scm_call to load failed : ret %d\n", ret);
-		ret = -EIO;
+		pr_err("scm_call to load failed : ret %d, result %x\n",
+			ret, resp.result);
+		if (resp.result == QSEOS_RESULT_FAIL_APP_ALREADY_LOADED)
+			ret = -EEXIST;
+		else
+			ret = -EIO;
 		goto exit_disable_clk_vote;
 	}
 
@@ -4465,6 +4472,7 @@
 	}
 	mutex_lock(&app_access_lock);
 
+recheck:
 	app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
 	strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
 	ret = __qseecom_check_app_exists(app_ireq, &app_id);
@@ -4494,7 +4502,10 @@
 		pr_debug("%s: Loading app for the first time'\n",
 				qseecom.pdev->init_name);
 		ret = __qseecom_load_fw(data, app_name, &app_id);
-		if (ret < 0)
+		if (ret == -EEXIST) {
+			pr_err("recheck if TA %s is loaded\n", app_name);
+			goto recheck;
+		} else if (ret < 0)
 			goto exit_ion_free;
 	}
 	data->client.app_id = app_id;
@@ -6979,19 +6990,6 @@
 		break;
 	}
 
-	case QSEECOM_IOCTL_SET_ENCDEC_INFO: {
-		struct qseecom_encdec_conf_t conf;
-
-		ret = copy_from_user(&conf, argp, sizeof(conf));
-		if (ret) {
-			pr_err("copy_from_user failed\n");
-			return -EFAULT;
-		}
-		ret = qcom_ice_set_fde_conf(conf.start_sector, conf.fs_size,
-					conf.index, conf.mode);
-		break;
-	}
-
 	case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
 		if ((data->listener.id == 0) ||
 			(data->type != QSEECOM_LISTENER_SERVICE)) {
@@ -8780,6 +8778,7 @@
 static int qseecom_remove(struct platform_device *pdev)
 {
 	struct qseecom_registered_kclient_list *kclient = NULL;
+	struct qseecom_registered_kclient_list *kclient_tmp = NULL;
 	unsigned long flags = 0;
 	int ret = 0;
 	int i;
@@ -8789,10 +8788,8 @@
 	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
 	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
 
-	list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
-								list) {
-		if (!kclient)
-			goto exit_irqrestore;
+	list_for_each_entry_safe(kclient, kclient_tmp,
+		&qseecom.registered_kclient_list_head, list) {
 
 		/* Break the loop if client handle is NULL */
 		if (!kclient->handle)
@@ -8816,7 +8813,7 @@
 	kzfree(kclient->handle);
 exit_free_kclient:
 	kzfree(kclient);
-exit_irqrestore:
+
 	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
 
 	if (qseecom.qseos_version > QSEEE_VERSION_00)
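
The retry-loop change above drops app_access_lock around the sleep between ION allocation attempts so other callers are not blocked for the whole back-off period. A standalone sketch of that unlock-sleep-relock pattern (pthreads; the allocate() stub and names are illustrative):

/*
 * Standalone sketch of the unlock-sleep-relock retry pattern; the caller is
 * assumed to hold 'lock' (mirroring app_access_lock), and allocate() is a
 * stand-in for the ION allocation call.
 */
#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static bool allocate(void)
{
	return false;	/* stand-in for ion_alloc() failing under pressure */
}

/* Caller must hold 'lock'. */
static bool alloc_with_retry(int max_tries, unsigned int delay_ms)
{
	bool ok = false;

	for (int try = 0; try < max_tries && !ok; try++) {
		if (try) {
			pthread_mutex_unlock(&lock);	/* don't hold it... */
			usleep(delay_ms * 1000);	/* ...while sleeping */
			pthread_mutex_lock(&lock);
		}
		ok = allocate();
	}
	return ok;
}
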
diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c
index 728c65a..a940f97 100644
--- a/drivers/misc/uid_sys_stats.c
+++ b/drivers/misc/uid_sys_stats.c
@@ -14,6 +14,7 @@
  */
 
 #include <linux/atomic.h>
+#include <linux/cpufreq_times.h>
 #include <linux/err.h>
 #include <linux/hashtable.h>
 #include <linux/init.h>
@@ -422,6 +423,10 @@
 		kstrtol(end_uid, 10, &uid_end) != 0) {
 		return -EINVAL;
 	}
+
+	/* Also remove uids from /proc/uid_time_in_state */
+	cpufreq_task_times_remove_uids(uid_start, uid_end);
+
 	rt_mutex_lock(&uid_lock);
 
 	for (; uid_start <= uid_end; uid_start++) {
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index f84a427..f735ab4 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -298,8 +298,11 @@
 	size_t pas_size;
 	size_t vas_size;
 	size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if);
-	const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
+	u64 num_pages;
 
+	if (size > SIZE_MAX - PAGE_SIZE)
+		return NULL;
+	num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
 	if (num_pages >
 		 (SIZE_MAX - queue_size) /
 		 (sizeof(*queue->kernel_if->u.g.pas) +
@@ -624,9 +627,12 @@
 {
 	struct vmci_queue *queue;
 	size_t queue_page_size;
-	const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
+	u64 num_pages;
 	const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
 
+	if (size > SIZE_MAX - PAGE_SIZE)
+		return NULL;
+	num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
 	if (num_pages > (SIZE_MAX - queue_size) /
 		 sizeof(*queue->kernel_if->u.h.page))
 		return NULL;
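
Both hunks above guard the rounded-up page count against wrapping by rejecting oversized requests before the division is computed. A standalone illustration of the guard (constants are stand-ins):

/*
 * Standalone illustration of rejecting the size before the rounded-up
 * division can wrap; PAGE_SZ is a stand-in for PAGE_SIZE.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ 4096u

static bool size_ok(uint64_t size, uint64_t *num_pages)
{
	if (size > UINT64_MAX - PAGE_SZ)	/* round-up would overflow */
		return false;
	*num_pages = (size + PAGE_SZ - 1) / PAGE_SZ + 1;	/* +1 header page */
	return true;
}

int main(void)
{
	uint64_t n;

	printf("%d\n", size_ok(UINT64_MAX, &n));	/* 0: rejected */
	printf("%d %llu\n", size_ok(8192, &n),
	       (unsigned long long)n);			/* 1 3 */
	return 0;
}
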
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 2405ae3..f78b659 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -3630,7 +3630,7 @@
 	 * or disable state so cannot receive any completion of
 	 * other requests.
 	 */
-	BUG_ON(test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
+	WARN_ON(test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
 
 	/* clear pending request */
 	BUG_ON(!test_and_clear_bit(cmdq_req->tag,
@@ -3664,7 +3664,7 @@
 out:
 
 	mmc_cmdq_clk_scaling_stop_busy(host, true, is_dcmd);
-	if (!test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state)) {
+	if (!(err || cmdq_req->resp_err)) {
 		mmc_host_clk_release(host);
 		wake_up(&ctx_info->wait);
 		mmc_put_card(host->card);
@@ -4099,6 +4099,13 @@
 			if (ret == -EBUSY || ret == -EAGAIN) {
 				mmc_blk_cmdq_requeue_rw_rq(mq, req);
 				mmc_put_card(host->card);
+			} else if (ret == -ENOMEM) {
+				/*
+				 * Elaborate error handling is not needed for
+				 * system errors. Let the higher layer decide
+				 * on the next steps.
+				 */
+				goto out;
 			}
 		}
 	}
@@ -4694,9 +4701,7 @@
 
 	dev_set_drvdata(&card->dev, md);
 
-#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
 	mmc_set_bus_resume_policy(card->host, 1);
-#endif
 
 	if (mmc_add_disk(md))
 		goto out;
@@ -4741,9 +4746,7 @@
 	pm_runtime_put_noidle(&card->dev);
 	mmc_blk_remove_req(md);
 	dev_set_drvdata(&card->dev, NULL);
-#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
 	mmc_set_bus_resume_policy(card->host, 0);
-#endif
 }
 
 static int _mmc_blk_suspend(struct mmc_card *card, bool wait)
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index aa8373d..66165d9 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -314,6 +314,8 @@
 						host->max_req_size / 512));
 	blk_queue_max_segment_size(mq->queue, host->max_seg_size);
 	blk_queue_max_segments(mq->queue, host->max_segs);
+	if (host->inlinecrypt_support)
+		queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, mq->queue);
 }
 
 /**
@@ -483,6 +485,9 @@
 success:
 	sema_init(&mq->thread_sem, 1);
 
+	if (host->inlinecrypt_support)
+		queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, mq->queue);
+
 	/* hook for pm qos legacy init */
 	if (card->host->ops->init)
 		card->host->ops->init(card->host);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 8700e72..69465f8 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1972,10 +1972,9 @@
  */
 void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
 {
-#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
 	if (mmc_bus_needs_resume(host))
 		mmc_resume_bus(host);
-#endif
+
 	__mmc_start_req(host, mrq);
 
 	if (!mrq->cap_cmd_during_tfr)
@@ -2418,10 +2417,9 @@
 {
 	pm_runtime_get_sync(&card->dev);
 	mmc_claim_host(card->host);
-#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+
 	if (mmc_bus_needs_resume(card->host))
 		mmc_resume_bus(card->host);
-#endif
 }
 EXPORT_SYMBOL(mmc_get_card);
 
@@ -3380,6 +3378,7 @@
 {
 	unsigned long flags;
 	int err = 0;
+	int card_present = true;
 
 	if (!mmc_bus_needs_resume(host))
 		return -EINVAL;
@@ -3390,10 +3389,28 @@
 	spin_unlock_irqrestore(&host->lock, flags);
 
 	mmc_bus_get(host);
-	if (host->bus_ops && !host->bus_dead && host->card) {
+	if (host->ops->get_cd)
+		card_present = host->ops->get_cd(host);
+
+	if (host->bus_ops && !host->bus_dead && host->card && card_present) {
 		mmc_power_up(host, host->card->ocr);
 		BUG_ON(!host->bus_ops->resume);
-		host->bus_ops->resume(host);
+		err = host->bus_ops->resume(host);
+		if (err) {
+			pr_err("%s: %s: resume failed: %d\n",
+				       mmc_hostname(host), __func__, err);
+			/*
+			 * If we have a cd-gpio based detection mechanism and
+			 * deferred resume is supported, we will not detect a
+			 * card removal event while the system is suspended.
+			 * So if resume fails after a system suspend/resume,
+			 * schedule the work to detect card presence.
+			 */
+			if (mmc_card_is_removable(host) &&
+					!(host->caps & MMC_CAP_NEEDS_POLL)) {
+				mmc_detect_change(host, 0);
+			}
+		}
 		if (mmc_card_cmdq(host->card)) {
 			err = mmc_cmdq_halt(host, false);
 			if (err)
@@ -4703,7 +4720,7 @@
 	struct mmc_host *host = container_of(
 		notify_block, struct mmc_host, pm_notify);
 	unsigned long flags;
-	int err = 0;
+	int err = 0, present = 0;
 
 	switch (mode) {
 	case PM_RESTORE_PREPARE:
@@ -4750,8 +4767,12 @@
 
 		spin_lock_irqsave(&host->lock, flags);
 		host->rescan_disable = 0;
+		if (host->ops->get_cd)
+			present = host->ops->get_cd(host);
+
 		if (mmc_bus_manual_resume(host) &&
-				!host->ignore_bus_resume_flags) {
+				!host->ignore_bus_resume_flags &&
+				present) {
 			spin_unlock_irqrestore(&host->lock, flags);
 			break;
 		}
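
The resume-path changes above gate deferred resume on actual card presence and fall back to a detect cycle when resume fails on a removable, non-polled slot. A hedged sketch of that shape as it might look inside drivers/mmc/core; it leans on core-internal helpers such as mmc_power_up() and struct mmc_bus_ops, so it is illustrative rather than a drop-in function:

/*
 * Hedged sketch only: relies on mmc-core-internal helpers visible inside
 * drivers/mmc/core, so it is not buildable as an out-of-tree unit.
 */
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

static int resume_bus_if_card_present(struct mmc_host *host)
{
	int err;

	/* Slot reports no card: nothing to power up or resume */
	if (host->ops->get_cd && !host->ops->get_cd(host))
		return 0;

	mmc_power_up(host, host->card->ocr);
	err = host->bus_ops->resume(host);
	if (err && mmc_card_is_removable(host) &&
	    !(host->caps & MMC_CAP_NEEDS_POLL))
		mmc_detect_change(host, 0);	/* re-run card detection */

	return err;
}
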
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 965d1f0..71a1cfb 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -1169,6 +1169,9 @@
  */
 static int mmc_sd_alive(struct mmc_host *host)
 {
+	if (host->ops->get_cd && !host->ops->get_cd(host))
+		return -ENOMEDIUM;
+
 	return mmc_send_status(host->card, NULL);
 }
 
@@ -1197,6 +1200,16 @@
 		return;
 	}
 
+	if (mmc_bus_needs_resume(host))
+		mmc_resume_bus(host);
+
+	if (host->ops->get_cd && !host->ops->get_cd(host)) {
+		err = -ENOMEDIUM;
+		mmc_card_set_removed(host->card);
+		mmc_card_clr_suspended(host->card);
+		goto out;
+	}
+
 	/*
 	 * Just check if our card has been removed.
 	 */
@@ -1219,6 +1232,7 @@
 	err = _mmc_detect_card_removed(host);
 #endif
 
+out:
 	mmc_put_card(host->card);
 
 	if (err) {
@@ -1303,6 +1317,11 @@
 	if (!mmc_card_suspended(host->card))
 		goto out;
 
+	if (host->ops->get_cd && !host->ops->get_cd(host)) {
+		mmc_card_clr_suspended(host->card);
+		goto out;
+	}
+
 	mmc_power_up(host, host->card->ocr);
 #ifdef CONFIG_MMC_PARANOID_SD_INIT
 	retries = 5;
@@ -1329,6 +1348,8 @@
 			mmc_hostname(host), __func__, err);
 		mmc_card_set_removed(host->card);
 		mmc_detect_change(host, msecs_to_jiffies(200));
+	} else if (err) {
+		goto out;
 	}
 	mmc_card_clr_suspended(host->card);
 
@@ -1398,6 +1419,9 @@
 
 static int mmc_sd_reset(struct mmc_host *host)
 {
+	if (host->ops->get_cd && !host->ops->get_cd(host))
+		return -ENOMEDIUM;
+
 	mmc_power_cycle(host, host->card->ocr);
 	return mmc_sd_init_card(host, host->card->ocr, host->card);
 }
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 91ad946..090f64da 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -35,6 +35,10 @@
 {
 	/* Schedule a card detection after a debounce timeout */
 	struct mmc_host *host = dev_id;
+	int present = host->ops->get_cd(host);
+
+	pr_debug("%s: cd gpio irq, gpio state %d (CARD_%s)\n",
+		mmc_hostname(host), present, present ? "INSERT" : "REMOVAL");
 
 	host->trigger_card_event = true;
 	mmc_detect_change(host, msecs_to_jiffies(200));
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 684087d..1752007 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -368,9 +368,9 @@
 		host->irq_mask &= ~irq;
 	else
 		host->irq_mask |= irq;
-	spin_unlock_irqrestore(&host->lock, flags);
 
 	writew(host->irq_mask, host->base + JZ_REG_MMC_IMASK);
+	spin_unlock_irqrestore(&host->lock, flags);
 }
 
 static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host,
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index 50dd6bd..524c8e0 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -33,6 +33,8 @@
 	const struct sdhci_iproc_data *data;
 	u32 shadow_cmd;
 	u32 shadow_blk;
+	bool is_cmd_shadowed;
+	bool is_blk_shadowed;
 };
 
 #define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18)
@@ -48,8 +50,22 @@
 
 static u16 sdhci_iproc_readw(struct sdhci_host *host, int reg)
 {
-	u32 val = sdhci_iproc_readl(host, (reg & ~3));
-	u16 word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_iproc_host *iproc_host = sdhci_pltfm_priv(pltfm_host);
+	u32 val;
+	u16 word;
+
+	if ((reg == SDHCI_TRANSFER_MODE) && iproc_host->is_cmd_shadowed) {
+		/* Get the saved transfer mode */
+		val = iproc_host->shadow_cmd;
+	} else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
+		   iproc_host->is_blk_shadowed) {
+		/* Get the saved block info */
+		val = iproc_host->shadow_blk;
+	} else {
+		val = sdhci_iproc_readl(host, (reg & ~3));
+	}
+	word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff;
 	return word;
 }
 
@@ -105,13 +121,15 @@
 
 	if (reg == SDHCI_COMMAND) {
 		/* Write the block now as we are issuing a command */
-		if (iproc_host->shadow_blk != 0) {
+		if (iproc_host->is_blk_shadowed) {
 			sdhci_iproc_writel(host, iproc_host->shadow_blk,
 				SDHCI_BLOCK_SIZE);
-			iproc_host->shadow_blk = 0;
+			iproc_host->is_blk_shadowed = false;
 		}
 		oldval = iproc_host->shadow_cmd;
-	} else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) {
+		iproc_host->is_cmd_shadowed = false;
+	} else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
+		   iproc_host->is_blk_shadowed) {
 		/* Block size and count are stored in shadow reg */
 		oldval = iproc_host->shadow_blk;
 	} else {
@@ -123,9 +141,11 @@
 	if (reg == SDHCI_TRANSFER_MODE) {
 		/* Save the transfer mode until the command is issued */
 		iproc_host->shadow_cmd = newval;
+		iproc_host->is_cmd_shadowed = true;
 	} else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) {
 		/* Save the block info until the command is issued */
 		iproc_host->shadow_blk = newval;
+		iproc_host->is_blk_shadowed = true;
 	} else {
 		/* Command or other regular 32-bit write */
 		sdhci_iproc_writel(host, newval, reg & ~3);
@@ -176,7 +196,6 @@
 	.caps1 = SDHCI_DRIVER_TYPE_C |
 		 SDHCI_DRIVER_TYPE_D |
 		 SDHCI_SUPPORT_DDR50,
-	.mmc_caps = MMC_CAP_1_8V_DDR,
 };
 
 static const struct sdhci_pltfm_data sdhci_bcm2835_pltfm_data = {
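
The sdhci-iproc change above replaces the old "shadow_blk != 0" heuristic with explicit is_cmd_shadowed/is_blk_shadowed flags, so a legitimately zero block word is no longer mistaken for "nothing pending". A self-contained sketch of the same write-coalescing idea over a fake 32-bit register file; register offsets and names are illustrative, not the controller's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t regs[4];	/* fake register file; only 32-bit writes allowed */

struct shadow {
	uint32_t blk;		/* pending BLOCK word */
	bool	 blk_valid;	/* validity flag instead of "blk != 0" */
};

static void writel(uint32_t val, unsigned int reg) { regs[reg >> 2] = val; }

/* 16-bit write: BLOCK words are coalesced, a COMMAND write flushes them first. */
static void writew(struct shadow *sh, uint16_t val, unsigned int reg)
{
	enum { REG_BLOCK = 0x04, REG_COMMAND = 0x0c };
	unsigned int shift = (reg & 2) * 8;
	uint32_t oldval, newval;

	if (reg == REG_COMMAND && sh->blk_valid) {
		writel(sh->blk, REG_BLOCK);	/* flush just before the command */
		sh->blk_valid = false;
	}

	oldval = ((reg & ~3u) == REG_BLOCK && sh->blk_valid) ?
		 sh->blk : regs[(reg & ~3u) >> 2];
	newval = (oldval & ~(0xffffu << shift)) | ((uint32_t)val << shift);

	if ((reg & ~3u) == REG_BLOCK) {
		sh->blk = newval;		/* defer, even when newval == 0 */
		sh->blk_valid = true;
	} else {
		writel(newval, reg & ~3u);
	}
}

int main(void)
{
	struct shadow sh = { 0 };

	writew(&sh, 0x0000, 0x04);	/* block word of 0 is still marked shadowed */
	writew(&sh, 0x0001, 0x0c);	/* command write flushes the block word */
	printf("BLOCK reg = %#x, COMMAND reg = %#x\n", regs[1], regs[3]);
	return 0;
}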
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index d91eb67..7d24213 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -5075,6 +5075,7 @@
 			goto vreg_deinit;
 		}
 		host->is_crypto_en = true;
+		msm_host->mmc->inlinecrypt_support = true;
 		/* Packed commands cannot be encrypted/decrypted using ICE */
 		msm_host->mmc->caps2 &= ~(MMC_CAP2_PACKED_WR |
 				MMC_CAP2_PACKED_WR_CONTROL);
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index b0b9ceb..cfb7947 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -492,6 +492,8 @@
 		slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
 		break;
 	case INTEL_MRFLD_SDIO:
+		/* Advertise 2.0v for compatibility with the SDIO card's OCR */
+		slot->host->ocr_mask = MMC_VDD_20_21 | MMC_VDD_165_195;
 		slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE |
 					 MMC_CAP_POWER_OFF_CARD;
 		break;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index b674b38..36aecd2 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1575,6 +1575,13 @@
 	if (mode != MMC_POWER_OFF) {
 		switch (1 << vdd) {
 		case MMC_VDD_165_195:
+		/*
+		 * Without a regulator, SDHCI does not support 2.0v
+		 * so we only get here if the driver deliberately
+		 * added the 2.0v range to ocr_avail. Map it to 1.8v
+		 * for the purpose of turning on the power.
+		 */
+		case MMC_VDD_20_21:
 			pwr = SDHCI_POWER_180;
 			break;
 		case MMC_VDD_29_30:
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 5e1b68c..e1b603c 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -45,6 +45,7 @@
 #define I82802AB	0x00ad
 #define I82802AC	0x00ac
 #define PF38F4476	0x881c
+#define M28F00AP30	0x8963
 /* STMicroelectronics chips */
 #define M50LPW080       0x002F
 #define M50FLW080A	0x0080
@@ -375,6 +376,17 @@
 		extp->MinorVersion = '1';
 }
 
+static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
+{
+	/*
+	 * Micron (formerly Numonyx) 1Gbit bottom-boot parts are buggy
+	 * w.r.t. Erase Suspend for their small Erase Blocks (0x8000)
+	 */
+	if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
+		return 1;
+	return 0;
+}
+
 static inline struct cfi_pri_intelext *
 read_pri_intelext(struct map_info *map, __u16 adr)
 {
@@ -831,21 +843,30 @@
 		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
 			goto sleep;
 
+		/* Do not allow suspend if the read/write targets the in-progress EB */
+		if ((adr & chip->in_progress_block_mask) ==
+		    chip->in_progress_block_addr)
+			goto sleep;
+
+		/* do not suspend small EBs on buggy Micron chips */
+		if (cfi_is_micron_28F00AP30(cfi, chip) &&
+		    (chip->in_progress_block_mask == ~(0x8000-1)))
+			goto sleep;
 
 		/* Erase suspend */
-		map_write(map, CMD(0xB0), adr);
+		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
 
 		/* If the flash has finished erasing, then 'erase suspend'
 		 * appears to make some (28F320) flash devices switch to
 		 * 'read' mode.  Make sure that we switch to 'read status'
 		 * mode so we get the right data. --rmk
 		 */
-		map_write(map, CMD(0x70), adr);
+		map_write(map, CMD(0x70), chip->in_progress_block_addr);
 		chip->oldstate = FL_ERASING;
 		chip->state = FL_ERASE_SUSPENDING;
 		chip->erase_suspended = 1;
 		for (;;) {
-			status = map_read(map, adr);
+			status = map_read(map, chip->in_progress_block_addr);
 			if (map_word_andequal(map, status, status_OK, status_OK))
 			        break;
 
@@ -1041,8 +1062,8 @@
 		   sending the 0x70 (Read Status) command to an erasing
 		   chip and expecting it to be ignored, that's what we
 		   do. */
-		map_write(map, CMD(0xd0), adr);
-		map_write(map, CMD(0x70), adr);
+		map_write(map, CMD(0xd0), chip->in_progress_block_addr);
+		map_write(map, CMD(0x70), chip->in_progress_block_addr);
 		chip->oldstate = FL_READY;
 		chip->state = FL_ERASING;
 		break;
@@ -1933,6 +1954,8 @@
 	map_write(map, CMD(0xD0), adr);
 	chip->state = FL_ERASING;
 	chip->erase_suspended = 0;
+	chip->in_progress_block_addr = adr;
+	chip->in_progress_block_mask = ~(len - 1);
 
 	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
 				   adr, len,
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 9dca881..107c05b 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -812,9 +812,10 @@
 		    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
 			goto sleep;
 
-		/* We could check to see if we're trying to access the sector
-		 * that is currently being erased. However, no user will try
-		 * anything like that so we just wait for the timeout. */
+		/* Do not allow suspend if the read/write targets the in-progress EB */
+		if ((adr & chip->in_progress_block_mask) ==
+		    chip->in_progress_block_addr)
+			goto sleep;
 
 		/* Erase suspend */
 		/* It's harmless to issue the Erase-Suspend and Erase-Resume
@@ -2263,6 +2264,7 @@
 	chip->state = FL_ERASING;
 	chip->erase_suspended = 0;
 	chip->in_progress_block_addr = adr;
+	chip->in_progress_block_mask = ~(map->size - 1);
 
 	INVALIDATE_CACHE_UDELAY(map, chip,
 				adr, map->size,
@@ -2352,6 +2354,7 @@
 	chip->state = FL_ERASING;
 	chip->erase_suspended = 0;
 	chip->in_progress_block_addr = adr;
+	chip->in_progress_block_mask = ~(len - 1);
 
 	INVALIDATE_CACHE_UDELAY(map, chip,
 				adr, len,
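
Both CFI hunks record the base address and mask of the block being erased (mask = ~(len - 1)) when the erase is issued, so that a later read/write can tell with one AND-and-compare whether it falls inside the in-progress block and must not trigger an erase suspend. The containment test in isolation, with made-up sizes:

#include <stdint.h>
#include <stdio.h>

/* (addr & mask) == block_base  <=>  addr lies inside the erasing block,
 * assuming len is a power of two and the block base is len-aligned,
 * exactly as recorded when the erase command was issued. */
static int hits_erasing_block(uint32_t addr, uint32_t block_base, uint32_t len)
{
	uint32_t mask = ~(len - 1);

	return (addr & mask) == block_base;
}

int main(void)
{
	uint32_t base = 0x20000, len = 0x8000;	/* illustrative 32 KiB block */

	printf("%d %d\n",
	       hits_erasing_block(0x20010, base, len),	/* 1: inside  */
	       hits_erasing_block(0x28000, base, len));	/* 0: outside */
	return 0;
}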
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index 7c0b27d..b479bd8 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -1889,6 +1889,8 @@
 	do {
 		uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8), map, cfi);
 		mask = (1 << (cfi->device_type * 8)) - 1;
+		if (ofs >= map->size)
+			return 0;
 		result = map_read(map, base + ofs);
 		bank++;
 	} while ((result.x[0] & mask) == CFI_MFR_CONTINUATION);
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 427039b..d9dab42 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -2047,18 +2047,20 @@
 
 	ret = nand_boot_init(this);
 	if (ret)
-		goto err_out;
+		goto err_nand_cleanup;
 	ret = chip->scan_bbt(mtd);
 	if (ret)
-		goto err_out;
+		goto err_nand_cleanup;
 
 	ret = mtd_device_register(mtd, NULL, 0);
 	if (ret)
-		goto err_out;
+		goto err_nand_cleanup;
 	return 0;
 
+err_nand_cleanup:
+	nand_cleanup(chip);
 err_out:
-	gpmi_nand_exit(this);
+	gpmi_free_dma_buffer(this);
 	return ret;
 }
 
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index a3e86e5..5fb4516 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -4785,6 +4785,11 @@
 		goto err_free;
 	}
 	ecc->total = ecc->steps * ecc->bytes;
+	if (ecc->total > mtd->oobsize) {
+		WARN(1, "Total number of ECC bytes exceeded oobsize\n");
+		ret = -EINVAL;
+		goto err_free;
+	}
 
 	/*
 	 * The number of bytes available for a client to place data into
diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c
index 1cb3f77..766b2c3 100644
--- a/drivers/mtd/tests/oobtest.c
+++ b/drivers/mtd/tests/oobtest.c
@@ -193,6 +193,9 @@
 		ops.datbuf    = NULL;
 		ops.oobbuf    = readbuf;
 		err = mtd_read_oob(mtd, addr, &ops);
+		if (mtd_is_bitflip(err))
+			err = 0;
+
 		if (err || ops.oobretlen != use_len) {
 			pr_err("error: readoob failed at %#llx\n",
 			       (long long)addr);
@@ -227,6 +230,9 @@
 			ops.datbuf    = NULL;
 			ops.oobbuf    = readbuf;
 			err = mtd_read_oob(mtd, addr, &ops);
+			if (mtd_is_bitflip(err))
+				err = 0;
+
 			if (err || ops.oobretlen != mtd->oobavail) {
 				pr_err("error: readoob failed at %#llx\n",
 						(long long)addr);
@@ -286,6 +292,9 @@
 
 	/* read entire block's OOB at one go */
 	err = mtd_read_oob(mtd, addr, &ops);
+	if (mtd_is_bitflip(err))
+		err = 0;
+
 	if (err || ops.oobretlen != len) {
 		pr_err("error: readoob failed at %#llx\n",
 		       (long long)addr);
@@ -527,6 +536,9 @@
 	pr_info("attempting to start read past end of OOB\n");
 	pr_info("an error is expected...\n");
 	err = mtd_read_oob(mtd, addr0, &ops);
+	if (mtd_is_bitflip(err))
+		err = 0;
+
 	if (err) {
 		pr_info("error occurred as expected\n");
 		err = 0;
@@ -571,6 +583,9 @@
 		pr_info("attempting to read past end of device\n");
 		pr_info("an error is expected...\n");
 		err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
+		if (mtd_is_bitflip(err))
+			err = 0;
+
 		if (err) {
 			pr_info("error occurred as expected\n");
 			err = 0;
@@ -615,6 +630,9 @@
 		pr_info("attempting to read past end of device\n");
 		pr_info("an error is expected...\n");
 		err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
+		if (mtd_is_bitflip(err))
+			err = 0;
+
 		if (err) {
 			pr_info("error occurred as expected\n");
 			err = 0;
@@ -684,6 +702,9 @@
 		ops.datbuf    = NULL;
 		ops.oobbuf    = readbuf;
 		err = mtd_read_oob(mtd, addr, &ops);
+		if (mtd_is_bitflip(err))
+			err = 0;
+
 		if (err)
 			goto out;
 		if (memcmpshow(addr, readbuf, writebuf,
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 46913ef2..479a5f0 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -244,7 +244,7 @@
 	 * in any case.
 	 */
 	if (mode & FMODE_WRITE) {
-		ret = -EPERM;
+		ret = -EROFS;
 		goto out_unlock;
 	}
 
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index e1da78f..68902b8 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -894,6 +894,17 @@
 		return -EINVAL;
 	}
 
+	/*
+	 * Both UBI and UBIFS have been designed for SLC NAND and NOR flashes.
+	 * MLC NAND is different and needs special care, otherwise UBI or UBIFS
+	 * will die soon and you will lose all your data.
+	 */
+	if (mtd->type == MTD_MLCNANDFLASH) {
+		pr_err("ubi: refuse attaching mtd%d - MLC NAND is not supported\n",
+			mtd->index);
+		return -EINVAL;
+	}
+
 	if (ubi_num == UBI_DEV_NUM_AUTO) {
 		/* Search for an empty slot in the @ubi_devices array */
 		for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 4f0bd6b..69dd216 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -362,7 +362,6 @@
 {
 	int i;
 
-	flush_work(&ubi->fm_work);
 	return_unused_pool_pebs(ubi, &ubi->fm_pool);
 	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
 
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index c1f5c29..b44c8d3 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -828,6 +828,24 @@
 	return ret;
 }
 
+static struct ubi_ainf_peb *clone_aeb(struct ubi_attach_info *ai,
+				      struct ubi_ainf_peb *old)
+{
+	struct ubi_ainf_peb *new;
+
+	new = ubi_alloc_aeb(ai, old->pnum, old->ec);
+	if (!new)
+		return NULL;
+
+	new->vol_id = old->vol_id;
+	new->sqnum = old->sqnum;
+	new->lnum = old->lnum;
+	new->scrub = old->scrub;
+	new->copy_flag = old->copy_flag;
+
+	return new;
+}
+
 /**
  * ubi_scan_fastmap - scan the fastmap.
  * @ubi: UBI device object
@@ -847,7 +865,7 @@
 	struct ubi_vid_hdr *vh;
 	struct ubi_ec_hdr *ech;
 	struct ubi_fastmap_layout *fm;
-	struct ubi_ainf_peb *tmp_aeb, *aeb;
+	struct ubi_ainf_peb *aeb;
 	int i, used_blocks, pnum, fm_anchor, ret = 0;
 	size_t fm_size;
 	__be32 crc, tmp_crc;
@@ -857,9 +875,16 @@
 	if (fm_anchor < 0)
 		return UBI_NO_FASTMAP;
 
-	/* Move all (possible) fastmap blocks into our new attach structure. */
-	list_for_each_entry_safe(aeb, tmp_aeb, &scan_ai->fastmap, u.list)
-		list_move_tail(&aeb->u.list, &ai->fastmap);
+	/* Copy all (possible) fastmap blocks into our new attach structure. */
+	list_for_each_entry(aeb, &scan_ai->fastmap, u.list) {
+		struct ubi_ainf_peb *new;
+
+		new = clone_aeb(ai, aeb);
+		if (!new)
+			return -ENOMEM;
+
+		list_add(&new->u.list, &ai->fastmap);
+	}
 
 	down_write(&ubi->fm_protect);
 	memset(ubi->fm_buf, 0, ubi->fm_size);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 551f0f8..91d8a48 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -450,7 +450,7 @@
 {
 	int i;
 
-	if (!client_info->slave)
+	if (!client_info->slave || !is_valid_ether_addr(client_info->mac_dst))
 		return;
 
 	for (i = 0; i < RLB_ARP_BURST_SIZE; i++) {
@@ -944,6 +944,10 @@
 	skb->priority = TC_PRIO_CONTROL;
 	skb->dev = slave->dev;
 
+	netdev_dbg(slave->bond->dev,
+		   "Send learning packet: dev %s mac %pM vlan %d\n",
+		   slave->dev->name, mac_addr, vid);
+
 	if (vid)
 		__vlan_hwaccel_put_tag(skb, vlan_proto, vid);
 
@@ -966,14 +970,13 @@
 	 */
 	rcu_read_lock();
 	netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
-		if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
-			if (strict_match &&
-			    ether_addr_equal_64bits(mac_addr,
-						    upper->dev_addr)) {
+		if (is_vlan_dev(upper) &&
+		    bond->nest_level == vlan_get_encap_level(upper) - 1) {
+			if (upper->addr_assign_type == NET_ADDR_STOLEN) {
 				alb_send_lp_vid(slave, mac_addr,
 						vlan_dev_vlan_proto(upper),
 						vlan_dev_vlan_id(upper));
-			} else if (!strict_match) {
+			} else {
 				alb_send_lp_vid(slave, upper->dev_addr,
 						vlan_dev_vlan_proto(upper),
 						vlan_dev_vlan_id(upper));
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 4907c9b..1a139d0 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1524,39 +1524,6 @@
 			goto err_close;
 	}
 
-	/* If the mode uses primary, then the following is handled by
-	 * bond_change_active_slave().
-	 */
-	if (!bond_uses_primary(bond)) {
-		/* set promiscuity level to new slave */
-		if (bond_dev->flags & IFF_PROMISC) {
-			res = dev_set_promiscuity(slave_dev, 1);
-			if (res)
-				goto err_close;
-		}
-
-		/* set allmulti level to new slave */
-		if (bond_dev->flags & IFF_ALLMULTI) {
-			res = dev_set_allmulti(slave_dev, 1);
-			if (res)
-				goto err_close;
-		}
-
-		netif_addr_lock_bh(bond_dev);
-
-		dev_mc_sync_multiple(slave_dev, bond_dev);
-		dev_uc_sync_multiple(slave_dev, bond_dev);
-
-		netif_addr_unlock_bh(bond_dev);
-	}
-
-	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
-		/* add lacpdu mc addr to mc list */
-		u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
-
-		dev_mc_add(slave_dev, lacpdu_multicast);
-	}
-
 	res = vlan_vids_add_by_dev(slave_dev, bond_dev);
 	if (res) {
 		netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n",
@@ -1687,8 +1654,7 @@
 	} /* switch(bond_mode) */
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-	slave_dev->npinfo = bond->dev->npinfo;
-	if (slave_dev->npinfo) {
+	if (bond->dev->npinfo) {
 		if (slave_enable_netpoll(new_slave)) {
 			netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
 			res = -EBUSY;
@@ -1719,6 +1685,40 @@
 		goto err_upper_unlink;
 	}
 
+	/* If the mode uses primary, then the following is handled by
+	 * bond_change_active_slave().
+	 */
+	if (!bond_uses_primary(bond)) {
+		/* set promiscuity level to new slave */
+		if (bond_dev->flags & IFF_PROMISC) {
+			res = dev_set_promiscuity(slave_dev, 1);
+			if (res)
+				goto err_sysfs_del;
+		}
+
+		/* set allmulti level to new slave */
+		if (bond_dev->flags & IFF_ALLMULTI) {
+			res = dev_set_allmulti(slave_dev, 1);
+			if (res) {
+				if (bond_dev->flags & IFF_PROMISC)
+					dev_set_promiscuity(slave_dev, -1);
+				goto err_sysfs_del;
+			}
+		}
+
+		netif_addr_lock_bh(bond_dev);
+		dev_mc_sync_multiple(slave_dev, bond_dev);
+		dev_uc_sync_multiple(slave_dev, bond_dev);
+		netif_addr_unlock_bh(bond_dev);
+
+		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+			/* add lacpdu mc addr to mc list */
+			u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
+
+			dev_mc_add(slave_dev, lacpdu_multicast);
+		}
+	}
+
 	bond->slave_cnt++;
 	bond_compute_features(bond);
 	bond_set_carrier(bond);
@@ -1732,6 +1732,8 @@
 	if (bond_mode_uses_xmit_hash(bond))
 		bond_update_slave_arr(bond, NULL);
 
+	bond->nest_level = dev_get_nest_level(bond_dev);
+
 	netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n",
 		    slave_dev->name,
 		    bond_is_active_slave(new_slave) ? "an active" : "a backup",
@@ -1742,6 +1744,9 @@
 	return 0;
 
 /* Undo stages on error */
+err_sysfs_del:
+	bond_sysfs_slave_del(new_slave);
+
 err_upper_unlink:
 	bond_upper_dev_unlink(bond, new_slave);
 
@@ -1749,9 +1754,6 @@
 	netdev_rx_handler_unregister(slave_dev);
 
 err_detach:
-	if (!bond_uses_primary(bond))
-		bond_hw_addr_flush(bond_dev, slave_dev);
-
 	vlan_vids_del_by_dev(slave_dev, bond_dev);
 	if (rcu_access_pointer(bond->primary_slave) == new_slave)
 		RCU_INIT_POINTER(bond->primary_slave, NULL);
@@ -2605,11 +2607,13 @@
 	bond_for_each_slave_rcu(bond, slave, iter) {
 		unsigned long trans_start = dev_trans_start(slave->dev);
 
+		slave->new_link = BOND_LINK_NOCHANGE;
+
 		if (slave->link != BOND_LINK_UP) {
 			if (bond_time_in_interval(bond, trans_start, 1) &&
 			    bond_time_in_interval(bond, slave->last_rx, 1)) {
 
-				slave->link  = BOND_LINK_UP;
+				slave->new_link = BOND_LINK_UP;
 				slave_state_changed = 1;
 
 				/* primary_slave has no meaning in round-robin
@@ -2636,7 +2640,7 @@
 			if (!bond_time_in_interval(bond, trans_start, 2) ||
 			    !bond_time_in_interval(bond, slave->last_rx, 2)) {
 
-				slave->link  = BOND_LINK_DOWN;
+				slave->new_link = BOND_LINK_DOWN;
 				slave_state_changed = 1;
 
 				if (slave->link_failure_count < UINT_MAX)
@@ -2667,6 +2671,11 @@
 		if (!rtnl_trylock())
 			goto re_arm;
 
+		bond_for_each_slave(bond, slave, iter) {
+			if (slave->new_link != BOND_LINK_NOCHANGE)
+				slave->link = slave->new_link;
+		}
+
 		if (slave_state_changed) {
 			bond_slave_state_change(bond);
 			if (BOND_MODE(bond) == BOND_MODE_XOR)
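
The loadbalance ARP monitor above now only proposes link transitions while it walks the slaves under RCU (slave->new_link) and commits them to slave->link after rtnl has been taken. A compact sketch of that propose-then-commit split, with plain structs and an elided lock standing in for RCU/rtnl; the traffic check is faked:

#include <stdio.h>

enum link { LINK_NOCHANGE, LINK_UP, LINK_DOWN };

struct slave {
	enum link link;		/* committed state, written only under the heavy lock */
	enum link new_link;	/* proposal, written from the lockless scan */
};

/* Phase 1: the lockless scan only records what *should* change. */
static int propose(struct slave *s, int n)
{
	int changed = 0;

	for (int i = 0; i < n; i++) {
		s[i].new_link = LINK_NOCHANGE;
		if (s[i].link != LINK_UP) {	/* pretend we just saw traffic */
			s[i].new_link = LINK_UP;
			changed = 1;
		}
	}
	return changed;
}

/* Phase 2: commit the proposals under the heavier lock (elided here). */
static void commit(struct slave *s, int n)
{
	for (int i = 0; i < n; i++)
		if (s[i].new_link != LINK_NOCHANGE)
			s[i].link = s[i].new_link;
}

int main(void)
{
	struct slave slaves[2] = { { LINK_DOWN, LINK_NOCHANGE },
				   { LINK_UP,   LINK_NOCHANGE } };

	if (propose(slaves, 2))
		commit(slaves, 2);
	printf("%d %d\n", slaves[0].link, slaves[1].link);	/* 1 1 */
	return 0;
}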
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index c9d61a6..3a75352 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -1179,7 +1179,7 @@
 
 	skb = alloc_can_skb(priv->netdev, &cf);
 	if (!skb) {
-		stats->tx_dropped++;
+		stats->rx_dropped++;
 		return;
 	}
 
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index e2512ab..e13c9cd 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -61,6 +61,8 @@
 
 #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
 
+#define ENA_REGS_ADMIN_INTR_MASK 1
+
 /*****************************************************************************/
 /*****************************************************************************/
 /*****************************************************************************/
@@ -232,11 +234,9 @@
 	tail_masked = admin_queue->sq.tail & queue_size_mask;
 
 	/* In case of queue FULL */
-	cnt = admin_queue->sq.tail - admin_queue->sq.head;
+	cnt = atomic_read(&admin_queue->outstanding_cmds);
 	if (cnt >= admin_queue->q_depth) {
-		pr_debug("admin queue is FULL (tail %d head %d depth: %d)\n",
-			 admin_queue->sq.tail, admin_queue->sq.head,
-			 admin_queue->q_depth);
+		pr_debug("admin queue is full.\n");
 		admin_queue->stats.out_of_space++;
 		return ERR_PTR(-ENOSPC);
 	}
@@ -508,15 +508,20 @@
 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
 						     struct ena_com_admin_queue *admin_queue)
 {
-	unsigned long flags;
-	u32 start_time;
+	unsigned long flags, timeout;
 	int ret;
 
-	start_time = ((u32)jiffies_to_usecs(jiffies));
+	timeout = jiffies + ADMIN_CMD_TIMEOUT_US;
 
-	while (comp_ctx->status == ENA_CMD_SUBMITTED) {
-		if ((((u32)jiffies_to_usecs(jiffies)) - start_time) >
-		    ADMIN_CMD_TIMEOUT_US) {
+	while (1) {
+		spin_lock_irqsave(&admin_queue->q_lock, flags);
+		ena_com_handle_admin_completion(admin_queue);
+		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+
+		if (comp_ctx->status != ENA_CMD_SUBMITTED)
+			break;
+
+		if (time_is_before_jiffies(timeout)) {
 			pr_err("Wait for completion (polling) timeout\n");
 			/* ENA didn't have any completion */
 			spin_lock_irqsave(&admin_queue->q_lock, flags);
@@ -528,10 +533,6 @@
 			goto err;
 		}
 
-		spin_lock_irqsave(&admin_queue->q_lock, flags);
-		ena_com_handle_admin_completion(admin_queue);
-		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
-
 		msleep(100);
 	}
 
@@ -1449,6 +1450,12 @@
 
 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
 {
+	u32 mask_value = 0;
+
+	if (polling)
+		mask_value = ENA_REGS_ADMIN_INTR_MASK;
+
+	writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
 	ena_dev->admin_queue.polling = polling;
 }
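
The polling loop above is restructured around an absolute deadline: it computes jiffies plus the timeout once, processes the completion queue, and only bails out once that deadline has passed, rather than repeatedly converting jiffies to microseconds and subtracting a start time. A userspace analogue of the deadline pattern using a monotonic clock; none of the ENA names or timeouts are used here:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Stand-in for "did the admin command complete yet?" */
static bool command_done(int iteration) { return iteration >= 3; }

int main(void)
{
	long long deadline = now_ns() + 500 * 1000000LL;	/* 500 ms budget */
	int i = 0;

	while (1) {
		if (command_done(i++))
			break;
		if (now_ns() > deadline) {	/* compare against the deadline, not a delta */
			fprintf(stderr, "poll timed out\n");
			return 1;
		}
		usleep(10000);			/* 10 ms between polls */
	}
	puts("completed");
	return 0;
}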
 
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index bfeaec5..0d9ce08 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1542,6 +1542,7 @@
 			  "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
 			  qid, rc);
 		ena_com_destroy_io_queue(ena_dev, ena_qid);
+		return rc;
 	}
 
 	ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
@@ -1606,6 +1607,7 @@
 			  "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
 			  qid, rc);
 		ena_com_destroy_io_queue(ena_dev, ena_qid);
+		return rc;
 	}
 
 	ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
@@ -2806,6 +2808,11 @@
 {
 	int release_bars;
 
+	if (ena_dev->mem_bar)
+		devm_iounmap(&pdev->dev, ena_dev->mem_bar);
+
+	devm_iounmap(&pdev->dev, ena_dev->reg_bar);
+
 	release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
 	pci_release_selected_regions(pdev, release_bars);
 }
@@ -2893,8 +2900,9 @@
 		goto err_free_ena_dev;
 	}
 
-	ena_dev->reg_bar = ioremap(pci_resource_start(pdev, ENA_REG_BAR),
-				   pci_resource_len(pdev, ENA_REG_BAR));
+	ena_dev->reg_bar = devm_ioremap(&pdev->dev,
+					pci_resource_start(pdev, ENA_REG_BAR),
+					pci_resource_len(pdev, ENA_REG_BAR));
 	if (!ena_dev->reg_bar) {
 		dev_err(&pdev->dev, "failed to remap regs bar\n");
 		rc = -EFAULT;
@@ -2914,8 +2922,9 @@
 	ena_set_push_mode(pdev, ena_dev, &get_feat_ctx);
 
 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
-		ena_dev->mem_bar = ioremap_wc(pci_resource_start(pdev, ENA_MEM_BAR),
-					      pci_resource_len(pdev, ENA_MEM_BAR));
+		ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
+						   pci_resource_start(pdev, ENA_MEM_BAR),
+						   pci_resource_len(pdev, ENA_MEM_BAR));
 		if (!ena_dev->mem_bar) {
 			rc = -EFAULT;
 			goto err_device_destroy;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 651f308..fca2e42 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1680,6 +1680,30 @@
 	}
 }
 
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_enet_acpi_match[] = {
+	{ "APMC0D05", XGENE_ENET1},
+	{ "APMC0D30", XGENE_ENET1},
+	{ "APMC0D31", XGENE_ENET1},
+	{ "APMC0D3F", XGENE_ENET1},
+	{ "APMC0D26", XGENE_ENET2},
+	{ "APMC0D25", XGENE_ENET2},
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
+#endif
+
+static const struct of_device_id xgene_enet_of_match[] = {
+	{.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
+	{.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
+	{.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
+	{.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
+	{.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
+
 static int xgene_enet_probe(struct platform_device *pdev)
 {
 	struct net_device *ndev;
@@ -1826,32 +1850,6 @@
 	xgene_enet_remove(pdev);
 }
 
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id xgene_enet_acpi_match[] = {
-	{ "APMC0D05", XGENE_ENET1},
-	{ "APMC0D30", XGENE_ENET1},
-	{ "APMC0D31", XGENE_ENET1},
-	{ "APMC0D3F", XGENE_ENET1},
-	{ "APMC0D26", XGENE_ENET2},
-	{ "APMC0D25", XGENE_ENET2},
-	{ }
-};
-MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
-#endif
-
-#ifdef CONFIG_OF
-static const struct of_device_id xgene_enet_of_match[] = {
-	{.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
-	{.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
-	{.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
-	{.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
-	{.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
-	{},
-};
-
-MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
-#endif
-
 static struct platform_driver xgene_enet_driver = {
 	.driver = {
 		   .name = "xgene-enet",
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 49f4cafe..86a32fe 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -529,7 +529,8 @@
 	int i;
 
 	for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) {
-		int len = dma_desc[i].ctl1 & BGMAC_DESC_CTL1_LEN;
+		u32 ctl1 = le32_to_cpu(dma_desc[i].ctl1);
+		unsigned int len = ctl1 & BGMAC_DESC_CTL1_LEN;
 
 		slot = &ring->slots[i];
 		dev_kfree_skb(slot->skb);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ca6c471..31287ce 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3887,15 +3887,26 @@
 		/* when transmitting in a vf, start bd must hold the ethertype
 		 * for fw to enforce it
 		 */
+		u16 vlan_tci = 0;
 #ifndef BNX2X_STOP_ON_ERROR
-		if (IS_VF(bp))
+		if (IS_VF(bp)) {
 #endif
-			tx_start_bd->vlan_or_ethertype =
-				cpu_to_le16(ntohs(eth->h_proto));
+			/* An in-band VLAN still needs to be considered for enforcement */
+			if (__vlan_get_tag(skb, &vlan_tci)) {
+				tx_start_bd->vlan_or_ethertype =
+					cpu_to_le16(ntohs(eth->h_proto));
+			} else {
+				tx_start_bd->bd_flags.as_bitfield |=
+					(X_ETH_INBAND_VLAN <<
+					 ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
+				tx_start_bd->vlan_or_ethertype =
+					cpu_to_le16(vlan_tci);
+			}
 #ifndef BNX2X_STOP_ON_ERROR
-		else
+		} else {
 			/* used by FW for packet accounting */
 			tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
+		}
 #endif
 	}
 
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 3aa993b..ca57eb5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3401,6 +3401,9 @@
 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
 	struct hwrm_vnic_tpa_cfg_input req = {0};
 
+	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+		return 0;
+
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
 
 	if (tpa_flags) {
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 795a133..4ffbe85 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8720,14 +8720,15 @@
 	tg3_mem_rx_release(tp);
 	tg3_mem_tx_release(tp);
 
-	/* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
-	tg3_full_lock(tp, 0);
+	/* tp->hw_stats can be referenced safely:
+	 *     1. under rtnl_lock
+	 *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
+	 */
 	if (tp->hw_stats) {
 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
 				  tp->hw_stats, tp->stats_mapping);
 		tp->hw_stats = NULL;
 	}
-	tg3_full_unlock(tp);
 }
 
 /*
@@ -14161,7 +14162,7 @@
 	struct tg3 *tp = netdev_priv(dev);
 
 	spin_lock_bh(&tp->lock);
-	if (!tp->hw_stats) {
+	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
 		*stats = tp->net_stats_prev;
 		spin_unlock_bh(&tp->lock);
 		return stats;
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 0f68118..a36e386 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -2845,7 +2845,7 @@
 static void
 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
 {
-	memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+	strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
 }
 
 static void
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 0c2a32a..c395b21 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -836,8 +836,6 @@
 
 	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
 			       adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
-	if (err)
-		t4_free_sge_resources(adap);
 	return err;
 }
 
@@ -2742,6 +2740,16 @@
 	return -EOPNOTSUPP;
 }
 
+static netdev_features_t cxgb_fix_features(struct net_device *dev,
+					   netdev_features_t features)
+{
+	/* Disable GRO, if RX_CSUM is disabled */
+	if (!(features & NETIF_F_RXCSUM))
+		features &= ~NETIF_F_GRO;
+
+	return features;
+}
+
 static const struct net_device_ops cxgb4_netdev_ops = {
 	.ndo_open             = cxgb_open,
 	.ndo_stop             = cxgb_close,
@@ -2766,6 +2774,7 @@
 #endif
 	.ndo_set_tx_maxrate   = cxgb_set_tx_maxrate,
 	.ndo_setup_tc         = cxgb_setup_tc,
+	.ndo_fix_features     = cxgb_fix_features,
 };
 
 #ifdef CONFIG_PCI_IOV
@@ -4929,6 +4938,13 @@
 	if (err)
 		goto out_free_dev;
 
+	err = setup_fw_sge_queues(adapter);
+	if (err) {
+		dev_err(adapter->pdev_dev,
+			"FW sge queue allocation failed, err %d", err);
+		goto out_free_dev;
+	}
+
 	/*
 	 * The card is now ready to go.  If any errors occur during device
 	 * registration we do not fail the whole card but rather proceed only
@@ -4972,7 +4988,6 @@
 	}
 
 	print_adapter_info(adapter);
-	setup_fw_sge_queues(adapter);
 	return 0;
 
 sriov:
@@ -5024,6 +5039,7 @@
 #endif
 
  out_free_dev:
+	t4_free_sge_resources(adapter);
 	free_some_resources(adapter);
 	if (adapter->flags & USING_MSIX)
 		free_msix_info(adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 2471ff4..23d6c44 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -342,6 +342,7 @@
 {
 	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
 
+	adap->sge.uld_rxq_info[uld_type] = NULL;
 	kfree(rxq_info->rspq_id);
 	kfree(rxq_info->uldrxq);
 	kfree(rxq_info);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 6fd3be6..ebeeb35 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -6185,13 +6185,18 @@
 	if (!t4_fw_matches_chip(adap, fw_hdr))
 		return -EINVAL;
 
+	/* Disable FW_OK flag so that mbox commands with FW_OK flag set
+	 * won't be sent when we are flashing FW.
+	 */
+	adap->flags &= ~FW_OK;
+
 	ret = t4_fw_halt(adap, mbox, force);
 	if (ret < 0 && !force)
-		return ret;
+		goto out;
 
 	ret = t4_load_fw(adap, fw_data, size);
 	if (ret < 0)
-		return ret;
+		goto out;
 
 	/*
 	 * Older versions of the firmware don't understand the new
@@ -6202,7 +6207,17 @@
 	 * its header flags to see if it advertises the capability.
 	 */
 	reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
-	return t4_fw_restart(adap, mbox, reset);
+	ret = t4_fw_restart(adap, mbox, reset);
+
+	/* Grab potentially new Firmware Device Log parameters so we can see
+	 * how healthy the new Firmware is.  It's okay to contact the new
+	 * Firmware for these parameters even though, as far as it's
+	 * concerned, we've never said "HELLO" to it ...
+	 */
+	(void)t4_init_devlog_params(adap);
+out:
+	adap->flags |= FW_OK;
+	return ret;
 }
 
 /**
@@ -8073,7 +8088,16 @@
 		ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
 		if (ret)
 			break;
-		idx = (idx + 1) & UPDBGLARDPTR_M;
+
+		/* Bits 0-3 of UpDbgLaRdPtr range from 0000 to 1001 and
+		 * identify the 32-bit portion of the full 312-bit data
+		 */
+		if (is_t6(adap->params.chip) && (idx & 0xf) >= 9)
+			idx = (idx & 0xff0) + 0x10;
+		else
+			idx++;
+		/* address can't exceed 0xfff */
+		idx &= UPDBGLARDPTR_M;
 	}
 restart:
 	if (cfg & UPDBGLAEN_F) {
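
On T6 the debug LA read pointer's low nibble only counts 0 through 9 (ten 32-bit words covering a 312-bit entry), so instead of a plain increment the hunk skips from x9 straight to the next 0x10-aligned entry and keeps the pointer inside its 12-bit field. A tiny demo of that stepping rule; only the stepping arithmetic is reproduced, not any register access:

#include <stdio.h>

/* Low nibble runs 0..9 (ten 32-bit words per 312-bit entry); past 9 we
 * jump to the next 16-aligned entry, and the pointer wraps at 12 bits. */
static unsigned int next_idx(unsigned int idx)
{
	if ((idx & 0xf) >= 9)
		idx = (idx & 0xff0) + 0x10;
	else
		idx++;
	return idx & 0xfff;	/* the read pointer is a 12-bit field */
}

int main(void)
{
	unsigned int idx = 0x008;

	for (int i = 0; i < 4; i++) {
		printf("%#05x\n", idx);	/* 0x008 0x009 0x010 0x011 */
		idx = next_idx(idx);
	}
	return 0;
}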
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index f3ed9ce..9d64e8e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2616,8 +2616,8 @@
 int t4vf_sge_init(struct adapter *adapter)
 {
 	struct sge_params *sge_params = &adapter->params.sge;
-	u32 fl0 = sge_params->sge_fl_buffer_size[0];
-	u32 fl1 = sge_params->sge_fl_buffer_size[1];
+	u32 fl_small_pg = sge_params->sge_fl_buffer_size[0];
+	u32 fl_large_pg = sge_params->sge_fl_buffer_size[1];
 	struct sge *s = &adapter->sge;
 
 	/*
@@ -2625,9 +2625,20 @@
 	 * the Physical Function Driver.  Ideally we should be able to deal
 	 * with _any_ configuration.  Practice is different ...
 	 */
-	if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
+
+	/* We only bother using the Large Page logic if the Large Page Buffer
+	 * is larger than our Page Size Buffer.
+	 */
+	if (fl_large_pg <= fl_small_pg)
+		fl_large_pg = 0;
+
+	/* The Page Size Buffer must be exactly equal to our Page Size and the
+	 * Large Page Size Buffer should be 0 (per above) or a power of 2.
+	 */
+	if (fl_small_pg != PAGE_SIZE ||
+	    (fl_large_pg & (fl_large_pg - 1)) != 0) {
 		dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
-			fl0, fl1);
+			fl_small_pg, fl_large_pg);
 		return -EINVAL;
 	}
 	if ((sge_params->sge_control & RXPKTCPLMODE_F) !=
@@ -2639,8 +2650,8 @@
 	/*
 	 * Now translate the adapter parameters into our internal forms.
 	 */
-	if (fl1)
-		s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
+	if (fl_large_pg)
+		s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
 	s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
 			? 128 : 64);
 	s->pktshift = PKTSHIFT_G(sge_params->sge_control);
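
The cxgb4vf SGE init above relaxes its free-list buffer checks: the large-page buffer is ignored when it is not actually larger than the page-size buffer, and otherwise only has to be a power of two, tested with the classic `x & (x - 1)` trick (in the driver a value of 0 simply means "large buffer unused" and also passes). The power-of-two predicate on its own:

#include <stdio.h>

/* A non-zero x is a power of two iff clearing its lowest set bit leaves 0. */
static int is_power_of_two(unsigned long x)
{
	return x != 0 && (x & (x - 1)) == 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       is_power_of_two(65536),	/* 1 */
	       is_power_of_two(24576),	/* 0 */
	       is_power_of_two(0));	/* 0 (the driver special-cases 0 separately) */
	return 0;
}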
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 48f82ab..dda63b2 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1726,6 +1726,8 @@
 	}
 
 	for (i = 0; i < enic->rq_count; i++) {
+		/* enable rq before updating rq desc */
+		vnic_rq_enable(&enic->rq[i]);
 		vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
 		/* Need at least one buffer on ring to get going */
 		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
@@ -1737,8 +1739,6 @@
 
 	for (i = 0; i < enic->wq_count; i++)
 		vnic_wq_enable(&enic->wq[i]);
-	for (i = 0; i < enic->rq_count; i++)
-		vnic_rq_enable(&enic->rq[i]);
 
 	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
 		enic_dev_add_station_addr(enic);
@@ -1765,8 +1765,12 @@
 	return 0;
 
 err_out_free_rq:
-	for (i = 0; i < enic->rq_count; i++)
+	for (i = 0; i < enic->rq_count; i++) {
+		err = vnic_rq_disable(&enic->rq[i]);
+		if (err)
+			return err;
 		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+	}
 	enic_dev_notify_unset(enic);
 err_out_free_intr:
 	enic_unset_affinity_hint(enic);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 05e5b38..fe00f71 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2371,6 +2371,10 @@
 static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
 {
 }
+
+static inline void fec_enet_clear_ethtool_stats(struct net_device *dev)
+{
+}
 #endif /* !defined(CONFIG_M5272) */
 
 static int fec_enet_nway_reset(struct net_device *dev)
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index c88918c..641b916 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -1036,7 +1036,7 @@
 	set_bucket(dtsec->regs, bucket, true);
 
 	/* Create element to be added to the driver hash table */
-	hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
+	hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC);
 	if (!hash_entry)
 		return -ENOMEM;
 	hash_entry->addr = addr;
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 446c7b3..a10de1e 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -381,7 +381,7 @@
 {
 	const struct of_device_id *id =
 		of_match_device(fsl_pq_mdio_match, &pdev->dev);
-	const struct fsl_pq_mdio_data *data = id->data;
+	const struct fsl_pq_mdio_data *data;
 	struct device_node *np = pdev->dev.of_node;
 	struct resource res;
 	struct device_node *tbi;
@@ -389,6 +389,13 @@
 	struct mii_bus *new_bus;
 	int err;
 
+	if (!id) {
+		dev_err(&pdev->dev, "Failed to match device\n");
+		return -ENODEV;
+	}
+
+	data = id->data;
+
 	dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible);
 
 	new_bus = mdiobus_alloc_size(sizeof(*priv));
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index e3b41ba..60bd1b3 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2935,7 +2935,7 @@
 static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
 			     struct sk_buff *skb, bool first)
 {
-	unsigned int size = lstatus & BD_LENGTH_MASK;
+	int size = lstatus & BD_LENGTH_MASK;
 	struct page *page = rxb->page;
 	bool last = !!(lstatus & BD_LFLAG(RXBD_LAST));
 
@@ -2950,11 +2950,16 @@
 		if (last)
 			size -= skb->len;
 
-		/* in case the last fragment consisted only of the FCS */
+		/* Add the last fragment if it contains something other than
+		 * the FCS, otherwise drop it and trim off any part of the FCS
+		 * that was already received.
+		 */
 		if (size > 0)
 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
 					rxb->page_offset + RXBUF_ALIGNMENT,
 					size, GFAR_RXB_TRUESIZE);
+		else if (size < 0)
+			pskb_trim(skb, skb->len + size);
 	}
 
 	/* try reuse page */
@@ -3070,9 +3075,6 @@
 	if (ndev->features & NETIF_F_RXCSUM)
 		gfar_rx_checksum(skb, fcb);
 
-	/* Tell the skb what kind of packet this is */
-	skb->protocol = eth_type_trans(skb, ndev);
-
 	/* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
 	 * Even if vlan rx accel is disabled, on some chips
 	 * RXFCB_VLN is pseudo randomly set.
@@ -3143,13 +3145,15 @@
 			continue;
 		}
 
+		gfar_process_frame(ndev, skb);
+
 		/* Increment the number of packets */
 		total_pkts++;
 		total_bytes += skb->len;
 
 		skb_record_rx_queue(skb, rx_queue->qindex);
 
-		gfar_process_frame(ndev, skb);
+		skb->protocol = eth_type_trans(skb, ndev);
 
 		/* Send the packet up the stack */
 		napi_gro_receive(&rx_queue->grp->napi_rx, skb);
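
The gianfar receive path above keeps the last-fragment size as a signed int: when the final buffer contributes nothing but FCS bytes, the subtraction goes to zero or negative, and a negative result trims the already-queued data back by that amount instead of leaving trailing FCS in the skb. Leaving the exact descriptor semantics aside, the sign handling looks like the following sketch, with made-up lengths:

#include <stdio.h>

/* The last buffer's descriptor reports a total length; subtracting what
 * the skb already holds can be zero or negative when that tail is only
 * FCS, and a negative remainder means earlier fragments must be trimmed. */
static void handle_last_fragment(int reported_total, int skb_len)
{
	int size = reported_total - skb_len;

	if (size > 0)
		printf("add %d more payload bytes\n", size);
	else if (size < 0)
		printf("trim skb to %d bytes\n", skb_len + size);
	else
		printf("last buffer held no payload, add nothing\n");
}

int main(void)
{
	handle_last_fragment(1500, 1492);	/* add 8  */
	handle_last_fragment(1500, 1502);	/* trim 2 */
	handle_last_fragment(1500, 1500);	/* add nothing */
	return 0;
}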
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 34b5e69..02a03bc 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -671,7 +671,7 @@
 
 static int hns_gmac_get_sset_count(int stringset)
 {
-	if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
+	if (stringset == ETH_SS_STATS)
 		return ARRAY_SIZE(g_gmac_stats_string);
 
 	return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index 4ecb809..6ea8722 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -422,7 +422,7 @@
 
 int hns_ppe_get_sset_count(int stringset)
 {
-	if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
+	if (stringset == ETH_SS_STATS)
 		return ETH_PPE_STATIC_NUM;
 	return 0;
 }
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index fbbbbff..f3be9ac 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -798,7 +798,7 @@
  */
 int hns_rcb_get_ring_sset_count(int stringset)
 {
-	if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
+	if (stringset == ETH_SS_STATS)
 		return HNS_RING_STATIC_REG_NUM;
 
 	return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 86a496d..6be0cae 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -1017,8 +1017,10 @@
 			cnt--;
 
 		return cnt;
-	} else {
+	} else if (stringset == ETH_SS_STATS) {
 		return (HNS_NET_STATS_CNT + ops->get_sset_count(h, stringset));
+	} else {
+		return -EOPNOTSUPP;
 	}
 }
 
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 8f13919..5977b69 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -342,6 +342,7 @@
 {
 	struct emac_regs __iomem *p = dev->emacp;
 	int n = 20;
+	bool __maybe_unused try_internal_clock = false;
 
 	DBG(dev, "reset" NL);
 
@@ -354,6 +355,7 @@
 	}
 
 #ifdef CONFIG_PPC_DCR_NATIVE
+do_retry:
 	/*
 	 * PPC460EX/GT Embedded Processor Advanced User's Manual
 	 * section 28.10.1 Mode Register 0 (EMACx_MR0) states:
@@ -361,10 +363,19 @@
 	 * of the EMAC. If none is present, select the internal clock
 	 * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
 	 * After a soft reset, select the external clock.
+	 *
+	 * The AR8035-A PHY on the Meraki MR24 does not provide a TX Clk if
+	 * the ethernet cable is not attached. This causes the reset to time
+	 * out, and the PHY detection code in emac_init_phy() is unable to
+	 * communicate with and detect the AR8035-A PHY. As a result, the
+	 * emac driver bails out early and the user has no ethernet.
+	 * In order to stay compatible with existing configurations, the
+	 * driver temporarily switches to the internal clock after the
+	 * first reset fails.
 	 */
 	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
-		if (dev->phy_address == 0xffffffff &&
-		    dev->phy_map == 0xffffffff) {
+		if (try_internal_clock || (dev->phy_address == 0xffffffff &&
+					   dev->phy_map == 0xffffffff)) {
 			/* No PHY: select internal loop clock before reset */
 			dcri_clrset(SDR0, SDR0_ETH_CFG,
 				    0, SDR0_ETH_CFG_ECS << dev->cell_index);
@@ -382,8 +393,15 @@
 
 #ifdef CONFIG_PPC_DCR_NATIVE
 	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
-		if (dev->phy_address == 0xffffffff &&
-		    dev->phy_map == 0xffffffff) {
+		if (!n && !try_internal_clock) {
+			/* first attempt has timed out. */
+			n = 20;
+			try_internal_clock = true;
+			goto do_retry;
+		}
+
+		if (try_internal_clock || (dev->phy_address == 0xffffffff &&
+					   dev->phy_map == 0xffffffff)) {
 			/* No PHY: restore external clock source after reset */
 			dcri_clrset(SDR0, SDR0_ETH_CFG,
 				    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 49094c9..897a87a 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -994,6 +994,7 @@
 			netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
 			/* free the entry */
 			next->rx_comp.first = 0;
+			dev_kfree_skb_any(rx_buff->skb);
 			remove_buff_from_pool(adapter, rx_buff);
 			break;
 		}
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 8a48656..7ddac95 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1600,7 +1600,7 @@
 	 * we have already determined whether we have link or not.
 	 */
 	if (!mac->autoneg)
-		return -E1000_ERR_CONFIG;
+		return 1;
 
 	/* Auto-Neg is enabled.  Auto Speed Detection takes care
 	 * of MAC speed/duplex configuration.  So we only need to
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index f457c57..db73564 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -450,7 +450,7 @@
 	 * we have already determined whether we have link or not.
 	 */
 	if (!mac->autoneg)
-		return -E1000_ERR_CONFIG;
+		return 1;
 
 	/* Auto-Neg is enabled.  Auto Speed Detection takes care
 	 * of MAC speed/duplex configuration.  So we only need to
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 528a926..9c95222 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1182,6 +1182,7 @@
 	struct e1000_hw *hw = &adapter->hw;
 
 	if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) {
+		struct sk_buff *skb = adapter->tx_hwtstamp_skb;
 		struct skb_shared_hwtstamps shhwtstamps;
 		u64 txstmp;
 
@@ -1190,9 +1191,14 @@
 
 		e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp);
 
-		skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps);
-		dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
+		/* Clear the global tx_hwtstamp_skb pointer and force writes
+		 * prior to notifying the stack of a Tx timestamp.
+		 */
 		adapter->tx_hwtstamp_skb = NULL;
+		wmb(); /* force write prior to skb_tstamp_tx */
+
+		skb_tstamp_tx(skb, &shhwtstamps);
+		dev_kfree_skb_any(skb);
 	} else if (time_after(jiffies, adapter->tx_hwtstamp_start
 			      + adapter->tx_timeout_factor * HZ)) {
 		dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
@@ -2325,8 +2331,8 @@
 {
 	struct pci_dev *pdev = adapter->pdev;
 
-	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
-					GFP_KERNEL);
+	ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma,
+					 GFP_KERNEL);
 	if (!ring->desc)
 		return -ENOMEM;
 
@@ -6645,12 +6651,17 @@
 static int e1000e_pm_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
+	int rc;
 
 	e1000e_flush_lpic(pdev);
 
 	e1000e_pm_freeze(dev);
 
-	return __e1000_shutdown(pdev, false);
+	rc = __e1000_shutdown(pdev, false);
+	if (rc)
+		e1000e_pm_thaw(dev);
+
+	return rc;
 }
 
 static int e1000e_pm_resume(struct device *dev)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 0562938..ea5ea65 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -803,8 +803,12 @@
 	if (vid >= VLAN_N_VID)
 		return -EINVAL;
 
-	/* Verify we have permission to add VLANs */
-	if (hw->mac.vlan_override)
+	/* Verify that we have permission to add VLANs. If this is a request
+	 * to remove a VLAN, we still want to allow the user to remove the
+	 * VLAN device. In that case, we need to clear the bit in the
+	 * active_vlans bitmask.
+	 */
+	if (set && hw->mac.vlan_override)
 		return -EACCES;
 
 	/* update active_vlans bitmask */
@@ -823,6 +827,12 @@
 			rx_ring->vid &= ~FM10K_VLAN_CLEAR;
 	}
 
+	/* If our VLAN has been overridden, there is no reason to send VLAN
+	 * removal requests as they will be silently ignored.
+	 */
+	if (hw->mac.vlan_override)
+		return 0;
+
 	/* Do not remove default VLAN ID related entries from VLAN and MAC
 	 * tables
 	 */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index ddf478d..614f93e 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -154,6 +154,7 @@
 	adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
 	adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
 	caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+	       I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF |
 	       I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
 	       I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
 	       I40E_VIRTCHNL_VF_OFFLOAD_VLAN |
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index a7895c4..9eb9b68 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -721,6 +721,7 @@
  **/
 static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
 {
+	struct sk_buff *skb = adapter->ptp_tx_skb;
 	struct e1000_hw *hw = &adapter->hw;
 	struct skb_shared_hwtstamps shhwtstamps;
 	u64 regval;
@@ -748,10 +749,17 @@
 	shhwtstamps.hwtstamp =
 		ktime_add_ns(shhwtstamps.hwtstamp, adjust);
 
-	skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
-	dev_kfree_skb_any(adapter->ptp_tx_skb);
+	/* Clear the lock early before calling skb_tstamp_tx so that
+	 * applications are not woken up before the lock bit is clear. We use
+	 * a copy of the skb pointer to ensure other threads can't change it
+	 * while we're notifying the stack.
+	 */
 	adapter->ptp_tx_skb = NULL;
 	clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
+
+	/* Notify the stack and free the skb after we've unlocked */
+	skb_tstamp_tx(skb, &shhwtstamps);
+	dev_kfree_skb_any(skb);
 }
 
 /**
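
The e1000e and igb timestamp hunks above apply the same ordering fix: copy the skb pointer to a local, clear the shared in-progress pointer (and, for igb, release the state bit) before calling skb_tstamp_tx(), so an application woken by the notification cannot observe the stale pointer or lock. A small C11 sketch of that publish order, with an atomic pointer standing in for the driver's slot and lock bit; the names are illustrative only:

#include <stdatomic.h>
#include <stdio.h>

struct skb { int id; };

/* Shared "timestamp in progress" slot, cleared before anyone is notified. */
static _Atomic(struct skb *) tx_tstamp_skb;

static void notify_stack(struct skb *skb) { printf("tstamp for skb %d\n", skb->id); }

static void complete_tx_timestamp(void)
{
	/* Take a private copy first ... */
	struct skb *skb = atomic_load(&tx_tstamp_skb);

	/* ... publish "slot free" before any waiter can run ... */
	atomic_store_explicit(&tx_tstamp_skb, NULL, memory_order_release);

	/* ... and only then hand the timestamp (and the skb) away. */
	notify_stack(skb);
}

int main(void)
{
	static struct skb s = { 42 };

	atomic_store(&tx_tstamp_skb, &s);
	complete_tx_timestamp();
	return 0;
}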
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index fa46326..17b8178 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1080,6 +1080,7 @@
 	}
 	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
 
+	q_map = 0;
 	/* Enable all initialized RXQs. */
 	for (queue = 0; queue < rxq_number; queue++) {
 		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 941c8e2..93ab0b3 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -5079,7 +5079,7 @@
 	INIT_WORK(&hw->restart_work, sky2_restart);
 
 	pci_set_drvdata(pdev, hw);
-	pdev->d3_delay = 150;
+	pdev->d3_delay = 200;
 
 	return 0;
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index b04760a..af2e6ea 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -156,57 +156,63 @@
 static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(netdev);
+	struct mlx4_en_port_profile *prof = priv->prof;
 	struct mlx4_en_dev *mdev = priv->mdev;
+	u8 tx_pause, tx_ppp, rx_pause, rx_ppp;
 
 	if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
 		return 1;
 
 	if (priv->cee_config.pfc_state) {
 		int tc;
+		rx_ppp = prof->rx_ppp;
+		tx_ppp = prof->tx_ppp;
 
-		priv->prof->rx_pause = 0;
-		priv->prof->tx_pause = 0;
 		for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) {
 			u8 tc_mask = 1 << tc;
 
 			switch (priv->cee_config.dcb_pfc[tc]) {
 			case pfc_disabled:
-				priv->prof->tx_ppp &= ~tc_mask;
-				priv->prof->rx_ppp &= ~tc_mask;
+				tx_ppp &= ~tc_mask;
+				rx_ppp &= ~tc_mask;
 				break;
 			case pfc_enabled_full:
-				priv->prof->tx_ppp |= tc_mask;
-				priv->prof->rx_ppp |= tc_mask;
+				tx_ppp |= tc_mask;
+				rx_ppp |= tc_mask;
 				break;
 			case pfc_enabled_tx:
-				priv->prof->tx_ppp |= tc_mask;
-				priv->prof->rx_ppp &= ~tc_mask;
+				tx_ppp |= tc_mask;
+				rx_ppp &= ~tc_mask;
 				break;
 			case pfc_enabled_rx:
-				priv->prof->tx_ppp &= ~tc_mask;
-				priv->prof->rx_ppp |= tc_mask;
+				tx_ppp &= ~tc_mask;
+				rx_ppp |= tc_mask;
 				break;
 			default:
 				break;
 			}
 		}
-		en_dbg(DRV, priv, "Set pfc on\n");
+		rx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->rx_pause;
+		tx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->tx_pause;
 	} else {
-		priv->prof->rx_pause = 1;
-		priv->prof->tx_pause = 1;
-		en_dbg(DRV, priv, "Set pfc off\n");
+		rx_ppp = 0;
+		tx_ppp = 0;
+		rx_pause = prof->rx_pause;
+		tx_pause = prof->tx_pause;
 	}
 
 	if (mlx4_SET_PORT_general(mdev->dev, priv->port,
 				  priv->rx_skb_size + ETH_FCS_LEN,
-				  priv->prof->tx_pause,
-				  priv->prof->tx_ppp,
-				  priv->prof->rx_pause,
-				  priv->prof->rx_ppp)) {
+				  tx_pause, tx_ppp, rx_pause, rx_ppp)) {
 		en_err(priv, "Failed setting pause params\n");
 		return 1;
 	}
 
+	prof->tx_ppp = tx_ppp;
+	prof->rx_ppp = rx_ppp;
+	prof->tx_pause = tx_pause;
+	prof->rx_pause = rx_pause;
+
 	return 0;
 }
 
@@ -310,6 +316,7 @@
 		}
 
 		switch (ets->tc_tsa[i]) {
+		case IEEE_8021QAZ_TSA_VENDOR:
 		case IEEE_8021QAZ_TSA_STRICT:
 			break;
 		case IEEE_8021QAZ_TSA_ETS:
@@ -347,6 +354,10 @@
 	/* higher TC means higher priority => lower pg */
 	for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) {
 		switch (ets->tc_tsa[i]) {
+		case IEEE_8021QAZ_TSA_VENDOR:
+			pg[i] = MLX4_EN_TC_VENDOR;
+			tc_tx_bw[i] = MLX4_EN_BW_MAX;
+			break;
 		case IEEE_8021QAZ_TSA_STRICT:
 			pg[i] = num_strict++;
 			tc_tx_bw[i] = MLX4_EN_BW_MAX;
@@ -403,6 +414,7 @@
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_port_profile *prof = priv->prof;
 	struct mlx4_en_dev *mdev = priv->mdev;
+	u32 tx_pause, tx_ppp, rx_pause, rx_ppp;
 	int err;
 
 	en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n",
@@ -411,23 +423,26 @@
 			pfc->mbc,
 			pfc->delay);
 
-	prof->rx_pause = !pfc->pfc_en;
-	prof->tx_pause = !pfc->pfc_en;
-	prof->rx_ppp = pfc->pfc_en;
-	prof->tx_ppp = pfc->pfc_en;
+	rx_pause = prof->rx_pause && !pfc->pfc_en;
+	tx_pause = prof->tx_pause && !pfc->pfc_en;
+	rx_ppp = pfc->pfc_en;
+	tx_ppp = pfc->pfc_en;
 
 	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
 				    priv->rx_skb_size + ETH_FCS_LEN,
-				    prof->tx_pause,
-				    prof->tx_ppp,
-				    prof->rx_pause,
-				    prof->rx_ppp);
-	if (err)
+				    tx_pause, tx_ppp, rx_pause, rx_ppp);
+	if (err) {
 		en_err(priv, "Failed setting pause params\n");
-	else
-		mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
-						prof->rx_ppp, prof->rx_pause,
-						prof->tx_ppp, prof->tx_pause);
+		return err;
+	}
+
+	mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
+					rx_ppp, rx_pause, tx_ppp, tx_pause);
+
+	prof->tx_ppp = tx_ppp;
+	prof->rx_ppp = rx_ppp;
+	prof->rx_pause = rx_pause;
+	prof->tx_pause = tx_pause;
 
 	return err;
 }
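
The mlx4 DCB hunks above all follow the same shape: compute the new pause/PFC state in locals, program the firmware, and copy the result into the cached profile only if the command succeeded, so a failed call no longer leaves the cache claiming settings the port never received. A minimal sketch of that pattern, assuming a hypothetical hw_apply_pause() command and settings structure standing in for mlx4_SET_PORT_general() and struct mlx4_en_port_profile:

struct pause_settings {
	u8 rx_pause, tx_pause, rx_ppp, tx_ppp;
};

int hw_apply_pause(const struct pause_settings *set);	/* hypothetical firmware call */

static int set_pfc(struct pause_settings *cached, u8 pfc_en)
{
	struct pause_settings want = *cached;
	int err;

	/* Per-priority pause (PFC) and global pause are mutually exclusive. */
	want.rx_ppp = pfc_en;
	want.tx_ppp = pfc_en;
	want.rx_pause = cached->rx_pause && !pfc_en;
	want.tx_pause = cached->tx_pause && !pfc_en;

	err = hw_apply_pause(&want);
	if (err)
		return err;		/* cached state stays untouched */

	*cached = want;			/* commit only after the device accepted it */
	return 0;
}
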
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index bdda17d..9a4c4f8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -970,6 +970,22 @@
 	if (!coal->tx_max_coalesced_frames_irq)
 		return -EINVAL;
 
+	if (coal->tx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
+	    coal->rx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
+	    coal->rx_coalesce_usecs_low > MLX4_EN_MAX_COAL_TIME ||
+	    coal->rx_coalesce_usecs_high > MLX4_EN_MAX_COAL_TIME) {
+		netdev_info(dev, "%s: maximum coalesce time supported is %d usecs\n",
+			    __func__, MLX4_EN_MAX_COAL_TIME);
+		return -ERANGE;
+	}
+
+	if (coal->tx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS ||
+	    coal->rx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS) {
+		netdev_info(dev, "%s: maximum coalesced frames supported is %d\n",
+			    __func__, MLX4_EN_MAX_COAL_PKTS);
+		return -ERANGE;
+	}
+
 	priv->rx_frames = (coal->rx_max_coalesced_frames ==
 			   MLX4_EN_AUTO_CONF) ?
 				MLX4_EN_RX_COAL_TARGET :
@@ -1003,27 +1019,32 @@
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
+	u8 tx_pause, tx_ppp, rx_pause, rx_ppp;
 	int err;
 
 	if (pause->autoneg)
 		return -EINVAL;
 
-	priv->prof->tx_pause = pause->tx_pause != 0;
-	priv->prof->rx_pause = pause->rx_pause != 0;
+	tx_pause = !!(pause->tx_pause);
+	rx_pause = !!(pause->rx_pause);
+	rx_ppp = priv->prof->rx_ppp && !(tx_pause || rx_pause);
+	tx_ppp = priv->prof->tx_ppp && !(tx_pause || rx_pause);
+
 	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
 				    priv->rx_skb_size + ETH_FCS_LEN,
-				    priv->prof->tx_pause,
-				    priv->prof->tx_ppp,
-				    priv->prof->rx_pause,
-				    priv->prof->rx_ppp);
-	if (err)
-		en_err(priv, "Failed setting pause params\n");
-	else
-		mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
-						priv->prof->rx_ppp,
-						priv->prof->rx_pause,
-						priv->prof->tx_ppp,
-						priv->prof->tx_pause);
+				    tx_pause, tx_ppp, rx_pause, rx_ppp);
+	if (err) {
+		en_err(priv, "Failed setting pause params, err = %d\n", err);
+		return err;
+	}
+
+	mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
+					rx_ppp, rx_pause, tx_ppp, tx_pause);
+
+	priv->prof->tx_pause = tx_pause;
+	priv->prof->rx_pause = rx_pause;
+	priv->prof->tx_ppp = tx_ppp;
+	priv->prof->rx_ppp = rx_ppp;
 
 	return err;
 }
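
The coalescing check added above is a plain bounds test on user-supplied ethtool values before any of them are applied; -ERANGE tells userspace the request itself was out of range rather than failing later in hardware. A compressed sketch, where the MAX_COAL_* limits are placeholders for whatever the device's descriptor fields can actually hold:

#define MAX_COAL_USECS	U16_MAX		/* placeholder hardware limits */
#define MAX_COAL_FRAMES	U16_MAX

static int check_coalesce(const struct ethtool_coalesce *coal)
{
	if (coal->rx_coalesce_usecs > MAX_COAL_USECS ||
	    coal->tx_coalesce_usecs > MAX_COAL_USECS)
		return -ERANGE;

	if (coal->rx_max_coalesced_frames > MAX_COAL_FRAMES ||
	    coal->tx_max_coalesced_frames > MAX_COAL_FRAMES)
		return -ERANGE;

	return 0;
}
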
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index bf7628d..22c3fdd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -163,9 +163,9 @@
 		params->udp_rss = 0;
 	}
 	for (i = 1; i <= MLX4_MAX_PORTS; i++) {
-		params->prof[i].rx_pause = 1;
+		params->prof[i].rx_pause = !(pfcrx || pfctx);
 		params->prof[i].rx_ppp = pfcrx;
-		params->prof[i].tx_pause = 1;
+		params->prof[i].tx_pause = !(pfcrx || pfctx);
 		params->prof[i].tx_ppp = pfctx;
 		params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
 		params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index d223e7c..0160c93 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -3125,6 +3125,13 @@
 	priv->msg_enable = MLX4_EN_MSG_LEVEL;
 #ifdef CONFIG_MLX4_EN_DCB
 	if (!mlx4_is_slave(priv->mdev->dev)) {
+		u8 prio;
+
+		for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; ++prio) {
+			priv->ets.prio_tc[prio] = prio;
+			priv->ets.tc_tsa[prio]  = IEEE_8021QAZ_TSA_VENDOR;
+		}
+
 		priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
 			DCB_CAP_DCBX_VER_IEEE;
 		priv->flags |= MLX4_EN_DCB_ENABLED;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 5411ca4..cb7c3ef 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2983,6 +2983,7 @@
 		mlx4_err(dev, "Failed to create file for port %d\n", port);
 		devlink_port_unregister(&info->devlink_port);
 		info->port = -1;
+		return err;
 	}
 
 	sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
@@ -3004,9 +3005,10 @@
 				   &info->port_attr);
 		devlink_port_unregister(&info->devlink_port);
 		info->port = -1;
+		return err;
 	}
 
-	return err;
+	return 0;
 }
 
 static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 1a670b6..0710b36 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -35,6 +35,7 @@
 #include <linux/etherdevice.h>
 
 #include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
 #include <linux/export.h>
 
 #include "mlx4.h"
@@ -985,16 +986,21 @@
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
 
+	if (!mlx4_qp_lookup(dev, rule->qpn)) {
+		mlx4_err_rule(dev, "QP doesn't exist\n", rule);
+		ret = -EINVAL;
+		goto out;
+	}
+
 	trans_rule_ctrl_to_hw(rule, mailbox->buf);
 
 	size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
 
 	list_for_each_entry(cur, &rule->list, list) {
 		ret = parse_trans_rule(dev, cur, mailbox->buf + size);
-		if (ret < 0) {
-			mlx4_free_cmd_mailbox(dev, mailbox);
-			return ret;
-		}
+		if (ret < 0)
+			goto out;
+
 		size += ret;
 	}
 
@@ -1021,6 +1027,7 @@
 		}
 	}
 
+out:
 	mlx4_free_cmd_mailbox(dev, mailbox);
 
 	return ret;
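
The mcg.c rework above is mostly about the exit path: the new QP-existence check and the existing parse failures now all funnel through one out: label, so the command mailbox is freed on every path instead of only on some of them. The general idiom, with hypothetical alloc_mbox()/do_step()/free_mbox() helpers:

struct mbox;
struct mbox *alloc_mbox(void);			/* hypothetical helpers */
void free_mbox(struct mbox *m);
bool precondition_holds(void);
int do_step(struct mbox *m);

static int do_with_mailbox(void)
{
	struct mbox *m;
	int ret;

	m = alloc_mbox();
	if (!m)
		return -ENOMEM;		/* nothing to unwind yet */

	if (!precondition_holds()) {	/* e.g. "does this QP exist?" */
		ret = -EINVAL;
		goto out;
	}

	ret = do_step(m);		/* any error simply falls through */
out:
	free_mbox(m);			/* single place that releases the resource */
	return ret;
}
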
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index df0f396..247d340 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -141,6 +141,9 @@
 #define MLX4_EN_TX_COAL_PKTS	16
 #define MLX4_EN_TX_COAL_TIME	0x10
 
+#define MLX4_EN_MAX_COAL_PKTS	U16_MAX
+#define MLX4_EN_MAX_COAL_TIME	U16_MAX
+
 #define MLX4_EN_RX_RATE_LOW		400000
 #define MLX4_EN_RX_COAL_TIME_LOW	0
 #define MLX4_EN_RX_RATE_HIGH		450000
@@ -472,6 +475,7 @@
 #define MLX4_EN_BW_MIN 1
 #define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */
 
+#define MLX4_EN_TC_VENDOR 0
 #define MLX4_EN_TC_ETS 7
 
 enum dcb_pfc_type {
@@ -542,8 +546,8 @@
 	u16 rx_usecs_low;
 	u32 pkt_rate_high;
 	u16 rx_usecs_high;
-	u16 sample_interval;
-	u16 adaptive_rx_coal;
+	u32 sample_interval;
+	u32 adaptive_rx_coal;
 	u32 msg_enable;
 	u32 loopback_ok;
 	u32 validate_loopback;
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 6143113..474ff36 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -387,6 +387,19 @@
 		__mlx4_qp_free_icm(dev, qpn);
 }
 
+struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
+{
+	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
+	struct mlx4_qp *qp;
+
+	spin_lock(&qp_table->lock);
+
+	qp = __mlx4_qp_lookup(dev, qpn);
+
+	spin_unlock(&qp_table->lock);
+	return qp;
+}
+
 int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -474,6 +487,12 @@
 	}
 
 	if (attr & MLX4_UPDATE_QP_QOS_VPORT) {
+		if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) {
+			mlx4_warn(dev, "Granular QoS per VF is not enabled\n");
+			err = -EOPNOTSUPP;
+			goto out;
+		}
+
 		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP;
 		cmd->qp_context.qos_vport = params->qos_vport;
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 1822382..d6b06be 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -5048,6 +5048,7 @@
 						 &tracker->res_tree[RES_FS_RULE]);
 					list_del(&fs_rule->com.list);
 					spin_unlock_irq(mlx4_tlock(dev));
+					kfree(fs_rule->mirr_mbox);
 					kfree(fs_rule);
 					state = 0;
 					break;
@@ -5214,6 +5215,13 @@
 	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
 }
 
+static void update_qos_vpp(struct mlx4_update_qp_context *ctx,
+			   struct mlx4_vf_immed_vlan_work *work)
+{
+	ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP);
+	ctx->qp_context.qos_vport = work->qos_vport;
+}
+
 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
 {
 	struct mlx4_vf_immed_vlan_work *work =
@@ -5328,11 +5336,10 @@
 					qp->sched_queue & 0xC7;
 				upd_context->qp_context.pri_path.sched_queue |=
 					((work->qos & 0x7) << 3);
-				upd_context->qp_mask |=
-					cpu_to_be64(1ULL <<
-						    MLX4_UPD_QP_MASK_QOS_VPP);
-				upd_context->qp_context.qos_vport =
-					work->qos_vport;
+
+				if (dev->caps.flags2 &
+				    MLX4_DEV_CAP_FLAG2_QOS_VPP)
+					update_qos_vpp(upd_context, work);
 			}
 
 			err = mlx4_cmd(dev, mailbox->dma,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 4c3f1cb..6631fb0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1765,7 +1765,7 @@
 
 	cmd->checksum_disabled = 1;
 	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
-	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;
+	cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;
 
 	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
 	if (cmd->cmdif_rev > CMD_IF_REV) {
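
The one-character cmd.c fix above is about integer width: max_reg_cmds comes from hardware and can reach 31, and for a 32-bit signed int the expression 1 << 31 is already undefined behaviour, so the mask has to be built from an unsigned, wide-enough constant. A minimal illustration:

/* Well-defined for any n below the width of unsigned long. */
static unsigned long n_bit_mask(unsigned int n)
{
	return (1UL << n) - 1;	/* "1 << n" with a plain int is UB once n == 31 */
}
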
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 38981db..2d235e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2741,6 +2741,9 @@
 
 	mutex_unlock(&priv->state_lock);
 
+	if (mlx5e_vxlan_allowed(priv->mdev))
+		udp_tunnel_get_rx_info(netdev);
+
 	return err;
 }
 
@@ -3785,13 +3788,6 @@
 	if (netdev->reg_state != NETREG_REGISTERED)
 		return;
 
-	/* Device already registered: sync netdev system state */
-	if (mlx5e_vxlan_allowed(mdev)) {
-		rtnl_lock();
-		udp_tunnel_get_rx_info(netdev);
-		rtnl_unlock();
-	}
-
 	queue_work(priv->wq, &priv->set_rx_mode_work);
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index a8966e6..5d6eab1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1924,26 +1924,35 @@
 	memset(vf_stats, 0, sizeof(*vf_stats));
 	vf_stats->rx_packets =
 		MLX5_GET_CTR(out, received_eth_unicast.packets) +
+		MLX5_GET_CTR(out, received_ib_unicast.packets) +
 		MLX5_GET_CTR(out, received_eth_multicast.packets) +
+		MLX5_GET_CTR(out, received_ib_multicast.packets) +
 		MLX5_GET_CTR(out, received_eth_broadcast.packets);
 
 	vf_stats->rx_bytes =
 		MLX5_GET_CTR(out, received_eth_unicast.octets) +
+		MLX5_GET_CTR(out, received_ib_unicast.octets) +
 		MLX5_GET_CTR(out, received_eth_multicast.octets) +
+		MLX5_GET_CTR(out, received_ib_multicast.octets) +
 		MLX5_GET_CTR(out, received_eth_broadcast.octets);
 
 	vf_stats->tx_packets =
 		MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
+		MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
 		MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
+		MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
 		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
 
 	vf_stats->tx_bytes =
 		MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
+		MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
 		MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
+		MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
 		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
 
 	vf_stats->multicast =
-		MLX5_GET_CTR(out, received_eth_multicast.packets);
+		MLX5_GET_CTR(out, received_eth_multicast.packets) +
+		MLX5_GET_CTR(out, received_ib_multicast.packets);
 
 	vf_stats->broadcast =
 		MLX5_GET_CTR(out, received_eth_broadcast.packets);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 331a6ca..5f3402b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -153,6 +153,7 @@
 static void del_flow_table(struct fs_node *node);
 static void del_flow_group(struct fs_node *node);
 static void del_fte(struct fs_node *node);
+static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
 
 static void tree_init_node(struct fs_node *node,
 			   unsigned int refcount,
@@ -1690,24 +1691,28 @@
 
 static int init_root_ns(struct mlx5_flow_steering *steering)
 {
+	int err;
 
 	steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
 	if (!steering->root_ns)
-		goto cleanup;
+		return -ENOMEM;
 
-	if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node))
-		goto cleanup;
+	err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
+	if (err)
+		goto out_err;
 
 	set_prio_attrs(steering->root_ns);
 
-	if (create_anchor_flow_table(steering))
-		goto cleanup;
+	err = create_anchor_flow_table(steering);
+	if (err)
+		goto out_err;
 
 	return 0;
 
-cleanup:
-	mlx5_cleanup_fs(steering->dev);
-	return -ENOMEM;
+out_err:
+	cleanup_root_ns(steering->root_ns);
+	steering->root_ns = NULL;
+	return err;
 }
 
 static void clean_tree(struct fs_node *node)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 981cd1d..3c183b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -548,7 +548,6 @@
 	struct mlx5_priv *priv  = &mdev->priv;
 	struct msix_entry *msix = priv->msix_arr;
 	int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
-	int err;
 
 	if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
 		mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
@@ -558,18 +557,11 @@
 	cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
 			priv->irq_info[i].mask);
 
-	err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
-	if (err) {
-		mlx5_core_warn(mdev, "irq_set_affinity_hint failed,irq 0x%.4x",
-			       irq);
-		goto err_clear_mask;
-	}
+	if (IS_ENABLED(CONFIG_SMP) &&
+	    irq_set_affinity_hint(irq, priv->irq_info[i].mask))
+		mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
 
 	return 0;
-
-err_clear_mask:
-	free_cpumask_var(priv->irq_info[i].mask);
-	return err;
 }
 
 static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index bea9ae3..60e1edc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1448,8 +1448,7 @@
 	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
 				      adding, true);
 	if (err) {
-		if (net_ratelimit())
-			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
+		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
 		return;
 	}
 
@@ -1509,8 +1508,7 @@
 	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
 					  adding, true);
 	if (err) {
-		if (net_ratelimit())
-			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
+		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
 		return;
 	}
 
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 4ca82bd..eee6e59 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -854,6 +854,8 @@
 
 	netdev_tx_sent_queue(nd_q, txbuf->real_len);
 
+	skb_tx_timestamp(skb);
+
 	tx_ring->wr_p += nr_frags + 1;
 	if (nfp_net_tx_ring_should_stop(tx_ring))
 		nfp_net_tx_ring_stop(nd_q, tx_ring);
@@ -866,8 +868,6 @@
 		tx_ring->wr_ptr_add = 0;
 	}
 
-	skb_tx_timestamp(skb);
-
 	return NETDEV_TX_OK;
 
 err_unmap:
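
The nfp change above moves skb_tx_timestamp() ahead of the point where the frame becomes visible to the device: once the ring pointer is advanced, the completion path may free the skb at any moment, so the timestamp hook has to run first. A sketch of the ordering in a generic ndo_start_xmit(), where queue_descriptors() and ring_doorbell() are hypothetical ring helpers:

void queue_descriptors(struct net_device *dev, struct sk_buff *skb);	/* hypothetical */
void ring_doorbell(struct net_device *dev);				/* hypothetical */

static netdev_tx_t xmit_one(struct sk_buff *skb, struct net_device *dev)
{
	queue_descriptors(dev, skb);	/* map buffers, fill the TX ring */

	skb_tx_timestamp(skb);		/* must run while the skb is still ours */

	ring_doorbell(dev);		/* after this the device may complete and free the skb */
	return NETDEV_TX_OK;
}
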
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index b8d5270..e306765 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -247,7 +247,7 @@
 	cmd.req.arg3 = 0;
 
 	if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
-		netxen_issue_cmd(adapter, &cmd);
+		rcode = netxen_issue_cmd(adapter, &cmd);
 
 	if (rcode != NX_RCODE_SUCCESS)
 		return -EIO;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index afe5e57..d023137 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -850,7 +850,7 @@
 						   NULL) +
 		       qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
 						   NULL);
-	norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, 4096);
+	norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, PAGE_SIZE);
 	min_addr_reg1 = norm_regsize / 4096;
 	pwm_regsize = db_bar_size - norm_regsize;
 
@@ -1628,6 +1628,9 @@
 		DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp);
 	}
 
+	p_hwfn->mcp_info->link_capabilities.default_speed_autoneg =
+		link->speed.autoneg;
+
 	link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
 	link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
 	link->pause.autoneg = !!(link_temp &
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index dba3fbe..0b949c6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1240,7 +1240,7 @@
 
 	/* TODO - at the moment assume supported and advertised speed equal */
 	if_link->supported_caps = QED_LM_FIBRE_BIT;
-	if (params.speed.autoneg)
+	if (link_caps.default_speed_autoneg)
 		if_link->supported_caps |= QED_LM_Autoneg_BIT;
 	if (params.pause.autoneg ||
 	    (params.pause.forced_rx && params.pause.forced_tx))
@@ -1250,6 +1250,10 @@
 		if_link->supported_caps |= QED_LM_Pause_BIT;
 
 	if_link->advertised_caps = if_link->supported_caps;
+	if (params.speed.autoneg)
+		if_link->advertised_caps |= QED_LM_Autoneg_BIT;
+	else
+		if_link->advertised_caps &= ~QED_LM_Autoneg_BIT;
 	if (params.speed.advertised_speeds &
 	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
 		if_link->advertised_caps |= QED_LM_1000baseT_Half_BIT |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index dff520e..7b7a84d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -35,6 +35,7 @@
 
 struct qed_mcp_link_capabilities {
 	u32 speed_capabilities;
+	bool default_speed_autoneg;
 };
 
 struct qed_mcp_link_state {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 509b596..bd1ec70 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -341,7 +341,7 @@
 			}
 			return -EIO;
 		}
-		usleep_range(1000, 1500);
+		udelay(1200);
 	}
 
 	if (id_reg)
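
The qlcnic change above trades usleep_range() for udelay() because this polling loop can be reached from atomic context: usleep_range() sleeps and must not be called with a spinlock held or interrupts disabled, whereas udelay() busy-waits. A sketch of an atomic-safe poll loop, with a hypothetical register layout:

static int wait_for_ready(void __iomem *reg)
{
	int retries = 30;

	do {
		if (readl(reg) & BIT(0))	/* hypothetical "ready" bit */
			return 0;
		udelay(1200);			/* busy-wait: safe in atomic context */
	} while (--retries);

	return -EIO;
}
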
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index be258d9..e3223f2 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -765,7 +765,7 @@
 		sizeof(struct mpi_coredump_global_header);
 	mpi_coredump->mpi_global_header.imageSize =
 		sizeof(struct ql_mpi_coredump);
-	memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+	strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
 		sizeof(mpi_coredump->mpi_global_header.idString));
 
 	/* Get generic NIC reg dump */
@@ -1255,7 +1255,7 @@
 		sizeof(struct mpi_coredump_global_header);
 	mpi_coredump->mpi_global_header.imageSize =
 		sizeof(struct ql_reg_dump);
-	memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+	strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
 		sizeof(mpi_coredump->mpi_global_header.idString));
 
 
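
The qlge change above matters because the source is a 13-byte string literal while the destination field is larger: memcpy() with sizeof(dest) reads past the end of the literal, whereas strncpy() stops at the terminator and zero-fills the rest of the field. Illustrated against a hypothetical 16-byte ID field:

struct coredump_header {
	char id_string[16];
};

static void fill_id(struct coredump_header *h)
{
	/* memcpy(h->id_string, "MPI Coredump", sizeof(h->id_string));
	 * would read 16 bytes from a 13-byte object -- out of bounds.
	 */
	strncpy(h->id_string, "MPI Coredump", sizeof(h->id_string));
}
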
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index f683bfb..9d223ff 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -1250,9 +1250,9 @@
 	while (tx_q->tpd.consume_idx != hw_consume_idx) {
 		tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.consume_idx);
 		if (tpbuf->dma_addr) {
-			dma_unmap_single(adpt->netdev->dev.parent,
-					 tpbuf->dma_addr, tpbuf->length,
-					 DMA_TO_DEVICE);
+			dma_unmap_page(adpt->netdev->dev.parent,
+				       tpbuf->dma_addr, tpbuf->length,
+				       DMA_TO_DEVICE);
 			tpbuf->dma_addr = 0;
 		}
 
@@ -1409,9 +1409,11 @@
 
 		tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
 		tpbuf->length = mapped_len;
-		tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent,
-						 skb->data, tpbuf->length,
-						 DMA_TO_DEVICE);
+		tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
+					       virt_to_page(skb->data),
+					       offset_in_page(skb->data),
+					       tpbuf->length,
+					       DMA_TO_DEVICE);
 		ret = dma_mapping_error(adpt->netdev->dev.parent,
 					tpbuf->dma_addr);
 		if (ret)
@@ -1427,9 +1429,12 @@
 	if (mapped_len < len) {
 		tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
 		tpbuf->length = len - mapped_len;
-		tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent,
-						 skb->data + mapped_len,
-						 tpbuf->length, DMA_TO_DEVICE);
+		tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
+					       virt_to_page(skb->data +
+							    mapped_len),
+					       offset_in_page(skb->data +
+							      mapped_len),
+					       tpbuf->length, DMA_TO_DEVICE);
 		ret = dma_mapping_error(adpt->netdev->dev.parent,
 					tpbuf->dma_addr);
 		if (ret)
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 6e2add9..8bbb55f 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -296,8 +296,9 @@
 
 	/* Allocate rx SKB if we don't have one available. */
 	if (!qca->rx_skb) {
-		qca->rx_skb = netdev_alloc_skb(net_dev,
-					       net_dev->mtu + VLAN_ETH_HLEN);
+		qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
+							net_dev->mtu +
+							VLAN_ETH_HLEN);
 		if (!qca->rx_skb) {
 			netdev_dbg(net_dev, "out of RX resources\n");
 			qca->stats.out_of_mem++;
@@ -377,7 +378,7 @@
 					qca->rx_skb, qca->rx_skb->dev);
 				qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
 				netif_rx_ni(qca->rx_skb);
-				qca->rx_skb = netdev_alloc_skb(net_dev,
+				qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
 					net_dev->mtu + VLAN_ETH_HLEN);
 				if (!qca->rx_skb) {
 					netdev_dbg(net_dev, "out of RX resources\n");
@@ -759,7 +760,8 @@
 	if (!qca->rx_buffer)
 		return -ENOBUFS;
 
-	qca->rx_skb = netdev_alloc_skb(dev, qca->net_dev->mtu + VLAN_ETH_HLEN);
+	qca->rx_skb = netdev_alloc_skb_ip_align(dev, qca->net_dev->mtu +
+						VLAN_ETH_HLEN);
 	if (!qca->rx_skb) {
 		kfree(qca->rx_buffer);
 		netdev_info(qca->net_dev, "Failed to allocate RX sk_buff.\n");
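
netdev_alloc_skb_ip_align(), adopted throughout the qca_spi hunks above, is shorthand for allocating the buffer NET_IP_ALIGN bytes larger and reserving that headroom, so the 14-byte Ethernet header leaves the IP header naturally aligned. A rough expansion of what the helper does:

static struct sk_buff *rx_alloc(struct net_device *dev, unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);

	if (skb)
		skb_reserve(skb, NET_IP_ALIGN);	/* typically 2 bytes of headroom */
	return skb;
}
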
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index da4c2d8..1420dfb 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2233,7 +2233,7 @@
 	struct rtl8139_private *tp = netdev_priv(dev);
 	const int irq = tp->pci_dev->irq;
 
-	disable_irq(irq);
+	disable_irq_nosync(irq);
 	rtl8139_interrupt(irq, dev);
 	enable_irq(irq);
 }
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 18e68c9..59b932d 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4861,6 +4861,9 @@
 static void rtl_pll_power_up(struct rtl8169_private *tp)
 {
 	rtl_generic_op(tp, tp->pll_power_ops.up);
+
+	/* give MAC/PHY some time to resume */
+	msleep(20);
 }
 
 static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
@@ -8446,12 +8449,12 @@
 		goto err_out_msi_5;
 	}
 
+	pci_set_drvdata(pdev, dev);
+
 	rc = register_netdev(dev);
 	if (rc < 0)
 		goto err_out_cnt_6;
 
-	pci_set_drvdata(pdev, dev);
-
 	netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
 		   rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
 		   (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index b6816ae..c8fd99b 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3133,7 +3133,7 @@
 	/* MDIO bus init */
 	ret = sh_mdio_init(mdp, pd);
 	if (ret) {
-		dev_err(&ndev->dev, "failed to initialise MDIO\n");
+		dev_err(&pdev->dev, "failed to initialise MDIO\n");
 		goto out_release;
 	}
 
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 8b0016a..734caa7 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2330,14 +2330,14 @@
 	pdata = netdev_priv(dev);
 	BUG_ON(!pdata);
 	BUG_ON(!pdata->ioaddr);
-	WARN_ON(dev->phydev);
 
 	SMSC_TRACE(pdata, ifdown, "Stopping driver");
 
+	unregister_netdev(dev);
+
 	mdiobus_unregister(pdata->mii_bus);
 	mdiobus_free(pdata->mii_bus);
 
-	unregister_netdev(dev);
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 					   "smsc911x-memory");
 	if (!res)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index ffaed1f..f356a44 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -118,7 +118,7 @@
 	snprintf(clk_name, sizeof(clk_name), "%s#m250_sel", dev_name(dev));
 	init.name = clk_name;
 	init.ops = &clk_mux_ops;
-	init.flags = 0;
+	init.flags = CLK_SET_RATE_PARENT;
 	init.parent_names = mux_parent_names;
 	init.num_parents = MUX_CLK_NUM_PARENTS;
 
@@ -146,7 +146,9 @@
 	dwmac->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT;
 	dwmac->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH;
 	dwmac->m250_div.hw.init = &init;
-	dwmac->m250_div.flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO;
+	dwmac->m250_div.flags = CLK_DIVIDER_ONE_BASED |
+				CLK_DIVIDER_ALLOW_ZERO |
+				CLK_DIVIDER_ROUND_CLOSEST;
 
 	dwmac->m250_div_clk = devm_clk_register(dev, &dwmac->m250_div.hw);
 	if (WARN_ON(IS_ERR(dwmac->m250_div_clk)))
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 98bbb91..b3bc128 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -478,7 +478,10 @@
 			/* PTP v1, UDP, any kind of event packet */
 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
 			/* take time stamp for all event messages */
-			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+			if (priv->plat->has_gmac4)
+				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
+			else
+				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
 
 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
@@ -510,7 +513,10 @@
 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
 			ptp_v2 = PTP_TCR_TSVER2ENA;
 			/* take time stamp for all event messages */
-			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+			if (priv->plat->has_gmac4)
+				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
+			else
+				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
 
 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
@@ -544,7 +550,10 @@
 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 			ptp_v2 = PTP_TCR_TSVER2ENA;
 			/* take time stamp for all event messages */
-			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+			if (priv->plat->has_gmac4)
+				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
+			else
+				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
 
 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
@@ -1334,6 +1343,11 @@
 		if (unlikely(status & tx_dma_own))
 			break;
 
+		/* Make sure descriptor fields are read after reading
+		 * the own bit.
+		 */
+		dma_rmb();
+
 		/* Just consider the last segment and ...*/
 		if (likely(!(status & tx_not_ls))) {
 			/* ... verify the status error condition */
@@ -2127,8 +2141,15 @@
 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
 
 	/* If context desc is used to change MSS */
-	if (mss_desc)
+	if (mss_desc) {
+		/* Make sure that first descriptor has been completely
+		 * written, including its own bit. This is because MSS is
+		 * actually before first descriptor, so we need to make
+		 * sure that MSS's own bit is the last thing written.
+		 */
+		dma_wmb();
 		priv->hw->desc->set_tx_owner(mss_desc);
+	}
 
 	/* The own bit must be the latest setting done when prepare the
 	 * descriptor and then barrier is needed to make sure that
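
The two stmmac barriers above are the standard pairing for DMA descriptor rings: the consumer may not read descriptor fields until after it has observed the ownership bit clear (dma_rmb()), and the producer must make every other field visible before flipping the ownership bit over to the device (dma_wmb()). Sketched against a hypothetical descriptor layout:

struct ring_desc {
	u32 status;			/* DESC_OWN lives here */
	u32 buf_addr;
	u32 len;
};
#define DESC_OWN	BIT(31)

static bool reclaim(struct ring_desc *d, u32 *len)
{
	if (READ_ONCE(d->status) & DESC_OWN)
		return false;		/* device still owns the descriptor */

	dma_rmb();			/* order the status read before the field reads */
	*len = d->len;
	return true;
}

static void publish(struct ring_desc *d, u32 addr, u32 len)
{
	d->buf_addr = addr;
	d->len = len;

	dma_wmb();			/* fields visible before ownership flips */
	d->status |= DESC_OWN;		/* hand the descriptor to the device */
}
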
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index c06938c..174777c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -63,7 +63,8 @@
 /* Enable Snapshot for Messages Relevant to Master */
 #define	PTP_TCR_TSMSTRENA	BIT(15)
 /* Select PTP packets for Taking Snapshots */
-#define	PTP_TCR_SNAPTYPSEL_1	GENMASK(17, 16)
+#define	PTP_TCR_SNAPTYPSEL_1	BIT(16)
+#define	PTP_GMAC4_TCR_SNAPTYPSEL_1	GENMASK(17, 16)
 /* Enable MAC address for PTP Frame Filtering */
 #define	PTP_TCR_TSENMACADDR	BIT(18)
 
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index a2371aa..e45e2f1 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3442,7 +3442,7 @@
 
 		len = (val & RCR_ENTRY_L2_LEN) >>
 			RCR_ENTRY_L2_LEN_SHIFT;
-		len -= ETH_FCS_LEN;
+		append_size = len + ETH_HLEN + ETH_FCS_LEN;
 
 		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
 			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
@@ -3452,7 +3452,6 @@
 					 RCR_ENTRY_PKTBUFSZ_SHIFT];
 
 		off = addr & ~PAGE_MASK;
-		append_size = rcr_size;
 		if (num_rcr == 1) {
 			int ptype;
 
@@ -3465,7 +3464,7 @@
 			else
 				skb_checksum_none_assert(skb);
 		} else if (!(val & RCR_ENTRY_MULTI))
-			append_size = len - skb->len;
+			append_size = append_size - skb->len;
 
 		niu_rx_skb_append(skb, page, off, append_size, rcr_size);
 		if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index a2f9b47..e36c700 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -198,7 +198,7 @@
 	dev->ethtool_ops = &vnet_ethtool_ops;
 	dev->watchdog_timeo = VNET_TX_TIMEOUT;
 
-	dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE |
+	dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_ALL_TSO |
 			   NETIF_F_HW_CSUM | NETIF_F_SG;
 	dev->features = dev->hw_features;
 
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 2bd1282..d7cb205 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -124,7 +124,7 @@
 
 #define RX_PRIORITY_MAPPING	0x76543210
 #define TX_PRIORITY_MAPPING	0x33221100
-#define CPDMA_TX_PRIORITY_MAP	0x01234567
+#define CPDMA_TX_PRIORITY_MAP	0x76543210
 
 #define CPSW_VLAN_AWARE		BIT(1)
 #define CPSW_ALE_VLAN_AWARE	1
@@ -282,6 +282,10 @@
 /* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
 #define CPSW_V1_SEQ_ID_OFS_SHIFT	16
 
+#define CPSW_MAX_BLKS_TX		15
+#define CPSW_MAX_BLKS_TX_SHIFT		4
+#define CPSW_MAX_BLKS_RX		5
+
 struct cpsw_host_regs {
 	u32	max_blks;
 	u32	blk_cnt;
@@ -1137,6 +1141,8 @@
 	cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
 			   HOST_PORT_NUM, ALE_VLAN |
 			   ALE_SECURE, slave->port_vlan);
+	cpsw_ale_control_set(cpsw->ale, slave_port,
+			     ALE_PORT_DROP_UNKNOWN_VLAN, 1);
 }
 
 static void soft_reset_slave(struct cpsw_slave *slave)
@@ -1160,11 +1166,23 @@
 	switch (cpsw->version) {
 	case CPSW_VERSION_1:
 		slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
+		/* Increase RX FIFO size to 5 for supporting full-duplex
+		 * flow control mode
+		 */
+		slave_write(slave,
+			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
+			    CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
 		break;
 	case CPSW_VERSION_2:
 	case CPSW_VERSION_3:
 	case CPSW_VERSION_4:
 		slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
+		/* Increase RX FIFO size to 5 for supporting full-duplex
+		 * flow control mode
+		 */
+		slave_write(slave,
+			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
+			    CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
 		break;
 	}
 
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 3c1f89a..92ad43e 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -209,6 +209,7 @@
 	struct genevehdr *gnvh = geneve_hdr(skb);
 	struct metadata_dst *tun_dst = NULL;
 	struct pcpu_sw_netstats *stats;
+	unsigned int len;
 	int err = 0;
 	void *oiph;
 
@@ -222,8 +223,10 @@
 		tun_dst = udp_tun_rx_dst(skb, geneve_get_sk_family(gs), flags,
 					 vni_to_tunnel_id(gnvh->vni),
 					 gnvh->opt_len * 4);
-		if (!tun_dst)
+		if (!tun_dst) {
+			geneve->dev->stats.rx_dropped++;
 			goto drop;
+		}
 		/* Update tunnel dst according to Geneve options. */
 		ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
 					gnvh->options, gnvh->opt_len * 4);
@@ -231,8 +234,11 @@
 		/* Drop packets w/ critical options,
 		 * since we don't support any...
 		 */
-		if (gnvh->critical)
+		if (gnvh->critical) {
+			geneve->dev->stats.rx_frame_errors++;
+			geneve->dev->stats.rx_errors++;
 			goto drop;
+		}
 	}
 
 	skb_reset_mac_header(skb);
@@ -243,8 +249,10 @@
 		skb_dst_set(skb, &tun_dst->dst);
 
 	/* Ignore packet loops (and multicast echo) */
-	if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
+	if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) {
+		geneve->dev->stats.rx_errors++;
 		goto drop;
+	}
 
 	oiph = skb_network_header(skb);
 	skb_reset_network_header(skb);
@@ -276,13 +284,15 @@
 		}
 	}
 
-	stats = this_cpu_ptr(geneve->dev->tstats);
-	u64_stats_update_begin(&stats->syncp);
-	stats->rx_packets++;
-	stats->rx_bytes += skb->len;
-	u64_stats_update_end(&stats->syncp);
-
-	gro_cells_receive(&geneve->gro_cells, skb);
+	len = skb->len;
+	err = gro_cells_receive(&geneve->gro_cells, skb);
+	if (likely(err == NET_RX_SUCCESS)) {
+		stats = this_cpu_ptr(geneve->dev->tstats);
+		u64_stats_update_begin(&stats->syncp);
+		stats->rx_packets++;
+		stats->rx_bytes += len;
+		u64_stats_update_end(&stats->syncp);
+	}
 	return;
 drop:
 	/* Consume bad packet */
@@ -332,7 +342,7 @@
 	struct geneve_sock *gs;
 	int opts_len;
 
-	/* Need Geneve and inner Ethernet header to be present */
+	/* Need UDP and Geneve header to be present */
 	if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN)))
 		goto drop;
 
@@ -355,8 +365,10 @@
 	opts_len = geneveh->opt_len * 4;
 	if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
 				 htons(ETH_P_TEB),
-				 !net_eq(geneve->net, dev_net(geneve->dev))))
+				 !net_eq(geneve->net, dev_net(geneve->dev)))) {
+		geneve->dev->stats.rx_dropped++;
 		goto drop;
+	}
 
 	geneve_rx(geneve, gs, skb);
 	return 0;
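
Two details in the geneve receive path above are easy to miss: skb->len is sampled before the skb is handed to the GRO cell (which may free it), and rx_packets/rx_bytes are only bumped when gro_cells_receive() reports that the packet was actually accepted. A compressed sketch; the real driver uses per-CPU tstats, plain dev->stats is used here only for brevity:

static void count_rx(struct net_device *dev, struct gro_cells *cells,
		     struct sk_buff *skb)
{
	unsigned int len = skb->len;	/* the skb may be gone after receive */

	if (gro_cells_receive(cells, skb) == NET_RX_SUCCESS) {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
	}
	/* anything else means the packet was dropped further down the stack */
}
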
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 4bad0b8..27160d1 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -576,6 +576,8 @@
 	case HDLCDRVCTL_CALIBRATE:
 		if(!capable(CAP_SYS_RAWIO))
 			return -EPERM;
+		if (s->par.bitrate <= 0)
+			return -EINVAL;
 		if (bi.data.calibrate > INT_MAX / s->par.bitrate)
 			return -EINVAL;
 		s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
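
The extra hdlcdrv check above protects the very test that follows it: INT_MAX / s->par.bitrate is itself a division by zero while the user-configurable bitrate is still zero. The generic shape of the validation is divisor first, overflow test second:

static int scale_checked(int value, int rate)
{
	if (rate <= 0)
		return -EINVAL;			/* divisor must be positive */
	if (value < 0 || value > INT_MAX / rate)
		return -EINVAL;			/* value * rate would overflow */
	return value * rate / 16;
}
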
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 2caac0c..365a48c 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -742,7 +742,12 @@
 	macsec_fill_iv(iv, secy->sci, pn);
 
 	sg_init_table(sg, ret);
-	skb_to_sgvec(skb, sg, 0, skb->len);
+	ret = skb_to_sgvec(skb, sg, 0, skb->len);
+	if (unlikely(ret < 0)) {
+		macsec_txsa_put(tx_sa);
+		kfree_skb(skb);
+		return ERR_PTR(ret);
+	}
 
 	if (tx_sc->encrypt) {
 		int len = skb->len - macsec_hdr_len(sci_present) -
@@ -949,7 +954,11 @@
 	macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));
 
 	sg_init_table(sg, ret);
-	skb_to_sgvec(skb, sg, 0, skb->len);
+	ret = skb_to_sgvec(skb, sg, 0, skb->len);
+	if (unlikely(ret < 0)) {
+		kfree_skb(skb);
+		return ERR_PTR(ret);
+	}
 
 	if (hdr->tci_an & MACSEC_TCI_E) {
 		/* confidentiality: ethernet + macsec header
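
skb_to_sgvec() can return a negative errno (for instance when the skb cannot be represented in the destination scatterlist), so every caller has to check it; the macsec hunks above and the virtio_net hunks later in this series add exactly that. A minimal calling pattern, with the error policy (free the skb) mirroring the macsec path:

static int map_skb_to_sg(struct scatterlist *sg, int nents, struct sk_buff *skb)
{
	int ret;

	sg_init_table(sg, nents);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);		/* drop the packet, as macsec does */
		return ret;
	}
	return ret;			/* number of scatterlist entries used */
}
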
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index e8ad4d0..6237236 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1384,7 +1384,7 @@
 	/* the macvlan port may be freed by macvlan_uninit when fail to register.
 	 * so we destroy the macvlan port only when it's valid.
 	 */
-	if (create && macvlan_port_get_rtnl(dev))
+	if (create && macvlan_port_get_rtnl(lowerdev))
 		macvlan_port_destroy(port->dev);
 	return err;
 }
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index b88f7d6..482ea40 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1205,6 +1205,23 @@
 	kfree(dp83640);
 }
 
+static int dp83640_soft_reset(struct phy_device *phydev)
+{
+	int ret;
+
+	ret = genphy_soft_reset(phydev);
+	if (ret < 0)
+		return ret;
+
+	/* From DP83640 datasheet: "Software driver code must wait 3 us
+	 * following a software reset before allowing further serial MII
+	 * operations with the DP83640."
+	 */
+	udelay(10);		/* Taking udelay inaccuracy into account */
+
+	return 0;
+}
+
 static int dp83640_config_init(struct phy_device *phydev)
 {
 	struct dp83640_private *dp83640 = phydev->priv;
@@ -1498,6 +1515,7 @@
 	.flags		= PHY_HAS_INTERRUPT,
 	.probe		= dp83640_probe,
 	.remove		= dp83640_remove,
+	.soft_reset	= dp83640_soft_reset,
 	.config_init	= dp83640_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 963838d..599ce24 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -122,10 +122,9 @@
 	pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL);
 	if (pb == NULL) {
 		ret_val = -ENOMEM;
-		goto err_parent_bus;
+		goto err_pb_kz;
 	}
 
-
 	pb->switch_data = data;
 	pb->switch_fn = switch_fn;
 	pb->current_child = -1;
@@ -154,6 +153,7 @@
 		cb->mii_bus = mdiobus_alloc();
 		if (!cb->mii_bus) {
 			ret_val = -ENOMEM;
+			devm_kfree(dev, cb);
 			of_node_put(child_bus_node);
 			break;
 		}
@@ -170,7 +170,6 @@
 			mdiobus_free(cb->mii_bus);
 			devm_kfree(dev, cb);
 		} else {
-			of_node_get(child_bus_node);
 			cb->next = pb->children;
 			pb->children = cb;
 		}
@@ -181,9 +180,11 @@
 		return 0;
 	}
 
+	devm_kfree(dev, pb);
+err_pb_kz:
 	/* balance the reference of_mdio_find_bus() took */
-	put_device(&pb->mii_bus->dev);
-
+	if (!mux_bus)
+		put_device(&parent_bus->dev);
 err_parent_bus:
 	of_node_put(parent_bus_node);
 	return ret_val;
diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c
index 39be3b8..20fbcc9 100644
--- a/drivers/net/phy/mdio-xgene.c
+++ b/drivers/net/phy/mdio-xgene.c
@@ -314,6 +314,30 @@
 }
 #endif
 
+static const struct of_device_id xgene_mdio_of_match[] = {
+	{
+		.compatible = "apm,xgene-mdio-rgmii",
+		.data = (void *)XGENE_MDIO_RGMII
+	},
+	{
+		.compatible = "apm,xgene-mdio-xfi",
+		.data = (void *)XGENE_MDIO_XFI
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, xgene_mdio_of_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_mdio_acpi_match[] = {
+	{ "APMC0D65", XGENE_MDIO_RGMII },
+	{ "APMC0D66", XGENE_MDIO_XFI },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(acpi, xgene_mdio_acpi_match);
+#endif
+
+
 static int xgene_mdio_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -439,32 +463,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_OF
-static const struct of_device_id xgene_mdio_of_match[] = {
-	{
-		.compatible = "apm,xgene-mdio-rgmii",
-		.data = (void *)XGENE_MDIO_RGMII
-	},
-	{
-		.compatible = "apm,xgene-mdio-xfi",
-		.data = (void *)XGENE_MDIO_XFI
-	},
-	{},
-};
-
-MODULE_DEVICE_TABLE(of, xgene_mdio_of_match);
-#endif
-
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id xgene_mdio_acpi_match[] = {
-	{ "APMC0D65", XGENE_MDIO_RGMII },
-	{ "APMC0D66", XGENE_MDIO_XFI },
-	{ }
-};
-
-MODULE_DEVICE_TABLE(acpi, xgene_mdio_acpi_match);
-#endif
-
 static struct platform_driver xgene_mdio_driver = {
 	.driver = {
 		.name = "xgene-mdio",
diff --git a/drivers/net/phy/mdio-xgene.h b/drivers/net/phy/mdio-xgene.h
index 354241b..594a11d 100644
--- a/drivers/net/phy/mdio-xgene.h
+++ b/drivers/net/phy/mdio-xgene.h
@@ -132,10 +132,6 @@
 #define GET_BIT(field, src) \
 		xgene_enet_get_field_value(field ## _POS, 1, src)
 
-static const struct of_device_id xgene_mdio_of_match[];
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id xgene_mdio_acpi_match[];
-#endif
 int xgene_mdio_rgmii_read(struct mii_bus *bus, int phy_id, int reg);
 int xgene_mdio_rgmii_write(struct mii_bus *bus, int phy_id, int reg, u16 data);
 struct phy_device *xgene_enet_phy_register(struct mii_bus *bus, int phy_addr);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index e2d9ca6..4d21764 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -148,6 +148,12 @@
 	if (phydev->drv->aneg_done)
 		return phydev->drv->aneg_done(phydev);
 
+	/* Avoid genphy_aneg_done() if the Clause 45 PHY does not
+	 * implement Clause 22 registers
+	 */
+	if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
+		return -EINVAL;
+
 	return genphy_aneg_done(phydev);
 }
 
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index dc36c2e..fa2c7bd 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -620,6 +620,10 @@
 	lock_sock(sk);
 
 	error = -EINVAL;
+
+	if (sockaddr_len != sizeof(struct sockaddr_pppox))
+		goto end;
+
 	if (sp->sa_protocol != PX_PROTO_OE)
 		goto end;
 
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 1951b10..3045c96 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -465,7 +465,6 @@
 	po->chan.mtu = dst_mtu(&rt->dst);
 	if (!po->chan.mtu)
 		po->chan.mtu = PPP_MRU;
-	ip_rt_put(rt);
 	po->chan.mtu -= PPTP_HEADER_OVERHEAD;
 
 	po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header);
diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
index 27ed252..cfd81eb 100644
--- a/drivers/net/slip/slhc.c
+++ b/drivers/net/slip/slhc.c
@@ -509,6 +509,10 @@
 		if(x < 0 || x > comp->rslot_limit)
 			goto bad;
 
+		/* Check if the cstate is initialized */
+		if (!comp->rstate[x].initialized)
+			goto bad;
+
 		comp->flags &=~ SLF_TOSS;
 		comp->recv_current = x;
 	} else {
@@ -673,6 +677,7 @@
 	if (cs->cs_tcp.doff > 5)
 	  memcpy(cs->cs_tcpopt, icp + ihl*4 + sizeof(struct tcphdr), (cs->cs_tcp.doff - 5) * 4);
 	cs->cs_hsize = ihl*2 + cs->cs_tcp.doff*2;
+	cs->initialized = true;
 	/* Put headers back on packet
 	 * Neither header checksum is recalculated
 	 */
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index a0a9c9d..3696368 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -261,6 +261,17 @@
 	}
 }
 
+static bool __team_option_inst_tmp_find(const struct list_head *opts,
+					const struct team_option_inst *needle)
+{
+	struct team_option_inst *opt_inst;
+
+	list_for_each_entry(opt_inst, opts, tmp_list)
+		if (opt_inst == needle)
+			return true;
+	return false;
+}
+
 static int __team_options_register(struct team *team,
 				   const struct team_option *option,
 				   size_t option_count)
@@ -1067,14 +1078,11 @@
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+static int __team_port_enable_netpoll(struct team_port *port)
 {
 	struct netpoll *np;
 	int err;
 
-	if (!team->dev->npinfo)
-		return 0;
-
 	np = kzalloc(sizeof(*np), GFP_KERNEL);
 	if (!np)
 		return -ENOMEM;
@@ -1088,6 +1096,14 @@
 	return err;
 }
 
+static int team_port_enable_netpoll(struct team_port *port)
+{
+	if (!port->team->dev->npinfo)
+		return 0;
+
+	return __team_port_enable_netpoll(port);
+}
+
 static void team_port_disable_netpoll(struct team_port *port)
 {
 	struct netpoll *np = port->np;
@@ -1102,7 +1118,7 @@
 	kfree(np);
 }
 #else
-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+static int team_port_enable_netpoll(struct team_port *port)
 {
 	return 0;
 }
@@ -1203,11 +1219,6 @@
 		goto err_dev_open;
 	}
 
-	netif_addr_lock_bh(dev);
-	dev_uc_sync_multiple(port_dev, dev);
-	dev_mc_sync_multiple(port_dev, dev);
-	netif_addr_unlock_bh(dev);
-
 	err = vlan_vids_add_by_dev(port_dev, dev);
 	if (err) {
 		netdev_err(dev, "Failed to add vlan ids to device %s\n",
@@ -1215,7 +1226,7 @@
 		goto err_vids_add;
 	}
 
-	err = team_port_enable_netpoll(team, port);
+	err = team_port_enable_netpoll(port);
 	if (err) {
 		netdev_err(dev, "Failed to enable netpoll on device %s\n",
 			   portname);
@@ -1247,6 +1258,11 @@
 		goto err_option_port_add;
 	}
 
+	netif_addr_lock_bh(dev);
+	dev_uc_sync_multiple(port_dev, dev);
+	dev_mc_sync_multiple(port_dev, dev);
+	netif_addr_unlock_bh(dev);
+
 	port->index = -1;
 	list_add_tail_rcu(&port->list, &team->port_list);
 	team_port_enable(team, port);
@@ -1271,8 +1287,6 @@
 	vlan_vids_del_by_dev(port_dev, dev);
 
 err_vids_add:
-	dev_uc_unsync(port_dev, dev);
-	dev_mc_unsync(port_dev, dev);
 	dev_close(port_dev);
 
 err_dev_open:
@@ -1910,7 +1924,7 @@
 
 	mutex_lock(&team->lock);
 	list_for_each_entry(port, &team->port_list, list) {
-		err = team_port_enable_netpoll(team, port);
+		err = __team_port_enable_netpoll(port);
 		if (err) {
 			__team_netpoll_cleanup(team);
 			break;
@@ -2571,6 +2585,14 @@
 			if (err)
 				goto team_put;
 			opt_inst->changed = true;
+
+			/* dumb/evil user-space can send us duplicate opt,
+			 * keep only the last one
+			 */
+			if (__team_option_inst_tmp_find(&opt_inst_list,
+							opt_inst))
+				continue;
+
 			list_add(&opt_inst->tmp_list, &opt_inst_list);
 		}
 		if (!opt_found) {
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 1fca002..99424c8 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -530,6 +530,7 @@
 #define REALTEK_VENDOR_ID	0x0bda
 #define SAMSUNG_VENDOR_ID	0x04e8
 #define LENOVO_VENDOR_ID	0x17ef
+#define LINKSYS_VENDOR_ID	0x13b1
 #define NVIDIA_VENDOR_ID	0x0955
 #define HP_VENDOR_ID		0x03f0
 
@@ -719,6 +720,15 @@
 	.driver_info = 0,
 },
 
+#if IS_ENABLED(CONFIG_USB_RTL8152)
+/* Linksys USB3GIGV1 Ethernet Adapter */
+{
+	USB_DEVICE_AND_INTERFACE_INFO(LINKSYS_VENDOR_ID, 0x0041, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+	.driver_info = 0,
+},
+#endif
+
 /* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
 {
 	USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM,
@@ -774,6 +784,12 @@
 				      USB_CDC_PROTO_NONE),
 	.driver_info = (unsigned long)&wwan_info,
 }, {
+	/* Cinterion AHS3 modem by GEMALTO */
+	USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0055, USB_CLASS_COMM,
+				      USB_CDC_SUBCLASS_ETHERNET,
+				      USB_CDC_PROTO_NONE),
+	.driver_info = (unsigned long)&wwan_info,
+}, {
 	/* Telit modules */
 	USB_VENDOR_AND_INTERFACE_INFO(0x1bc7, USB_CLASS_COMM,
 			USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index dc6d3b0..feb61ea 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1118,6 +1118,7 @@
 	u16 n = 0, index, ndplen;
 	u8 ready2send = 0;
 	u32 delayed_ndp_size;
+	size_t padding_count;
 
 	/* When our NDP gets written in cdc_ncm_ndp(), then skb_out->len gets updated
 	 * accordingly. Otherwise, we should check here.
@@ -1274,11 +1275,13 @@
 	 * a ZLP after full sized NTBs.
 	 */
 	if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
-	    skb_out->len > ctx->min_tx_pkt)
-		memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0,
-		       ctx->tx_max - skb_out->len);
-	else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0)
+	    skb_out->len > ctx->min_tx_pkt) {
+		padding_count = ctx->tx_max - skb_out->len;
+		memset(skb_put(skb_out, padding_count), 0, padding_count);
+	} else if (skb_out->len < ctx->tx_max &&
+		   (skb_out->len % dev->maxpacket) == 0) {
 		*skb_put(skb_out, 1) = 0;	/* force short packet */
+	}
 
 	/* set final frame length */
 	nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index c53385a..f5a9667 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -873,7 +873,8 @@
 			offset += 0x100;
 		else
 			ret = -EINVAL;
-		ret = lan78xx_read_raw_otp(dev, offset, length, data);
+		if (!ret)
+			ret = lan78xx_read_raw_otp(dev, offset, length, data);
 	}
 
 	return ret;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 973e90f..1d56c73 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -803,12 +803,16 @@
 	{QMI_FIXED_INTF(0x05c6, 0x9080, 8)},
 	{QMI_FIXED_INTF(0x05c6, 0x9083, 3)},
 	{QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x90b2, 3)},    /* ublox R410M */
 	{QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
 	{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
 	{QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)},	/* YUGA CLM920-NC5 */
 	{QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
 	{QMI_FIXED_INTF(0x12d1, 0x140c, 1)},	/* Huawei E173 */
 	{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)},	/* Huawei E1820 */
+	{QMI_FIXED_INTF(0x1435, 0xd181, 3)},	/* Wistron NeWeb D18Q1 */
+	{QMI_FIXED_INTF(0x1435, 0xd181, 4)},	/* Wistron NeWeb D18Q1 */
+	{QMI_FIXED_INTF(0x1435, 0xd181, 5)},	/* Wistron NeWeb D18Q1 */
 	{QMI_FIXED_INTF(0x16d8, 0x6003, 0)},	/* CMOTech 6003 */
 	{QMI_FIXED_INTF(0x16d8, 0x6007, 0)},	/* CMOTech CHE-628S */
 	{QMI_FIXED_INTF(0x16d8, 0x6008, 0)},	/* CMOTech CMU-301 */
@@ -885,6 +889,7 @@
 	{QMI_FIXED_INTF(0x19d2, 0x2002, 4)},	/* ZTE (Vodafone) K3765-Z */
 	{QMI_FIXED_INTF(0x2001, 0x7e19, 4)},	/* D-Link DWM-221 B1 */
 	{QMI_FIXED_INTF(0x2001, 0x7e35, 4)},	/* D-Link DWM-222 */
+	{QMI_FIXED_INTF(0x2020, 0x2033, 4)},	/* BroadMobi BM806U */
 	{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
 	{QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
 	{QMI_FIXED_INTF(0x1199, 0x68a2, 8)},	/* Sierra Wireless MC7710 in QMI mode */
@@ -941,6 +946,7 @@
 	{QMI_FIXED_INTF(0x413c, 0x81b6, 8)},	/* Dell Wireless 5811e */
 	{QMI_FIXED_INTF(0x413c, 0x81b6, 10)},	/* Dell Wireless 5811e */
 	{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},	/* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
+	{QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)},	/* HP lt4120 Snapdragon X5 LTE */
 	{QMI_FIXED_INTF(0x22de, 0x9061, 3)},	/* WeTelecom WPD-600N */
 	{QMI_FIXED_INTF(0x1e0e, 0x9001, 5)},	/* SIMCom 7230E */
 	{QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)},	/* Quectel EC25, EC20 R2.0  Mini PCIe */
@@ -1038,6 +1044,18 @@
 		id->driver_info = (unsigned long)&qmi_wwan_info;
 	}
 
+	/* There are devices where the same interface number can be
+	 * configured as different functions. We should only bind to
+	 * vendor specific functions when matching on interface number
+	 */
+	if (id->match_flags & USB_DEVICE_ID_MATCH_INT_NUMBER &&
+	    desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) {
+		dev_dbg(&intf->dev,
+			"Rejecting interface number match for class %02x\n",
+			desc->bInterfaceClass);
+		return -ENODEV;
+	}
+
 	/* Quectel EC20 quirk where we've QMI on interface 4 instead of 0 */
 	if (quectel_ec20_detected(intf) && desc->bInterfaceNumber == 0) {
 		dev_dbg(&intf->dev, "Quectel EC20 quirk, skipping interface 0\n");
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index b2d7c7e..d3d89b0 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -519,6 +519,7 @@
 #define VENDOR_ID_REALTEK		0x0bda
 #define VENDOR_ID_SAMSUNG		0x04e8
 #define VENDOR_ID_LENOVO		0x17ef
+#define VENDOR_ID_LINKSYS		0x13b1
 #define VENDOR_ID_NVIDIA		0x0955
 
 #define MCU_TYPE_PLA			0x0100
@@ -1692,7 +1693,7 @@
 
 		tx_data += len;
 		agg->skb_len += len;
-		agg->skb_num++;
+		agg->skb_num += skb_shinfo(skb)->gso_segs ?: 1;
 
 		dev_kfree_skb_any(skb);
 
@@ -4506,6 +4507,7 @@
 	{REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
 	{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x7205)},
 	{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x304f)},
+	{REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
 	{REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA,  0x09ff)},
 	{}
 };
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 4cb9b11..2cc0f28 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -957,10 +957,11 @@
 	/* it's racing here! */
 
 	ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
-	if (ret < 0)
+	if (ret < 0) {
 		netdev_warn(dev->net, "Error writing RFE_CTL\n");
-
-	return ret;
+		return ret;
+	}
+	return 0;
 }
 
 static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 1568aed..7118b82 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -529,7 +529,12 @@
 	hdr = skb_vnet_hdr(skb);
 	sg_init_table(rq->sg, 2);
 	sg_set_buf(rq->sg, hdr, vi->hdr_len);
-	skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
+
+	err = skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
+	if (unlikely(err < 0)) {
+		dev_kfree_skb(skb);
+		return err;
+	}
 
 	err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
 	if (err < 0)
@@ -831,7 +836,7 @@
 	struct virtio_net_hdr_mrg_rxbuf *hdr;
 	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
 	struct virtnet_info *vi = sq->vq->vdev->priv;
-	unsigned num_sg;
+	int num_sg;
 	unsigned hdr_len = vi->hdr_len;
 	bool can_push;
 
@@ -858,11 +863,16 @@
 	if (can_push) {
 		__skb_push(skb, hdr_len);
 		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
+		if (unlikely(num_sg < 0))
+			return num_sg;
 		/* Pull header back to avoid skew in tx bytes calculations. */
 		__skb_pull(skb, hdr_len);
 	} else {
 		sg_set_buf(sq->sg, hdr, hdr_len);
-		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
+		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
+		if (unlikely(num_sg < 0))
+			return num_sg;
+		num_sg++;
 	}
 	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
 }
@@ -1939,8 +1949,8 @@
 
 	/* Assume link up if device can't report link status,
 	   otherwise get link status from config. */
+	netif_carrier_off(dev);
 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
-		netif_carrier_off(dev);
 		schedule_work(&vi->config_work);
 	} else {
 		vi->status = VIRTIO_NET_S_LINK_UP;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 4afba17..c999b10 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -369,6 +369,11 @@
 
 	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
 	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
+		/* Prevent any &gdesc->tcd field from being (speculatively)
+		 * read before (&gdesc->tcd)->gen is read.
+		 */
+		dma_rmb();
+
 		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
 					       &gdesc->tcd), tq, adapter->pdev,
 					       adapter);
@@ -1099,6 +1104,11 @@
 		gdesc->txd.tci = skb_vlan_tag_get(skb);
 	}
 
+	/* Ensure that the write to (&gdesc->txd)->gen will be observed after
+	 * all other writes to &gdesc->txd.
+	 */
+	dma_wmb();
+
 	/* finally flips the GEN bit of the SOP desc. */
 	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
 						  VMXNET3_TXD_GEN);
@@ -1286,6 +1296,12 @@
 			 */
 			break;
 		}
+
+		/* Prevent any rcd field from being (speculatively) read before
+		 * rcd->gen is read.
+		 */
+		dma_rmb();
+
 		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
 		       rcd->rqID != rq->dataRingQid);
 		idx = rcd->rxdIdx;
@@ -1515,6 +1531,12 @@
 		ring->next2comp = idx;
 		num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
 		ring = rq->rx_ring + ring_idx;
+
+		/* Ensure that the writes to rxd->gen bits will be observed
+		 * after all other writes to rxd objects.
+		 */
+		dma_wmb();
+
 		while (num_to_alloc) {
 			vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
 					  &rxCmdDesc);
@@ -2675,7 +2697,7 @@
 /* ==================== initialization and cleanup routines ============ */
 
 static int
-vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
+vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter)
 {
 	int err;
 	unsigned long mmio_start, mmio_len;
@@ -2687,30 +2709,12 @@
 		return err;
 	}
 
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
-		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
-			dev_err(&pdev->dev,
-				"pci_set_consistent_dma_mask failed\n");
-			err = -EIO;
-			goto err_set_mask;
-		}
-		*dma64 = true;
-	} else {
-		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
-			dev_err(&pdev->dev,
-				"pci_set_dma_mask failed\n");
-			err = -EIO;
-			goto err_set_mask;
-		}
-		*dma64 = false;
-	}
-
 	err = pci_request_selected_regions(pdev, (1 << 2) - 1,
 					   vmxnet3_driver_name);
 	if (err) {
 		dev_err(&pdev->dev,
 			"Failed to request region for adapter: error %d\n", err);
-		goto err_set_mask;
+		goto err_enable_device;
 	}
 
 	pci_set_master(pdev);
@@ -2738,7 +2742,7 @@
 	iounmap(adapter->hw_addr0);
 err_ioremap:
 	pci_release_selected_regions(pdev, (1 << 2) - 1);
-err_set_mask:
+err_enable_device:
 	pci_disable_device(pdev);
 	return err;
 }
@@ -2962,6 +2966,11 @@
 	/* we need to enable NAPI, otherwise dev_close will deadlock */
 	for (i = 0; i < adapter->num_rx_queues; i++)
 		napi_enable(&adapter->rx_queue[i].napi);
+	/*
+	 * Need to clear the quiesce bit to ensure that vmxnet3_close
+	 * can quiesce the device properly
+	 */
+	clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
 	dev_close(adapter->netdev);
 }
 
@@ -3241,7 +3250,7 @@
 #endif
 	};
 	int err;
-	bool dma64 = false; /* stupid gcc */
+	bool dma64;
 	u32 ver;
 	struct net_device *netdev;
 	struct vmxnet3_adapter *adapter;
@@ -3287,6 +3296,24 @@
 	adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
 	adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
 
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
+			dev_err(&pdev->dev,
+				"pci_set_consistent_dma_mask failed\n");
+			err = -EIO;
+			goto err_set_mask;
+		}
+		dma64 = true;
+	} else {
+		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+			dev_err(&pdev->dev,
+				"pci_set_dma_mask failed\n");
+			err = -EIO;
+			goto err_set_mask;
+		}
+		dma64 = false;
+	}
+
 	spin_lock_init(&adapter->cmd_lock);
 	adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
 					     sizeof(struct vmxnet3_adapter),
@@ -3294,7 +3321,7 @@
 	if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
 		dev_err(&pdev->dev, "Failed to map dma\n");
 		err = -EFAULT;
-		goto err_dma_map;
+		goto err_set_mask;
 	}
 	adapter->shared = dma_alloc_coherent(
 				&adapter->pdev->dev,
@@ -3345,7 +3372,7 @@
 	}
 #endif /* VMXNET3_RSS */
 
-	err = vmxnet3_alloc_pci_resources(adapter, &dma64);
+	err = vmxnet3_alloc_pci_resources(adapter);
 	if (err < 0)
 		goto err_alloc_pci;
 
@@ -3487,7 +3514,7 @@
 err_alloc_shared:
 	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
 			 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
-err_dma_map:
+err_set_mask:
 	free_netdev(netdev);
 	return err;
 }
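
The vmxnet3 hunks above apply the standard descriptor-ring ordering rule: the consumer must not read a descriptor's fields until it has observed the gen (ownership) bit, and the producer must flip the gen bit only after every other field is visible to the device. A condensed sketch of that convention (the descriptor layout below is illustrative, not the vmxnet3 structures):

#include <linux/types.h>
#include <asm/barrier.h>

struct ring_desc {
	u32 addr;
	u32 len;
	u32 gen;	/* ownership/generation bit, written last */
};

/* Producer: fill the descriptor, then publish it by flipping gen. */
static void ring_publish(struct ring_desc *d, u32 addr, u32 len, u32 gen)
{
	d->addr = addr;
	d->len = len;
	dma_wmb();	/* order payload writes before the gen write */
	d->gen = gen;
}

/* Consumer: check gen first, then read the remaining fields. */
static bool ring_consume(struct ring_desc *d, u32 gen, u32 *len)
{
	if (d->gen != gen)
		return false;
	dma_rmb();	/* don't (speculatively) read fields before gen */
	*len = d->len;
	return true;
}
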
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 346e486..42c9480 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -585,13 +585,15 @@
 	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
 	if (unlikely(!neigh))
 		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
-	if (!IS_ERR(neigh))
+	if (!IS_ERR(neigh)) {
 		ret = dst_neigh_output(dst, neigh, skb);
+		rcu_read_unlock_bh();
+		return ret;
+	}
 
 	rcu_read_unlock_bh();
 err:
-	if (unlikely(ret < 0))
-		vrf_tx_error(skb->dev, skb);
+	vrf_tx_error(skb->dev, skb);
 	return ret;
 }
 
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 0f5dfb8..28afdf2 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -930,7 +930,7 @@
 			return false;
 
 		/* Don't migrate static entries, drop packets */
-		if (f->state & NUD_NOARP)
+		if (f->state & (NUD_PERMANENT | NUD_NOARP))
 			return true;
 
 		if (net_ratelimit())
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 6564753..a8bd68f 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -137,7 +137,7 @@
 	priv->tx_ring_size = TX_BD_RING_LEN;
 	/* Alloc Rx BD */
 	priv->rx_bd_base = dma_alloc_coherent(priv->dev,
-			RX_BD_RING_LEN * sizeof(struct qe_bd *),
+			RX_BD_RING_LEN * sizeof(struct qe_bd),
 			&priv->dma_rx_bd, GFP_KERNEL);
 
 	if (!priv->rx_bd_base) {
@@ -148,7 +148,7 @@
 
 	/* Alloc Tx BD */
 	priv->tx_bd_base = dma_alloc_coherent(priv->dev,
-			TX_BD_RING_LEN * sizeof(struct qe_bd *),
+			TX_BD_RING_LEN * sizeof(struct qe_bd),
 			&priv->dma_tx_bd, GFP_KERNEL);
 
 	if (!priv->tx_bd_base) {
@@ -158,7 +158,7 @@
 	}
 
 	/* Alloc parameter ram for ucc hdlc */
-	priv->ucc_pram_offset = qe_muram_alloc(sizeof(priv->ucc_pram),
+	priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
 				ALIGNMENT_OF_UCC_HDLC_PRAM);
 
 	if (priv->ucc_pram_offset < 0) {
@@ -295,11 +295,11 @@
 	qe_muram_free(priv->ucc_pram_offset);
 free_tx_bd:
 	dma_free_coherent(priv->dev,
-			  TX_BD_RING_LEN * sizeof(struct qe_bd *),
+			  TX_BD_RING_LEN * sizeof(struct qe_bd),
 			  priv->tx_bd_base, priv->dma_tx_bd);
 free_rx_bd:
 	dma_free_coherent(priv->dev,
-			  RX_BD_RING_LEN * sizeof(struct qe_bd *),
+			  RX_BD_RING_LEN * sizeof(struct qe_bd),
 			  priv->rx_bd_base, priv->dma_rx_bd);
 free_uccf:
 	ucc_fast_free(priv->uccf);
@@ -454,7 +454,7 @@
 static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
 {
 	struct net_device *dev = priv->ndev;
-	struct sk_buff *skb;
+	struct sk_buff *skb = NULL;
 	hdlc_device *hdlc = dev_to_hdlc(dev);
 	struct qe_bd *bd;
 	u32 bd_status;
@@ -688,7 +688,7 @@
 
 	if (priv->rx_bd_base) {
 		dma_free_coherent(priv->dev,
-				  RX_BD_RING_LEN * sizeof(struct qe_bd *),
+				  RX_BD_RING_LEN * sizeof(struct qe_bd),
 				  priv->rx_bd_base, priv->dma_rx_bd);
 
 		priv->rx_bd_base = NULL;
@@ -697,7 +697,7 @@
 
 	if (priv->tx_bd_base) {
 		dma_free_coherent(priv->dev,
-				  TX_BD_RING_LEN * sizeof(struct qe_bd *),
+				  TX_BD_RING_LEN * sizeof(struct qe_bd),
 				  priv->tx_bd_base, priv->dma_tx_bd);
 
 		priv->tx_bd_base = NULL;
@@ -1002,7 +1002,7 @@
 	struct device_node *np = pdev->dev.of_node;
 	struct ucc_hdlc_private *uhdlc_priv = NULL;
 	struct ucc_tdm_info *ut_info;
-	struct ucc_tdm *utdm;
+	struct ucc_tdm *utdm = NULL;
 	struct resource res;
 	struct net_device *dev;
 	hdlc_device *hdlc;
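
The sizeof() substitutions above fix a classic pitfall: the buffer-descriptor rings and their teardown paths were sized with sizeof(struct qe_bd *), the size of a pointer, instead of sizeof(struct qe_bd), the size of one descriptor, which under-allocates whenever a descriptor is larger than a pointer. A generic illustration with a hypothetical descriptor type:

#include <linux/types.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

struct my_bd {			/* hypothetical buffer descriptor */
	__be16 status;
	__be16 length;
	__be32 buf;
};

#define RING_LEN 32

/* sizeof(struct my_bd *) is 4 or 8 bytes no matter what the descriptor
 * looks like; only sizeof(struct my_bd) sizes the ring correctly.
 */
static void *alloc_bd_ring(struct device *dev, dma_addr_t *dma)
{
	return dma_alloc_coherent(dev, RING_LEN * sizeof(struct my_bd),
				  dma, GFP_KERNEL);
}
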
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
index 7d3231a..82bdec7 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.h
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -83,6 +83,8 @@
 #define BMI_NVRAM_SEG_NAME_SZ 16
 
 #define BMI_PARAM_GET_EEPROM_BOARD_ID 0x10
+#define BMI_PARAM_GET_FLASH_BOARD_ID 0x8000
+#define BMI_PARAM_FLASH_SECTION_ALL 0x10000
 
 #define ATH10K_BMI_BOARD_ID_FROM_OTP_MASK   0x7c00
 #define ATH10K_BMI_BOARD_ID_FROM_OTP_LSB    10
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 7b3017f..65ad7a1 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -652,7 +652,7 @@
 {
 	u32 result, address;
 	u8 board_id, chip_id;
-	int ret;
+	int ret, bmi_board_id_param;
 
 	address = ar->hw_params.patch_load_addr;
 
@@ -676,8 +676,13 @@
 		return ret;
 	}
 
-	ret = ath10k_bmi_execute(ar, address, BMI_PARAM_GET_EEPROM_BOARD_ID,
-				 &result);
+	if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT ||
+	    ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE)
+		bmi_board_id_param = BMI_PARAM_GET_FLASH_BOARD_ID;
+	else
+		bmi_board_id_param = BMI_PARAM_GET_EEPROM_BOARD_ID;
+
+	ret = ath10k_bmi_execute(ar, address, bmi_board_id_param, &result);
 	if (ret) {
 		ath10k_err(ar, "could not execute otp for board id check: %d\n",
 			   ret);
@@ -739,6 +744,11 @@
 		return ret;
 	}
 
+	/* As of now pre-cal is valid for 10_4 variants */
+	if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT ||
+	    ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE)
+		bmi_otp_exe_param = BMI_PARAM_FLASH_SECTION_ALL;
+
 	ret = ath10k_bmi_execute(ar, address, bmi_otp_exe_param, &result);
 	if (ret) {
 		ath10k_err(ar, "could not execute otp (%d)\n", ret);
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index a497bf3..d68f4f2 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -5819,9 +5819,8 @@
 				    sta->addr, smps, err);
 	}
 
-	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED ||
-	    changed & IEEE80211_RC_NSS_CHANGED) {
-		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n",
+	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
 			   sta->addr);
 
 		err = ath10k_station_assoc(ar, arvif->vif, sta, true);
@@ -6929,10 +6928,20 @@
 {
 	struct ath10k *ar = hw->priv;
 	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+	struct ath10k_vif *arvif = (void *)vif->drv_priv;
+	struct ath10k_peer *peer;
 	u32 bw, smps;
 
 	spin_lock_bh(&ar->data_lock);
 
+	peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
+	if (!peer) {
+		spin_unlock_bh(&ar->data_lock);
+		ath10k_warn(ar, "mac sta rc update failed to find peer %pM on vdev %i\n",
+			    sta->addr, arvif->vdev_id);
+		return;
+	}
+
 	ath10k_dbg(ar, ATH10K_DBG_MAC,
 		   "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
 		   sta->addr, changed, sta->bandwidth, sta->rx_nss,
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 4f8d9ed..4a10544 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -939,7 +939,10 @@
 	}
 
 	for (i = 0; i < eesize; ++i) {
-		AR5K_EEPROM_READ(i, val);
+		if (!ath5k_hw_nvram_read(ah, i, &val)) {
+			ret = -EIO;
+			goto freebuf;
+		}
 		buf[i] = val;
 	}
 
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index a35f78b..acef4ec9 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1603,6 +1603,10 @@
 	int count = 50;
 	u32 reg, last_val;
 
+	/* Check if chip failed to wake up */
+	if (REG_READ(ah, AR_CFG) == 0xdeadbeef)
+		return false;
+
 	if (AR_SREV_9300(ah))
 		return !ath9k_hw_detect_mac_hang(ah);
 
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index f7c63cf..f70420f 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -21,11 +21,16 @@
 #include "ftm.h"
 
 #define WIL_MAX_ROC_DURATION_MS 5000
+#define CTRY_CHINA "CN"
 
 bool disable_ap_sme;
 module_param(disable_ap_sme, bool, 0444);
 MODULE_PARM_DESC(disable_ap_sme, " let user space handle AP mode SME");
 
+static bool country_specific_board_file;
+module_param(country_specific_board_file, bool, 0444);
+MODULE_PARM_DESC(country_specific_board_file, " switch board file upon regulatory domain change (Default: false)");
+
 static bool ignore_reg_hints = true;
 module_param(ignore_reg_hints, bool, 0444);
 MODULE_PARM_DESC(ignore_reg_hints, " Ignore OTA regulatory hints (Default: true)");
@@ -1984,6 +1989,64 @@
 	return 0;
 }
 
+static int wil_switch_board_file(struct wil6210_priv *wil,
+				 const u8 *new_regdomain)
+{
+	int rc = 0;
+
+	if (!country_specific_board_file)
+		return 0;
+
+	if (memcmp(wil->regdomain, CTRY_CHINA, 2) == 0) {
+		wil_info(wil, "moving out of China reg domain, use default board file\n");
+		wil->board_file_country[0] = '\0';
+	} else if (memcmp(new_regdomain, CTRY_CHINA, 2) == 0) {
+		wil_info(wil, "moving into China reg domain, use country specific board file\n");
+		strlcpy(wil->board_file_country, CTRY_CHINA,
+			sizeof(wil->board_file_country));
+	} else {
+		return 0;
+	}
+
+	/* need to switch board file - reset the device */
+
+	mutex_lock(&wil->mutex);
+
+	if (!netif_running(wil_to_ndev(wil)) || wil_is_recovery_blocked(wil))
+		/* new board file will be used in next FW load */
+		goto out;
+
+	__wil_down(wil);
+	rc = __wil_up(wil);
+
+out:
+	mutex_unlock(&wil->mutex);
+	return rc;
+}
+
+static void wil_cfg80211_reg_notify(struct wiphy *wiphy,
+				    struct regulatory_request *request)
+{
+	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+	int rc;
+
+	wil_info(wil, "cfg reg_notify %c%c%s%s initiator %d hint_type %d\n",
+		 request->alpha2[0], request->alpha2[1],
+		 request->intersect ? " intersect" : "",
+		 request->processed ? " processed" : "",
+		 request->initiator, request->user_reg_hint_type);
+
+	if (memcmp(wil->regdomain, request->alpha2, 2) == 0)
+		/* reg domain did not change */
+		return;
+
+	rc = wil_switch_board_file(wil, request->alpha2);
+	if (rc)
+		wil_err(wil, "switch board file failed %d\n", rc);
+
+	memcpy(wil->regdomain, request->alpha2, 2);
+}
+
 static struct cfg80211_ops wil_cfg80211_ops = {
 	.add_virtual_intf = wil_cfg80211_add_iface,
 	.del_virtual_intf = wil_cfg80211_del_iface,
@@ -2055,6 +2118,8 @@
 	wiphy->mgmt_stypes = wil_mgmt_stypes;
 	wiphy->features |= NL80211_FEATURE_SK_TX_STATUS;
 
+	wiphy->reg_notifier = wil_cfg80211_reg_notify;
+
 	wiphy->n_vendor_commands = ARRAY_SIZE(wil_nl80211_vendor_commands);
 	wiphy->vendor_commands = wil_nl80211_vendor_commands;
 	wiphy->vendor_events = wil_nl80211_vendor_events;
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 2baa6cf..f37254d 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -26,6 +26,7 @@
 
 #define WAIT_FOR_HALP_VOTE_MS 100
 #define WAIT_FOR_SCAN_ABORT_MS 1000
+#define WIL_BOARD_FILE_MAX_NAMELEN 128
 
 bool debug_fw; /* = false; */
 module_param(debug_fw, bool, 0444);
@@ -946,6 +947,30 @@
 	le32_to_cpus(&r->head);
 }
 
+/* construct actual board file name to use */
+void wil_get_board_file(struct wil6210_priv *wil, char *buf, size_t len)
+{
+	const char *board_file = WIL_BOARD_FILE_NAME;
+	const char *ext;
+	int prefix_len;
+
+	if (wil->board_file_country[0] == '\0') {
+		strlcpy(buf, board_file, len);
+		return;
+	}
+
+	/* use country specific board file */
+	if (len < strlen(board_file) + 4 /* for _XX and terminating null */)
+		return;
+
+	ext = strrchr(board_file, '.');
+	prefix_len = (ext ? ext - board_file : strlen(board_file));
+	snprintf(buf, len, "%.*s_%.2s",
+		 prefix_len, board_file, wil->board_file_country);
+	if (ext)
+		strlcat(buf, ext, len);
+}
+
 static int wil_get_bl_info(struct wil6210_priv *wil)
 {
 	struct net_device *ndev = wil_to_ndev(wil);
@@ -1260,8 +1285,12 @@
 
 	wil_set_oob_mode(wil, oob_mode);
 	if (load_fw) {
+		char board_file[WIL_BOARD_FILE_MAX_NAMELEN];
+
+		board_file[0] = '\0';
+		wil_get_board_file(wil, board_file, sizeof(board_file));
 		wil_info(wil, "Use firmware <%s> + board <%s>\n",
-			 wil->wil_fw_name, WIL_BOARD_FILE_NAME);
+			 wil->wil_fw_name, board_file);
 
 		if (!no_flash)
 			wil_bl_prepare_halt(wil);
@@ -1273,11 +1302,9 @@
 		if (rc)
 			goto out;
 		if (wil->brd_file_addr)
-			rc = wil_request_board(wil, WIL_BOARD_FILE_NAME);
+			rc = wil_request_board(wil, board_file);
 		else
-			rc = wil_request_firmware(wil,
-						  WIL_BOARD_FILE_NAME,
-						  true);
+			rc = wil_request_firmware(wil, board_file, true);
 		if (rc)
 			goto out;
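
wil_get_board_file() above splices the country code in before the file extension: assuming the usual base name "wil6210.brd" for WIL_BOARD_FILE_NAME (not shown in this patch) and board_file_country set to "CN", the board file requested here becomes "wil6210_CN.brd", with a fallback to the plain name when no country is configured. A stand-alone user-space rendering of the same string construction:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Assumed base name; WIL_BOARD_FILE_NAME is defined elsewhere. */
	const char *board_file = "wil6210.brd";
	const char *country = "CN";
	const char *ext = strrchr(board_file, '.');
	int prefix_len = ext ? (int)(ext - board_file) : (int)strlen(board_file);
	char buf[128];

	snprintf(buf, sizeof(buf), "%.*s_%.2s%s",
		 prefix_len, board_file, country, ext ? ext : "");
	printf("%s\n", buf);	/* prints: wil6210_CN.brd */
	return 0;
}
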
 
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 1875387..0f34c43 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -33,10 +33,8 @@
 MODULE_PARM_DESC(ftm_mode, " Set factory test mode, default - false");
 
 #ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
 static int wil6210_pm_notify(struct notifier_block *notify_block,
 			     unsigned long mode, void *unused);
-#endif /* CONFIG_PM_SLEEP */
 #endif /* CONFIG_PM */
 
 static
@@ -354,7 +352,6 @@
 	}
 
 #ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
 	wil->pm_notify.notifier_call = wil6210_pm_notify;
 	rc = register_pm_notifier(&wil->pm_notify);
 	if (rc)
@@ -362,7 +359,6 @@
 		 * be prevented in a later phase if needed
 		 */
 		wil_err(wil, "register_pm_notifier failed: %d\n", rc);
-#endif /* CONFIG_PM_SLEEP */
 #endif /* CONFIG_PM */
 
 	wil6210_debugfs_init(wil);
@@ -397,9 +393,7 @@
 	wil_dbg_misc(wil, "pcie_remove\n");
 
 #ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
 	unregister_pm_notifier(&wil->pm_notify);
-#endif /* CONFIG_PM_SLEEP */
 #endif /* CONFIG_PM */
 
 	wil_pm_runtime_forbid(wil);
@@ -428,7 +422,6 @@
 MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
 
 #ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
 
 static int wil6210_suspend(struct device *dev, bool is_runtime)
 {
@@ -546,7 +539,6 @@
 {
 	return wil6210_resume(dev, false);
 }
-#endif /* CONFIG_PM_SLEEP */
 
 static int wil6210_pm_runtime_idle(struct device *dev)
 {
@@ -578,10 +570,12 @@
 #endif /* CONFIG_PM */
 
 static const struct dev_pm_ops wil6210_pm_ops = {
+#ifdef CONFIG_PM
 	SET_SYSTEM_SLEEP_PM_OPS(wil6210_pm_suspend, wil6210_pm_resume)
 	SET_RUNTIME_PM_OPS(wil6210_pm_runtime_suspend,
 			   wil6210_pm_runtime_resume,
 			   wil6210_pm_runtime_idle)
+#endif /* CONFIG_PM */
 };
 
 static struct pci_driver wil6210_driver = {
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 14533ed..0a96518 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -33,7 +33,11 @@
 	if (wmi_only || debug_fw) {
 		wil_dbg_pm(wil, "Deny any suspend - %s mode\n",
 			   wmi_only ? "wmi_only" : "debug_fw");
-		rc = -EPERM;
+		rc = -EBUSY;
+		goto out;
+	}
+	if (is_runtime && !wil->platform_ops.suspend) {
+		rc = -EBUSY;
 		goto out;
 	}
 	if (!(ndev->flags & IFF_UP)) {
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index dcec343..d0fecd9 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -323,6 +323,8 @@
 		return -ENOMEM;
 	}
 
+	d->mac.pn_15_0 = 0;
+	d->mac.pn_47_16 = 0;
 	d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT;
 	wil_desc_addr_set(&d->dma.addr, pa);
 	/* ip_length don't care */
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 9b68663..2b71deb 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -683,6 +683,7 @@
 	const char *hw_name;
 	const char *wil_fw_name;
 	char *board_file;
+	char board_file_country[3]; /* alpha2 */
 	u32 brd_file_addr;
 	u32 brd_file_max_size;
 	DECLARE_BITMAP(hw_capa, hw_capa_last);
@@ -796,11 +797,11 @@
 	} snr_thresh;
 
 	int fw_calib_result;
+	/* current reg domain configured in kernel */
+	char regdomain[3]; /* alpha2 */
 
 #ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
 	struct notifier_block pm_notify;
-#endif /* CONFIG_PM_SLEEP */
 #endif /* CONFIG_PM */
 
 	bool suspend_resp_rcvd;
@@ -875,6 +876,8 @@
 	wil_w(wil, reg, wil_r(wil, reg) & ~val);
 }
 
+void wil_get_board_file(struct wil6210_priv *wil, char *buf, size_t len);
+
 #if defined(CONFIG_DYNAMIC_DEBUG)
 #define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize,	\
 			  groupsize, buf, len, ascii)		\
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 54354a3..8352465 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -2776,7 +2776,6 @@
 				   struct brcmf_bss_info_le *bi)
 {
 	struct wiphy *wiphy = cfg_to_wiphy(cfg);
-	struct ieee80211_channel *notify_channel;
 	struct cfg80211_bss *bss;
 	struct ieee80211_supported_band *band;
 	struct brcmu_chan ch;
@@ -2786,7 +2785,7 @@
 	u16 notify_interval;
 	u8 *notify_ie;
 	size_t notify_ielen;
-	s32 notify_signal;
+	struct cfg80211_inform_bss bss_data = {};
 
 	if (le32_to_cpu(bi->length) > WL_BSS_INFO_MAX) {
 		brcmf_err("Bss info is larger than buffer. Discarding\n");
@@ -2806,27 +2805,28 @@
 		band = wiphy->bands[NL80211_BAND_5GHZ];
 
 	freq = ieee80211_channel_to_frequency(channel, band->band);
-	notify_channel = ieee80211_get_channel(wiphy, freq);
+	bss_data.chan = ieee80211_get_channel(wiphy, freq);
+	bss_data.scan_width = NL80211_BSS_CHAN_WIDTH_20;
+	bss_data.boottime_ns = ktime_to_ns(ktime_get_boottime());
 
 	notify_capability = le16_to_cpu(bi->capability);
 	notify_interval = le16_to_cpu(bi->beacon_period);
 	notify_ie = (u8 *)bi + le16_to_cpu(bi->ie_offset);
 	notify_ielen = le32_to_cpu(bi->ie_length);
-	notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100;
+	bss_data.signal = (s16)le16_to_cpu(bi->RSSI) * 100;
 
 	brcmf_dbg(CONN, "bssid: %pM\n", bi->BSSID);
 	brcmf_dbg(CONN, "Channel: %d(%d)\n", channel, freq);
 	brcmf_dbg(CONN, "Capability: %X\n", notify_capability);
 	brcmf_dbg(CONN, "Beacon interval: %d\n", notify_interval);
-	brcmf_dbg(CONN, "Signal: %d\n", notify_signal);
+	brcmf_dbg(CONN, "Signal: %d\n", bss_data.signal);
 
-	bss = cfg80211_inform_bss(wiphy, notify_channel,
-				  CFG80211_BSS_FTYPE_UNKNOWN,
-				  (const u8 *)bi->BSSID,
-				  0, notify_capability,
-				  notify_interval, notify_ie,
-				  notify_ielen, notify_signal,
-				  GFP_KERNEL);
+	bss = cfg80211_inform_bss_data(wiphy, &bss_data,
+				       CFG80211_BSS_FTYPE_UNKNOWN,
+				       (const u8 *)bi->BSSID,
+				       0, notify_capability,
+				       notify_interval, notify_ie,
+				       notify_ielen, GFP_KERNEL);
 
 	if (!bss)
 		return -ENOMEM;
@@ -6798,7 +6798,7 @@
 	int i;
 
 	/* ignore non-ISO3166 country codes */
-	for (i = 0; i < sizeof(req->alpha2); i++)
+	for (i = 0; i < 2; i++)
 		if (req->alpha2[i] < 'A' || req->alpha2[i] > 'Z') {
 			brcmf_err("not a ISO3166 code (0x%02x 0x%02x)\n",
 				  req->alpha2[0], req->alpha2[1]);
diff --git a/drivers/net/wireless/cnss2/Kconfig b/drivers/net/wireless/cnss2/Kconfig
index daa343e..e2d3926 100644
--- a/drivers/net/wireless/cnss2/Kconfig
+++ b/drivers/net/wireless/cnss2/Kconfig
@@ -38,3 +38,27 @@
 	  during system pm.
 	  This config flag controls the feature per target based. The feature
 	  requires CNSS driver support.
+
+config CNSS_QCA6290
+	bool "Enable CNSS QCA6290 chipset specific changes"
+	---help---
+	  This enables the changes in the WLAN host driver that are specific
+	  to the CNSS QCA6290 chipset.
+	  These changes are needed to support the new hardware architecture
+	  of the CNSS QCA6290 chipset.
+
+config CNSS_QCA6390
+	bool "Enable CNSS QCA6390 chipset specific changes"
+	---help---
+	  This enables the changes in the WLAN host driver that are specific
+	  to the CNSS QCA6390 chipset.
+	  These changes are needed to support the new hardware architecture
+	  of the CNSS QCA6390 chipset.
+
+config CNSS_EMULATION
+	bool "Enable specific changes for emulation hardware"
+	---help---
+	  This enables the changes in the WLAN drivers that are specific to
+	  emulation hardware.
+	  These changes are needed for the WLAN drivers to support and meet
+	  the requirements of emulation hardware.
diff --git a/drivers/net/wireless/cnss2/Makefile b/drivers/net/wireless/cnss2/Makefile
index b49d089..318076f 100644
--- a/drivers/net/wireless/cnss2/Makefile
+++ b/drivers/net/wireless/cnss2/Makefile
@@ -1,6 +1,7 @@
 obj-$(CONFIG_CNSS2) += cnss2.o
 
 cnss2-y := main.o
+cnss2-y += bus.o
 cnss2-y += debug.o
 cnss2-y += pci.o
 cnss2-y += power.o
diff --git a/drivers/net/wireless/cnss2/bus.c b/drivers/net/wireless/cnss2/bus.c
new file mode 100644
index 0000000..6a8e67c
--- /dev/null
+++ b/drivers/net/wireless/cnss2/bus.c
@@ -0,0 +1,335 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "bus.h"
+#include "debug.h"
+#include "pci.h"
+
+enum cnss_dev_bus_type cnss_get_dev_bus_type(struct device *dev)
+{
+	if (!dev)
+		return CNSS_BUS_NONE;
+
+	if (!dev->bus)
+		return CNSS_BUS_NONE;
+
+	if (memcmp(dev->bus->name, "pci", 3) == 0)
+		return CNSS_BUS_PCI;
+	else
+		return CNSS_BUS_NONE;
+}
+
+enum cnss_dev_bus_type cnss_get_bus_type(unsigned long device_id)
+{
+	switch (device_id) {
+	case QCA6174_DEVICE_ID:
+	case QCA6290_EMULATION_DEVICE_ID:
+	case QCA6290_DEVICE_ID:
+		return CNSS_BUS_PCI;
+	default:
+		cnss_pr_err("Unknown device_id: 0x%lx\n", device_id);
+		return CNSS_BUS_NONE;
+	}
+}
+
+void *cnss_bus_dev_to_bus_priv(struct device *dev)
+{
+	if (!dev)
+		return NULL;
+
+	switch (cnss_get_dev_bus_type(dev)) {
+	case CNSS_BUS_PCI:
+		return cnss_get_pci_priv(to_pci_dev(dev));
+	default:
+		return NULL;
+	}
+}
+
+struct cnss_plat_data *cnss_bus_dev_to_plat_priv(struct device *dev)
+{
+	void *bus_priv;
+
+	if (!dev)
+		return cnss_get_plat_priv(NULL);
+
+	bus_priv = cnss_bus_dev_to_bus_priv(dev);
+	if (!bus_priv)
+		return NULL;
+
+	switch (cnss_get_dev_bus_type(dev)) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_priv_to_plat_priv(bus_priv);
+	default:
+		return NULL;
+	}
+}
+
+int cnss_bus_init(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_init(plat_priv);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
+
+void cnss_bus_deinit(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		cnss_pci_deinit(plat_priv);
+		break;
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return;
+	}
+}
+
+int cnss_bus_load_m3(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_load_m3(plat_priv->bus_priv);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
+
+int cnss_bus_alloc_fw_mem(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_alloc_fw_mem(plat_priv->bus_priv);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
+
+u32 cnss_bus_get_wake_irq(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_get_wake_msi(plat_priv->bus_priv);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
+
+int cnss_bus_force_fw_assert_hdlr(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_force_fw_assert_hdlr(plat_priv->bus_priv);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
+
+void cnss_bus_fw_boot_timeout_hdlr(unsigned long data)
+{
+	struct cnss_plat_data *plat_priv = (struct cnss_plat_data *)data;
+
+	if (!plat_priv)
+		return;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_fw_boot_timeout_hdlr(plat_priv->bus_priv);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return;
+	}
+}
+
+void cnss_bus_collect_dump_info(struct cnss_plat_data *plat_priv, bool in_panic)
+{
+	if (!plat_priv)
+		return;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_collect_dump_info(plat_priv->bus_priv,
+						  in_panic);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return;
+	}
+}
+
+int cnss_bus_call_driver_probe(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_call_driver_probe(plat_priv->bus_priv);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
+
+int cnss_bus_call_driver_remove(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_call_driver_remove(plat_priv->bus_priv);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
+
+int cnss_bus_dev_powerup(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_dev_powerup(plat_priv->bus_priv);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
+
+int cnss_bus_dev_shutdown(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_dev_shutdown(plat_priv->bus_priv);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
+
+int cnss_bus_dev_crash_shutdown(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_dev_crash_shutdown(plat_priv->bus_priv);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
+
+int cnss_bus_dev_ramdump(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_dev_ramdump(plat_priv->bus_priv);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
+
+int cnss_bus_register_driver_hdlr(struct cnss_plat_data *plat_priv, void *data)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_register_driver_hdlr(plat_priv->bus_priv, data);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
+
+int cnss_bus_unregister_driver_hdlr(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_unregister_driver_hdlr(plat_priv->bus_priv);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
+
+int cnss_bus_call_driver_modem_status(struct cnss_plat_data *plat_priv,
+				      int modem_current_status)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_call_driver_modem_status(plat_priv->bus_priv,
+							 modem_current_status);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
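
Every cnss_bus_* wrapper in this new file has the same shape: a NULL check on the platform data, then a switch on plat_priv->bus_type that forwards to the PCI implementation, so main.c no longer needs to touch PCI-specific types. Supporting another bus would mean adding one case per wrapper; a hedged sketch of what that might look like (CNSS_BUS_SDIO and cnss_sdio_dev_powerup() are assumptions, not part of this patch):

int cnss_bus_dev_powerup(struct cnss_plat_data *plat_priv)
{
	if (!plat_priv)
		return -ENODEV;

	switch (plat_priv->bus_type) {
	case CNSS_BUS_PCI:
		return cnss_pci_dev_powerup(plat_priv->bus_priv);
	case CNSS_BUS_SDIO:		/* hypothetical additional bus */
		return cnss_sdio_dev_powerup(plat_priv->bus_priv);
	default:
		cnss_pr_err("Unsupported bus type: %d\n", plat_priv->bus_type);
		return -EINVAL;
	}
}
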
diff --git a/drivers/net/wireless/cnss2/bus.h b/drivers/net/wireless/cnss2/bus.h
new file mode 100644
index 0000000..91356e9
--- /dev/null
+++ b/drivers/net/wireless/cnss2/bus.h
@@ -0,0 +1,55 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CNSS_BUS_H
+#define _CNSS_BUS_H
+
+#include "main.h"
+
+#define QCA6174_VENDOR_ID		0x168C
+#define QCA6174_DEVICE_ID		0x003E
+#define QCA6174_REV_ID_OFFSET		0x08
+#define QCA6174_REV3_VERSION		0x5020000
+#define QCA6174_REV3_2_VERSION		0x5030000
+#define QCA6290_VENDOR_ID		0x17CB
+#define QCA6290_DEVICE_ID		0x1100
+#define QCA6290_EMULATION_VENDOR_ID	0x168C
+#define QCA6290_EMULATION_DEVICE_ID	0xABCD
+#define QCA6390_VENDOR_ID		0x17CB
+#define QCA6390_EMULATION_DEVICE_ID	0x0108
+#define QCA6390_DEVICE_ID		0x1101
+
+enum cnss_dev_bus_type cnss_get_dev_bus_type(struct device *dev);
+enum cnss_dev_bus_type cnss_get_bus_type(unsigned long device_id);
+void *cnss_bus_dev_to_bus_priv(struct device *dev);
+struct cnss_plat_data *cnss_bus_dev_to_plat_priv(struct device *dev);
+int cnss_bus_init(struct cnss_plat_data *plat_priv);
+void cnss_bus_deinit(struct cnss_plat_data *plat_priv);
+int cnss_bus_load_m3(struct cnss_plat_data *plat_priv);
+int cnss_bus_alloc_fw_mem(struct cnss_plat_data *plat_priv);
+u32 cnss_bus_get_wake_irq(struct cnss_plat_data *plat_priv);
+int cnss_bus_force_fw_assert_hdlr(struct cnss_plat_data *plat_priv);
+void cnss_bus_fw_boot_timeout_hdlr(unsigned long data);
+void cnss_bus_collect_dump_info(struct cnss_plat_data *plat_priv,
+				bool in_panic);
+int cnss_bus_call_driver_probe(struct cnss_plat_data *plat_priv);
+int cnss_bus_call_driver_remove(struct cnss_plat_data *plat_priv);
+int cnss_bus_dev_powerup(struct cnss_plat_data *plat_priv);
+int cnss_bus_dev_shutdown(struct cnss_plat_data *plat_priv);
+int cnss_bus_dev_crash_shutdown(struct cnss_plat_data *plat_priv);
+int cnss_bus_dev_ramdump(struct cnss_plat_data *plat_priv);
+int cnss_bus_register_driver_hdlr(struct cnss_plat_data *plat_priv, void *data);
+int cnss_bus_unregister_driver_hdlr(struct cnss_plat_data *plat_priv);
+int cnss_bus_call_driver_modem_status(struct cnss_plat_data *plat_priv,
+				      int modem_current_status);
+
+#endif /* _CNSS_BUS_H */
diff --git a/drivers/net/wireless/cnss2/debug.c b/drivers/net/wireless/cnss2/debug.c
index b1fbbd8..2e6b996 100644
--- a/drivers/net/wireless/cnss2/debug.c
+++ b/drivers/net/wireless/cnss2/debug.c
@@ -147,6 +147,8 @@
 		return -ENODEV;
 
 	pci_priv = plat_priv->bus_priv;
+	if (!pci_priv)
+		return -ENODEV;
 
 	len = min(count, sizeof(buf) - 1);
 	if (copy_from_user(buf, user_buf, len))
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index 4c4c761..5a082d8 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -23,8 +23,8 @@
 #include <soc/qcom/subsystem_notif.h>
 
 #include "main.h"
+#include "bus.h"
 #include "debug.h"
-#include "pci.h"
 
 #define CNSS_DUMP_FORMAT_VER		0x11
 #define CNSS_DUMP_FORMAT_VER_V2		0x22
@@ -37,7 +37,6 @@
 #define FW_READY_TIMEOUT		20000
 #define FW_ASSERT_TIMEOUT		5000
 #define CNSS_EVENT_PENDING		2989
-#define WAKE_MSI_NAME			"WAKE"
 
 static struct cnss_plat_data *plat_env;
 
@@ -55,13 +54,6 @@
 MODULE_PARM_DESC(enable_waltest, "Enable to handle firmware waltest");
 #endif
 
-enum cnss_debug_quirks {
-	LINK_DOWN_SELF_RECOVERY,
-	SKIP_DEVICE_BOOT,
-	USE_CORE_ONLY_FW,
-	SKIP_RECOVERY,
-};
-
 unsigned long quirks;
 #ifdef CONFIG_CNSS2_DEBUG
 module_param(quirks, ulong, 0600);
@@ -87,64 +79,17 @@
 	void *data;
 };
 
-static enum cnss_dev_bus_type cnss_get_dev_bus_type(struct device *dev)
-{
-	if (!dev)
-		return CNSS_BUS_NONE;
-
-	if (!dev->bus)
-		return CNSS_BUS_NONE;
-
-	if (memcmp(dev->bus->name, "pci", 3) == 0)
-		return CNSS_BUS_PCI;
-	else
-		return CNSS_BUS_NONE;
-}
-
 static void cnss_set_plat_priv(struct platform_device *plat_dev,
 			       struct cnss_plat_data *plat_priv)
 {
 	plat_env = plat_priv;
 }
 
-static struct cnss_plat_data *cnss_get_plat_priv(struct platform_device
-						 *plat_dev)
+struct cnss_plat_data *cnss_get_plat_priv(struct platform_device *plat_dev)
 {
 	return plat_env;
 }
 
-void *cnss_bus_dev_to_bus_priv(struct device *dev)
-{
-	if (!dev)
-		return NULL;
-
-	switch (cnss_get_dev_bus_type(dev)) {
-	case CNSS_BUS_PCI:
-		return cnss_get_pci_priv(to_pci_dev(dev));
-	default:
-		return NULL;
-	}
-}
-
-struct cnss_plat_data *cnss_bus_dev_to_plat_priv(struct device *dev)
-{
-	void *bus_priv;
-
-	if (!dev)
-		return cnss_get_plat_priv(NULL);
-
-	bus_priv = cnss_bus_dev_to_bus_priv(dev);
-	if (!bus_priv)
-		return NULL;
-
-	switch (cnss_get_dev_bus_type(dev)) {
-	case CNSS_BUS_PCI:
-		return cnss_pci_priv_to_plat_priv(bus_priv);
-	default:
-		return NULL;
-	}
-}
-
 static int cnss_pm_notify(struct notifier_block *b,
 			  unsigned long event, void *p)
 {
@@ -274,23 +219,6 @@
 }
 EXPORT_SYMBOL(cnss_get_platform_cap);
 
-int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info)
-{
-	int ret = 0;
-	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
-	void *bus_priv = cnss_bus_dev_to_bus_priv(dev);
-
-	if (!plat_priv)
-		return -ENODEV;
-
-	ret = cnss_pci_get_bar_info(bus_priv, &info->va, &info->pa);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-EXPORT_SYMBOL(cnss_get_soc_info);
-
 void cnss_request_pm_qos(struct device *dev, u32 qos_val)
 {
 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
@@ -506,22 +434,14 @@
 }
 EXPORT_SYMBOL(cnss_set_fw_log_mode);
 
-u32 cnss_get_wake_msi(struct cnss_plat_data *plat_priv)
+bool *cnss_get_qmi_bypass(void)
 {
-	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
-	int ret, num_vectors;
-	u32 user_base_data, base_vector;
+	return &qmi_bypass;
+}
 
-	ret = cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
-					   WAKE_MSI_NAME, &num_vectors,
-					   &user_base_data, &base_vector);
-
-	if (ret) {
-		cnss_pr_err("WAKE MSI is not valid\n");
-		return 0;
-	}
-
-	return user_base_data;
+unsigned long *cnss_get_debug_quirks(void)
+{
+	return &quirks;
 }
 
 static int cnss_fw_mem_ready_hdlr(struct cnss_plat_data *plat_priv)
@@ -541,7 +461,7 @@
 	if (ret)
 		goto out;
 
-	ret = cnss_pci_load_m3(plat_priv->bus_priv);
+	ret = cnss_bus_load_m3(plat_priv);
 	if (ret)
 		goto out;
 
@@ -554,79 +474,6 @@
 	return ret;
 }
 
-static int cnss_driver_call_probe(struct cnss_plat_data *plat_priv)
-{
-	int ret = 0;
-	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
-
-	if (test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
-		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
-		cnss_pr_dbg("Skip driver probe\n");
-		goto out;
-	}
-
-	if (!plat_priv->driver_ops) {
-		cnss_pr_err("driver_ops is NULL\n");
-		ret = -EINVAL;
-		goto out;
-	}
-
-	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
-	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
-		ret = plat_priv->driver_ops->reinit(pci_priv->pci_dev,
-						    pci_priv->pci_device_id);
-		if (ret) {
-			cnss_pr_err("Failed to reinit host driver, err = %d\n",
-				    ret);
-			goto out;
-		}
-		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
-	} else if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state)) {
-		ret = plat_priv->driver_ops->probe(pci_priv->pci_dev,
-						   pci_priv->pci_device_id);
-		if (ret) {
-			cnss_pr_err("Failed to probe host driver, err = %d\n",
-				    ret);
-			goto out;
-		}
-		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
-		clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
-		set_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
-	}
-
-	return 0;
-
-out:
-	return ret;
-}
-
-static int cnss_driver_call_remove(struct cnss_plat_data *plat_priv)
-{
-	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
-
-	if (test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state) ||
-	    test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state) ||
-	    test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
-		cnss_pr_dbg("Skip driver remove\n");
-		return 0;
-	}
-
-	if (!plat_priv->driver_ops) {
-		cnss_pr_err("driver_ops is NULL\n");
-		return -EINVAL;
-	}
-
-	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
-	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
-		plat_priv->driver_ops->shutdown(pci_priv->pci_dev);
-	} else if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
-		plat_priv->driver_ops->remove(pci_priv->pci_dev);
-		clear_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
-	}
-
-	return 0;
-}
-
 static int cnss_fw_ready_hdlr(struct cnss_plat_data *plat_priv)
 {
 	int ret = 0;
@@ -650,7 +497,7 @@
 						    QMI_WLFW_CALIBRATION_V01);
 	} else if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
 		   test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
-		ret = cnss_driver_call_probe(plat_priv);
+		ret = cnss_bus_call_driver_probe(plat_priv);
 	} else {
 		complete(&plat_priv->power_up_complete);
 	}
@@ -663,9 +510,7 @@
 	return 0;
 
 shutdown:
-	cnss_pci_stop_mhi(plat_priv->bus_priv);
-	cnss_suspend_pci_link(plat_priv->bus_priv);
-	cnss_power_off_device(plat_priv);
+	cnss_bus_dev_shutdown(plat_priv);
 
 	clear_bit(CNSS_FW_READY, &plat_priv->driver_state);
 	clear_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
@@ -837,44 +682,6 @@
 }
 EXPORT_SYMBOL(cnss_power_down);
 
-int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops)
-{
-	int ret = 0;
-	struct cnss_plat_data *plat_priv = cnss_get_plat_priv(NULL);
-
-	if (!plat_priv) {
-		cnss_pr_err("plat_priv is NULL!\n");
-		return -ENODEV;
-	}
-
-	if (plat_priv->driver_ops) {
-		cnss_pr_err("Driver has already registered!\n");
-		return -EEXIST;
-	}
-
-	ret = cnss_driver_event_post(plat_priv,
-				     CNSS_DRIVER_EVENT_REGISTER_DRIVER,
-				     CNSS_EVENT_SYNC_UNINTERRUPTIBLE,
-				     driver_ops);
-	return ret;
-}
-EXPORT_SYMBOL(cnss_wlan_register_driver);
-
-void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver_ops)
-{
-	struct cnss_plat_data *plat_priv = cnss_get_plat_priv(NULL);
-
-	if (!plat_priv) {
-		cnss_pr_err("plat_priv is NULL!\n");
-		return;
-	}
-
-	cnss_driver_event_post(plat_priv,
-			       CNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
-			       CNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
-}
-EXPORT_SYMBOL(cnss_wlan_unregister_driver);
-
 static int cnss_get_resources(struct cnss_plat_data *plat_priv)
 {
 	int ret = 0;
@@ -906,13 +713,11 @@
 {
 	struct cnss_plat_data *plat_priv =
 		container_of(nb, struct cnss_plat_data, modem_nb);
-	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
 	struct cnss_esoc_info *esoc_info;
-	struct cnss_wlan_driver *driver_ops;
 
 	cnss_pr_dbg("Modem notifier: event %lu\n", code);
 
-	if (!pci_priv)
+	if (!plat_priv)
 		return NOTIFY_DONE;
 
 	esoc_info = &plat_priv->esoc_info;
@@ -924,13 +729,10 @@
 	else
 		return NOTIFY_DONE;
 
-	driver_ops = plat_priv->driver_ops;
-	if (!driver_ops || !driver_ops->modem_status)
+	if (!cnss_bus_call_driver_modem_status(plat_priv,
+					       esoc_info->modem_current_status))
 		return NOTIFY_DONE;
 
-	driver_ops->modem_status(pci_priv->pci_dev,
-				 esoc_info->modem_current_status);
-
 	return NOTIFY_OK;
 }
 
@@ -1004,237 +806,6 @@
 		devm_unregister_esoc_client(dev, esoc_info->esoc_desc);
 }
 
-static int cnss_qca6174_powerup(struct cnss_plat_data *plat_priv)
-{
-	int ret = 0;
-	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
-
-	if (!pci_priv) {
-		cnss_pr_err("pci_priv is NULL!\n");
-		return -ENODEV;
-	}
-
-	ret = cnss_power_on_device(plat_priv);
-	if (ret) {
-		cnss_pr_err("Failed to power on device, err = %d\n", ret);
-		goto out;
-	}
-
-	ret = cnss_resume_pci_link(pci_priv);
-	if (ret) {
-		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
-		goto power_off;
-	}
-
-	ret = cnss_driver_call_probe(plat_priv);
-	if (ret)
-		goto suspend_link;
-
-	return 0;
-suspend_link:
-	cnss_suspend_pci_link(pci_priv);
-power_off:
-	cnss_power_off_device(plat_priv);
-out:
-	return ret;
-}
-
-static int cnss_qca6174_shutdown(struct cnss_plat_data *plat_priv)
-{
-	int ret = 0;
-	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
-
-	if (!pci_priv)
-		return -ENODEV;
-
-	cnss_pm_request_resume(pci_priv);
-
-	cnss_driver_call_remove(plat_priv);
-
-	cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev,
-				   CNSS_BUS_WIDTH_NONE);
-	cnss_pci_set_monitor_wake_intr(pci_priv, false);
-	cnss_pci_set_auto_suspended(pci_priv, 0);
-
-	ret = cnss_suspend_pci_link(pci_priv);
-	if (ret)
-		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
-
-	cnss_power_off_device(plat_priv);
-
-	clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
-
-	return ret;
-}
-
-static void cnss_qca6174_crash_shutdown(struct cnss_plat_data *plat_priv)
-{
-	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
-
-	if (!plat_priv->driver_ops)
-		return;
-
-	plat_priv->driver_ops->crash_shutdown(pci_priv->pci_dev);
-}
-
-static int cnss_qca6290_powerup(struct cnss_plat_data *plat_priv)
-{
-	int ret = 0;
-	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
-	unsigned int timeout;
-
-	if (!pci_priv) {
-		cnss_pr_err("pci_priv is NULL!\n");
-		return -ENODEV;
-	}
-
-	if (plat_priv->ramdump_info_v2.dump_data_valid ||
-	    test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
-		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_DEINIT);
-		cnss_pci_clear_dump_info(pci_priv);
-	}
-
-	ret = cnss_power_on_device(plat_priv);
-	if (ret) {
-		cnss_pr_err("Failed to power on device, err = %d\n", ret);
-		goto out;
-	}
-
-	ret = cnss_resume_pci_link(pci_priv);
-	if (ret) {
-		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
-		goto power_off;
-	}
-
-	timeout = cnss_get_qmi_timeout();
-
-	ret = cnss_pci_start_mhi(pci_priv);
-	if (ret) {
-		cnss_pr_err("Failed to start MHI, err = %d\n", ret);
-		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
-		    !pci_priv->pci_link_down_ind && timeout)
-			mod_timer(&plat_priv->fw_boot_timer,
-				  jiffies + msecs_to_jiffies(timeout >> 1));
-		return 0;
-	}
-
-	if (test_bit(USE_CORE_ONLY_FW, &quirks)) {
-		clear_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state);
-		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
-		return 0;
-	}
-
-	cnss_set_pin_connect_status(plat_priv);
-
-	if (qmi_bypass) {
-		ret = cnss_driver_call_probe(plat_priv);
-		if (ret)
-			goto stop_mhi;
-	} else if (timeout) {
-		mod_timer(&plat_priv->fw_boot_timer,
-			  jiffies + msecs_to_jiffies(timeout << 1));
-	}
-
-	return 0;
-
-stop_mhi:
-	cnss_pci_stop_mhi(pci_priv);
-	cnss_suspend_pci_link(pci_priv);
-power_off:
-	cnss_power_off_device(plat_priv);
-out:
-	return ret;
-}
-
-static int cnss_qca6290_shutdown(struct cnss_plat_data *plat_priv)
-{
-	int ret = 0;
-	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
-
-	if (!pci_priv)
-		return -ENODEV;
-
-	cnss_pm_request_resume(pci_priv);
-
-	cnss_driver_call_remove(plat_priv);
-
-	cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev,
-				   CNSS_BUS_WIDTH_NONE);
-	cnss_pci_set_monitor_wake_intr(pci_priv, false);
-	cnss_pci_set_auto_suspended(pci_priv, 0);
-
-	cnss_pci_stop_mhi(pci_priv);
-
-	ret = cnss_suspend_pci_link(pci_priv);
-	if (ret)
-		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
-
-	cnss_power_off_device(plat_priv);
-
-	clear_bit(CNSS_FW_READY, &plat_priv->driver_state);
-	clear_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
-	clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
-
-	return ret;
-}
-
-static void cnss_qca6290_crash_shutdown(struct cnss_plat_data *plat_priv)
-{
-	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
-
-	cnss_pr_dbg("Crash shutdown with driver_state 0x%lx\n",
-		    plat_priv->driver_state);
-
-	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
-		cnss_pr_dbg("Ignore crash shutdown\n");
-		return;
-	}
-
-	cnss_pci_collect_dump_info(pci_priv, true);
-}
-
-static int cnss_powerup(struct cnss_plat_data *plat_priv)
-{
-	int ret;
-
-	switch (plat_priv->device_id) {
-	case QCA6174_DEVICE_ID:
-		ret = cnss_qca6174_powerup(plat_priv);
-		break;
-	case QCA6290_EMULATION_DEVICE_ID:
-	case QCA6290_DEVICE_ID:
-		ret = cnss_qca6290_powerup(plat_priv);
-		break;
-	default:
-		cnss_pr_err("Unknown device_id found: 0x%lx\n",
-			    plat_priv->device_id);
-		ret = -ENODEV;
-	}
-
-	return ret;
-}
-
-static int cnss_shutdown(struct cnss_plat_data *plat_priv)
-{
-	int ret;
-
-	switch (plat_priv->device_id) {
-	case QCA6174_DEVICE_ID:
-		ret = cnss_qca6174_shutdown(plat_priv);
-		break;
-	case QCA6290_EMULATION_DEVICE_ID:
-	case QCA6290_DEVICE_ID:
-		ret = cnss_qca6290_shutdown(plat_priv);
-		break;
-	default:
-		cnss_pr_err("Unknown device_id found: 0x%lx\n",
-			    plat_priv->device_id);
-		ret = -ENODEV;
-	}
-
-	return ret;
-}
-
 static int cnss_subsys_powerup(const struct subsys_desc *subsys_desc)
 {
 	struct cnss_plat_data *plat_priv;
@@ -1255,7 +826,7 @@
 		return 0;
 	}
 
-	return cnss_powerup(plat_priv);
+	return cnss_bus_dev_powerup(plat_priv);
 }
 
 static int cnss_subsys_shutdown(const struct subsys_desc *subsys_desc,
@@ -1279,110 +850,9 @@
 		return 0;
 	}
 
-	return cnss_shutdown(plat_priv);
+	return cnss_bus_dev_shutdown(plat_priv);
 }
 
-static int cnss_qca6290_ramdump(struct cnss_plat_data *plat_priv)
-{
-	struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2;
-	struct cnss_dump_data *dump_data = &info_v2->dump_data;
-	struct cnss_dump_seg *dump_seg = info_v2->dump_data_vaddr;
-	struct ramdump_segment *ramdump_segs, *s;
-	int i, ret = 0;
-
-	if (!info_v2->dump_data_valid ||
-	    dump_data->nentries == 0)
-		return 0;
-
-	ramdump_segs = kcalloc(dump_data->nentries,
-			       sizeof(*ramdump_segs),
-			       GFP_KERNEL);
-	if (!ramdump_segs)
-		return -ENOMEM;
-
-	s = ramdump_segs;
-	for (i = 0; i < dump_data->nentries; i++) {
-		s->address = dump_seg->address;
-		s->v_address = dump_seg->v_address;
-		s->size = dump_seg->size;
-		s++;
-		dump_seg++;
-	}
-
-	ret = do_elf_ramdump(info_v2->ramdump_dev, ramdump_segs,
-			     dump_data->nentries);
-	kfree(ramdump_segs);
-
-	cnss_pci_set_mhi_state(plat_priv->bus_priv, CNSS_MHI_DEINIT);
-	cnss_pci_clear_dump_info(plat_priv->bus_priv);
-
-	return ret;
-}
-
-static int cnss_qca6174_ramdump(struct cnss_plat_data *plat_priv)
-{
-	int ret = 0;
-	struct cnss_ramdump_info *ramdump_info;
-	struct ramdump_segment segment;
-
-	ramdump_info = &plat_priv->ramdump_info;
-	if (!ramdump_info->ramdump_size)
-		return -EINVAL;
-
-	memset(&segment, 0, sizeof(segment));
-	segment.v_address = ramdump_info->ramdump_va;
-	segment.size = ramdump_info->ramdump_size;
-	ret = do_ramdump(ramdump_info->ramdump_dev, &segment, 1);
-
-	return ret;
-}
-
-static int cnss_subsys_ramdump(int enable,
-			       const struct subsys_desc *subsys_desc)
-{
-	int ret = 0;
-	struct cnss_plat_data *plat_priv = dev_get_drvdata(subsys_desc->dev);
-
-	if (!plat_priv) {
-		cnss_pr_err("plat_priv is NULL!\n");
-		return -ENODEV;
-	}
-
-	if (!enable)
-		return 0;
-
-	switch (plat_priv->device_id) {
-	case QCA6174_DEVICE_ID:
-		ret = cnss_qca6174_ramdump(plat_priv);
-		break;
-	case QCA6290_EMULATION_DEVICE_ID:
-	case QCA6290_DEVICE_ID:
-		ret = cnss_qca6290_ramdump(plat_priv);
-		break;
-	default:
-		cnss_pr_err("Unknown device_id found: 0x%lx\n",
-			    plat_priv->device_id);
-		ret = -ENODEV;
-	}
-
-	return ret;
-}
-
-void *cnss_get_virt_ramdump_mem(struct device *dev, unsigned long *size)
-{
-	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
-	struct cnss_ramdump_info *ramdump_info;
-
-	if (!plat_priv)
-		return NULL;
-
-	ramdump_info = &plat_priv->ramdump_info;
-	*size = ramdump_info->ramdump_size;
-
-	return ramdump_info->ramdump_va;
-}
-EXPORT_SYMBOL(cnss_get_virt_ramdump_mem);
-
 void cnss_device_crashed(struct device *dev)
 {
 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
@@ -1405,24 +875,44 @@
 	struct cnss_plat_data *plat_priv = dev_get_drvdata(subsys_desc->dev);
 
 	if (!plat_priv) {
-		cnss_pr_err("plat_priv is NULL!\n");
+		cnss_pr_err("plat_priv is NULL\n");
 		return;
 	}
 
-	switch (plat_priv->device_id) {
-	case QCA6174_DEVICE_ID:
-		cnss_qca6174_crash_shutdown(plat_priv);
-		break;
-	case QCA6290_EMULATION_DEVICE_ID:
-	case QCA6290_DEVICE_ID:
-		cnss_qca6290_crash_shutdown(plat_priv);
-		break;
-	default:
-		cnss_pr_err("Unknown device_id found: 0x%lx\n",
-			    plat_priv->device_id);
-	}
+	cnss_bus_dev_crash_shutdown(plat_priv);
 }
 
+static int cnss_subsys_ramdump(int enable,
+			       const struct subsys_desc *subsys_desc)
+{
+	struct cnss_plat_data *plat_priv = dev_get_drvdata(subsys_desc->dev);
+
+	if (!plat_priv) {
+		cnss_pr_err("plat_priv is NULL\n");
+		return -ENODEV;
+	}
+
+	if (!enable)
+		return 0;
+
+	return cnss_bus_dev_ramdump(plat_priv);
+}
+
+void *cnss_get_virt_ramdump_mem(struct device *dev, unsigned long *size)
+{
+	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+	struct cnss_ramdump_info *ramdump_info;
+
+	if (!plat_priv)
+		return NULL;
+
+	ramdump_info = &plat_priv->ramdump_info;
+	*size = ramdump_info->ramdump_size;
+
+	return ramdump_info->ramdump_va;
+}
+EXPORT_SYMBOL(cnss_get_virt_ramdump_mem);
+
 static const char *cnss_recovery_reason_to_str(enum cnss_recovery_reason reason)
 {
 	switch (reason) {
@@ -1442,7 +932,6 @@
 static int cnss_do_recovery(struct cnss_plat_data *plat_priv,
 			    enum cnss_recovery_reason reason)
 {
-	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
 	struct cnss_subsys_info *subsys_info =
 		&plat_priv->subsys_info;
 
@@ -1451,11 +940,6 @@
 	if (plat_priv->device_id == QCA6174_DEVICE_ID)
 		goto self_recovery;
 
-	if (plat_priv->driver_ops &&
-	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state))
-		plat_priv->driver_ops->update_status(pci_priv->pci_dev,
-						     CNSS_RECOVERY);
-
 	if (test_bit(SKIP_RECOVERY, &quirks)) {
 		cnss_pr_dbg("Skip device recovery\n");
 		return 0;
@@ -1468,7 +952,7 @@
 		break;
 	case CNSS_REASON_RDDM:
 		clear_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
-		cnss_pci_collect_dump_info(pci_priv, false);
+		cnss_bus_collect_dump_info(plat_priv, false);
 		break;
 	case CNSS_REASON_DEFAULT:
 	case CNSS_REASON_TIMEOUT:
@@ -1488,8 +972,8 @@
 	return 0;
 
 self_recovery:
-	cnss_shutdown(plat_priv);
-	cnss_powerup(plat_priv);
+	cnss_bus_dev_shutdown(plat_priv);
+	cnss_bus_dev_powerup(plat_priv);
 
 	return 0;
 }
@@ -1575,28 +1059,6 @@
 }
 EXPORT_SYMBOL(cnss_schedule_recovery);
 
-static int cnss_force_fw_assert_hdlr(struct cnss_plat_data *plat_priv)
-{
-	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
-	int ret;
-
-	ret = cnss_pci_set_mhi_state(plat_priv->bus_priv,
-				     CNSS_MHI_TRIGGER_RDDM);
-	if (ret) {
-		cnss_pr_err("Failed to trigger RDDM, err = %d\n", ret);
-		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
-				       CNSS_REASON_DEFAULT);
-		return 0;
-	}
-
-	if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) {
-		mod_timer(&plat_priv->fw_boot_timer,
-			  jiffies + msecs_to_jiffies(FW_ASSERT_TIMEOUT));
-	}
-
-	return 0;
-}
-
 int cnss_force_fw_assert(struct device *dev)
 {
 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
@@ -1624,49 +1086,12 @@
 }
 EXPORT_SYMBOL(cnss_force_fw_assert);
 
-void fw_boot_timeout(unsigned long data)
-{
-	struct cnss_plat_data *plat_priv = (struct cnss_plat_data *)data;
-	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
-
-	cnss_pr_err("Timeout waiting for FW ready indication!\n");
-
-	cnss_schedule_recovery(&pci_priv->pci_dev->dev,
-			       CNSS_REASON_TIMEOUT);
-}
-
-static int cnss_register_driver_hdlr(struct cnss_plat_data *plat_priv,
-				     void *data)
-{
-	int ret = 0;
-
-	set_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
-	plat_priv->driver_ops = data;
-
-	ret = cnss_powerup(plat_priv);
-	if (ret) {
-		clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
-		plat_priv->driver_ops = NULL;
-	}
-
-	return ret;
-}
-
-static int cnss_unregister_driver_hdlr(struct cnss_plat_data *plat_priv)
-{
-	set_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
-	cnss_shutdown(plat_priv);
-	plat_priv->driver_ops = NULL;
-
-	return 0;
-}
-
 static int cnss_cold_boot_cal_start_hdlr(struct cnss_plat_data *plat_priv)
 {
 	int ret = 0;
 
 	set_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state);
-	ret = cnss_powerup(plat_priv);
+	ret = cnss_bus_dev_powerup(plat_priv);
 	if (ret)
 		clear_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state);
 
@@ -1675,8 +1100,9 @@
 
 static int cnss_cold_boot_cal_done_hdlr(struct cnss_plat_data *plat_priv)
 {
+	plat_priv->cal_done = true;
 	cnss_wlfw_wlan_mode_send_sync(plat_priv, QMI_WLFW_OFF_V01);
-	cnss_shutdown(plat_priv);
+	cnss_bus_dev_shutdown(plat_priv);
 	clear_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state);
 
 	return 0;
@@ -1684,12 +1110,12 @@
 
 static int cnss_power_up_hdlr(struct cnss_plat_data *plat_priv)
 {
-	return cnss_powerup(plat_priv);
+	return cnss_bus_dev_powerup(plat_priv);
 }
 
 static int cnss_power_down_hdlr(struct cnss_plat_data *plat_priv)
 {
-	cnss_shutdown(plat_priv);
+	cnss_bus_dev_shutdown(plat_priv);
 
 	return 0;
 }
@@ -1730,7 +1156,7 @@
 			ret = cnss_wlfw_server_exit(plat_priv);
 			break;
 		case CNSS_DRIVER_EVENT_REQUEST_MEM:
-			ret = cnss_pci_alloc_fw_mem(plat_priv->bus_priv);
+			ret = cnss_bus_alloc_fw_mem(plat_priv);
 			if (ret)
 				break;
 			ret = cnss_wlfw_respond_mem_send_sync(plat_priv);
@@ -1748,18 +1174,18 @@
 			ret = cnss_cold_boot_cal_done_hdlr(plat_priv);
 			break;
 		case CNSS_DRIVER_EVENT_REGISTER_DRIVER:
-			ret = cnss_register_driver_hdlr(plat_priv,
-							event->data);
+			ret = cnss_bus_register_driver_hdlr(plat_priv,
+							    event->data);
 			break;
 		case CNSS_DRIVER_EVENT_UNREGISTER_DRIVER:
-			ret = cnss_unregister_driver_hdlr(plat_priv);
+			ret = cnss_bus_unregister_driver_hdlr(plat_priv);
 			break;
 		case CNSS_DRIVER_EVENT_RECOVERY:
 			ret = cnss_driver_recovery_hdlr(plat_priv,
 							event->data);
 			break;
 		case CNSS_DRIVER_EVENT_FORCE_FW_ASSERT:
-			ret = cnss_force_fw_assert_hdlr(plat_priv);
+			ret = cnss_bus_force_fw_assert_hdlr(plat_priv);
 			break;
 		case CNSS_DRIVER_EVENT_POWER_UP:
 			ret = cnss_power_up_hdlr(plat_priv);
@@ -1861,7 +1287,7 @@
 	return msm_dump_data_register(MSM_DUMP_TABLE_APPS, &dump_entry);
 }
 
-static int cnss_qca6174_register_ramdump(struct cnss_plat_data *plat_priv)
+static int cnss_register_ramdump_v1(struct cnss_plat_data *plat_priv)
 {
 	int ret = 0;
 	struct device *dev;
@@ -1912,7 +1338,7 @@
 	return ret;
 }
 
-static void cnss_qca6174_unregister_ramdump(struct cnss_plat_data *plat_priv)
+static void cnss_unregister_ramdump_v1(struct cnss_plat_data *plat_priv)
 {
 	struct device *dev;
 	struct cnss_ramdump_info *ramdump_info;
@@ -1929,7 +1355,7 @@
 				  ramdump_info->ramdump_pa);
 }
 
-static int cnss_qca6290_register_ramdump(struct cnss_plat_data *plat_priv)
+static int cnss_register_ramdump_v2(struct cnss_plat_data *plat_priv)
 {
 	int ret = 0;
 	struct cnss_subsys_info *subsys_info;
@@ -1985,7 +1411,7 @@
 	return ret;
 }
 
-static void cnss_qca6290_unregister_ramdump(struct cnss_plat_data *plat_priv)
+static void cnss_unregister_ramdump_v2(struct cnss_plat_data *plat_priv)
 {
 	struct cnss_ramdump_info_v2 *info_v2;
 
@@ -2005,11 +1431,13 @@
 
 	switch (plat_priv->device_id) {
 	case QCA6174_DEVICE_ID:
-		ret = cnss_qca6174_register_ramdump(plat_priv);
+		ret = cnss_register_ramdump_v1(plat_priv);
 		break;
 	case QCA6290_EMULATION_DEVICE_ID:
 	case QCA6290_DEVICE_ID:
-		ret = cnss_qca6290_register_ramdump(plat_priv);
+	case QCA6390_EMULATION_DEVICE_ID:
+	case QCA6390_DEVICE_ID:
+		ret = cnss_register_ramdump_v2(plat_priv);
 		break;
 	default:
 		cnss_pr_err("Unknown device ID: 0x%lx\n", plat_priv->device_id);
@@ -2023,11 +1451,13 @@
 {
 	switch (plat_priv->device_id) {
 	case QCA6174_DEVICE_ID:
-		cnss_qca6174_unregister_ramdump(plat_priv);
+		cnss_unregister_ramdump_v1(plat_priv);
 		break;
 	case QCA6290_EMULATION_DEVICE_ID:
 	case QCA6290_DEVICE_ID:
-		cnss_qca6290_unregister_ramdump(plat_priv);
+	case QCA6390_EMULATION_DEVICE_ID:
+	case QCA6390_DEVICE_ID:
+		cnss_unregister_ramdump_v2(plat_priv);
 		break;
 	default:
 		cnss_pr_err("Unknown device ID: 0x%lx\n", plat_priv->device_id);
@@ -2097,6 +1527,8 @@
 	switch (plat_priv->device_id) {
 	case QCA6290_EMULATION_DEVICE_ID:
 	case QCA6290_DEVICE_ID:
+	case QCA6390_EMULATION_DEVICE_ID:
+	case QCA6390_DEVICE_ID:
 		break;
 	default:
 		cnss_pr_err("Not supported for device ID 0x%lx\n",
@@ -2159,6 +1591,7 @@
 static const struct platform_device_id cnss_platform_id_table[] = {
 	{ .name = "qca6174", .driver_data = QCA6174_DEVICE_ID, },
 	{ .name = "qca6290", .driver_data = QCA6290_DEVICE_ID, },
+	{ .name = "qca6390", .driver_data = QCA6390_DEVICE_ID, },
 };
 
 static const struct of_device_id cnss_of_match_table[] = {
@@ -2168,6 +1601,9 @@
 	{
 		.compatible = "qcom,cnss-qca6290",
 		.data = (void *)&cnss_platform_id_table[1]},
+	{
+		.compatible = "qcom,cnss-qca6390",
+		.data = (void *)&cnss_platform_id_table[2]},
 	{ },
 };
 MODULE_DEVICE_TABLE(of, cnss_of_match_table);
@@ -2203,6 +1639,7 @@
 
 	plat_priv->plat_dev = plat_dev;
 	plat_priv->device_id = device_id->driver_data;
+	plat_priv->bus_type = cnss_get_bus_type(plat_priv->device_id);
 	cnss_set_plat_priv(plat_dev, plat_priv);
 	platform_set_drvdata(plat_dev, plat_priv);
 
@@ -2215,14 +1652,14 @@
 		if (ret)
 			goto free_res;
 
-		ret = cnss_pci_init(plat_priv);
+		ret = cnss_bus_init(plat_priv);
 		if (ret)
 			goto power_off;
 	}
 
 	ret = cnss_register_esoc(plat_priv);
 	if (ret)
-		goto deinit_pci;
+		goto deinit_bus;
 
 	ret = cnss_register_bus_scale(plat_priv);
 	if (ret)
@@ -2244,8 +1681,8 @@
 	if (ret)
 		goto deinit_qmi;
 
-	setup_timer(&plat_priv->fw_boot_timer,
-		    fw_boot_timeout, (unsigned long)plat_priv);
+	setup_timer(&plat_priv->fw_boot_timer, cnss_bus_fw_boot_timeout_hdlr,
+		    (unsigned long)plat_priv);
 
 	register_pm_notifier(&cnss_pm_notifier);
 
@@ -2271,9 +1708,9 @@
 	cnss_unregister_bus_scale(plat_priv);
 unreg_esoc:
 	cnss_unregister_esoc(plat_priv);
-deinit_pci:
+deinit_bus:
 	if (!test_bit(SKIP_DEVICE_BOOT, &quirks))
-		cnss_pci_deinit(plat_priv);
+		cnss_bus_deinit(plat_priv);
 power_off:
 	if (!test_bit(SKIP_DEVICE_BOOT, &quirks))
 		cnss_power_off_device(plat_priv);
@@ -2300,7 +1737,7 @@
 	cnss_remove_sysfs(plat_priv);
 	cnss_unregister_bus_scale(plat_priv);
 	cnss_unregister_esoc(plat_priv);
-	cnss_pci_deinit(plat_priv);
+	cnss_bus_deinit(plat_priv);
 	cnss_put_resources(plat_priv);
 	platform_set_drvdata(plat_dev, NULL);
 	plat_env = NULL;
diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h
index c11b206..f394f1f 100644
--- a/drivers/net/wireless/cnss2/main.h
+++ b/drivers/net/wireless/cnss2/main.h
@@ -107,6 +107,7 @@
 	void *va;
 	phys_addr_t pa;
 	bool valid;
+	u32 type;
 };
 
 enum cnss_fw_dump_type {
@@ -169,9 +170,17 @@
 	u32 host_pin_result;
 };
 
+enum cnss_debug_quirks {
+	LINK_DOWN_SELF_RECOVERY,
+	SKIP_DEVICE_BOOT,
+	USE_CORE_ONLY_FW,
+	SKIP_RECOVERY,
+};
+
 struct cnss_plat_data {
 	struct platform_device *plat_dev;
 	void *bus_priv;
+	enum cnss_dev_bus_type bus_type;
 	struct cnss_vreg_info *vreg_info;
 	struct cnss_pinctrl_info pinctrl_info;
 	struct cnss_subsys_info subsys_info;
@@ -183,7 +192,6 @@
 	struct cnss_platform_cap cap;
 	struct pm_qos_request qos_request;
 	unsigned long device_id;
-	struct cnss_wlan_driver *driver_ops;
 	enum cnss_driver_status driver_status;
 	u32 recovery_count;
 	unsigned long driver_state;
@@ -198,7 +206,8 @@
 	struct wlfw_rf_board_info_s_v01 board_info;
 	struct wlfw_soc_info_s_v01 soc_info;
 	struct wlfw_fw_version_info_s_v01 fw_version_info;
-	struct cnss_fw_mem fw_mem;
+	u32 fw_mem_seg_len;
+	struct cnss_fw_mem fw_mem[QMI_WLFW_MAX_NUM_MEM_SEG_V01];
 	struct cnss_fw_mem m3_mem;
 	struct cnss_pin_connect_result pin_result;
 	struct dentry *root_dentry;
@@ -210,10 +219,13 @@
 	u32 diag_reg_read_mem_type;
 	u32 diag_reg_read_len;
 	u8 *diag_reg_read_buf;
+	bool cal_done;
+	char firmware_name[13];
 };
 
-void *cnss_bus_dev_to_bus_priv(struct device *dev);
-struct cnss_plat_data *cnss_bus_dev_to_plat_priv(struct device *dev);
+struct cnss_plat_data *cnss_get_plat_priv(struct platform_device *plat_dev);
+bool *cnss_get_qmi_bypass(void);
+unsigned long *cnss_get_debug_quirks(void);
 int cnss_driver_event_post(struct cnss_plat_data *plat_priv,
 			   enum cnss_driver_event_type type,
 			   u32 flags, void *data);
@@ -226,6 +238,5 @@
 int cnss_register_ramdump(struct cnss_plat_data *plat_priv);
 void cnss_unregister_ramdump(struct cnss_plat_data *plat_priv);
 void cnss_set_pin_connect_status(struct cnss_plat_data *plat_priv);
-u32 cnss_get_wake_msi(struct cnss_plat_data *plat_priv);
 
 #endif /* _CNSS_MAIN_H */
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 9d1fbf9..dde3319 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -17,8 +17,10 @@
 #include <linux/of.h>
 #include <linux/pm_runtime.h>
 #include <linux/memblock.h>
+#include <soc/qcom/ramdump.h>
 
 #include "main.h"
+#include "bus.h"
 #include "debug.h"
 #include "pci.h"
 
@@ -42,6 +44,11 @@
 
 #define MAX_M3_FILE_NAME_LENGTH		13
 #define DEFAULT_M3_FILE_NAME		"m3.bin"
+#define DEFAULT_FW_FILE_NAME		"amss.bin"
+
+#define WAKE_MSI_NAME			"WAKE"
+
+#define FW_ASSERT_TIMEOUT		5000
 
 static DEFINE_SPINLOCK(pci_link_down_lock);
 
@@ -222,6 +229,518 @@
 }
 EXPORT_SYMBOL(cnss_pci_link_down);
 
+int cnss_pci_call_driver_probe(struct cnss_pci_data *pci_priv)
+{
+	int ret = 0;
+	struct cnss_plat_data *plat_priv;
+
+	if (!pci_priv)
+		return -ENODEV;
+
+	plat_priv = pci_priv->plat_priv;
+
+	if (test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
+		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+		cnss_pr_dbg("Skip driver probe\n");
+		goto out;
+	}
+
+	if (!pci_priv->driver_ops) {
+		cnss_pr_err("driver_ops is NULL\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
+	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
+		ret = pci_priv->driver_ops->reinit(pci_priv->pci_dev,
+						   pci_priv->pci_device_id);
+		if (ret) {
+			cnss_pr_err("Failed to reinit host driver, err = %d\n",
+				    ret);
+			goto out;
+		}
+		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+	} else if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state)) {
+		ret = pci_priv->driver_ops->probe(pci_priv->pci_dev,
+						  pci_priv->pci_device_id);
+		if (ret) {
+			cnss_pr_err("Failed to probe host driver, err = %d\n",
+				    ret);
+			goto out;
+		}
+		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+		clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
+		set_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
+	}
+
+	return 0;
+
+out:
+	return ret;
+}
+
+int cnss_pci_call_driver_remove(struct cnss_pci_data *pci_priv)
+{
+	struct cnss_plat_data *plat_priv;
+
+	if (!pci_priv)
+		return -ENODEV;
+
+	plat_priv = pci_priv->plat_priv;
+
+	if (test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state) ||
+	    test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state) ||
+	    test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
+		cnss_pr_dbg("Skip driver remove\n");
+		return 0;
+	}
+
+	if (!pci_priv->driver_ops) {
+		cnss_pr_err("driver_ops is NULL\n");
+		return -EINVAL;
+	}
+
+	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
+	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
+		pci_priv->driver_ops->shutdown(pci_priv->pci_dev);
+	} else if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
+		pci_priv->driver_ops->remove(pci_priv->pci_dev);
+		clear_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
+	}
+
+	return 0;
+}
+
+int cnss_pci_call_driver_modem_status(struct cnss_pci_data *pci_priv,
+				      int modem_current_status)
+{
+	struct cnss_wlan_driver *driver_ops;
+
+	if (!pci_priv)
+		return -ENODEV;
+
+	driver_ops = pci_priv->driver_ops;
+	if (!driver_ops || !driver_ops->modem_status)
+		return -EINVAL;
+
+	driver_ops->modem_status(pci_priv->pci_dev, modem_current_status);
+
+	return 0;
+}
+
+static int cnss_qca6174_powerup(struct cnss_pci_data *pci_priv)
+{
+	int ret = 0;
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+	ret = cnss_power_on_device(plat_priv);
+	if (ret) {
+		cnss_pr_err("Failed to power on device, err = %d\n", ret);
+		goto out;
+	}
+
+	ret = cnss_resume_pci_link(pci_priv);
+	if (ret) {
+		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
+		goto power_off;
+	}
+
+	ret = cnss_pci_call_driver_probe(pci_priv);
+	if (ret)
+		goto suspend_link;
+
+	return 0;
+suspend_link:
+	cnss_suspend_pci_link(pci_priv);
+power_off:
+	cnss_power_off_device(plat_priv);
+out:
+	return ret;
+}
+
+static int cnss_qca6174_shutdown(struct cnss_pci_data *pci_priv)
+{
+	int ret = 0;
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+	cnss_pm_request_resume(pci_priv);
+
+	cnss_pci_call_driver_remove(pci_priv);
+
+	cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev,
+				   CNSS_BUS_WIDTH_NONE);
+	cnss_pci_set_monitor_wake_intr(pci_priv, false);
+	cnss_pci_set_auto_suspended(pci_priv, 0);
+
+	ret = cnss_suspend_pci_link(pci_priv);
+	if (ret)
+		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
+
+	cnss_power_off_device(plat_priv);
+
+	clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
+
+	return ret;
+}
+
+static void cnss_qca6174_crash_shutdown(struct cnss_pci_data *pci_priv)
+{
+	if (pci_priv->driver_ops && pci_priv->driver_ops->crash_shutdown)
+		pci_priv->driver_ops->crash_shutdown(pci_priv->pci_dev);
+}
+
+static int cnss_qca6174_ramdump(struct cnss_pci_data *pci_priv)
+{
+	int ret = 0;
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+	struct cnss_ramdump_info *ramdump_info;
+	struct ramdump_segment segment;
+
+	ramdump_info = &plat_priv->ramdump_info;
+	if (!ramdump_info->ramdump_size)
+		return -EINVAL;
+
+	memset(&segment, 0, sizeof(segment));
+	segment.v_address = ramdump_info->ramdump_va;
+	segment.size = ramdump_info->ramdump_size;
+	ret = do_ramdump(ramdump_info->ramdump_dev, &segment, 1);
+
+	return ret;
+}
+
+static int cnss_qca6290_powerup(struct cnss_pci_data *pci_priv)
+{
+	int ret = 0;
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+	unsigned int timeout;
+
+	if (plat_priv->ramdump_info_v2.dump_data_valid ||
+	    test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_DEINIT);
+		cnss_pci_clear_dump_info(pci_priv);
+	}
+
+	ret = cnss_power_on_device(plat_priv);
+	if (ret) {
+		cnss_pr_err("Failed to power on device, err = %d\n", ret);
+		goto out;
+	}
+
+	ret = cnss_resume_pci_link(pci_priv);
+	if (ret) {
+		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
+		goto power_off;
+	}
+
+	timeout = cnss_get_qmi_timeout();
+
+	ret = cnss_pci_start_mhi(pci_priv);
+	if (ret) {
+		cnss_pr_err("Failed to start MHI, err = %d\n", ret);
+		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
+		    !pci_priv->pci_link_down_ind && timeout)
+			mod_timer(&plat_priv->fw_boot_timer,
+				  jiffies + msecs_to_jiffies(timeout >> 1));
+		return 0;
+	}
+
+	if (test_bit(USE_CORE_ONLY_FW, cnss_get_debug_quirks())) {
+		clear_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state);
+		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+		return 0;
+	}
+
+	cnss_set_pin_connect_status(plat_priv);
+
+	if (*cnss_get_qmi_bypass()) {
+		ret = cnss_pci_call_driver_probe(pci_priv);
+		if (ret)
+			goto stop_mhi;
+	} else if (timeout) {
+		mod_timer(&plat_priv->fw_boot_timer,
+			  jiffies + msecs_to_jiffies(timeout << 1));
+	}
+
+	return 0;
+
+stop_mhi:
+	cnss_pci_stop_mhi(pci_priv);
+	cnss_suspend_pci_link(pci_priv);
+power_off:
+	cnss_power_off_device(plat_priv);
+out:
+	return ret;
+}
+
+static int cnss_qca6290_shutdown(struct cnss_pci_data *pci_priv)
+{
+	int ret = 0;
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+	cnss_pm_request_resume(pci_priv);
+
+	cnss_pci_call_driver_remove(pci_priv);
+
+	cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev,
+				   CNSS_BUS_WIDTH_NONE);
+	cnss_pci_set_monitor_wake_intr(pci_priv, false);
+	cnss_pci_set_auto_suspended(pci_priv, 0);
+
+	cnss_pci_stop_mhi(pci_priv);
+
+	ret = cnss_suspend_pci_link(pci_priv);
+	if (ret)
+		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
+
+	cnss_power_off_device(plat_priv);
+
+	clear_bit(CNSS_FW_READY, &plat_priv->driver_state);
+	clear_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
+	clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
+
+	return ret;
+}
+
+static void cnss_qca6290_crash_shutdown(struct cnss_pci_data *pci_priv)
+{
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+	cnss_pr_dbg("Crash shutdown with driver_state 0x%lx\n",
+		    plat_priv->driver_state);
+
+	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+		cnss_pr_dbg("Ignore crash shutdown\n");
+		return;
+	}
+
+	cnss_pci_collect_dump_info(pci_priv, true);
+}
+
+static int cnss_qca6290_ramdump(struct cnss_pci_data *pci_priv)
+{
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+	struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2;
+	struct cnss_dump_data *dump_data = &info_v2->dump_data;
+	struct cnss_dump_seg *dump_seg = info_v2->dump_data_vaddr;
+	struct ramdump_segment *ramdump_segs, *s;
+	int i, ret = 0;
+
+	if (!info_v2->dump_data_valid ||
+	    dump_data->nentries == 0)
+		return 0;
+
+	ramdump_segs = kcalloc(dump_data->nentries,
+			       sizeof(*ramdump_segs),
+			       GFP_KERNEL);
+	if (!ramdump_segs)
+		return -ENOMEM;
+
+	s = ramdump_segs;
+	for (i = 0; i < dump_data->nentries; i++) {
+		s->address = dump_seg->address;
+		s->v_address = dump_seg->v_address;
+		s->size = dump_seg->size;
+		s++;
+		dump_seg++;
+	}
+
+	ret = do_elf_ramdump(info_v2->ramdump_dev, ramdump_segs,
+			     dump_data->nentries);
+	kfree(ramdump_segs);
+
+	cnss_pci_set_mhi_state(plat_priv->bus_priv, CNSS_MHI_DEINIT);
+	cnss_pci_clear_dump_info(plat_priv->bus_priv);
+
+	return ret;
+}
+
+int cnss_pci_dev_powerup(struct cnss_pci_data *pci_priv)
+{
+	int ret = 0;
+
+	if (!pci_priv) {
+		cnss_pr_err("pci_priv is NULL\n");
+		return -ENODEV;
+	}
+
+	switch (pci_priv->device_id) {
+	case QCA6174_DEVICE_ID:
+		ret = cnss_qca6174_powerup(pci_priv);
+		break;
+	case QCA6290_EMULATION_DEVICE_ID:
+	case QCA6290_DEVICE_ID:
+	case QCA6390_EMULATION_DEVICE_ID:
+	case QCA6390_DEVICE_ID:
+		ret = cnss_qca6290_powerup(pci_priv);
+		break;
+	default:
+		cnss_pr_err("Unknown device_id found: 0x%x\n",
+			    pci_priv->device_id);
+		ret = -ENODEV;
+	}
+
+	return ret;
+}
+
+int cnss_pci_dev_shutdown(struct cnss_pci_data *pci_priv)
+{
+	int ret = 0;
+
+	if (!pci_priv) {
+		cnss_pr_err("pci_priv is NULL\n");
+		return -ENODEV;
+	}
+
+	switch (pci_priv->device_id) {
+	case QCA6174_DEVICE_ID:
+		ret = cnss_qca6174_shutdown(pci_priv);
+		break;
+	case QCA6290_EMULATION_DEVICE_ID:
+	case QCA6290_DEVICE_ID:
+	case QCA6390_EMULATION_DEVICE_ID:
+	case QCA6390_DEVICE_ID:
+		ret = cnss_qca6290_shutdown(pci_priv);
+		break;
+	default:
+		cnss_pr_err("Unknown device_id found: 0x%x\n",
+			    pci_priv->device_id);
+		ret = -ENODEV;
+	}
+
+	return ret;
+}
+
+int cnss_pci_dev_crash_shutdown(struct cnss_pci_data *pci_priv)
+{
+	int ret = 0;
+
+	if (!pci_priv) {
+		cnss_pr_err("pci_priv is NULL\n");
+		return -ENODEV;
+	}
+
+	switch (pci_priv->device_id) {
+	case QCA6174_DEVICE_ID:
+		cnss_qca6174_crash_shutdown(pci_priv);
+		break;
+	case QCA6290_EMULATION_DEVICE_ID:
+	case QCA6290_DEVICE_ID:
+	case QCA6390_EMULATION_DEVICE_ID:
+	case QCA6390_DEVICE_ID:
+		cnss_qca6290_crash_shutdown(pci_priv);
+		break;
+	default:
+		cnss_pr_err("Unknown device_id found: 0x%x\n",
+			    pci_priv->device_id);
+		ret = -ENODEV;
+	}
+
+	return ret;
+}
+
+int cnss_pci_dev_ramdump(struct cnss_pci_data *pci_priv)
+{
+	int ret = 0;
+
+	if (!pci_priv) {
+		cnss_pr_err("pci_priv is NULL\n");
+		return -ENODEV;
+	}
+
+	switch (pci_priv->device_id) {
+	case QCA6174_DEVICE_ID:
+		ret = cnss_qca6174_ramdump(pci_priv);
+		break;
+	case QCA6290_EMULATION_DEVICE_ID:
+	case QCA6290_DEVICE_ID:
+	case QCA6390_EMULATION_DEVICE_ID:
+	case QCA6390_DEVICE_ID:
+		ret = cnss_qca6290_ramdump(pci_priv);
+		break;
+	default:
+		cnss_pr_err("Unknown device_id found: 0x%x\n",
+			    pci_priv->device_id);
+		ret = -ENODEV;
+	}
+
+	return ret;
+}
+
+int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops)
+{
+	int ret = 0;
+	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+	struct cnss_pci_data *pci_priv;
+
+	if (!plat_priv) {
+		cnss_pr_err("plat_priv is NULL\n");
+		return -ENODEV;
+	}
+
+	pci_priv = plat_priv->bus_priv;
+	if (!pci_priv) {
+		cnss_pr_err("pci_priv is NULL\n");
+		return -ENODEV;
+	}
+
+	if (pci_priv->driver_ops) {
+		cnss_pr_err("Driver has already registered\n");
+		return -EEXIST;
+	}
+
+	ret = cnss_driver_event_post(plat_priv,
+				     CNSS_DRIVER_EVENT_REGISTER_DRIVER,
+				     CNSS_EVENT_SYNC_UNINTERRUPTIBLE,
+				     driver_ops);
+	return ret;
+}
+EXPORT_SYMBOL(cnss_wlan_register_driver);
+
+void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver_ops)
+{
+	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+
+	if (!plat_priv) {
+		cnss_pr_err("plat_priv is NULL\n");
+		return;
+	}
+
+	cnss_driver_event_post(plat_priv,
+			       CNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
+			       CNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
+}
+EXPORT_SYMBOL(cnss_wlan_unregister_driver);
+
+int cnss_pci_register_driver_hdlr(struct cnss_pci_data *pci_priv,
+				  void *data)
+{
+	int ret = 0;
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+	set_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
+	pci_priv->driver_ops = data;
+
+	ret = cnss_pci_dev_powerup(pci_priv);
+	if (ret) {
+		clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
+		pci_priv->driver_ops = NULL;
+	}
+
+	return ret;
+}
+
+int cnss_pci_unregister_driver_hdlr(struct cnss_pci_data *pci_priv)
+{
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+	set_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
+	cnss_pci_dev_shutdown(pci_priv);
+	pci_priv->driver_ops = NULL;
+
+	return 0;
+}
+
 static int cnss_pci_init_smmu(struct cnss_pci_data *pci_priv)
 {
 	int ret = 0;
@@ -377,7 +896,6 @@
 	int ret = 0;
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
-	struct cnss_plat_data *plat_priv;
 	struct cnss_wlan_driver *driver_ops;
 
 	pm_message_t state = { .event = PM_EVENT_SUSPEND };
@@ -385,11 +903,7 @@
 	if (!pci_priv)
 		goto out;
 
-	plat_priv = pci_priv->plat_priv;
-	if (!plat_priv)
-		goto out;
-
-	driver_ops = plat_priv->driver_ops;
+	driver_ops = pci_priv->driver_ops;
 	if (driver_ops && driver_ops->suspend) {
 		ret = driver_ops->suspend(pci_dev, state);
 		if (ret) {
@@ -400,7 +914,7 @@
 		}
 	}
 
-	if (pci_priv->pci_link_state) {
+	if (pci_priv->pci_link_state == PCI_LINK_UP) {
 		ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_SUSPEND);
 		if (ret) {
 			if (driver_ops && driver_ops->resume)
@@ -432,31 +946,29 @@
 	int ret = 0;
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
-	struct cnss_plat_data *plat_priv;
 	struct cnss_wlan_driver *driver_ops;
 
 	if (!pci_priv)
 		goto out;
 
-	plat_priv = pci_priv->plat_priv;
-	if (!plat_priv)
-		goto out;
-
 	if (pci_priv->pci_link_down_ind)
 		goto out;
 
-	ret = pci_enable_device(pci_dev);
-	if (ret)
-		cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
+	if (pci_priv->pci_link_state == PCI_LINK_UP) {
+		ret = pci_enable_device(pci_dev);
+		if (ret)
+			cnss_pr_err("Failed to enable PCI device, err = %d\n",
+				    ret);
 
-	if (pci_priv->saved_state)
-		cnss_set_pci_config_space(pci_priv,
-					  RESTORE_PCI_CONFIG_SPACE);
+		if (pci_priv->saved_state)
+			cnss_set_pci_config_space(pci_priv,
+						  RESTORE_PCI_CONFIG_SPACE);
 
-	pci_set_master(pci_dev);
-	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
+		pci_set_master(pci_dev);
+		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
+	}
 
-	driver_ops = plat_priv->driver_ops;
+	driver_ops = pci_priv->driver_ops;
 	if (driver_ops && driver_ops->resume) {
 		ret = driver_ops->resume(pci_dev);
 		if (ret)
@@ -475,20 +987,20 @@
 	int ret = 0;
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
-	struct cnss_plat_data *plat_priv;
 	struct cnss_wlan_driver *driver_ops;
 
 	if (!pci_priv)
 		goto out;
 
-	plat_priv = pci_priv->plat_priv;
-	if (!plat_priv)
-		goto out;
-
-	driver_ops = plat_priv->driver_ops;
+	driver_ops = pci_priv->driver_ops;
 	if (driver_ops && driver_ops->suspend_noirq)
 		ret = driver_ops->suspend_noirq(pci_dev);
 
+	ret = cnss_set_pci_link(pci_priv, PCI_LINK_DOWN);
+	if (ret)
+		goto out;
+	pci_priv->pci_link_state = PCI_LINK_DOWN;
+
 out:
 	return ret;
 }
@@ -498,17 +1010,17 @@
 	int ret = 0;
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
-	struct cnss_plat_data *plat_priv;
 	struct cnss_wlan_driver *driver_ops;
 
 	if (!pci_priv)
 		goto out;
 
-	plat_priv = pci_priv->plat_priv;
-	if (!plat_priv)
+	ret = cnss_set_pci_link(pci_priv, PCI_LINK_UP);
+	if (ret)
 		goto out;
+	pci_priv->pci_link_state = PCI_LINK_UP;
 
-	driver_ops = plat_priv->driver_ops;
+	driver_ops = pci_priv->driver_ops;
 	if (driver_ops && driver_ops->resume_noirq &&
 	    !pci_priv->pci_link_down_ind)
 		ret = driver_ops->resume_noirq(pci_dev);
@@ -522,16 +1034,11 @@
 	int ret = 0;
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
-	struct cnss_plat_data *plat_priv;
 	struct cnss_wlan_driver *driver_ops;
 
 	if (!pci_priv)
 		return -EAGAIN;
 
-	plat_priv = pci_priv->plat_priv;
-	if (!plat_priv)
-		return -EAGAIN;
-
 	if (pci_priv->pci_link_down_ind) {
 		cnss_pr_dbg("PCI link down recovery is in progress!\n");
 		return -EAGAIN;
@@ -539,7 +1046,7 @@
 
 	cnss_pr_dbg("Runtime suspend start\n");
 
-	driver_ops = plat_priv->driver_ops;
+	driver_ops = pci_priv->driver_ops;
 	if (driver_ops && driver_ops->runtime_ops &&
 	    driver_ops->runtime_ops->runtime_suspend)
 		ret = driver_ops->runtime_ops->runtime_suspend(pci_dev);
@@ -554,16 +1061,11 @@
 	int ret = 0;
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
-	struct cnss_plat_data *plat_priv;
 	struct cnss_wlan_driver *driver_ops;
 
 	if (!pci_priv)
 		return -EAGAIN;
 
-	plat_priv = pci_priv->plat_priv;
-	if (!plat_priv)
-		return -EAGAIN;
-
 	if (pci_priv->pci_link_down_ind) {
 		cnss_pr_dbg("PCI link down recovery is in progress!\n");
 		return -EAGAIN;
@@ -571,7 +1073,7 @@
 
 	cnss_pr_dbg("Runtime resume start\n");
 
-	driver_ops = plat_priv->driver_ops;
+	driver_ops = pci_priv->driver_ops;
 	if (driver_ops && driver_ops->runtime_ops &&
 	    driver_ops->runtime_ops->runtime_resume)
 		ret = driver_ops->runtime_ops->runtime_resume(pci_dev);
@@ -592,18 +1094,7 @@
 
 int cnss_wlan_pm_control(struct device *dev, bool vote)
 {
-	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
-	struct cnss_pci_data *pci_priv;
-	struct pci_dev *pci_dev;
-
-	if (!plat_priv)
-		return -ENODEV;
-
-	pci_priv = plat_priv->bus_priv;
-	if (!pci_priv)
-		return -ENODEV;
-
-	pci_dev = pci_priv->pci_dev;
+	struct pci_dev *pci_dev = to_pci_dev(dev);
 
 	return msm_pcie_pm_control(vote ? MSM_PCIE_DISABLE_PC :
 				   MSM_PCIE_ENABLE_PC,
@@ -615,19 +1106,17 @@
 int cnss_auto_suspend(struct device *dev)
 {
 	int ret = 0;
-	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
-	struct pci_dev *pci_dev;
-	struct cnss_pci_data *pci_priv;
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+	struct cnss_plat_data *plat_priv;
 	struct cnss_bus_bw_info *bus_bw_info;
 
-	if (!plat_priv)
-		return -ENODEV;
-
-	pci_priv = plat_priv->bus_priv;
 	if (!pci_priv)
 		return -ENODEV;
 
-	pci_dev = pci_priv->pci_dev;
+	plat_priv = pci_priv->plat_priv;
+	if (!plat_priv)
+		return -ENODEV;
 
 	if (pci_priv->pci_link_state) {
 		if (cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_SUSPEND)) {
@@ -673,19 +1162,18 @@
 int cnss_auto_resume(struct device *dev)
 {
 	int ret = 0;
-	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
-	struct pci_dev *pci_dev;
-	struct cnss_pci_data *pci_priv;
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+	struct cnss_plat_data *plat_priv;
 	struct cnss_bus_bw_info *bus_bw_info;
 
-	if (!plat_priv)
-		return -ENODEV;
-
-	pci_priv = plat_priv->bus_priv;
 	if (!pci_priv)
 		return -ENODEV;
 
-	pci_dev = pci_priv->pci_dev;
+	plat_priv = pci_priv->plat_priv;
+	if (!plat_priv)
+		return -ENODEV;
+
 	if (!pci_priv->pci_link_state) {
 		cnss_pr_dbg("Resuming PCI link\n");
 		if (cnss_set_pci_link(pci_priv, PCI_LINK_UP)) {
@@ -732,18 +1220,21 @@
 int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv)
 {
 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
-	struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem;
+	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
+	int i;
 
-	if (!fw_mem->va && fw_mem->size) {
-		fw_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
-						fw_mem->size, &fw_mem->pa,
-						GFP_KERNEL);
-		if (!fw_mem->va) {
-			cnss_pr_err("Failed to allocate memory for FW, size: 0x%zx\n",
-				    fw_mem->size);
-			fw_mem->size = 0;
+	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
+		if (!fw_mem[i].va && fw_mem[i].size) {
+			fw_mem[i].va =
+				dma_alloc_coherent(&pci_priv->pci_dev->dev,
+						   fw_mem[i].size,
+						   &fw_mem[i].pa, GFP_KERNEL);
+			if (!fw_mem[i].va) {
+				cnss_pr_err("Failed to allocate memory for FW, size: 0x%zx, type: %u\n",
+					    fw_mem[i].size, fw_mem[i].type);
 
-			return -ENOMEM;
+				return -ENOMEM;
+			}
 		}
 	}
 
@@ -753,17 +1244,25 @@
 static void cnss_pci_free_fw_mem(struct cnss_pci_data *pci_priv)
 {
 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
-	struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem;
+	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
+	int i;
 
-	if (fw_mem->va && fw_mem->size) {
-		cnss_pr_dbg("Freeing memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx\n",
-			    fw_mem->va, &fw_mem->pa, fw_mem->size);
-		dma_free_coherent(&pci_priv->pci_dev->dev, fw_mem->size,
-				  fw_mem->va, fw_mem->pa);
-		fw_mem->va = NULL;
-		fw_mem->pa = 0;
-		fw_mem->size = 0;
+	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
+		if (fw_mem[i].va && fw_mem[i].size) {
+			cnss_pr_dbg("Freeing memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n",
+				    fw_mem[i].va, &fw_mem[i].pa,
+				    fw_mem[i].size, fw_mem[i].type);
+			dma_free_coherent(&pci_priv->pci_dev->dev,
+					  fw_mem[i].size, fw_mem[i].va,
+					  fw_mem[i].pa);
+			fw_mem[i].va = NULL;
+			fw_mem[i].pa = 0;
+			fw_mem[i].size = 0;
+			fw_mem[i].type = 0;
+		}
 	}
+
+	plat_priv->fw_mem_seg_len = 0;
 }
 
 int cnss_pci_load_m3(struct cnss_pci_data *pci_priv)
@@ -819,18 +1318,59 @@
 	m3_mem->size = 0;
 }
 
-int cnss_pci_get_bar_info(struct cnss_pci_data *pci_priv, void __iomem **va,
-			  phys_addr_t *pa)
+int cnss_pci_force_fw_assert_hdlr(struct cnss_pci_data *pci_priv)
 {
+	int ret;
+	struct cnss_plat_data *plat_priv;
+
 	if (!pci_priv)
 		return -ENODEV;
 
-	*va = pci_priv->bar;
-	*pa = pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM);
+	plat_priv = pci_priv->plat_priv;
+	if (!plat_priv)
+		return -ENODEV;
+
+	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_TRIGGER_RDDM);
+	if (ret) {
+		cnss_pr_err("Failed to trigger RDDM, err = %d\n", ret);
+		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
+				       CNSS_REASON_DEFAULT);
+		return 0;
+	}
+
+	if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) {
+		mod_timer(&plat_priv->fw_boot_timer,
+			  jiffies + msecs_to_jiffies(FW_ASSERT_TIMEOUT));
+	}
 
 	return 0;
 }
 
+void cnss_pci_fw_boot_timeout_hdlr(struct cnss_pci_data *pci_priv)
+{
+	if (!pci_priv)
+		return;
+
+	cnss_pr_err("Timeout waiting for FW ready indication\n");
+
+	cnss_schedule_recovery(&pci_priv->pci_dev->dev,
+			       CNSS_REASON_TIMEOUT);
+}
+
+int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info)
+{
+	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
+
+	if (!pci_priv)
+		return -ENODEV;
+
+	info->va = pci_priv->bar;
+	info->pa = pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM);
+
+	return 0;
+}
+EXPORT_SYMBOL(cnss_get_soc_info);
+
 static struct cnss_msi_config msi_config = {
 	.total_vectors = 32,
 	.total_users = 4,
@@ -916,7 +1456,7 @@
 				 int *num_vectors, u32 *user_base_data,
 				 u32 *base_vector)
 {
-	struct cnss_pci_data *pci_priv = dev_get_drvdata(dev);
+	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
 	struct cnss_msi_config *msi_config;
 	int idx;
 
@@ -975,6 +1515,25 @@
 }
 EXPORT_SYMBOL(cnss_get_msi_address);
 
+u32 cnss_pci_get_wake_msi(struct cnss_pci_data *pci_priv)
+{
+	int ret, num_vectors;
+	u32 user_base_data, base_vector;
+
+	if (!pci_priv)
+		return -ENODEV;
+
+	ret = cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
+					   WAKE_MSI_NAME, &num_vectors,
+					   &user_base_data, &base_vector);
+	if (ret) {
+		cnss_pr_err("WAKE MSI is not valid\n");
+		return 0;
+	}
+
+	return user_base_data;
+}
+
 static int cnss_pci_enable_bus(struct cnss_pci_data *pci_priv)
 {
 	int ret = 0;
@@ -1107,7 +1666,7 @@
 	struct cnss_dump_seg *dump_seg =
 		plat_priv->ramdump_info_v2.dump_data_vaddr;
 	struct image_info *fw_image, *rddm_image;
-	struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem;
+	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
 	int ret, i;
 
 	ret = mhi_download_rddm_img(pci_priv->mhi_ctrl, in_panic);
@@ -1152,18 +1711,20 @@
 
 	dump_data->nentries += rddm_image->entries;
 
-	if (fw_mem->pa && fw_mem->va && fw_mem->size) {
-		cnss_pr_dbg("Collect remote heap dump segment, nentries 1\n");
+	cnss_pr_dbg("Collect remote heap dump segment\n");
 
-		dump_seg->address = fw_mem->pa;
-		dump_seg->v_address = fw_mem->va;
-		dump_seg->size = fw_mem->size;
-		dump_seg->type = CNSS_FW_REMOTE_HEAP;
-		cnss_pr_dbg("seg-0: address 0x%lx, v_address %pK, size 0x%lx\n",
-			    dump_seg->address, dump_seg->v_address,
-			    dump_seg->size);
-		dump_seg++;
-		dump_data->nentries++;
+	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
+		if (fw_mem[i].type == QMI_WLFW_MEM_TYPE_DDR_V01) {
+			dump_seg->address = fw_mem[i].pa;
+			dump_seg->v_address = fw_mem[i].va;
+			dump_seg->size = fw_mem[i].size;
+			dump_seg->type = CNSS_FW_REMOTE_HEAP;
+			cnss_pr_dbg("seg-%d: address 0x%lx, v_address %pK, size 0x%lx\n",
+				    i, dump_seg->address, dump_seg->v_address,
+				    dump_seg->size);
+			dump_seg++;
+			dump_data->nentries++;
+		}
 	}
 
 	if (dump_data->nentries > 0)
@@ -1214,9 +1775,9 @@
 
 	cnss_pr_dbg("MHI status cb is called with reason %d\n", reason);
 
-	if (plat_priv->driver_ops && plat_priv->driver_ops->update_status)
-		plat_priv->driver_ops->update_status(pci_priv->pci_dev,
-						     CNSS_FW_DOWN);
+	if (pci_priv->driver_ops && pci_priv->driver_ops->update_status)
+		pci_priv->driver_ops->update_status(pci_priv->pci_dev,
+						    CNSS_FW_DOWN);
 
 	switch (reason) {
 	case MHI_CB_EE_RDDM:
@@ -1286,6 +1847,8 @@
 	mhi_ctrl->bus = pci_dev->bus->number;
 	mhi_ctrl->slot = PCI_SLOT(pci_dev->devfn);
 
+	mhi_ctrl->fw_image = plat_priv->firmware_name;
+
 	mhi_ctrl->regs = pci_priv->bar;
 	cnss_pr_dbg("BAR starts at %pa\n",
 		    &pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM));
@@ -1364,7 +1927,10 @@
 			return 0;
 		break;
 	case CNSS_MHI_TRIGGER_RDDM:
-		return 0;
+		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
+		    !test_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state))
+			return 0;
+		break;
 	default:
 		cnss_pr_err("Unhandled MHI state: %s(%d)\n",
 			    cnss_mhi_state_to_str(mhi_state), mhi_state);
@@ -1557,6 +2123,8 @@
 	cnss_set_pci_priv(pci_dev, pci_priv);
 	plat_priv->device_id = pci_dev->device;
 	plat_priv->bus_priv = pci_priv;
+	snprintf(plat_priv->firmware_name, sizeof(plat_priv->firmware_name),
+		 DEFAULT_FW_FILE_NAME);
 
 	ret = cnss_register_subsys(plat_priv);
 	if (ret)
@@ -1611,6 +2179,8 @@
 		break;
 	case QCA6290_EMULATION_DEVICE_ID:
 	case QCA6290_DEVICE_ID:
+	case QCA6390_EMULATION_DEVICE_ID:
+	case QCA6390_DEVICE_ID:
 		ret = cnss_pci_enable_msi(pci_priv);
 		if (ret)
 			goto disable_bus;
@@ -1663,6 +2233,8 @@
 	switch (pci_dev->device) {
 	case QCA6290_EMULATION_DEVICE_ID:
 	case QCA6290_DEVICE_ID:
+	case QCA6390_EMULATION_DEVICE_ID:
+	case QCA6390_DEVICE_ID:
 		cnss_pci_unregister_mhi(pci_priv);
 		cnss_pci_disable_msi(pci_priv);
 		break;
@@ -1686,6 +2258,9 @@
 	{ QCA6290_EMULATION_VENDOR_ID, QCA6290_EMULATION_DEVICE_ID,
 	  PCI_ANY_ID, PCI_ANY_ID },
 	{ QCA6290_VENDOR_ID, QCA6290_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
+	{ QCA6390_VENDOR_ID, QCA6390_EMULATION_DEVICE_ID, PCI_ANY_ID,
+	  PCI_ANY_ID },
+	{ QCA6390_VENDOR_ID, QCA6390_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
 	{ 0 }
 };
 MODULE_DEVICE_TABLE(pci, cnss_pci_id_table);
diff --git a/drivers/net/wireless/cnss2/pci.h b/drivers/net/wireless/cnss2/pci.h
index 4b2f6bc..79f66ac 100644
--- a/drivers/net/wireless/cnss2/pci.h
+++ b/drivers/net/wireless/cnss2/pci.h
@@ -21,16 +21,6 @@
 
 #include "main.h"
 
-#define QCA6174_VENDOR_ID		0x168C
-#define QCA6174_DEVICE_ID		0x003E
-#define QCA6174_REV_ID_OFFSET		0x08
-#define QCA6174_REV3_VERSION		0x5020000
-#define QCA6174_REV3_2_VERSION		0x5030000
-#define QCA6290_VENDOR_ID		0x17CB
-#define QCA6290_DEVICE_ID		0x1100
-#define QCA6290_EMULATION_VENDOR_ID	0x168C
-#define QCA6290_EMULATION_DEVICE_ID	0xABCD
-
 enum cnss_mhi_state {
 	CNSS_MHI_INIT,
 	CNSS_MHI_DEINIT,
@@ -61,6 +51,7 @@
 	const struct pci_device_id *pci_device_id;
 	u32 device_id;
 	u16 revision_id;
+	struct cnss_wlan_driver *driver_ops;
 	bool pci_link_state;
 	bool pci_link_down_ind;
 	struct pci_saved_state *saved_state;
@@ -130,8 +121,6 @@
 void cnss_pci_deinit(struct cnss_plat_data *plat_priv);
 int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv);
 int cnss_pci_load_m3(struct cnss_pci_data *pci_priv);
-int cnss_pci_get_bar_info(struct cnss_pci_data *pci_priv, void __iomem **va,
-			  phys_addr_t *pa);
 int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv,
 			   enum cnss_mhi_state state);
 int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv);
@@ -139,5 +128,18 @@
 void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic);
 void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv);
 int cnss_pm_request_resume(struct cnss_pci_data *pci_priv);
+u32 cnss_pci_get_wake_msi(struct cnss_pci_data *pci_priv);
+int cnss_pci_force_fw_assert_hdlr(struct cnss_pci_data *pci_priv);
+void cnss_pci_fw_boot_timeout_hdlr(struct cnss_pci_data *pci_priv);
+int cnss_pci_call_driver_probe(struct cnss_pci_data *pci_priv);
+int cnss_pci_call_driver_remove(struct cnss_pci_data *pci_priv);
+int cnss_pci_dev_powerup(struct cnss_pci_data *pci_priv);
+int cnss_pci_dev_shutdown(struct cnss_pci_data *pci_priv);
+int cnss_pci_dev_crash_shutdown(struct cnss_pci_data *pci_priv);
+int cnss_pci_dev_ramdump(struct cnss_pci_data *pci_priv);
+int cnss_pci_register_driver_hdlr(struct cnss_pci_data *pci_priv, void *data);
+int cnss_pci_unregister_driver_hdlr(struct cnss_pci_data *pci_priv);
+int cnss_pci_call_driver_modem_status(struct cnss_pci_data *pci_priv,
+				      int modem_current_status);
 
 #endif /* _CNSS_PCI_H */
diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c
index f4344ae..669816c 100644
--- a/drivers/net/wireless/cnss2/qmi.c
+++ b/drivers/net/wireless/cnss2/qmi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -15,8 +15,9 @@
 #include <linux/qmi_encdec.h>
 #include <soc/qcom/msm_qmi_interface.h>
 
-#include "main.h"
+#include "bus.h"
 #include "debug.h"
+#include "main.h"
 #include "qmi.h"
 
 #define WLFW_SERVICE_INS_ID_V01		1
@@ -159,17 +160,29 @@
 	memset(&req, 0, sizeof(req));
 	memset(&resp, 0, sizeof(resp));
 
-	req.daemon_support_valid = 1;
-	req.daemon_support = daemon_support;
+	req.num_clients_valid = 1;
+	req.num_clients = daemon_support ? 2 : 1;
+	cnss_pr_dbg("Number of clients is %d\n", req.num_clients);
 
-	cnss_pr_dbg("daemon_support is %d\n", req.daemon_support);
-
-	req.wake_msi = cnss_get_wake_msi(plat_priv);
+	req.wake_msi = cnss_bus_get_wake_irq(plat_priv);
 	if (req.wake_msi) {
 		cnss_pr_dbg("WAKE MSI base data is %d\n", req.wake_msi);
 		req.wake_msi_valid = 1;
 	}
 
+	req.bdf_support_valid = 1;
+	req.bdf_support = 1;
+
+	req.m3_support_valid = 1;
+	req.m3_support = 1;
+
+	req.m3_cache_support_valid = 1;
+	req.m3_cache_support = 1;
+
+	req.cal_done_valid = 1;
+	req.cal_done = plat_priv->cal_done;
+	cnss_pr_dbg("Calibration done is %d\n", plat_priv->cal_done);
+
 	req_desc.max_msg_len = WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN;
 	req_desc.msg_id = QMI_WLFW_HOST_CAP_REQ_V01;
 	req_desc.ei_array = wlfw_host_cap_req_msg_v01_ei;
@@ -221,8 +234,8 @@
 	req.request_mem_enable = 1;
 	req.fw_mem_ready_enable_valid = 1;
 	req.fw_mem_ready_enable = 1;
-	req.cold_boot_cal_done_enable_valid = 1;
-	req.cold_boot_cal_done_enable = 1;
+	req.fw_init_done_enable_valid = 1;
+	req.fw_init_done_enable = 1;
 	req.pin_connect_result_enable_valid = 1;
 	req.pin_connect_result_enable = 1;
 
@@ -260,27 +273,48 @@
 					  void *msg, unsigned int msg_len)
 {
 	struct msg_desc ind_desc;
-	struct wlfw_request_mem_ind_msg_v01 ind_msg;
-	struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem;
-	int ret = 0;
+	struct wlfw_request_mem_ind_msg_v01 *ind_msg;
+	int ret = 0, i;
+
+	ind_msg = kzalloc(sizeof(*ind_msg), GFP_KERNEL);
+	if (!ind_msg)
+		return -ENOMEM;
 
 	ind_desc.msg_id = QMI_WLFW_REQUEST_MEM_IND_V01;
 	ind_desc.max_msg_len = WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN;
 	ind_desc.ei_array = wlfw_request_mem_ind_msg_v01_ei;
 
-	ret = qmi_kernel_decode(&ind_desc, &ind_msg, msg, msg_len);
+	ret = qmi_kernel_decode(&ind_desc, ind_msg, msg, msg_len);
 	if (ret < 0) {
 		cnss_pr_err("Failed to decode request memory indication, msg_len: %u, err = %d\n",
 			    ret, msg_len);
-		return ret;
+		goto out;
 	}
 
-	fw_mem->size = ind_msg.size;
+	if (ind_msg->mem_seg_len == 0 ||
+	    ind_msg->mem_seg_len > QMI_WLFW_MAX_NUM_MEM_SEG_V01) {
+		cnss_pr_err("Invalid memory segment length: %u\n",
+			    ind_msg->mem_seg_len);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	cnss_pr_dbg("FW memory segment count is %u\n", ind_msg->mem_seg_len);
+	plat_priv->fw_mem_seg_len = ind_msg->mem_seg_len;
+	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
+		plat_priv->fw_mem[i].type = ind_msg->mem_seg[i].type;
+		plat_priv->fw_mem[i].size = ind_msg->mem_seg[i].size;
+	}
 
 	cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_REQUEST_MEM,
 			       0, NULL);
 
+	kfree(ind_msg);
 	return 0;
+
+out:
+	kfree(ind_msg);
+	return ret;
 }
 
 static int cnss_qmi_pin_result_ind_hdlr(struct cnss_plat_data *plat_priv,
@@ -317,30 +351,49 @@
 
 int cnss_wlfw_respond_mem_send_sync(struct cnss_plat_data *plat_priv)
 {
-	struct wlfw_respond_mem_req_msg_v01 req;
-	struct wlfw_respond_mem_resp_msg_v01 resp;
+	struct wlfw_respond_mem_req_msg_v01 *req;
+	struct wlfw_respond_mem_resp_msg_v01 *resp;
 	struct msg_desc req_desc, resp_desc;
-	struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem;
-	int ret = 0;
+	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
+	int ret = 0, i;
 
 	cnss_pr_dbg("Sending respond memory message, state: 0x%lx\n",
 		    plat_priv->driver_state);
 
-	if (!fw_mem->pa || !fw_mem->size) {
-		cnss_pr_err("Memory for FW is not available!\n");
-		ret = -ENOMEM;
-		goto out;
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		kfree(req);
+		return -ENOMEM;
+	}
+
+	req->mem_seg_len = plat_priv->fw_mem_seg_len;
+	for (i = 0; i < req->mem_seg_len; i++) {
+		if (!fw_mem[i].pa || !fw_mem[i].size) {
+			if (fw_mem[i].type == 0) {
+				cnss_pr_err("Invalid memory for FW type, segment = %d\n",
+					    i);
+				ret = -EINVAL;
+				goto out;
+			}
+			cnss_pr_err("Memory for FW is not available for type: %u\n",
+				    fw_mem[i].type);
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		cnss_pr_dbg("Memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n",
+			    fw_mem[i].va, &fw_mem[i].pa,
+			    fw_mem[i].size, fw_mem[i].type);
+
+		req->mem_seg[i].addr = fw_mem[i].pa;
+		req->mem_seg[i].size = fw_mem[i].size;
+		req->mem_seg[i].type = fw_mem[i].type;
 	}
 
-	cnss_pr_dbg("Memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx\n",
-		    fw_mem->va, &fw_mem->pa, fw_mem->size);
-
-	memset(&req, 0, sizeof(req));
-	memset(&resp, 0, sizeof(resp));
-
-	req.addr = fw_mem->pa;
-	req.size = fw_mem->size;
-
 	req_desc.max_msg_len = WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN;
 	req_desc.msg_id = QMI_WLFW_RESPOND_MEM_REQ_V01;
 	req_desc.ei_array = wlfw_respond_mem_req_msg_v01_ei;
@@ -349,8 +400,8 @@
 	resp_desc.msg_id = QMI_WLFW_RESPOND_MEM_RESP_V01;
 	resp_desc.ei_array = wlfw_respond_mem_resp_msg_v01_ei;
 
-	ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req,
-				sizeof(req), &resp_desc, &resp, sizeof(resp),
+	ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, req,
+				sizeof(*req), &resp_desc, resp, sizeof(*resp),
 				QMI_WLFW_TIMEOUT_MS);
 	if (ret < 0) {
 		cnss_pr_err("Failed to send respond memory request, err = %d\n",
@@ -358,16 +409,21 @@
 		goto out;
 	}
 
-	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
 		cnss_pr_err("Respond memory request failed, result: %d, err: %d\n",
-			    resp.resp.result, resp.resp.error);
-		ret = resp.resp.result;
+			    resp->resp.result, resp->resp.error);
+		ret = resp->resp.result;
 		goto out;
 	}
 
+	kfree(req);
+	kfree(resp);
 	return 0;
+
 out:
 	CNSS_ASSERT(0);
+	kfree(req);
+	kfree(resp);
 	return ret;
 }
 
@@ -459,17 +515,17 @@
 			 BDF_FILE_NAME_PREFIX "%02x",
 			 plat_priv->board_info.board_id);
 
+	if (bdf_bypass) {
+		cnss_pr_info("bdf_bypass is enabled, sending dummy BDF\n");
+		temp = filename;
+		remaining = MAX_BDF_FILE_NAME;
+		goto bypass_bdf;
+	}
+
 	ret = request_firmware(&fw_entry, filename, &plat_priv->plat_dev->dev);
 	if (ret) {
 		cnss_pr_err("Failed to load BDF: %s\n", filename);
-		if (bdf_bypass) {
-			cnss_pr_info("bdf_bypass is enabled, sending dummy BDF\n");
-			temp = filename;
-			remaining = MAX_BDF_FILE_NAME;
-			goto bypass_bdf;
-		} else {
-			goto err_req_fw;
-		}
+		goto err_req_fw;
 	}
 
 	temp = fw_entry->data;
@@ -908,12 +964,12 @@
 				       CNSS_DRIVER_EVENT_FW_MEM_READY,
 				       0, NULL);
 		break;
-	case QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01:
+	case QMI_WLFW_FW_READY_IND_V01:
 		cnss_driver_event_post(plat_priv,
 				       CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
 				       0, NULL);
 		break;
-	case QMI_WLFW_FW_READY_IND_V01:
+	case QMI_WLFW_FW_INIT_DONE_IND_V01:
 		cnss_driver_event_post(plat_priv,
 				       CNSS_DRIVER_EVENT_FW_READY,
 				       0, NULL);
@@ -974,11 +1030,11 @@
 	cnss_pr_info("QMI WLFW service connected, state: 0x%lx\n",
 		     plat_priv->driver_state);
 
-	ret = cnss_wlfw_host_cap_send_sync(plat_priv);
+	ret = cnss_wlfw_ind_register_send_sync(plat_priv);
 	if (ret < 0)
 		goto out;
 
-	ret = cnss_wlfw_ind_register_send_sync(plat_priv);
+	ret = cnss_wlfw_host_cap_send_sync(plat_priv);
 	if (ret < 0)
 		goto out;
 
diff --git a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c
index 7d6a771..bbf707b 100644
--- a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c
+++ b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -62,7 +62,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -97,7 +97,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -123,7 +123,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -140,7 +140,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -175,7 +175,131 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info wlfw_mem_cfg_s_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u64),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_mem_cfg_s_v01,
+					   offset),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_mem_cfg_s_v01,
+					   size),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_mem_cfg_s_v01,
+					   secure_flag),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info wlfw_mem_seg_s_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_mem_seg_s_v01,
+					   size),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_mem_type_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_mem_seg_s_v01,
+					   type),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_mem_seg_s_v01,
+					   mem_cfg_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_WLFW_MAX_NUM_MEM_CFG_V01,
+		.elem_size      = sizeof(struct wlfw_mem_cfg_s_v01),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_mem_seg_s_v01,
+					   mem_cfg),
+		.ei_array      = wlfw_mem_cfg_s_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info wlfw_mem_seg_resp_s_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u64),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_mem_seg_resp_s_v01,
+					   addr),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_mem_seg_resp_s_v01,
+					   size),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_mem_type_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_mem_seg_resp_s_v01,
+					   type),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_mem_seg_resp_s_v01,
+					   restore),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -201,7 +325,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -218,7 +342,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -235,7 +359,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -261,7 +385,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -418,7 +542,7 @@
 		.is_array       = NO_ARRAY,
 		.tlv_type       = 0x18,
 		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
-					   cold_boot_cal_done_enable_valid),
+					   fw_init_done_enable_valid),
 	},
 	{
 		.data_type      = QMI_UNSIGNED_1_BYTE,
@@ -427,7 +551,7 @@
 		.is_array       = NO_ARRAY,
 		.tlv_type       = 0x18,
 		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
-					   cold_boot_cal_done_enable),
+					   fw_init_done_enable),
 	},
 	{
 		.data_type      = QMI_OPT_FLAG,
@@ -448,9 +572,45 @@
 					   rejuvenate_enable),
 	},
 	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1A,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   xo_cal_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1A,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   xo_cal_enable),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1B,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   cal_done_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1B,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   cal_done_enable),
+	},
+	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -489,7 +649,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -497,7 +657,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -505,7 +665,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -573,7 +733,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -608,7 +768,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -626,7 +786,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -764,7 +924,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -782,7 +942,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -790,7 +950,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -920,7 +1080,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -1054,7 +1214,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -1073,7 +1233,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -1117,7 +1277,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -1135,7 +1295,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -1153,7 +1313,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -1269,7 +1429,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -1288,7 +1448,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -1316,7 +1476,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -1342,7 +1502,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -1459,7 +1619,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -1485,7 +1645,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -1522,7 +1682,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -1530,7 +1690,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -1548,7 +1708,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -1574,7 +1734,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -1592,7 +1752,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -1627,7 +1787,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -1676,7 +1836,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -1724,7 +1884,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -1743,7 +1903,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -1760,7 +1920,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -1778,7 +1938,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -1804,7 +1964,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -1822,7 +1982,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -1834,16 +1994,16 @@
 		.is_array       = NO_ARRAY,
 		.tlv_type       = 0x10,
 		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
-					   daemon_support_valid),
+					   num_clients_valid),
 	},
 	{
-		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.data_type      = QMI_UNSIGNED_4_BYTE,
 		.elem_len       = 1,
-		.elem_size      = sizeof(u8),
+		.elem_size      = sizeof(u32),
 		.is_array       = NO_ARRAY,
 		.tlv_type       = 0x10,
 		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
-					   daemon_support),
+					   num_clients),
 	},
 	{
 		.data_type      = QMI_OPT_FLAG,
@@ -1864,9 +2024,216 @@
 					   wake_msi),
 	},
 	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   gpios_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   gpios_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = QMI_WLFW_MAX_NUM_GPIO_V01,
+		.elem_size      = sizeof(u32),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   gpios),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   nm_modem_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   nm_modem),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   bdf_support_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   bdf_support),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   bdf_cache_support_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   bdf_cache_support),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   m3_support_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   m3_support),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   m3_cache_support_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   m3_cache_support),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x18,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   cal_filesys_support_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x18,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   cal_filesys_support),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x19,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   cal_cache_support_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x19,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   cal_cache_support),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1A,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   cal_done_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1A,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   cal_done),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1B,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   mem_bucket_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1B,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   mem_bucket),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1C,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   mem_cfg_mode_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1C,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   mem_cfg_mode),
+	},
+	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -1884,50 +2251,61 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
 struct elem_info wlfw_request_mem_ind_msg_v01_ei[] = {
 	{
-		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.data_type      = QMI_DATA_LEN,
 		.elem_len       = 1,
-		.elem_size      = sizeof(u32),
+		.elem_size      = sizeof(u8),
 		.is_array       = NO_ARRAY,
 		.tlv_type       = 0x01,
 		.offset         = offsetof(struct wlfw_request_mem_ind_msg_v01,
-					   size),
+					   mem_seg_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_WLFW_MAX_NUM_MEM_SEG_V01,
+		.elem_size      = sizeof(struct wlfw_mem_seg_s_v01),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_request_mem_ind_msg_v01,
+					   mem_seg),
+		.ei_array      = wlfw_mem_seg_s_v01_ei,
 	},
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
 struct elem_info wlfw_respond_mem_req_msg_v01_ei[] = {
 	{
-		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.data_type      = QMI_DATA_LEN,
 		.elem_len       = 1,
-		.elem_size      = sizeof(u64),
+		.elem_size      = sizeof(u8),
 		.is_array       = NO_ARRAY,
 		.tlv_type       = 0x01,
 		.offset         = offsetof(struct wlfw_respond_mem_req_msg_v01,
-					   addr),
+					   mem_seg_len),
 	},
 	{
-		.data_type      = QMI_UNSIGNED_4_BYTE,
-		.elem_len       = 1,
-		.elem_size      = sizeof(u32),
-		.is_array       = NO_ARRAY,
-		.tlv_type       = 0x02,
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_WLFW_MAX_NUM_MEM_SEG_V01,
+		.elem_size      = sizeof(struct wlfw_mem_seg_resp_s_v01),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x01,
 		.offset         = offsetof(struct wlfw_respond_mem_req_msg_v01,
-					   size),
+					   mem_seg),
+		.ei_array      = wlfw_mem_seg_resp_s_v01_ei,
 	},
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -1945,7 +2323,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -1953,15 +2331,15 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
-struct elem_info wlfw_cold_boot_cal_done_ind_msg_v01_ei[] = {
+struct elem_info wlfw_fw_init_done_ind_msg_v01_ei[] = {
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -2041,7 +2419,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -2049,7 +2427,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -2068,7 +2446,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -2096,7 +2474,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -2155,7 +2533,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -2181,7 +2559,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -2199,7 +2577,7 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
 
@@ -2216,6 +2594,14 @@
 	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
-		.is_array       = QMI_COMMON_TLV_TYPE,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_cal_done_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
 	},
 };
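
The hunks above repeatedly correct a copy-paste slip in the QMI_EOTI terminators (a duplicated .is_array initializer where .tlv_type was meant) and extend the element-info tables that drive TLV encode/decode, including the new cal_done_enable pair. A minimal, self-contained sketch of the same table-driven idea follows; the struct, field names and walker are simplified stand-ins, not the kernel's qmi_encdec implementation.

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Simplified stand-ins for the kernel's QMI descriptor types. */
    enum data_type { QMI_OPT_FLAG, QMI_UNSIGNED_1_BYTE, QMI_EOTI };
    #define QMI_COMMON_TLV_TYPE 0

    struct msg {                    /* illustrative message with one optional field */
        uint8_t cal_done_enable_valid;
        uint8_t cal_done_enable;
    };

    struct elem_info {
        enum data_type data_type;
        uint32_t elem_size;
        uint8_t tlv_type;           /* which TLV this element belongs to */
        size_t offset;              /* where it lives inside the message struct */
    };

    /* An optional field is described as a valid-flag/value pair sharing a TLV
     * id; the table ends with a QMI_EOTI entry whose tlv_type is
     * QMI_COMMON_TLV_TYPE -- the field the hunks above fix. */
    static const struct elem_info msg_ei[] = {
        { QMI_OPT_FLAG,        sizeof(uint8_t), 0x1B, offsetof(struct msg, cal_done_enable_valid) },
        { QMI_UNSIGNED_1_BYTE, sizeof(uint8_t), 0x1B, offsetof(struct msg, cal_done_enable) },
        { QMI_EOTI,            0,               QMI_COMMON_TLV_TYPE, 0 },
    };

    int main(void)
    {
        /* Trivial walker: count descriptors until the EOTI sentinel. */
        int n = 0;
        while (msg_ei[n].data_type != QMI_EOTI)
            n++;
        printf("%d element descriptors before EOTI\n", n);
        return 0;
    }
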
diff --git a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h
index 9b56eb0..00a873d 100644
--- a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h
+++ b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -23,10 +23,12 @@
 #define QMI_WLFW_BDF_DOWNLOAD_REQ_V01 0x0025
 #define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037
 #define QMI_WLFW_INITIATE_CAL_UPDATE_IND_V01 0x002A
+#define QMI_WLFW_CAL_DONE_IND_V01 0x003E
 #define QMI_WLFW_HOST_CAP_REQ_V01 0x0034
 #define QMI_WLFW_DYNAMIC_FEATURE_MASK_RESP_V01 0x003B
 #define QMI_WLFW_M3_INFO_REQ_V01 0x003C
 #define QMI_WLFW_CAP_REQ_V01 0x0024
+#define QMI_WLFW_FW_INIT_DONE_IND_V01 0x0038
 #define QMI_WLFW_CAL_REPORT_REQ_V01 0x0026
 #define QMI_WLFW_M3_INFO_RESP_V01 0x003C
 #define QMI_WLFW_CAL_UPDATE_RESP_V01 0x0029
@@ -42,7 +44,6 @@
 #define QMI_WLFW_WLAN_MODE_REQ_V01 0x0022
 #define QMI_WLFW_IND_REGISTER_REQ_V01 0x0020
 #define QMI_WLFW_WLAN_CFG_RESP_V01 0x0023
-#define QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01 0x0038
 #define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
 #define QMI_WLFW_REJUVENATE_IND_V01 0x0039
 #define QMI_WLFW_DYNAMIC_FEATURE_MASK_REQ_V01 0x003B
@@ -72,13 +73,16 @@
 #define QMI_WLFW_IND_REGISTER_RESP_V01 0x0020
 
 #define QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01 2
+#define QMI_WLFW_MAX_NUM_MEM_SEG_V01 32
 #define QMI_WLFW_MAX_NUM_CAL_V01 5
 #define QMI_WLFW_MAX_DATA_SIZE_V01 6144
 #define QMI_WLFW_FUNCTION_NAME_LEN_V01 128
 #define QMI_WLFW_MAX_NUM_CE_V01 12
 #define QMI_WLFW_MAX_TIMESTAMP_LEN_V01 32
 #define QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01 6144
+#define QMI_WLFW_MAX_NUM_GPIO_V01 32
 #define QMI_WLFW_MAX_BUILD_ID_LEN_V01 128
+#define QMI_WLFW_MAX_NUM_MEM_CFG_V01 2
 #define QMI_WLFW_MAX_STR_LEN_V01 16
 #define QMI_WLFW_MAX_NUM_SHADOW_REG_V01 24
 #define QMI_WLFW_MAC_ADDR_SIZE_V01 6
@@ -117,6 +121,17 @@
 	WLFW_PIPEDIR_ENUM_MAX_VAL_V01 = INT_MAX,
 };
 
+enum wlfw_mem_type_enum_v01 {
+	WLFW_MEM_TYPE_ENUM_MIN_VAL_V01 = INT_MIN,
+	QMI_WLFW_MEM_TYPE_MSA_V01 = 0,
+	QMI_WLFW_MEM_TYPE_DDR_V01 = 1,
+	QMI_WLFW_MEM_BDF_V01 = 2,
+	QMI_WLFW_MEM_M3_V01 = 3,
+	QMI_WLFW_MEM_CAL_V01 = 4,
+	QMI_WLFW_MEM_DPD_V01 = 5,
+	WLFW_MEM_TYPE_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
 #define QMI_WLFW_CE_ATTR_FLAGS_V01 ((u32)0x00)
 #define QMI_WLFW_CE_ATTR_NO_SNOOP_V01 ((u32)0x01)
 #define QMI_WLFW_CE_ATTR_BYTE_SWAP_DATA_V01 ((u32)0x02)
@@ -128,6 +143,7 @@
 #define QMI_WLFW_FW_READY_V01 ((u64)0x02ULL)
 #define QMI_WLFW_MSA_READY_V01 ((u64)0x04ULL)
 #define QMI_WLFW_FW_MEM_READY_V01 ((u64)0x08ULL)
+#define QMI_WLFW_FW_INIT_DONE_V01 ((u64)0x10ULL)
 
 #define QMI_WLFW_FW_REJUVENATE_V01 ((u64)0x01ULL)
 
@@ -160,6 +176,26 @@
 	u8 secure_flag;
 };
 
+struct wlfw_mem_cfg_s_v01 {
+	u64 offset;
+	u32 size;
+	u8 secure_flag;
+};
+
+struct wlfw_mem_seg_s_v01 {
+	u32 size;
+	enum wlfw_mem_type_enum_v01 type;
+	u32 mem_cfg_len;
+	struct wlfw_mem_cfg_s_v01 mem_cfg[QMI_WLFW_MAX_NUM_MEM_CFG_V01];
+};
+
+struct wlfw_mem_seg_resp_s_v01 {
+	u64 addr;
+	u32 size;
+	enum wlfw_mem_type_enum_v01 type;
+	u8 restore;
+};
+
 struct wlfw_rf_chip_info_s_v01 {
 	u32 chip_id;
 	u32 chip_family;
@@ -195,13 +231,17 @@
 	u8 request_mem_enable;
 	u8 fw_mem_ready_enable_valid;
 	u8 fw_mem_ready_enable;
-	u8 cold_boot_cal_done_enable_valid;
-	u8 cold_boot_cal_done_enable;
+	u8 fw_init_done_enable_valid;
+	u8 fw_init_done_enable;
 	u8 rejuvenate_enable_valid;
 	u32 rejuvenate_enable;
+	u8 xo_cal_enable_valid;
+	u8 xo_cal_enable;
+	u8 cal_done_enable_valid;
+	u8 cal_done_enable;
 };
 
-#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 46
+#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 54
 extern struct elem_info wlfw_ind_register_req_msg_v01_ei[];
 
 struct wlfw_ind_register_resp_msg_v01 {
@@ -533,13 +573,36 @@
 extern struct elem_info wlfw_mac_addr_resp_msg_v01_ei[];
 
 struct wlfw_host_cap_req_msg_v01 {
-	u8 daemon_support_valid;
-	u8 daemon_support;
+	u8 num_clients_valid;
+	u32 num_clients;
 	u8 wake_msi_valid;
 	u32 wake_msi;
+	u8 gpios_valid;
+	u32 gpios_len;
+	u32 gpios[QMI_WLFW_MAX_NUM_GPIO_V01];
+	u8 nm_modem_valid;
+	u8 nm_modem;
+	u8 bdf_support_valid;
+	u8 bdf_support;
+	u8 bdf_cache_support_valid;
+	u8 bdf_cache_support;
+	u8 m3_support_valid;
+	u8 m3_support;
+	u8 m3_cache_support_valid;
+	u8 m3_cache_support;
+	u8 cal_filesys_support_valid;
+	u8 cal_filesys_support;
+	u8 cal_cache_support_valid;
+	u8 cal_cache_support;
+	u8 cal_done_valid;
+	u8 cal_done;
+	u8 mem_bucket_valid;
+	u32 mem_bucket;
+	u8 mem_cfg_mode_valid;
+	u8 mem_cfg_mode;
 };
 
-#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 11
+#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 189
 extern struct elem_info wlfw_host_cap_req_msg_v01_ei[];
 
 struct wlfw_host_cap_resp_msg_v01 {
@@ -550,18 +613,19 @@
 extern struct elem_info wlfw_host_cap_resp_msg_v01_ei[];
 
 struct wlfw_request_mem_ind_msg_v01 {
-	u32 size;
+	u32 mem_seg_len;
+	struct wlfw_mem_seg_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01];
 };
 
-#define WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN 7
+#define WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN 1124
 extern struct elem_info wlfw_request_mem_ind_msg_v01_ei[];
 
 struct wlfw_respond_mem_req_msg_v01 {
-	u64 addr;
-	u32 size;
+	u32 mem_seg_len;
+	struct wlfw_mem_seg_resp_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01];
 };
 
-#define WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN 18
+#define WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN 548
 extern struct elem_info wlfw_respond_mem_req_msg_v01_ei[];
 
 struct wlfw_respond_mem_resp_msg_v01 {
@@ -578,12 +642,12 @@
 #define WLFW_FW_MEM_READY_IND_MSG_V01_MAX_MSG_LEN 0
 extern struct elem_info wlfw_fw_mem_ready_ind_msg_v01_ei[];
 
-struct wlfw_cold_boot_cal_done_ind_msg_v01 {
+struct wlfw_fw_init_done_ind_msg_v01 {
 	char placeholder;
 };
 
-#define WLFW_COLD_BOOT_CAL_DONE_IND_MSG_V01_MAX_MSG_LEN 0
-extern struct elem_info wlfw_cold_boot_cal_done_ind_msg_v01_ei[];
+#define WLFW_FW_INIT_DONE_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_fw_init_done_ind_msg_v01_ei[];
 
 struct wlfw_rejuvenate_ind_msg_v01 {
 	u8 cause_for_rejuvenation_valid;
@@ -654,4 +718,11 @@
 #define WLFW_XO_CAL_IND_MSG_V01_MAX_MSG_LEN 4
 extern struct elem_info wlfw_xo_cal_ind_msg_v01_ei[];
 
+struct wlfw_cal_done_ind_msg_v01 {
+	char placeholder;
+};
+
+#define WLFW_CAL_DONE_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_cal_done_ind_msg_v01_ei[];
+
 #endif
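
The header changes above grow wlfw_host_cap_req_msg_v01 from a single daemon_support pair to a long list of optional capabilities, each expressed as a *_valid flag next to its value. The sketch below shows how a caller would populate such a request under that convention; the trimmed struct and the fill_host_cap() helper are illustrative only and reproduce just two of the real fields.

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    /* Trimmed copy of the optional-field pattern used by the host-cap request. */
    struct host_cap_req {
        uint8_t  num_clients_valid;
        uint32_t num_clients;
        uint8_t  mem_cfg_mode_valid;
        uint8_t  mem_cfg_mode;
    };

    /* Hypothetical helper: an optional TLV is only encoded when its matching
     * *_valid flag is set, so leaving a field zeroed omits it from the wire. */
    static void fill_host_cap(struct host_cap_req *req, uint32_t clients)
    {
        memset(req, 0, sizeof(*req));
        req->num_clients_valid = 1;
        req->num_clients = clients;
        /* mem_cfg_mode left invalid -> not sent */
    }

    int main(void)
    {
        struct host_cap_req req;

        fill_host_cap(&req, 1);
        printf("num_clients=%u (valid=%u), mem_cfg_mode valid=%u\n",
               req.num_clients, req.num_clients_valid, req.mem_cfg_mode_valid);
        return 0;
    }
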
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
index d4b73de..b35adbc 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
@@ -79,8 +79,8 @@
 /* Lowest firmware API version supported */
 #define IWL7260_UCODE_API_MIN	17
 #define IWL7265_UCODE_API_MIN	17
-#define IWL7265D_UCODE_API_MIN	17
-#define IWL3168_UCODE_API_MIN	20
+#define IWL7265D_UCODE_API_MIN	22
+#define IWL3168_UCODE_API_MIN	22
 
 /* NVM versions */
 #define IWL7260_NVM_VERSION		0x0a1d
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index 8d3e53f..20d08dd 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -74,8 +74,8 @@
 #define IWL8265_UCODE_API_MAX	26
 
 /* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN	17
-#define IWL8265_UCODE_API_MIN	20
+#define IWL8000_UCODE_API_MIN	22
+#define IWL8265_UCODE_API_MIN	22
 
 /* NVM versions */
 #define IWL8000_NVM_VERSION		0x0a1d
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 406ef30..da8234b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -369,6 +369,7 @@
 #define MON_DMARB_RD_DATA_ADDR		(0xa03c5c)
 
 #define DBGC_IN_SAMPLE			(0xa03c00)
+#define DBGC_OUT_CTRL			(0xa03c0c)
 
 /* enable the ID buf for read */
 #define WFPM_PS_CTL_CLR			0xA0300C
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
index 700d244..2642d8e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
@@ -914,14 +914,6 @@
 	return 0;
 }
 
-static inline void iwl_mvm_restart_early_start(struct iwl_mvm *mvm)
-{
-	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000)
-		iwl_clear_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
-	else
-		iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 1);
-}
-
 int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
 {
 	u8 *ptr;
@@ -935,10 +927,8 @@
 	/* EARLY START - firmware's configuration is hard coded */
 	if ((!mvm->fw->dbg_conf_tlv[conf_id] ||
 	     !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) &&
-	    conf_id == FW_DBG_START_FROM_ALIVE) {
-		iwl_mvm_restart_early_start(mvm);
+	    conf_id == FW_DBG_START_FROM_ALIVE)
 		return 0;
-	}
 
 	if (!mvm->fw->dbg_conf_tlv[conf_id])
 		return -EINVAL;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index f1231c0..0bffade 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -2585,6 +2585,10 @@
 
 		/* enable beacon filtering */
 		WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
+
+		iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
+				     false);
+
 		ret = 0;
 	} else if (old_state == IEEE80211_STA_AUTHORIZED &&
 		   new_state == IEEE80211_STA_ASSOC) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index c60703e..2b1c691 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1666,8 +1666,11 @@
  */
 static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
 {
+	u32 cmd_queue = iwl_mvm_is_dqa_supported(mvm) ? IWL_MVM_DQA_CMD_QUEUE :
+		IWL_MVM_CMD_QUEUE;
+
 	return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) &
-		~BIT(IWL_MVM_CMD_QUEUE));
+		~BIT(cmd_queue));
 }
 
 static inline
@@ -1687,6 +1690,7 @@
 static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
 {
 	mvm->ucode_loaded = false;
+	mvm->fw_dbg_conf = FW_DBG_INVALID;
 	iwl_trans_stop_device(mvm->trans);
 }
 
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 4d35deb..6d38eec 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1118,22 +1118,38 @@
 
 	mutex_lock(&mvm->mutex);
 
-	/* stop recording */
 	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+		/* stop recording */
 		iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
+
+		iwl_mvm_fw_error_dump(mvm);
+
+		/* start recording again if the firmware is not crashed */
+		if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) &&
+		    mvm->fw->dbg_dest_tlv)
+			iwl_clear_bits_prph(mvm->trans,
+					    MON_BUFF_SAMPLE_CTL, 0x100);
 	} else {
+		u32 in_sample = iwl_read_prph(mvm->trans, DBGC_IN_SAMPLE);
+		u32 out_ctrl = iwl_read_prph(mvm->trans, DBGC_OUT_CTRL);
+
+		/* stop recording */
 		iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0);
-		/* wait before we collect the data till the DBGC stop */
 		udelay(100);
+		iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, 0);
+		/* wait before we collect the data till the DBGC stop */
+		udelay(500);
+
+		iwl_mvm_fw_error_dump(mvm);
+
+		/* start recording again if the firmware is not crashed */
+		if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) &&
+		    mvm->fw->dbg_dest_tlv) {
+			iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, in_sample);
+			iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, out_ctrl);
+		}
 	}
 
-	iwl_mvm_fw_error_dump(mvm);
-
-	/* start recording again if the firmware is not crashed */
-	WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) &&
-		     mvm->fw->dbg_dest_tlv &&
-		     iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf));
-
 	mutex_unlock(&mvm->mutex);
 
 	iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
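
The ops.c hunk above reorders debug collection on the newer families: the current sampling-control values are read back first, recording is stopped and allowed to drain, the dump is taken, and the saved values are rewritten only when the firmware has not hit an error. A compressed model of that save/stop/dump/restore ordering, with stubbed "registers" standing in for DBGC_IN_SAMPLE and DBGC_OUT_CTRL, is sketched below.

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t dbgc_in_sample = 1, dbgc_out_ctrl = 1;   /* stubbed registers */
    static bool fw_error;

    static void collect_dump(void) { puts("dumping firmware state"); }

    static void fw_dbg_collect(void)
    {
        /* Remember the current sampling configuration. */
        uint32_t in_sample = dbgc_in_sample;
        uint32_t out_ctrl  = dbgc_out_ctrl;

        /* Stop recording before touching the monitor buffer. */
        dbgc_in_sample = 0;
        dbgc_out_ctrl = 0;

        collect_dump();

        /* Re-arm recording only if the firmware is still healthy. */
        if (!fw_error) {
            dbgc_in_sample = in_sample;
            dbgc_out_ctrl = out_ctrl;
        }
    }

    int main(void)
    {
        fw_dbg_collect();
        printf("recording re-enabled: %s\n", dbgc_in_sample ? "yes" : "no");
        return 0;
    }
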
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 0aea476..f251c2a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -2709,7 +2709,8 @@
 				struct ieee80211_sta *sta,
 				struct iwl_lq_sta *lq_sta,
 				enum nl80211_band band,
-				struct rs_rate *rate)
+				struct rs_rate *rate,
+				bool init)
 {
 	int i, nentries;
 	unsigned long active_rate;
@@ -2763,14 +2764,25 @@
 	 */
 	if (sta->vht_cap.vht_supported &&
 	    best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) {
-		switch (sta->bandwidth) {
-		case IEEE80211_STA_RX_BW_160:
-		case IEEE80211_STA_RX_BW_80:
-		case IEEE80211_STA_RX_BW_40:
+		/*
+		 * In AP mode, when a new station associates, rs is initialized
+		 * immediately upon association completion, before the phy
+		 * context is updated with the association parameters, so the
+		 * sta bandwidth might be wider than the phy context allows.
+		 * To avoid this issue, always initialize rs with 20mhz
+		 * bandwidth rate, and after authorization, when the phy context
+		 * is already up-to-date, re-init rs with the correct bw.
+		 */
+		u32 bw = init ? RATE_MCS_CHAN_WIDTH_20 : rs_bw_from_sta_bw(sta);
+
+		switch (bw) {
+		case RATE_MCS_CHAN_WIDTH_40:
+		case RATE_MCS_CHAN_WIDTH_80:
+		case RATE_MCS_CHAN_WIDTH_160:
 			initial_rates = rs_optimal_rates_vht;
 			nentries = ARRAY_SIZE(rs_optimal_rates_vht);
 			break;
-		case IEEE80211_STA_RX_BW_20:
+		case RATE_MCS_CHAN_WIDTH_20:
 			initial_rates = rs_optimal_rates_vht_20mhz;
 			nentries = ARRAY_SIZE(rs_optimal_rates_vht_20mhz);
 			break;
@@ -2781,7 +2793,7 @@
 
 		active_rate = lq_sta->active_siso_rate;
 		rate->type = LQ_VHT_SISO;
-		rate->bw = rs_bw_from_sta_bw(sta);
+		rate->bw = bw;
 	} else if (sta->ht_cap.ht_supported &&
 		   best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) {
 		initial_rates = rs_optimal_rates_ht;
@@ -2863,7 +2875,7 @@
 	tbl = &(lq_sta->lq_info[active_tbl]);
 	rate = &tbl->rate;
 
-	rs_get_initial_rate(mvm, sta, lq_sta, band, rate);
+	rs_get_initial_rate(mvm, sta, lq_sta, band, rate, init);
 	rs_init_optimal_rate(mvm, sta, lq_sta);
 
 	WARN_ON_ONCE(rate->ant != ANT_A && rate->ant != ANT_B);
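
The comment added in rs_get_initial_rate() explains why the first rate-scale init after association is pinned to 20 MHz and only widened once the station is authorized and the PHY context is current (the mac80211.c hunk re-runs iwl_mvm_rs_rate_init() at that point). The selection reduces to the small decision sketched below; the enum values and sta_bandwidth() are placeholders, not the driver's RATE_MCS_CHAN_WIDTH_* machinery.

    #include <stdio.h>
    #include <stdbool.h>

    enum chan_width { WIDTH_20, WIDTH_40, WIDTH_80, WIDTH_160 };

    /* Pretend this is the width negotiated with the station. */
    static enum chan_width sta_bandwidth(void) { return WIDTH_80; }

    /* On the very first init the PHY context may still be narrower than the
     * station, so start conservatively; after authorization re-init with the
     * station's real width. */
    static enum chan_width initial_rate_width(bool init)
    {
        return init ? WIDTH_20 : sta_bandwidth();
    }

    int main(void)
    {
        printf("first init width: %d\n", initial_rate_width(true));
        printf("post-auth width:  %d\n", initial_rate_width(false));
        return 0;
    }
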
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index a481eb4..c2bbc8c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -72,6 +72,7 @@
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);
 	struct iwl_mvm_key_pn *ptk_pn;
+	int res;
 	u8 tid, keyidx;
 	u8 pn[IEEE80211_CCMP_PN_LEN];
 	u8 *extiv;
@@ -128,12 +129,13 @@
 	pn[4] = extiv[1];
 	pn[5] = extiv[0];
 
-	if (memcmp(pn, ptk_pn->q[queue].pn[tid],
-		   IEEE80211_CCMP_PN_LEN) <= 0)
+	res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN);
+	if (res < 0)
+		return -1;
+	if (!res && !(stats->flag & RX_FLAG_ALLOW_SAME_PN))
 		return -1;
 
-	if (!(stats->flag & RX_FLAG_AMSDU_MORE))
-		memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
+	memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
 	stats->flag |= RX_FLAG_PN_VALIDATED;
 
 	return 0;
@@ -295,28 +297,21 @@
 }
 
 /*
- * returns true if a packet outside BA session is a duplicate and
- * should be dropped
+ * returns true if a packet is a duplicate and should be dropped.
+ * Updates AMSDU PN tracking info
  */
-static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue,
-				  struct ieee80211_rx_status *rx_status,
-				  struct ieee80211_hdr *hdr,
-				  struct iwl_rx_mpdu_desc *desc)
+static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
+			   struct ieee80211_rx_status *rx_status,
+			   struct ieee80211_hdr *hdr,
+			   struct iwl_rx_mpdu_desc *desc)
 {
 	struct iwl_mvm_sta *mvm_sta;
 	struct iwl_mvm_rxq_dup_data *dup_data;
-	u8 baid, tid, sub_frame_idx;
+	u8 tid, sub_frame_idx;
 
 	if (WARN_ON(IS_ERR_OR_NULL(sta)))
 		return false;
 
-	baid = (le32_to_cpu(desc->reorder_data) &
-		IWL_RX_MPDU_REORDER_BAID_MASK) >>
-		IWL_RX_MPDU_REORDER_BAID_SHIFT;
-
-	if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
-		return false;
-
 	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
 	dup_data = &mvm_sta->dup_data[queue];
 
@@ -346,6 +341,12 @@
 		     dup_data->last_sub_frame[tid] >= sub_frame_idx))
 		return true;
 
+	/* Allow same PN as the first subframe for following sub frames */
+	if (dup_data->last_seq[tid] == hdr->seq_ctrl &&
+	    sub_frame_idx > dup_data->last_sub_frame[tid] &&
+	    desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU)
+		rx_status->flag |= RX_FLAG_ALLOW_SAME_PN;
+
 	dup_data->last_seq[tid] = hdr->seq_ctrl;
 	dup_data->last_sub_frame[tid] = sub_frame_idx;
 
@@ -882,7 +883,7 @@
 		if (ieee80211_is_data(hdr->frame_control))
 			iwl_mvm_rx_csum(sta, skb, desc);
 
-		if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) {
+		if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) {
 			kfree_skb(skb);
 			rcu_read_unlock();
 			return;
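
The rxmq.c changes split the old "<= 0" packet-number comparison so that an equal PN is tolerated only when the duplicate-detection path has marked the frame as a later subframe of the same A-MSDU (via RX_FLAG_ALLOW_SAME_PN). A standalone sketch of that comparison follows; everything beyond the memcmp logic is simplified.

    #include <stdio.h>
    #include <string.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define PN_LEN 6

    /* Returns true when the frame should be dropped as a replay. */
    static bool pn_replay(const uint8_t *pn, uint8_t *last_pn, bool allow_same_pn)
    {
        int res = memcmp(pn, last_pn, PN_LEN);

        if (res < 0)                      /* strictly older: always a replay */
            return true;
        if (res == 0 && !allow_same_pn)   /* equal PN only ok for A-MSDU subframes */
            return true;

        memcpy(last_pn, pn, PN_LEN);      /* accepted: remember the newest PN */
        return false;
    }

    int main(void)
    {
        uint8_t last[PN_LEN] = { 0, 0, 0, 0, 0, 5 };
        uint8_t same[PN_LEN] = { 0, 0, 0, 0, 0, 5 };
        uint8_t next[PN_LEN] = { 0, 0, 0, 0, 0, 6 };

        printf("same PN, not A-MSDU: drop=%d\n", pn_replay(same, last, false));
        printf("same PN, A-MSDU:     drop=%d\n", pn_replay(same, last, true));
        printf("next PN:             drop=%d\n", pn_replay(next, last, false));
        return 0;
    }
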
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index bec7d9c..c520356 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -790,11 +790,13 @@
 	struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
 	int ret;
 
-	if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR))
-		return -EIO;
-
 	mutex_lock(&mvm->mutex);
 
+	if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) {
+		ret = -EIO;
+		goto unlock;
+	}
+
 	if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) {
 		ret = -EINVAL;
 		goto unlock;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 7465d4d..bd7ff56 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -406,11 +406,11 @@
 {
 	struct ieee80211_key_conf *keyconf = info->control.hw_key;
 	u8 *crypto_hdr = skb_frag->data + hdrlen;
+	enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM;
 	u64 pn;
 
 	switch (keyconf->cipher) {
 	case WLAN_CIPHER_SUITE_CCMP:
-	case WLAN_CIPHER_SUITE_CCMP_256:
 		iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
 		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
 		break;
@@ -434,13 +434,16 @@
 		break;
 	case WLAN_CIPHER_SUITE_GCMP:
 	case WLAN_CIPHER_SUITE_GCMP_256:
+		type = TX_CMD_SEC_GCMP;
+		/* Fall through */
+	case WLAN_CIPHER_SUITE_CCMP_256:
 		/* TODO: Taking the key from the table might introduce a race
 		 * when PTK rekeying is done, having an old packets with a PN
 		 * based on the old key but the message encrypted with a new
 		 * one.
 		 * Need to handle this.
 		 */
-		tx_cmd->sec_ctl |= TX_CMD_SEC_GCMP | TX_CMD_SEC_KEY_FROM_TABLE;
+		tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE;
 		tx_cmd->key[0] = keyconf->hw_key_idx;
 		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
 		break;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 10ef44e..fe32de2 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -2824,7 +2824,8 @@
 #ifdef CONFIG_PM_SLEEP
 static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
 {
-	if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3)
+	if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
+	    (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3))
 		return iwl_pci_fw_enter_d0i3(trans);
 
 	return 0;
@@ -2832,7 +2833,8 @@
 
 static void iwl_trans_pcie_resume(struct iwl_trans *trans)
 {
-	if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3)
+	if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
+	    (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3))
 		iwl_pci_fw_exit_d0i3(trans);
 }
 #endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 4182c37..95e9641 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3084,8 +3084,10 @@
 	if (info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]) {
 		u32 idx = nla_get_u32(info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]);
 
-		if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom))
+		if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom)) {
+			kfree(hwname);
 			return -EINVAL;
+		}
 		param.regd = hwsim_world_regdom_custom[idx];
 	}
 
@@ -3346,8 +3348,11 @@
 			continue;
 
 		list_del(&data->list);
-		INIT_WORK(&data->destroy_work, destroy_radio);
-		schedule_work(&data->destroy_work);
+		spin_unlock_bh(&hwsim_radio_lock);
+		mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy),
+					 NULL);
+		spin_lock_bh(&hwsim_radio_lock);
+
 	}
 	spin_unlock_bh(&hwsim_radio_lock);
 }
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index 13da95a..987c7c4 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -142,15 +142,25 @@
 	if (!rt2x00dev->ops->hw->set_rts_threshold &&
 	    (tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS |
 						IEEE80211_TX_RC_USE_CTS_PROTECT))) {
-		if (rt2x00queue_available(queue) <= 1)
-			goto exit_fail;
+		if (rt2x00queue_available(queue) <= 1) {
+			/*
+			 * Recheck for full queue under lock to avoid race
+			 * conditions with rt2x00lib_txdone().
+			 */
+			spin_lock(&queue->tx_lock);
+			if (rt2x00queue_threshold(queue))
+				rt2x00queue_pause_queue(queue);
+			spin_unlock(&queue->tx_lock);
+
+			goto exit_free_skb;
+		}
 
 		if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb))
-			goto exit_fail;
+			goto exit_free_skb;
 	}
 
 	if (unlikely(rt2x00queue_write_tx_frame(queue, skb, control->sta, false)))
-		goto exit_fail;
+		goto exit_free_skb;
 
 	/*
 	 * Pausing queue has to be serialized with rt2x00lib_txdone(). Note
@@ -164,10 +174,6 @@
 
 	return;
 
- exit_fail:
-	spin_lock(&queue->tx_lock);
-	rt2x00queue_pause_queue(queue);
-	spin_unlock(&queue->tx_lock);
  exit_free_skb:
 	ieee80211_free_txskb(hw, skb);
 }
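
The rt2x00mac.c hunk retires the old exit_fail path, which unconditionally paused the queue, in favour of rechecking the threshold under queue->tx_lock before pausing, so a pause cannot race with a completion in rt2x00lib_txdone() that has just freed entries. The general "recheck under the lock" shape, reduced to a mutex and a counter, is sketched below; it is not the driver's queue implementation.

    #include <pthread.h>
    #include <stdio.h>
    #include <stdbool.h>

    static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;
    static int available = 1;           /* free tx entries */
    static bool paused;

    /* Pause only if the queue is still below threshold once the lock is held;
     * a completion that freed entries in the meantime keeps the queue running. */
    static void maybe_pause_queue(void)
    {
        pthread_mutex_lock(&tx_lock);
        if (available <= 1)
            paused = true;
        pthread_mutex_unlock(&tx_lock);
    }

    int main(void)
    {
        available = 4;                  /* pretend completions freed entries */
        maybe_pause_queue();
        printf("queue paused: %s\n", paused ? "yes" : "no");
        return 0;
    }
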
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 0881ba8..c78abfc 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -247,7 +247,10 @@
 	0x04, 0x08,		/* Noise gain, limit offset */
 	0x28, 0x28,		/* det rssi, med busy offsets */
 	7,			/* det sync thresh */
-	0, 2, 2			/* test mode, min, max */
+	0, 2, 2,		/* test mode, min, max */
+	0,			/* rx/tx delay */
+	0, 0, 0, 0, 0, 0,	/* current BSS id */
+	0			/* hop set */
 };
 
 /*===========================================================================*/
@@ -598,7 +601,7 @@
 	 *    a_beacon_period = hops    a_beacon_period = KuS
 	 *//* 64ms = 010000 */
 	if (local->fw_ver == 0x55) {
-		memcpy((UCHAR *) &local->sparm.b4, b4_default_startup_parms,
+		memcpy(&local->sparm.b4, b4_default_startup_parms,
 		       sizeof(struct b4_startup_params));
 		/* Translate sane kus input values to old build 4/5 format */
 		/* i = hop time in uS truncated to 3 bytes */
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
index 231f84d..6113624 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
@@ -1454,6 +1454,7 @@
 		goto err_free_dev;
 	}
 	mutex_init(&priv->io_mutex);
+	mutex_init(&priv->conf_mutex);
 
 	SET_IEEE80211_DEV(dev, &intf->dev);
 	usb_set_intfdata(intf, dev);
@@ -1627,7 +1628,6 @@
 		printk(KERN_ERR "rtl8187: Cannot register device\n");
 		goto err_free_dmabuf;
 	}
-	mutex_init(&priv->conf_mutex);
 	skb_queue_head_init(&priv->b_tx_status.queue);
 
 	wiphy_info(dev->wiphy, "hwaddr %pM, %s V%d + %s, rfkill mask %d\n",
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 1c539c8..5e41bf0 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1200,8 +1200,7 @@
 		WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
 
 		enable = bss_conf->arp_addr_cnt == 1 && bss_conf->assoc;
-		wl1251_acx_arp_ip_filter(wl, enable, addr);
-
+		ret = wl1251_acx_arp_ip_filter(wl, enable, addr);
 		if (ret < 0)
 			goto out_sleep;
 	}
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index b09c81e..520050e 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -350,6 +350,9 @@
 	unsigned int i = 0;
 	struct netfront_queue *queue = NULL;
 
+	if (!np->queues)
+		return -ENODEV;
+
 	for (i = 0; i < num_queues; ++i) {
 		queue = &np->queues[i];
 		napi_enable(&queue->napi);
@@ -1377,18 +1380,8 @@
 #ifdef CONFIG_SYSFS
 	info->netdev->sysfs_groups[0] = &xennet_dev_group;
 #endif
-	err = register_netdev(info->netdev);
-	if (err) {
-		pr_warn("%s: register_netdev err=%d\n", __func__, err);
-		goto fail;
-	}
 
 	return 0;
-
- fail:
-	xennet_free_netdev(netdev);
-	dev_set_drvdata(&dev->dev, NULL);
-	return err;
 }
 
 static void xennet_end_access(int ref, void *page)
@@ -1757,8 +1750,6 @@
 {
 	unsigned int i;
 
-	rtnl_lock();
-
 	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
 		struct netfront_queue *queue = &info->queues[i];
 
@@ -1767,8 +1758,6 @@
 		netif_napi_del(&queue->napi);
 	}
 
-	rtnl_unlock();
-
 	kfree(info->queues);
 	info->queues = NULL;
 }
@@ -1784,8 +1773,6 @@
 	if (!info->queues)
 		return -ENOMEM;
 
-	rtnl_lock();
-
 	for (i = 0; i < *num_queues; i++) {
 		struct netfront_queue *queue = &info->queues[i];
 
@@ -1794,7 +1781,7 @@
 
 		ret = xennet_init_queue(queue);
 		if (ret < 0) {
-			dev_warn(&info->netdev->dev,
+			dev_warn(&info->xbdev->dev,
 				 "only created %d queues\n", i);
 			*num_queues = i;
 			break;
@@ -1808,10 +1795,8 @@
 
 	netif_set_real_num_tx_queues(info->netdev, *num_queues);
 
-	rtnl_unlock();
-
 	if (*num_queues == 0) {
-		dev_err(&info->netdev->dev, "no queues\n");
+		dev_err(&info->xbdev->dev, "no queues\n");
 		return -EINVAL;
 	}
 	return 0;
@@ -1853,6 +1838,7 @@
 		goto out;
 	}
 
+	rtnl_lock();
 	if (info->queues)
 		xennet_destroy_queues(info);
 
@@ -1863,6 +1849,7 @@
 		info->queues = NULL;
 		goto out;
 	}
+	rtnl_unlock();
 
 	/* Create shared ring, alloc event channel -- for each queue */
 	for (i = 0; i < num_queues; ++i) {
@@ -1959,8 +1946,10 @@
 	xenbus_transaction_end(xbt, 1);
  destroy_ring:
 	xennet_disconnect_backend(info);
+	rtnl_lock();
 	xennet_destroy_queues(info);
  out:
+	rtnl_unlock();
 	device_unregister(&dev->dev);
 	return err;
 }
@@ -1996,6 +1985,15 @@
 	netdev_update_features(dev);
 	rtnl_unlock();
 
+	if (dev->reg_state == NETREG_UNINITIALIZED) {
+		err = register_netdev(dev);
+		if (err) {
+			pr_warn("%s: register_netdev err=%d\n", __func__, err);
+			device_unregister(&np->xbdev->dev);
+			return err;
+		}
+	}
+
 	/*
 	 * All public and private state should now be sane.  Get
 	 * ready to start sending and receiving packets and give the driver
@@ -2038,7 +2036,10 @@
 	case XenbusStateInitialised:
 	case XenbusStateReconfiguring:
 	case XenbusStateReconfigured:
+		break;
+
 	case XenbusStateUnknown:
+		wake_up_all(&module_unload_q);
 		break;
 
 	case XenbusStateInitWait:
@@ -2169,7 +2170,9 @@
 		xenbus_switch_state(dev, XenbusStateClosing);
 		wait_event(module_unload_q,
 			   xenbus_read_driver_state(dev->otherend) ==
-			   XenbusStateClosing);
+			   XenbusStateClosing ||
+			   xenbus_read_driver_state(dev->otherend) ==
+			   XenbusStateUnknown);
 
 		xenbus_switch_state(dev, XenbusStateClosed);
 		wait_event(module_unload_q,
@@ -2181,10 +2184,14 @@
 
 	xennet_disconnect_backend(info);
 
-	unregister_netdev(info->netdev);
+	if (info->netdev->reg_state == NETREG_REGISTERED)
+		unregister_netdev(info->netdev);
 
-	if (info->queues)
+	if (info->queues) {
+		rtnl_lock();
 		xennet_destroy_queues(info);
+		rtnl_unlock();
+	}
 	xennet_free_netdev(info->netdev);
 
 	return 0;
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 24222a5..da95bd8 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -996,6 +996,9 @@
 	mw_base = nt->mw_vec[mw_num].phys_addr;
 	mw_size = nt->mw_vec[mw_num].phys_size;
 
+	if (max_mw_size && mw_size > max_mw_size)
+		mw_size = max_mw_size;
+
 	tx_size = (unsigned int)mw_size / num_qps_mw;
 	qp_offset = tx_size * (qp_num / mw_count);
 
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index b8fb1ef..74257ac 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1747,7 +1747,7 @@
 	}
 
 	if (i < nd_region->ndr_mappings) {
-		struct nvdimm_drvdata *ndd = to_ndd(&nd_region->mapping[i]);
+		struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm;
 
 		/*
 		 * Give up if we don't find an instance of a uuid at each
@@ -1755,7 +1755,7 @@
 		 * find a dimm with two instances of the same uuid.
 		 */
 		dev_err(&nd_region->dev, "%s missing label for %pUb\n",
-				dev_name(ndd->dev), nd_label->uuid);
+				nvdimm_name(nvdimm), nd_label->uuid);
 		rc = -EINVAL;
 		goto err;
 	}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index ad9d82e..c823e93 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2040,6 +2040,10 @@
 	struct nvme_ns *ns;
 
 	mutex_lock(&ctrl->namespaces_mutex);
+
+	/* Forcibly start all queues to avoid having stuck requests */
+	blk_mq_start_hw_queues(ctrl->admin_q);
+
 	list_for_each_entry(ns, &ctrl->namespaces, list) {
 		/*
 		 * Revalidating a dead namespace sets capacity to 0. This will
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index eef1a68..b634b89 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -583,8 +583,10 @@
 			opts->discovery_nqn =
 				!(strcmp(opts->subsysnqn,
 					 NVME_DISC_SUBSYS_NAME));
-			if (opts->discovery_nqn)
+			if (opts->discovery_nqn) {
+				opts->kato = 0;
 				opts->nr_io_queues = 0;
+			}
 			break;
 		case NVMF_OPT_TRADDR:
 			p = match_strdup(args);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index e48ecb9..642ee00 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1120,7 +1120,7 @@
 	nvmeq->cq_vector = qid - 1;
 	result = adapter_alloc_cq(dev, qid, nvmeq);
 	if (result < 0)
-		return result;
+		goto release_vector;
 
 	result = adapter_alloc_sq(dev, qid, nvmeq);
 	if (result < 0)
@@ -1134,9 +1134,12 @@
 	return result;
 
  release_sq:
+	dev->online_queues--;
 	adapter_delete_sq(dev, qid);
  release_cq:
 	adapter_delete_cq(dev, qid);
+ release_vector:
+	nvmeq->cq_vector = -1;
 	return result;
 }
 
@@ -1263,7 +1266,7 @@
 	bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
 
 	/* If there is a reset ongoing, we shouldn't reset again. */
-	if (work_busy(&dev->reset_work))
+	if (dev->ctrl.state == NVME_CTRL_RESETTING)
 		return false;
 
 	/* We shouldn't reset unless the controller is on fatal error state
@@ -1755,7 +1758,7 @@
 	struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
 	int result = -ENODEV;
 
-	if (WARN_ON(dev->ctrl.state == NVME_CTRL_RESETTING))
+	if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING))
 		goto out;
 
 	/*
@@ -1765,9 +1768,6 @@
 	if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
 		nvme_dev_disable(dev, false);
 
-	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
-		goto out;
-
 	result = nvme_pci_enable(dev);
 	if (result)
 		goto out;
@@ -1841,8 +1841,8 @@
 {
 	if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
 		return -ENODEV;
-	if (work_busy(&dev->reset_work))
-		return -ENODEV;
+	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
+		return -EBUSY;
 	if (!queue_work(nvme_workq, &dev->reset_work))
 		return -EBUSY;
 	return 0;
@@ -1944,6 +1944,7 @@
 	if (result)
 		goto release_pools;
 
+	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING);
 	dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
 
 	queue_work(nvme_workq, &dev->reset_work);
@@ -1987,6 +1988,7 @@
 
 	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
 
+	cancel_work_sync(&dev->reset_work);
 	pci_set_drvdata(pdev, NULL);
 
 	if (!pci_device_is_present(pdev)) {
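
The pci.c changes move reset arbitration from work_busy() onto the controller state machine: a reset is queued only when the state successfully transitions to RESETTING, and the reset worker now warns if it runs in any other state. A toy version of that gate is shown below using a plain C11 compare-and-swap; the real nvme_change_ctrl_state() validates transitions against a table under a lock, so this is only the shape of the idea.

    #include <stdio.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    enum ctrl_state { CTRL_LIVE, CTRL_RESETTING, CTRL_DELETING };

    static _Atomic int ctrl_state = CTRL_LIVE;

    /* Only one caller wins the LIVE -> RESETTING transition; everyone else
     * sees failure and returns -EBUSY instead of queuing a second reset. */
    static bool try_start_reset(void)
    {
        int expected = CTRL_LIVE;

        return atomic_compare_exchange_strong(&ctrl_state, &expected,
                                              CTRL_RESETTING);
    }

    int main(void)
    {
        printf("first reset queued:  %s\n", try_start_reset() ? "yes" : "no");
        printf("second reset queued: %s\n", try_start_reset() ? "yes" : "no");
        return 0;
    }
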
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index c89d68a..3a04492 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -491,9 +491,12 @@
 		goto fail;
 	}
 
-	/* either variant of SGLs is fine, as we don't support metadata */
-	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF &&
-		     (flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METASEG)) {
+	/*
+	 * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
+	 * contains an address of a single contiguous physical buffer that is
+	 * byte aligned.
+	 */
+	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 		goto fail;
 	}
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 52a297d..0a963b1 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -46,10 +46,20 @@
 config OF_PROMTREE
 	bool
 
+config OF_KOBJ
+	bool "Display devicetree in sysfs"
+	def_bool SYSFS
+	help
+	  Some embedded platforms have no need to display the devicetree
+	  nodes and properties in sysfs. Disabling this option will save
+	  a small amount of memory, as well as decrease boot time. By
+	  default this option will be enabled if SYSFS is enabled.
+
 # Hardly any platforms need this.  It is safe to select, but only do so if you
 # need it.
 config OF_DYNAMIC
 	bool "Support for dynamic device trees" if OF_UNITTEST
+	select OF_KOBJ
 	help
 	  On some platforms, the device tree can be manipulated at runtime.
 	  While this option is selected automatically on such platforms, you
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index b2f474a..760b730 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -1,4 +1,5 @@
 obj-y = base.o device.o platform.o
+obj-$(CONFIG_OF_KOBJ) += kobj.o
 obj-$(CONFIG_OF_DYNAMIC) += dynamic.o
 obj-$(CONFIG_OF_FLATTREE) += fdt.o
 obj-$(CONFIG_OF_EARLY_FLATTREE) += fdt_address.o
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 23a6d36..9a6fad6 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -98,108 +98,6 @@
 }
 #endif
 
-#ifndef CONFIG_OF_DYNAMIC
-static void of_node_release(struct kobject *kobj)
-{
-	/* Without CONFIG_OF_DYNAMIC, no nodes gets freed */
-}
-#endif /* CONFIG_OF_DYNAMIC */
-
-struct kobj_type of_node_ktype = {
-	.release = of_node_release,
-};
-
-static ssize_t of_node_property_read(struct file *filp, struct kobject *kobj,
-				struct bin_attribute *bin_attr, char *buf,
-				loff_t offset, size_t count)
-{
-	struct property *pp = container_of(bin_attr, struct property, attr);
-	return memory_read_from_buffer(buf, count, &offset, pp->value, pp->length);
-}
-
-/* always return newly allocated name, caller must free after use */
-static const char *safe_name(struct kobject *kobj, const char *orig_name)
-{
-	const char *name = orig_name;
-	struct kernfs_node *kn;
-	int i = 0;
-
-	/* don't be a hero. After 16 tries give up */
-	while (i < 16 && (kn = sysfs_get_dirent(kobj->sd, name))) {
-		sysfs_put(kn);
-		if (name != orig_name)
-			kfree(name);
-		name = kasprintf(GFP_KERNEL, "%s#%i", orig_name, ++i);
-	}
-
-	if (name == orig_name) {
-		name = kstrdup(orig_name, GFP_KERNEL);
-	} else {
-		pr_warn("Duplicate name in %s, renamed to \"%s\"\n",
-			kobject_name(kobj), name);
-	}
-	return name;
-}
-
-int __of_add_property_sysfs(struct device_node *np, struct property *pp)
-{
-	int rc;
-
-	/* Important: Don't leak passwords */
-	bool secure = strncmp(pp->name, "security-", 9) == 0;
-
-	if (!IS_ENABLED(CONFIG_SYSFS))
-		return 0;
-
-	if (!of_kset || !of_node_is_attached(np))
-		return 0;
-
-	sysfs_bin_attr_init(&pp->attr);
-	pp->attr.attr.name = safe_name(&np->kobj, pp->name);
-	pp->attr.attr.mode = secure ? S_IRUSR : S_IRUGO;
-	pp->attr.size = secure ? 0 : pp->length;
-	pp->attr.read = of_node_property_read;
-
-	rc = sysfs_create_bin_file(&np->kobj, &pp->attr);
-	WARN(rc, "error adding attribute %s to node %s\n", pp->name, np->full_name);
-	return rc;
-}
-
-int __of_attach_node_sysfs(struct device_node *np)
-{
-	const char *name;
-	struct kobject *parent;
-	struct property *pp;
-	int rc;
-
-	if (!IS_ENABLED(CONFIG_SYSFS))
-		return 0;
-
-	if (!of_kset)
-		return 0;
-
-	np->kobj.kset = of_kset;
-	if (!np->parent) {
-		/* Nodes without parents are new top level trees */
-		name = safe_name(&of_kset->kobj, "base");
-		parent = NULL;
-	} else {
-		name = safe_name(&np->parent->kobj, kbasename(np->full_name));
-		parent = &np->parent->kobj;
-	}
-	if (!name)
-		return -ENOMEM;
-	rc = kobject_add(&np->kobj, parent, "%s", name);
-	kfree(name);
-	if (rc)
-		return rc;
-
-	for_each_property_of_node(np, pp)
-		__of_add_property_sysfs(np, pp);
-
-	return 0;
-}
-
 static struct device_node **phandle_cache;
 static u32 phandle_cache_mask;
 
@@ -2021,22 +1919,6 @@
 	return 0;
 }
 
-void __of_sysfs_remove_bin_file(struct device_node *np, struct property *prop)
-{
-	sysfs_remove_bin_file(&np->kobj, &prop->attr);
-	kfree(prop->attr.attr.name);
-}
-
-void __of_remove_property_sysfs(struct device_node *np, struct property *prop)
-{
-	if (!IS_ENABLED(CONFIG_SYSFS))
-		return;
-
-	/* at early boot, bail here and defer setup to of_init() */
-	if (of_kset && of_node_is_attached(np))
-		__of_sysfs_remove_bin_file(np, prop);
-}
-
 /**
  * of_remove_property - Remove a property from a node.
  *
@@ -2096,21 +1978,6 @@
 	return 0;
 }
 
-void __of_update_property_sysfs(struct device_node *np, struct property *newprop,
-		struct property *oldprop)
-{
-	if (!IS_ENABLED(CONFIG_SYSFS))
-		return;
-
-	/* At early boot, bail out and defer setup to of_init() */
-	if (!of_kset)
-		return;
-
-	if (oldprop)
-		__of_sysfs_remove_bin_file(np, oldprop);
-	__of_add_property_sysfs(np, newprop);
-}
-
 /*
  * of_update_property - Update a property in a node, if the property does
  * not exist, add it.
@@ -2218,7 +2085,7 @@
 			continue;
 
 		/* Allocate an alias_prop with enough space for the stem */
-		ap = dt_alloc(sizeof(*ap) + len + 1, 4);
+		ap = dt_alloc(sizeof(*ap) + len + 1, __alignof__(*ap));
 		if (!ap)
 			continue;
 		memset(ap, 0, sizeof(*ap) + len + 1);
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 888fdbc..765ba6e 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -16,6 +16,11 @@
 
 #include "of_private.h"
 
+static struct device_node *kobj_to_device_node(struct kobject *kobj)
+{
+	return container_of(kobj, struct device_node, kobj);
+}
+
 /**
  * of_node_get() - Increment refcount of a node
  * @node:	Node to inc refcount, NULL is supported to simplify writing of
@@ -43,28 +48,6 @@
 }
 EXPORT_SYMBOL(of_node_put);
 
-void __of_detach_node_sysfs(struct device_node *np)
-{
-	struct property *pp;
-
-	if (!IS_ENABLED(CONFIG_SYSFS))
-		return;
-
-	BUG_ON(!of_node_is_initialized(np));
-	if (!of_kset)
-		return;
-
-	/* only remove properties if on sysfs */
-	if (of_node_is_attached(np)) {
-		for_each_property_of_node(np, pp)
-			__of_sysfs_remove_bin_file(np, pp);
-		kobject_del(&np->kobj);
-	}
-
-	/* finally remove the kobj_init ref */
-	of_node_put(np);
-}
-
 static BLOCKING_NOTIFIER_HEAD(of_reconfig_chain);
 
 int of_reconfig_notifier_register(struct notifier_block *nb)
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 755b386..744f625 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -937,7 +937,7 @@
 	int offset;
 	const char *p, *q, *options = NULL;
 	int l;
-	const struct earlycon_id *match;
+	const struct earlycon_id **p_match;
 	const void *fdt = initial_boot_params;
 
 	offset = fdt_path_offset(fdt, "/chosen");
@@ -964,7 +964,10 @@
 		return 0;
 	}
 
-	for (match = __earlycon_table; match < __earlycon_table_end; match++) {
+	for (p_match = __earlycon_table; p_match < __earlycon_table_end;
+	     p_match++) {
+		const struct earlycon_id *match = *p_match;
+
 		if (!match->compatible[0])
 			continue;
 
diff --git a/drivers/of/kobj.c b/drivers/of/kobj.c
new file mode 100644
index 0000000..662f79e
--- /dev/null
+++ b/drivers/of/kobj.c
@@ -0,0 +1,165 @@
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include "of_private.h"
+
+/* true when node is initialized */
+static int of_node_is_initialized(struct device_node *node)
+{
+	return node && node->kobj.state_initialized;
+}
+
+/* true when node is attached (i.e. present on sysfs) */
+int of_node_is_attached(struct device_node *node)
+{
+	return node && node->kobj.state_in_sysfs;
+}
+
+
+#ifndef CONFIG_OF_DYNAMIC
+static void of_node_release(struct kobject *kobj)
+{
+	/* Without CONFIG_OF_DYNAMIC, no nodes gets freed */
+}
+#endif /* CONFIG_OF_DYNAMIC */
+
+struct kobj_type of_node_ktype = {
+	.release = of_node_release,
+};
+
+static ssize_t of_node_property_read(struct file *filp, struct kobject *kobj,
+				struct bin_attribute *bin_attr, char *buf,
+				loff_t offset, size_t count)
+{
+	struct property *pp = container_of(bin_attr, struct property, attr);
+	return memory_read_from_buffer(buf, count, &offset, pp->value, pp->length);
+}
+
+/* always return newly allocated name, caller must free after use */
+static const char *safe_name(struct kobject *kobj, const char *orig_name)
+{
+	const char *name = orig_name;
+	struct kernfs_node *kn;
+	int i = 0;
+
+	/* don't be a hero. After 16 tries give up */
+	while (i < 16 && name && (kn = sysfs_get_dirent(kobj->sd, name))) {
+		sysfs_put(kn);
+		if (name != orig_name)
+			kfree(name);
+		name = kasprintf(GFP_KERNEL, "%s#%i", orig_name, ++i);
+	}
+
+	if (name == orig_name) {
+		name = kstrdup(orig_name, GFP_KERNEL);
+	} else {
+		pr_warn("Duplicate name in %s, renamed to \"%s\"\n",
+			kobject_name(kobj), name);
+	}
+	return name;
+}
+
+int __of_add_property_sysfs(struct device_node *np, struct property *pp)
+{
+	int rc;
+
+	/* Important: Don't leak passwords */
+	bool secure = strncmp(pp->name, "security-", 9) == 0;
+
+	if (!IS_ENABLED(CONFIG_SYSFS))
+		return 0;
+
+	if (!of_kset || !of_node_is_attached(np))
+		return 0;
+
+	sysfs_bin_attr_init(&pp->attr);
+	pp->attr.attr.name = safe_name(&np->kobj, pp->name);
+	pp->attr.attr.mode = secure ? 0400 : 0444;
+	pp->attr.size = secure ? 0 : pp->length;
+	pp->attr.read = of_node_property_read;
+
+	rc = sysfs_create_bin_file(&np->kobj, &pp->attr);
+	WARN(rc, "error adding attribute %s to node %s\n", pp->name,
+		np->full_name);
+	return rc;
+}
+
+void __of_sysfs_remove_bin_file(struct device_node *np, struct property *prop)
+{
+	if (!IS_ENABLED(CONFIG_SYSFS))
+		return;
+
+	sysfs_remove_bin_file(&np->kobj, &prop->attr);
+	kfree(prop->attr.attr.name);
+}
+
+void __of_remove_property_sysfs(struct device_node *np, struct property *prop)
+{
+	/* at early boot, bail here and defer setup to of_init() */
+	if (of_kset && of_node_is_attached(np))
+		__of_sysfs_remove_bin_file(np, prop);
+}
+
+void __of_update_property_sysfs(struct device_node *np, struct property *newprop,
+		struct property *oldprop)
+{
+	/* At early boot, bail out and defer setup to of_init() */
+	if (!of_kset)
+		return;
+
+	if (oldprop)
+		__of_sysfs_remove_bin_file(np, oldprop);
+	__of_add_property_sysfs(np, newprop);
+}
+
+int __of_attach_node_sysfs(struct device_node *np)
+{
+	const char *name;
+	struct kobject *parent;
+	struct property *pp;
+	int rc;
+
+	if (!of_kset)
+		return 0;
+
+	np->kobj.kset = of_kset;
+	if (!np->parent) {
+		/* Nodes without parents are new top level trees */
+		name = safe_name(&of_kset->kobj, "base");
+		parent = NULL;
+	} else {
+		name = safe_name(&np->parent->kobj, kbasename(np->full_name));
+		parent = &np->parent->kobj;
+	}
+	if (!name)
+		return -ENOMEM;
+	rc = kobject_add(&np->kobj, parent, "%s", name);
+	kfree(name);
+	if (rc)
+		return rc;
+
+	for_each_property_of_node(np, pp)
+		__of_add_property_sysfs(np, pp);
+
+	return 0;
+}
+
+void __of_detach_node_sysfs(struct device_node *np)
+{
+	struct property *pp;
+
+	BUG_ON(!of_node_is_initialized(np));
+	if (!of_kset)
+		return;
+
+	/* only remove properties if on sysfs */
+	if (of_node_is_attached(np)) {
+		for_each_property_of_node(np, pp)
+			__of_sysfs_remove_bin_file(np, pp);
+		kobject_del(&np->kobj);
+	}
+
+	/* finally remove the kobj_init ref */
+	of_node_put(np);
+}
+
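/*
 * Illustrative sketch (not part of the patch above): safe_name() in kobj.c
 * resolves a duplicate sysfs entry by appending "#1", "#2", ... for up to 16
 * attempts.  The user-space model below substitutes a simple string-set
 * lookup for sysfs_get_dirent(); the names and helpers are illustrative only.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

static const char *taken[] = { "reg", "reg#1", NULL };

static bool name_exists(const char *name)
{
	for (int i = 0; taken[i]; i++)
		if (!strcmp(taken[i], name))
			return true;
	return false;
}

static char *safe_name_model(const char *orig)
{
	char *name = strdup(orig);
	int i = 0;

	while (i < 16 && name && name_exists(name)) {
		free(name);
		if (asprintf(&name, "%s#%i", orig, ++i) < 0)
			return NULL;
	}
	return name;	/* "reg" collides twice here, so this returns "reg#2" */
}

int main(void)
{
	char *n = safe_name_model("reg");

	printf("%s\n", n);	/* prints reg#2 */
	free(n);
	return 0;
}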
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index c4d7fdc..eb811185 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -35,12 +35,6 @@
 extern struct list_head aliases_lookup;
 extern struct kset *of_kset;
 
-
-static inline struct device_node *kobj_to_device_node(struct kobject *kobj)
-{
-	return container_of(kobj, struct device_node, kobj);
-}
-
 #if defined(CONFIG_OF_DYNAMIC)
 extern int of_property_notify(int action, struct device_node *np,
 			      struct property *prop, struct property *old_prop);
@@ -55,6 +49,29 @@
 }
 #endif /* CONFIG_OF_DYNAMIC */
 
+#if defined(CONFIG_OF_KOBJ)
+int of_node_is_attached(struct device_node *node);
+int __of_add_property_sysfs(struct device_node *np, struct property *pp);
+void __of_remove_property_sysfs(struct device_node *np, struct property *prop);
+void __of_update_property_sysfs(struct device_node *np, struct property *newprop,
+		struct property *oldprop);
+int __of_attach_node_sysfs(struct device_node *np);
+void __of_detach_node_sysfs(struct device_node *np);
+#else
+static inline int __of_add_property_sysfs(struct device_node *np, struct property *pp)
+{
+	return 0;
+}
+static inline void __of_remove_property_sysfs(struct device_node *np, struct property *prop) {}
+static inline void __of_update_property_sysfs(struct device_node *np,
+		struct property *newprop, struct property *oldprop) {}
+static inline int __of_attach_node_sysfs(struct device_node *np)
+{
+	return 0;
+}
+static inline void __of_detach_node_sysfs(struct device_node *np) {}
+#endif
+
 /**
  * General utilities for working with live trees.
  *
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 4aba42f..8d8e4c5 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -502,7 +502,7 @@
 }
 EXPORT_SYMBOL_GPL(of_platform_default_populate);
 
-#ifndef CONFIG_PPC
+#if !defined(CONFIG_PPC) && !defined(CONFIG_ARCH_MSM8953_BOOT_ORDERING)
 static int __init of_platform_default_populate_init(void)
 {
 	struct device_node *node;
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 1cced1d..7e93858 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -1367,9 +1367,27 @@
 		WRITE_REG32(stat, d->hba.base_addr + LBA_ERROR_CONFIG);
 	}
 
-	/* Set HF mode as the default (vs. -1 mode). */
+
+	/*
+	 * Hard Fail vs. Soft Fail on PCI "Master Abort".
+	 *
+	 * "Master Abort" means the MMIO transaction timed out - usually due to
+	 * the device not responding to an MMIO read. We would like HF to be
+	 * enabled to find driver problems, though it means the system will
+	 * crash with a HPMC.
+	 *
+	 * In SoftFail mode "~0L" is returned as a result of a timeout on the
+	 * pci bus. This is like how PCI busses on x86 and most other
+	 * architectures behave.  In order to increase compatibility with
+	 * existing (x86) PCI hardware and existing Linux drivers we enable
+	 * Soft Faul mode on PA-RISC now too.
+	 */
         stat = READ_REG32(d->hba.base_addr + LBA_STAT_CTL);
+#if defined(ENABLE_HARDFAIL)
 	WRITE_REG32(stat | HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);
+#else
+	WRITE_REG32(stat & ~HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);
+#endif
 
 	/*
 	** Writing a zero to STAT_CTL.rf (bit 0) will clear reset signal
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 78530d1..bdce067 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2646,6 +2646,7 @@
 	netmos_9901,
 	netmos_9865,
 	quatech_sppxp100,
+	wch_ch382l,
 };
 
 
@@ -2708,6 +2709,7 @@
 	/* netmos_9901 */               { 1, { { 0, -1 }, } },
 	/* netmos_9865 */               { 1, { { 0, -1 }, } },
 	/* quatech_sppxp100 */		{ 1, { { 0, 1 }, } },
+	/* wch_ch382l */		{ 1, { { 2, -1 }, } },
 };
 
 static const struct pci_device_id parport_pc_pci_tbl[] = {
@@ -2797,6 +2799,8 @@
 	/* Quatech SPPXP-100 Parallel port PCI ExpressCard */
 	{ PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
+	/* WCH CH382L PCI-E single parallel port card */
+	{ 0x1c00, 0x3050, 0x1c00, 0x3050, 0, 0, wch_ch382l },
 	{ 0, } /* terminate list */
 };
 MODULE_DEVICE_TABLE(pci, parport_pc_pci_tbl);
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index 4fce494..11bad82 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -32,6 +32,7 @@
 #define     PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT	5
 #define     PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE		(0 << 11)
 #define     PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT	12
+#define     PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ		0x2
 #define PCIE_CORE_LINK_CTRL_STAT_REG				0xd0
 #define     PCIE_CORE_LINK_L0S_ENTRY				BIT(0)
 #define     PCIE_CORE_LINK_TRAINING				BIT(5)
@@ -175,8 +176,6 @@
 #define PCIE_CONFIG_WR_TYPE0			0xa
 #define PCIE_CONFIG_WR_TYPE1			0xb
 
-/* PCI_BDF shifts 8bit, so we need extra 4bit shift */
-#define PCIE_BDF(dev)				(dev << 4)
 #define PCIE_CONF_BUS(bus)			(((bus) & 0xff) << 20)
 #define PCIE_CONF_DEV(dev)			(((dev) & 0x1f) << 15)
 #define PCIE_CONF_FUNC(fun)			(((fun) & 0x7)	<< 12)
@@ -298,7 +297,8 @@
 	reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE |
 		(7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) |
 		PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE |
-		PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT;
+		(PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ <<
+		 PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT);
 	advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG);
 
 	/* Program PCIe Control 2 to disable strict ordering */
@@ -439,7 +439,7 @@
 	u32 reg;
 	int ret;
 
-	if (PCI_SLOT(devfn) != 0) {
+	if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) {
 		*val = 0xffffffff;
 		return PCIBIOS_DEVICE_NOT_FOUND;
 	}
@@ -458,7 +458,7 @@
 	advk_writel(pcie, reg, PIO_CTRL);
 
 	/* Program the address registers */
-	reg = PCIE_BDF(devfn) | PCIE_CONF_REG(where);
+	reg = PCIE_CONF_ADDR(bus->number, devfn, where);
 	advk_writel(pcie, reg, PIO_ADDR_LS);
 	advk_writel(pcie, 0, PIO_ADDR_MS);
 
@@ -493,7 +493,7 @@
 	int offset;
 	int ret;
 
-	if (PCI_SLOT(devfn) != 0)
+	if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0)
 		return PCIBIOS_DEVICE_NOT_FOUND;
 
 	if (where % size)
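/*
 * Illustrative sketch (not part of the patch above): the aardvark PIO hunks
 * replace PCIE_BDF(devfn) with PCIE_CONF_ADDR(bus, devfn, where).  The macro
 * itself is outside this hunk; the model below assumes it simply ORs the
 * PCIE_CONF_BUS/DEV/FUNC/REG fields, and the PCIE_CONF_REG/PCI_SLOT/PCI_FUNC
 * definitions here are stand-ins for the driver's and the kernel's versions.
 */
#include <stdio.h>

#define PCI_SLOT(devfn)		(((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)		((devfn) & 0x07)

#define PCIE_CONF_BUS(bus)	(((bus) & 0xff) << 20)
#define PCIE_CONF_DEV(dev)	(((dev) & 0x1f) << 15)
#define PCIE_CONF_FUNC(fun)	(((fun) & 0x7) << 12)
#define PCIE_CONF_REG(reg)	((reg) & 0xffc)

/* assumed composition; the driver header holds the authoritative macro */
#define PCIE_CONF_ADDR(bus, devfn, where)			\
	(PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) |	\
	 PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where))

int main(void)
{
	/* bus 1, device 0, function 0, config offset 0x10 (BAR0) */
	unsigned int addr = PCIE_CONF_ADDR(1, 0, 0x10);

	printf("PIO_ADDR_LS = 0x%08x\n", addr);	/* 0x00100010 */
	return 0;
}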
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index a46b585..d44b558 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -587,6 +587,7 @@
 {
 	unsigned long long sta = 0;
 	struct acpiphp_func *func;
+	u32 dvid;
 
 	list_for_each_entry(func, &slot->funcs, sibling) {
 		if (func->flags & FUNC_HAS_STA) {
@@ -597,19 +598,27 @@
 			if (ACPI_SUCCESS(status) && sta)
 				break;
 		} else {
-			u32 dvid;
-
-			pci_bus_read_config_dword(slot->bus,
-						  PCI_DEVFN(slot->device,
-							    func->function),
-						  PCI_VENDOR_ID, &dvid);
-			if (dvid != 0xffffffff) {
+			if (pci_bus_read_dev_vendor_id(slot->bus,
+					PCI_DEVFN(slot->device, func->function),
+					&dvid, 0)) {
 				sta = ACPI_STA_ALL;
 				break;
 			}
 		}
 	}
 
+	if (!sta) {
+		/*
+		 * Check for the slot itself since it may be that the
+		 * ACPI slot is a device below a PCIe upstream port, in
+		 * which case it may not even be reachable yet.
+		 */
+		if (pci_bus_read_dev_vendor_id(slot->bus,
+				PCI_DEVFN(slot->device, 0), &dvid, 0)) {
+			sta = ACPI_STA_ALL;
+		}
+	}
+
 	return (unsigned int)sta;
 }
 
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index d81ad84..f11c382 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1147,11 +1147,14 @@
 	int error;
 
 	/*
-	 * If pci_dev->driver is not set (unbound), the device should
-	 * always remain in D0 regardless of the runtime PM status
+	 * If pci_dev->driver is not set (unbound), we leave the device in D0,
+	 * but it may go to D3cold when the bridge above it runtime suspends.
+	 * Save its config space in case that happens.
 	 */
-	if (!pci_dev->driver)
+	if (!pci_dev->driver) {
+		pci_save_state(pci_dev);
 		return 0;
+	}
 
 	if (!pm || !pm->runtime_suspend)
 		return -ENOSYS;
@@ -1199,16 +1202,18 @@
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
 	/*
-	 * If pci_dev->driver is not set (unbound), the device should
-	 * always remain in D0 regardless of the runtime PM status
+	 * Restoring config space is necessary even if the device is not bound
+	 * to a driver because although we left it in D0, it may have gone to
+	 * D3cold when the bridge above it runtime suspended.
 	 */
+	pci_restore_standard_config(pci_dev);
+
 	if (!pci_dev->driver)
 		return 0;
 
 	if (!pm || !pm->runtime_resume)
 		return -ENOSYS;
 
-	pci_restore_standard_config(pci_dev);
 	pci_fixup_device(pci_fixup_resume_early, pci_dev);
 	__pci_enable_wake(pci_dev, PCI_D0, true, false);
 	pci_fixup_device(pci_fixup_resume, pci_dev);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index a87c8e1..9c13aee 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3756,27 +3756,49 @@
 }
 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
 
-/*
- * We should only need to wait 100ms after FLR, but some devices take longer.
- * Wait for up to 1000ms for config space to return something other than -1.
- * Intel IGD requires this when an LCD panel is attached.  We read the 2nd
- * dword because VFs don't implement the 1st dword.
- */
 static void pci_flr_wait(struct pci_dev *dev)
 {
-	int i = 0;
+	int delay = 1, timeout = 60000;
 	u32 id;
 
-	do {
-		msleep(100);
-		pci_read_config_dword(dev, PCI_COMMAND, &id);
-	} while (i++ < 10 && id == ~0);
+	/*
+	 * Per PCIe r3.1, sec 6.6.2, a device must complete an FLR within
+	 * 100ms, but may silently discard requests while the FLR is in
+	 * progress.  Wait 100ms before trying to access the device.
+	 */
+	msleep(100);
 
-	if (id == ~0)
-		dev_warn(&dev->dev, "Failed to return from FLR\n");
-	else if (i > 1)
-		dev_info(&dev->dev, "Required additional %dms to return from FLR\n",
-			 (i - 1) * 100);
+	/*
+	 * After 100ms, the device should not silently discard config
+	 * requests, but it may still indicate that it needs more time by
+	 * responding to them with CRS completions.  The Root Port will
+	 * generally synthesize ~0 data to complete the read (except when
+	 * CRS SV is enabled and the read was for the Vendor ID; in that
+	 * case it synthesizes 0x0001 data).
+	 *
+	 * Wait for the device to return a non-CRS completion.  Read the
+	 * Command register instead of Vendor ID so we don't have to
+	 * contend with the CRS SV value.
+	 */
+	pci_read_config_dword(dev, PCI_COMMAND, &id);
+	while (id == ~0) {
+		if (delay > timeout) {
+			dev_warn(&dev->dev, "not ready %dms after FLR; giving up\n",
+				 100 + delay - 1);
+			return;
+		}
+
+		if (delay > 1000)
+			dev_info(&dev->dev, "not ready %dms after FLR; waiting\n",
+				 100 + delay - 1);
+
+		msleep(delay);
+		delay *= 2;
+		pci_read_config_dword(dev, PCI_COMMAND, &id);
+	}
+
+	if (delay > 1000)
+		dev_info(&dev->dev, "ready %dms after FLR\n", 100 + delay - 1);
 }
 
 static int pcie_flr(struct pci_dev *dev, int probe)
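/*
 * Illustrative sketch (not part of the patch above): the new pci_flr_wait()
 * polls with an exponentially growing delay (1, 2, 4, ... ms) and gives up
 * once the per-step delay exceeds the 60000 ms timeout.  The model below
 * walks that schedule for the worst case (device never answers) to show the
 * final "not ready %dms" value and the total time spent sleeping.
 */
#include <stdio.h>

int main(void)
{
	int delay = 1, timeout = 60000;
	long total = 100;	/* the unconditional 100 ms post-FLR sleep */
	int steps = 0;

	while (delay <= timeout) {
		total += delay;	/* msleep(delay) in the driver */
		delay *= 2;
		steps++;
	}
	/* the driver reports 100 + delay - 1 ms in its messages */
	printf("steps=%d giving_up_at=%dms total_sleep=%ldms\n",
	       steps, 100 + delay - 1, total);
	return 0;
}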
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index a98be6d..56340ab 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -231,7 +231,7 @@
 			res->flags |= IORESOURCE_ROM_ENABLE;
 		l64 = l & PCI_ROM_ADDRESS_MASK;
 		sz64 = sz & PCI_ROM_ADDRESS_MASK;
-		mask64 = (u32)PCI_ROM_ADDRESS_MASK;
+		mask64 = PCI_ROM_ADDRESS_MASK;
 	}
 
 	if (res->flags & IORESOURCE_MEM_64) {
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index fb177dc..b55f9179 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3857,6 +3857,8 @@
 			 quirk_dma_func1_alias);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123,
 			 quirk_dma_func1_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
+			 quirk_dma_func1_alias);
 /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
 			 quirk_dma_func1_alias);
@@ -3872,6 +3874,9 @@
 /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
 			 quirk_dma_func1_alias);
+/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c127 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220,
+			 quirk_dma_func1_alias);
 /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c49 */
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,
 			 quirk_dma_func1_alias);
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 4bc589e..85774b7 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -63,7 +63,7 @@
 		mask = (u32)PCI_BASE_ADDRESS_IO_MASK;
 		new |= res->flags & ~PCI_BASE_ADDRESS_IO_MASK;
 	} else if (resno == PCI_ROM_RESOURCE) {
-		mask = (u32)PCI_ROM_ADDRESS_MASK;
+		mask = PCI_ROM_ADDRESS_MASK;
 	} else {
 		mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
 		new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index df02f98..37df1bf 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -1128,6 +1128,7 @@
 	armpmu_init(pmu);
 
 	pmu->plat_device = pdev;
+	platform_set_drvdata(pdev, pmu);
 
 	if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
 		init_fn = of_id->data;
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 0a96502..fc5b18d 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -46,6 +46,9 @@
 #define BYT_TRIG_POS		BIT(25)
 #define BYT_TRIG_LVL		BIT(24)
 #define BYT_DEBOUNCE_EN		BIT(20)
+#define BYT_GLITCH_FILTER_EN	BIT(19)
+#define BYT_GLITCH_F_SLOW_CLK	BIT(17)
+#define BYT_GLITCH_F_FAST_CLK	BIT(16)
 #define BYT_PULL_STR_SHIFT	9
 #define BYT_PULL_STR_MASK	(3 << BYT_PULL_STR_SHIFT)
 #define BYT_PULL_STR_2K		(0 << BYT_PULL_STR_SHIFT)
@@ -1579,6 +1582,9 @@
 	 */
 	value &= ~(BYT_DIRECT_IRQ_EN | BYT_TRIG_POS | BYT_TRIG_NEG |
 		   BYT_TRIG_LVL);
+	/* Enable glitch filtering */
+	value |= BYT_GLITCH_FILTER_EN | BYT_GLITCH_F_SLOW_CLK |
+		 BYT_GLITCH_F_FAST_CLK;
 
 	writel(value, reg);
 
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index df63b7d..b40a074 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -368,18 +368,6 @@
 	writel(value, padcfg0);
 }
 
-static void intel_gpio_set_gpio_mode(void __iomem *padcfg0)
-{
-	u32 value;
-
-	/* Put the pad into GPIO mode */
-	value = readl(padcfg0) & ~PADCFG0_PMODE_MASK;
-	/* Disable SCI/SMI/NMI generation */
-	value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
-	value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
-	writel(value, padcfg0);
-}
-
 static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
 				     struct pinctrl_gpio_range *range,
 				     unsigned pin)
@@ -387,6 +375,7 @@
 	struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
 	void __iomem *padcfg0;
 	unsigned long flags;
+	u32 value;
 
 	raw_spin_lock_irqsave(&pctrl->lock, flags);
 
@@ -396,7 +385,13 @@
 	}
 
 	padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
-	intel_gpio_set_gpio_mode(padcfg0);
+	/* Put the pad into GPIO mode */
+	value = readl(padcfg0) & ~PADCFG0_PMODE_MASK;
+	/* Disable SCI/SMI/NMI generation */
+	value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
+	value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
+	writel(value, padcfg0);
+
 	/* Disable TX buffer and enable RX (this will be input) */
 	__intel_gpio_set_direction(padcfg0, true);
 
@@ -775,8 +770,6 @@
 
 	raw_spin_lock_irqsave(&pctrl->lock, flags);
 
-	intel_gpio_set_gpio_mode(reg);
-
 	value = readl(reg);
 
 	value &= ~(PADCFG0_RXEVCFG_MASK | PADCFG0_RXINV);
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index 7511723..257c1c2 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -138,7 +138,6 @@
 	MESON_PIN(GPIOX_19, EE_OFF),
 	MESON_PIN(GPIOX_20, EE_OFF),
 	MESON_PIN(GPIOX_21, EE_OFF),
-	MESON_PIN(GPIOX_22, EE_OFF),
 
 	MESON_PIN(GPIOCLK_0, EE_OFF),
 	MESON_PIN(GPIOCLK_1, EE_OFF),
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 0991a99..bdce49b 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2014, 2016-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014, 2016-2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -529,7 +529,7 @@
 			pad->pullup = arg;
 			break;
 		case PMIC_GPIO_CONF_STRENGTH:
-			if (arg > PMIC_GPIO_STRENGTH_LOW)
+			if (arg > PMIC_GPIO_STRENGTH_HIGH)
 				return -EINVAL;
 			pad->strength = arg;
 			break;
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index d3f5501d..a562ed7 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2015, Sony Mobile Communications AB.
- * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013, 2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -365,7 +365,7 @@
 			banks |= BIT(0);
 			break;
 		case PM8XXX_QCOM_DRIVE_STRENGH:
-			if (arg > PMIC_GPIO_STRENGTH_LOW) {
+			if (arg > PM8921_GPIO_STRENGTH_LOW) {
 				dev_err(pctrl->dev, "invalid drive strength\n");
 				return -EINVAL;
 			}
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
index dc9b671..2971888 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
@@ -1,7 +1,7 @@
 /*
  * R8A7796 processor support - PFC hardware block.
  *
- * Copyright (C) 2016 Renesas Electronics Corp.
+ * Copyright (C) 2016-2017 Renesas Electronics Corp.
  *
  * This file is based on the drivers/pinctrl/sh-pfc/pfc-r8a7795.c
  *
@@ -476,7 +476,7 @@
 #define MOD_SEL1_26		FM(SEL_TIMER_TMU_0)	FM(SEL_TIMER_TMU_1)
 #define MOD_SEL1_25_24		FM(SEL_SSP1_1_0)	FM(SEL_SSP1_1_1)	FM(SEL_SSP1_1_2)	FM(SEL_SSP1_1_3)
 #define MOD_SEL1_23_22_21	FM(SEL_SSP1_0_0)	FM(SEL_SSP1_0_1)	FM(SEL_SSP1_0_2)	FM(SEL_SSP1_0_3)	FM(SEL_SSP1_0_4)	F_(0, 0)		F_(0, 0)		F_(0, 0)
-#define MOD_SEL1_20		FM(SEL_SSI_0)		FM(SEL_SSI_1)
+#define MOD_SEL1_20		FM(SEL_SSI1_0)		FM(SEL_SSI1_1)
 #define MOD_SEL1_19		FM(SEL_SPEED_PULSE_0)	FM(SEL_SPEED_PULSE_1)
 #define MOD_SEL1_18_17		FM(SEL_SIMCARD_0)	FM(SEL_SIMCARD_1)	FM(SEL_SIMCARD_2)	FM(SEL_SIMCARD_3)
 #define MOD_SEL1_16		FM(SEL_SDHI2_0)		FM(SEL_SDHI2_1)
@@ -1208,7 +1208,7 @@
 	PINMUX_IPSR_GPSR(IP13_11_8,	HSCK0),
 	PINMUX_IPSR_MSEL(IP13_11_8,	MSIOF1_SCK_D,		SEL_MSIOF1_3),
 	PINMUX_IPSR_MSEL(IP13_11_8,	AUDIO_CLKB_A,		SEL_ADG_B_0),
-	PINMUX_IPSR_MSEL(IP13_11_8,	SSI_SDATA1_B,		SEL_SSI_1),
+	PINMUX_IPSR_MSEL(IP13_11_8,	SSI_SDATA1_B,		SEL_SSI1_1),
 	PINMUX_IPSR_MSEL(IP13_11_8,	TS_SCK0_D,		SEL_TSIF0_3),
 	PINMUX_IPSR_MSEL(IP13_11_8,	STP_ISCLK_0_D,		SEL_SSP1_0_3),
 	PINMUX_IPSR_MSEL(IP13_11_8,	RIF0_CLK_C,		SEL_DRIF0_2),
@@ -1216,14 +1216,14 @@
 
 	PINMUX_IPSR_GPSR(IP13_15_12,	HRX0),
 	PINMUX_IPSR_MSEL(IP13_15_12,	MSIOF1_RXD_D,		SEL_MSIOF1_3),
-	PINMUX_IPSR_MSEL(IP13_15_12,	SSI_SDATA2_B,		SEL_SSI_1),
+	PINMUX_IPSR_MSEL(IP13_15_12,	SSI_SDATA2_B,		SEL_SSI2_1),
 	PINMUX_IPSR_MSEL(IP13_15_12,	TS_SDEN0_D,		SEL_TSIF0_3),
 	PINMUX_IPSR_MSEL(IP13_15_12,	STP_ISEN_0_D,		SEL_SSP1_0_3),
 	PINMUX_IPSR_MSEL(IP13_15_12,	RIF0_D0_C,		SEL_DRIF0_2),
 
 	PINMUX_IPSR_GPSR(IP13_19_16,	HTX0),
 	PINMUX_IPSR_MSEL(IP13_19_16,	MSIOF1_TXD_D,		SEL_MSIOF1_3),
-	PINMUX_IPSR_MSEL(IP13_19_16,	SSI_SDATA9_B,		SEL_SSI_1),
+	PINMUX_IPSR_MSEL(IP13_19_16,	SSI_SDATA9_B,		SEL_SSI9_1),
 	PINMUX_IPSR_MSEL(IP13_19_16,	TS_SDAT0_D,		SEL_TSIF0_3),
 	PINMUX_IPSR_MSEL(IP13_19_16,	STP_ISD_0_D,		SEL_SSP1_0_3),
 	PINMUX_IPSR_MSEL(IP13_19_16,	RIF0_D1_C,		SEL_DRIF0_2),
@@ -1231,7 +1231,7 @@
 	PINMUX_IPSR_GPSR(IP13_23_20,	HCTS0_N),
 	PINMUX_IPSR_MSEL(IP13_23_20,	RX2_B,			SEL_SCIF2_1),
 	PINMUX_IPSR_MSEL(IP13_23_20,	MSIOF1_SYNC_D,		SEL_MSIOF1_3),
-	PINMUX_IPSR_MSEL(IP13_23_20,	SSI_SCK9_A,		SEL_SSI_0),
+	PINMUX_IPSR_MSEL(IP13_23_20,	SSI_SCK9_A,		SEL_SSI9_0),
 	PINMUX_IPSR_MSEL(IP13_23_20,	TS_SPSYNC0_D,		SEL_TSIF0_3),
 	PINMUX_IPSR_MSEL(IP13_23_20,	STP_ISSYNC_0_D,		SEL_SSP1_0_3),
 	PINMUX_IPSR_MSEL(IP13_23_20,	RIF0_SYNC_C,		SEL_DRIF0_2),
@@ -1240,7 +1240,7 @@
 	PINMUX_IPSR_GPSR(IP13_27_24,	HRTS0_N),
 	PINMUX_IPSR_MSEL(IP13_27_24,	TX2_B,			SEL_SCIF2_1),
 	PINMUX_IPSR_MSEL(IP13_27_24,	MSIOF1_SS1_D,		SEL_MSIOF1_3),
-	PINMUX_IPSR_MSEL(IP13_27_24,	SSI_WS9_A,		SEL_SSI_0),
+	PINMUX_IPSR_MSEL(IP13_27_24,	SSI_WS9_A,		SEL_SSI9_0),
 	PINMUX_IPSR_MSEL(IP13_27_24,	STP_IVCXO27_0_D,	SEL_SSP1_0_3),
 	PINMUX_IPSR_MSEL(IP13_27_24,	BPFCLK_A,		SEL_FM_0),
 	PINMUX_IPSR_GPSR(IP13_27_24,	AUDIO_CLKOUT2_A),
@@ -1255,7 +1255,7 @@
 	PINMUX_IPSR_MSEL(IP14_3_0,	RX5_A,			SEL_SCIF5_0),
 	PINMUX_IPSR_MSEL(IP14_3_0,	NFWP_N_A,		SEL_NDF_0),
 	PINMUX_IPSR_MSEL(IP14_3_0,	AUDIO_CLKA_C,		SEL_ADG_A_2),
-	PINMUX_IPSR_MSEL(IP14_3_0,	SSI_SCK2_A,		SEL_SSI_0),
+	PINMUX_IPSR_MSEL(IP14_3_0,	SSI_SCK2_A,		SEL_SSI2_0),
 	PINMUX_IPSR_MSEL(IP14_3_0,	STP_IVCXO27_0_C,	SEL_SSP1_0_2),
 	PINMUX_IPSR_GPSR(IP14_3_0,	AUDIO_CLKOUT3_A),
 	PINMUX_IPSR_MSEL(IP14_3_0,	TCLK1_B,		SEL_TIMER_TMU_1),
@@ -1264,7 +1264,7 @@
 	PINMUX_IPSR_MSEL(IP14_7_4,	TX5_A,			SEL_SCIF5_0),
 	PINMUX_IPSR_MSEL(IP14_7_4,	MSIOF1_SS2_D,		SEL_MSIOF1_3),
 	PINMUX_IPSR_MSEL(IP14_7_4,	AUDIO_CLKC_A,		SEL_ADG_C_0),
-	PINMUX_IPSR_MSEL(IP14_7_4,	SSI_WS2_A,		SEL_SSI_0),
+	PINMUX_IPSR_MSEL(IP14_7_4,	SSI_WS2_A,		SEL_SSI2_0),
 	PINMUX_IPSR_MSEL(IP14_7_4,	STP_OPWM_0_D,		SEL_SSP1_0_3),
 	PINMUX_IPSR_GPSR(IP14_7_4,	AUDIO_CLKOUT_D),
 	PINMUX_IPSR_MSEL(IP14_7_4,	SPEEDIN_B,		SEL_SPEED_PULSE_1),
@@ -1292,10 +1292,10 @@
 	PINMUX_IPSR_MSEL(IP14_31_28,	MSIOF1_SS2_F,		SEL_MSIOF1_5),
 
 	/* IPSR15 */
-	PINMUX_IPSR_MSEL(IP15_3_0,	SSI_SDATA1_A,		SEL_SSI_0),
+	PINMUX_IPSR_MSEL(IP15_3_0,	SSI_SDATA1_A,		SEL_SSI1_0),
 
-	PINMUX_IPSR_MSEL(IP15_7_4,	SSI_SDATA2_A,		SEL_SSI_0),
-	PINMUX_IPSR_MSEL(IP15_7_4,	SSI_SCK1_B,		SEL_SSI_1),
+	PINMUX_IPSR_MSEL(IP15_7_4,	SSI_SDATA2_A,		SEL_SSI2_0),
+	PINMUX_IPSR_MSEL(IP15_7_4,	SSI_SCK1_B,		SEL_SSI1_1),
 
 	PINMUX_IPSR_GPSR(IP15_11_8,	SSI_SCK34),
 	PINMUX_IPSR_MSEL(IP15_11_8,	MSIOF1_SS1_A,		SEL_MSIOF1_0),
@@ -1381,11 +1381,11 @@
 	PINMUX_IPSR_MSEL(IP16_27_24,	RIF1_D1_A,		SEL_DRIF1_0),
 	PINMUX_IPSR_MSEL(IP16_27_24,	RIF3_D1_A,		SEL_DRIF3_0),
 
-	PINMUX_IPSR_MSEL(IP16_31_28,	SSI_SDATA9_A,		SEL_SSI_0),
+	PINMUX_IPSR_MSEL(IP16_31_28,	SSI_SDATA9_A,		SEL_SSI9_0),
 	PINMUX_IPSR_MSEL(IP16_31_28,	HSCK2_B,		SEL_HSCIF2_1),
 	PINMUX_IPSR_MSEL(IP16_31_28,	MSIOF1_SS1_C,		SEL_MSIOF1_2),
 	PINMUX_IPSR_MSEL(IP16_31_28,	HSCK1_A,		SEL_HSCIF1_0),
-	PINMUX_IPSR_MSEL(IP16_31_28,	SSI_WS1_B,		SEL_SSI_1),
+	PINMUX_IPSR_MSEL(IP16_31_28,	SSI_WS1_B,		SEL_SSI1_1),
 	PINMUX_IPSR_GPSR(IP16_31_28,	SCK1),
 	PINMUX_IPSR_MSEL(IP16_31_28,	STP_IVCXO27_1_A,	SEL_SSP1_1_0),
 	PINMUX_IPSR_GPSR(IP16_31_28,	SCK5_A),
@@ -1417,7 +1417,7 @@
 
 	PINMUX_IPSR_GPSR(IP17_19_16,	USB1_PWEN),
 	PINMUX_IPSR_MSEL(IP17_19_16,	SIM0_CLK_C,		SEL_SIMCARD_2),
-	PINMUX_IPSR_MSEL(IP17_19_16,	SSI_SCK1_A,		SEL_SSI_0),
+	PINMUX_IPSR_MSEL(IP17_19_16,	SSI_SCK1_A,		SEL_SSI1_0),
 	PINMUX_IPSR_MSEL(IP17_19_16,	TS_SCK0_E,		SEL_TSIF0_4),
 	PINMUX_IPSR_MSEL(IP17_19_16,	STP_ISCLK_0_E,		SEL_SSP1_0_4),
 	PINMUX_IPSR_MSEL(IP17_19_16,	FMCLK_B,		SEL_FM_1),
@@ -1427,7 +1427,7 @@
 
 	PINMUX_IPSR_GPSR(IP17_23_20,	USB1_OVC),
 	PINMUX_IPSR_MSEL(IP17_23_20,	MSIOF1_SS2_C,		SEL_MSIOF1_2),
-	PINMUX_IPSR_MSEL(IP17_23_20,	SSI_WS1_A,		SEL_SSI_0),
+	PINMUX_IPSR_MSEL(IP17_23_20,	SSI_WS1_A,		SEL_SSI1_0),
 	PINMUX_IPSR_MSEL(IP17_23_20,	TS_SDAT0_E,		SEL_TSIF0_4),
 	PINMUX_IPSR_MSEL(IP17_23_20,	STP_ISD_0_E,		SEL_SSP1_0_4),
 	PINMUX_IPSR_MSEL(IP17_23_20,	FMIN_B,			SEL_FM_1),
@@ -1437,7 +1437,7 @@
 
 	PINMUX_IPSR_GPSR(IP17_27_24,	USB30_PWEN),
 	PINMUX_IPSR_GPSR(IP17_27_24,	AUDIO_CLKOUT_B),
-	PINMUX_IPSR_MSEL(IP17_27_24,	SSI_SCK2_B,		SEL_SSI_1),
+	PINMUX_IPSR_MSEL(IP17_27_24,	SSI_SCK2_B,		SEL_SSI2_1),
 	PINMUX_IPSR_MSEL(IP17_27_24,	TS_SDEN1_D,		SEL_TSIF1_3),
 	PINMUX_IPSR_MSEL(IP17_27_24,	STP_ISEN_1_D,		SEL_SSP1_1_2),
 	PINMUX_IPSR_MSEL(IP17_27_24,	STP_OPWM_0_E,		SEL_SSP1_0_4),
@@ -1449,7 +1449,7 @@
 
 	PINMUX_IPSR_GPSR(IP17_31_28,	USB30_OVC),
 	PINMUX_IPSR_GPSR(IP17_31_28,	AUDIO_CLKOUT1_B),
-	PINMUX_IPSR_MSEL(IP17_31_28,	SSI_WS2_B,		SEL_SSI_1),
+	PINMUX_IPSR_MSEL(IP17_31_28,	SSI_WS2_B,		SEL_SSI2_1),
 	PINMUX_IPSR_MSEL(IP17_31_28,	TS_SPSYNC1_D,		SEL_TSIF1_3),
 	PINMUX_IPSR_MSEL(IP17_31_28,	STP_ISSYNC_1_D,		SEL_SSP1_1_3),
 	PINMUX_IPSR_MSEL(IP17_31_28,	STP_IVCXO27_0_E,	SEL_SSP1_0_4),
@@ -1460,7 +1460,7 @@
 	/* IPSR18 */
 	PINMUX_IPSR_GPSR(IP18_3_0,	GP6_30),
 	PINMUX_IPSR_GPSR(IP18_3_0,	AUDIO_CLKOUT2_B),
-	PINMUX_IPSR_MSEL(IP18_3_0,	SSI_SCK9_B,		SEL_SSI_1),
+	PINMUX_IPSR_MSEL(IP18_3_0,	SSI_SCK9_B,		SEL_SSI9_1),
 	PINMUX_IPSR_MSEL(IP18_3_0,	TS_SDEN0_E,		SEL_TSIF0_4),
 	PINMUX_IPSR_MSEL(IP18_3_0,	STP_ISEN_0_E,		SEL_SSP1_0_4),
 	PINMUX_IPSR_MSEL(IP18_3_0,	RIF2_D0_B,		SEL_DRIF2_1),
@@ -1471,7 +1471,7 @@
 
 	PINMUX_IPSR_GPSR(IP18_7_4,	GP6_31),
 	PINMUX_IPSR_GPSR(IP18_7_4,	AUDIO_CLKOUT3_B),
-	PINMUX_IPSR_MSEL(IP18_7_4,	SSI_WS9_B,		SEL_SSI_1),
+	PINMUX_IPSR_MSEL(IP18_7_4,	SSI_WS9_B,		SEL_SSI9_1),
 	PINMUX_IPSR_MSEL(IP18_7_4,	TS_SPSYNC0_E,		SEL_TSIF0_4),
 	PINMUX_IPSR_MSEL(IP18_7_4,	STP_ISSYNC_0_E,		SEL_SSP1_0_4),
 	PINMUX_IPSR_MSEL(IP18_7_4,	RIF2_D1_B,		SEL_DRIF2_1),
diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_com.h b/drivers/platform/msm/ep_pcie/ep_pcie_com.h
index 33e0314..36d49e4 100644
--- a/drivers/platform/msm/ep_pcie/ep_pcie_com.h
+++ b/drivers/platform/msm/ep_pcie/ep_pcie_com.h
@@ -359,7 +359,7 @@
 	ulong                        global_irq_counter;
 
 	bool                         dump_conf;
-
+	bool                         config_mmio_init;
 	bool                         enumerated;
 	enum ep_pcie_link_status     link_status;
 	bool                         perst_deast;
diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_core.c b/drivers/platform/msm/ep_pcie/ep_pcie_core.c
index 4558530..0ada0bf 100644
--- a/drivers/platform/msm/ep_pcie/ep_pcie_core.c
+++ b/drivers/platform/msm/ep_pcie/ep_pcie_core.c
@@ -503,6 +503,13 @@
 		"Initial version of MMIO is:0x%x\n",
 		readl_relaxed(dev->mmio + PCIE20_MHIVER));
 
+	if (dev->config_mmio_init) {
+		EP_PCIE_DBG(dev,
+			"PCIe V%d: MMIO already initialized, return\n",
+				dev->rev);
+		return;
+	}
+
 	ep_pcie_write_reg(dev->mmio, PCIE20_MHICFG, 0x02800880);
 	ep_pcie_write_reg(dev->mmio, PCIE20_BHI_EXECENV, 0x2);
 	ep_pcie_write_reg(dev->mmio, PCIE20_MHICTRL, 0x0);
@@ -511,6 +518,8 @@
 	ep_pcie_write_reg(dev->mmio, PCIE20_BHI_VERSION_LOWER, 0x2);
 	ep_pcie_write_reg(dev->mmio, PCIE20_BHI_VERSION_UPPER, 0x1);
 	ep_pcie_write_reg(dev->mmio, PCIE20_BHI_INTVEC, 0xffffffff);
+
+	dev->config_mmio_init = true;
 }
 
 static void ep_pcie_core_init(struct ep_pcie_dev_t *dev, bool configured)
diff --git a/drivers/platform/msm/gsi/Makefile b/drivers/platform/msm/gsi/Makefile
index b350a59..1eed995 100644
--- a/drivers/platform/msm/gsi/Makefile
+++ b/drivers/platform/msm/gsi/Makefile
@@ -1 +1,8 @@
 obj-$(CONFIG_GSI) += gsi.o gsi_dbg.o
+
+ifdef CONFIG_X86
+ccflags-y += -DIPA_EMULATION_COMPILE=1
+obj-$(CONFIG_GSI) += gsi_emulation.o
+else
+ccflags-y += -DIPA_EMULATION_COMPILE=0
+endif
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index dc445a0..2c29538 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -20,6 +20,7 @@
 #include <linux/delay.h>
 #include "gsi.h"
 #include "gsi_reg.h"
+#include "gsi_emulation.h"
 
 #define GSI_CMD_TIMEOUT (5*HZ)
 #define GSI_STOP_CMD_TIMEOUT_MS 20
@@ -33,6 +34,8 @@
 	{ },
 };
 
+static bool running_emulation = IPA_EMULATION_COMPILE;
+
 struct gsi_ctx *gsi_ctx;
 
 static void __gsi_config_type_irq(int ee, uint32_t mask, uint32_t val)
@@ -577,7 +580,7 @@
 		if (!type)
 			break;
 
-		GSIDBG_LOW("type %x\n", type);
+		GSIDBG_LOW("type 0x%x\n", type);
 
 		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_BMSK)
 			gsi_handle_ch_ctrl(ee);
@@ -777,17 +780,57 @@
 			GSIERR("bad irq specified %u\n", props->irq);
 			return -GSI_STATUS_INVALID_PARAMS;
 		}
-
-		res = devm_request_irq(gsi_ctx->dev, props->irq,
+		/*
+		 * On a real UE, there are two separate interrupt
+		 * vectors that get directed toward the GSI/IPA
+		 * drivers.  They are handled by gsi_isr() and
+		 * (ipa_isr() or ipa3_isr()) respectively.  In the
+		 * emulation environment, this is not the case;
+		 * instead, interrupt vectors are routed to the
+		 * emulation hardware's interrupt controller, which in
+		 * turn forwards a single interrupt to the GSI/IPA
+		 * driver.  When the new interrupt vector is received,
+		 * the driver needs to probe the interrupt
+		 * controller's registers to see if one, the other, or
+		 * both interrupts have occurred.  Given the above, we
+		 * now need to handle both situations, namely: the
+		 * emulator's and the real UE.
+		 */
+		if (running_emulation) {
+			/*
+			 * New scheme involving the emulator's
+			 * interrupt controller.
+			 */
+			res = devm_request_threaded_irq(
+				gsi_ctx->dev,
+				props->irq,
+				/* top half handler to follow */
+				emulator_hard_irq_isr,
+				/* threaded bottom half handler to follow */
+				emulator_soft_irq_isr,
+				IRQF_SHARED,
+				"emulator_intcntrlr",
+				gsi_ctx);
+		} else {
+			/*
+			 * Traditional scheme used on the real UE.
+			 */
+			res = devm_request_irq(gsi_ctx->dev, props->irq,
 				(irq_handler_t) gsi_isr,
 				props->req_clk_cb ? IRQF_TRIGGER_RISING :
 					IRQF_TRIGGER_HIGH,
 				"gsi",
 				gsi_ctx);
+		}
 		if (res) {
-			GSIERR("failed to register isr for %u\n", props->irq);
+			GSIERR(
+			 "failed to register isr for %u\n",
+			 props->irq);
 			return -GSI_STATUS_ERROR;
 		}
+		GSIDBG(
+			"succeeded to register isr for %u\n",
+			props->irq);
 
 		res = enable_irq_wake(props->irq);
 		if (res)
@@ -808,6 +851,41 @@
 		return -GSI_STATUS_RES_ALLOC_FAILURE;
 	}
 
+	GSIDBG("GSI base(%pa) mapped to (%pK) with len (0x%lx)\n",
+	       &(props->phys_addr),
+	       gsi_ctx->base,
+	       props->size);
+
+	if (running_emulation) {
+		GSIDBG("GSI SW ver register value 0x%x\n",
+		       gsi_readl(gsi_ctx->base +
+		       GSI_EE_n_GSI_SW_VERSION_OFFS(0)));
+		gsi_ctx->intcntrlr_mem_size =
+		    props->emulator_intcntrlr_size;
+		gsi_ctx->intcntrlr_base =
+		    devm_ioremap_nocache(
+			gsi_ctx->dev,
+			props->emulator_intcntrlr_addr,
+			props->emulator_intcntrlr_size);
+		if (!gsi_ctx->intcntrlr_base) {
+			GSIERR(
+			  "failed to remap emulator's interrupt controller HW\n");
+			devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
+			devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
+			return -GSI_STATUS_RES_ALLOC_FAILURE;
+		}
+
+		GSIDBG(
+		    "Emulator's interrupt controller base(%pa) mapped to (%pK) with len (0x%lx)\n",
+		    &(props->emulator_intcntrlr_addr),
+		    gsi_ctx->intcntrlr_base,
+		    props->emulator_intcntrlr_size);
+
+		gsi_ctx->intcntrlr_gsi_isr = gsi_isr;
+		gsi_ctx->intcntrlr_client_isr =
+		    props->emulator_intcntrlr_client_isr;
+	}
+
 	gsi_ctx->per = *props;
 	gsi_ctx->per_registered = true;
 	mutex_init(&gsi_ctx->mlock);
@@ -816,6 +894,9 @@
 	gsi_ctx->max_ch = gsi_get_max_channels(gsi_ctx->per.ver);
 	if (gsi_ctx->max_ch == 0) {
 		devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
+		if (running_emulation)
+			devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
+		gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
 		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
 		GSIERR("failed to get max channels\n");
 		return -GSI_STATUS_ERROR;
@@ -823,6 +904,9 @@
 	gsi_ctx->max_ev = gsi_get_max_event_rings(gsi_ctx->per.ver);
 	if (gsi_ctx->max_ev == 0) {
 		devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
+		if (running_emulation)
+			devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
+		gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
 		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
 		GSIERR("failed to get max event rings\n");
 		return -GSI_STATUS_ERROR;
@@ -831,7 +915,9 @@
 	if (props->mhi_er_id_limits_valid &&
 	    props->mhi_er_id_limits[0] > (gsi_ctx->max_ev - 1)) {
 		devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
-		gsi_ctx->base = NULL;
+		if (running_emulation)
+			devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
+		gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
 		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
 		GSIERR("MHI event ring start id %u is beyond max %u\n",
 			props->mhi_er_id_limits[0], gsi_ctx->max_ev);
@@ -872,6 +958,22 @@
 		gsi_writel(0, gsi_ctx->base +
 			GSI_EE_n_ERROR_LOG_OFFS(gsi_ctx->per.ee));
 
+	if (running_emulation) {
+		/*
+		 * Set up the emulator's interrupt controller...
+		 */
+		res = setup_emulator_cntrlr(
+		    gsi_ctx->intcntrlr_base, gsi_ctx->intcntrlr_mem_size);
+		if (res != 0) {
+			devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
+			devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
+			gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
+			devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
+			GSIERR("setup_emulator_cntrlr() failed\n");
+			return res;
+		}
+	}
+
 	*dev_hdl = (uintptr_t)gsi_ctx;
 
 	return GSI_STATUS_SUCCESS;
@@ -1190,6 +1292,7 @@
 		if (!props->evchid_valid)
 			clear_bit(evt_id, &gsi_ctx->evt_bmap);
 		mutex_unlock(&gsi_ctx->mlock);
+		BUG();
 		return -GSI_STATUS_RES_ALLOC_FAILURE;
 	}
 
@@ -1572,6 +1675,10 @@
 			 GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK) |
 		((props->use_db_eng << GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT) &
 			 GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK));
+	if (gsi_ctx->per.ver >= GSI_VER_2_0)
+		val |= ((props->prefetch_mode <<
+			GSI_EE_n_GSI_CH_k_QOS_USE_ESCAPE_BUF_ONLY_SHFT)
+			& GSI_EE_n_GSI_CH_k_QOS_USE_ESCAPE_BUF_ONLY_BMSK);
 	gsi_writel(val, gsi_ctx->base +
 			GSI_EE_n_GSI_CH_k_QOS_OFFS(props->ch_id, ee));
 }
@@ -1770,6 +1877,32 @@
 }
 EXPORT_SYMBOL(gsi_alloc_channel);
 
+static void __gsi_read_channel_scratch(unsigned long chan_hdl,
+		union __packed gsi_channel_scratch * val)
+{
+	uint32_t reg;
+
+	reg = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	val->data.word1 = reg;
+
+	reg = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	val->data.word2 = reg;
+
+	reg = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	val->data.word3 = reg;
+
+	reg = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	val->data.word4 = reg;
+}
+
 static void __gsi_write_channel_scratch(unsigned long chan_hdl,
 		union __packed gsi_channel_scratch val)
 {
@@ -1830,6 +1963,40 @@
 }
 EXPORT_SYMBOL(gsi_write_channel_scratch);
 
+int gsi_read_channel_scratch(unsigned long chan_hdl,
+	union __packed gsi_channel_scratch * ch_scratch)
+{
+	struct gsi_chan_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
+		gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STARTED &&
+		gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
+		GSIERR("bad state %d\n",
+				gsi_ctx->chan[chan_hdl].state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	mutex_lock(&ctx->mlock);
+	__gsi_read_channel_scratch(chan_hdl, ch_scratch);
+	ctx->restore_scratch = *ch_scratch;
+	mutex_unlock(&ctx->mlock);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_read_channel_scratch);
+
 int gsi_query_channel_db_addr(unsigned long chan_hdl,
 		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
 {
@@ -2130,6 +2297,10 @@
 		BUG();
 	}
 
+	/* Hardware issue fixed from GSI 2.0 and no need for the WA */
+	if (gsi_ctx->per.ver >= GSI_VER_2_0)
+		reset_done = true;
+
 	/* workaround: reset GSI producers again */
 	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI && !reset_done) {
 		usleep_range(GSI_RESET_WA_MIN_SLEEP, GSI_RESET_WA_MAX_SLEEP);
@@ -2508,7 +2679,7 @@
 		return -GSI_STATUS_UNSUPPORTED_OP;
 	}
 
-	if (ctx->state != GSI_CHAN_STATE_STARTED) {
+	if (ctx->state == GSI_CHAN_STATE_NOT_ALLOCATED) {
 		GSIERR("bad state %d\n", ctx->state);
 		return -GSI_STATUS_UNSUPPORTED_OP;
 	}
@@ -2617,21 +2788,27 @@
 		return -GSI_STATUS_UNSUPPORTED_OP;
 	}
 
-	spin_lock_irqsave(&gsi_ctx->slock, flags);
 	if (curr == GSI_CHAN_MODE_CALLBACK &&
 			mode == GSI_CHAN_MODE_POLL) {
+		spin_lock_irqsave(&gsi_ctx->slock, flags);
 		__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, 0);
+		spin_unlock_irqrestore(&gsi_ctx->slock, flags);
+		spin_lock_irqsave(&ctx->ring.slock, flags);
 		atomic_set(&ctx->poll_mode, mode);
+		spin_unlock_irqrestore(&ctx->ring.slock, flags);
 		ctx->stats.callback_to_poll++;
 	}
 
 	if (curr == GSI_CHAN_MODE_POLL &&
 			mode == GSI_CHAN_MODE_CALLBACK) {
+		spin_lock_irqsave(&ctx->ring.slock, flags);
 		atomic_set(&ctx->poll_mode, mode);
+		spin_unlock_irqrestore(&ctx->ring.slock, flags);
+		spin_lock_irqsave(&gsi_ctx->slock, flags);
 		__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, ~0);
+		spin_unlock_irqrestore(&gsi_ctx->slock, flags);
 		ctx->stats.poll_to_callback++;
 	}
-	spin_unlock_irqrestore(&gsi_ctx->slock, flags);
 
 	return GSI_STATUS_SUCCESS;
 }
@@ -2726,24 +2903,47 @@
 {
 	void __iomem *gsi_base = (void __iomem *)base;
 
-	gsi_writel(1, gsi_base + GSI_GSI_IRAM_PTR_CH_CMD_OFFS);
-	gsi_writel(2, gsi_base + GSI_GSI_IRAM_PTR_CH_DB_OFFS);
-	gsi_writel(3, gsi_base + GSI_GSI_IRAM_PTR_CH_DIS_COMP_OFFS);
-	gsi_writel(4, gsi_base + GSI_GSI_IRAM_PTR_CH_EMPTY_OFFS);
-	gsi_writel(5, gsi_base + GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS);
-	gsi_writel(6, gsi_base + GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS);
-	gsi_writel(7, gsi_base + GSI_GSI_IRAM_PTR_INT_MOD_STOPED_OFFS);
-	gsi_writel(8, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS);
-	gsi_writel(9, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS);
-	gsi_writel(10, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS);
-	gsi_writel(11, gsi_base + GSI_GSI_IRAM_PTR_NEW_RE_OFFS);
-	gsi_writel(12, gsi_base + GSI_GSI_IRAM_PTR_READ_ENG_COMP_OFFS);
-	gsi_writel(13, gsi_base + GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS);
+	gsi_writel(1,
+		   gsi_base + GSI_GSI_IRAM_PTR_CH_CMD_OFFS);
+	gsi_writel(2,
+		   gsi_base + GSI_GSI_IRAM_PTR_CH_DB_OFFS);
+	gsi_writel(3,
+		   gsi_base + GSI_GSI_IRAM_PTR_CH_DIS_COMP_OFFS);
+	gsi_writel(4,
+		   gsi_base + GSI_GSI_IRAM_PTR_CH_EMPTY_OFFS);
+	gsi_writel(5,
+		   gsi_base + GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS);
+	gsi_writel(6,
+		   gsi_base + GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS);
+	gsi_writel(7,
+		   gsi_base + GSI_GSI_IRAM_PTR_INT_MOD_STOPED_OFFS);
+	gsi_writel(8,
+		   gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS);
+	gsi_writel(9,
+		   gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS);
+	gsi_writel(10,
+		   gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS);
+	gsi_writel(11,
+		   gsi_base + GSI_GSI_IRAM_PTR_NEW_RE_OFFS);
+	gsi_writel(12,
+		   gsi_base + GSI_GSI_IRAM_PTR_READ_ENG_COMP_OFFS);
+	gsi_writel(13,
+		   gsi_base + GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS);
+
+	if (running_emulation) {
+		gsi_writel(14,
+			   gsi_base + GSI_GSI_IRAM_PTR_EV_DB_OFFS);
+		gsi_writel(15,
+			   gsi_base + GSI_GSI_IRAM_PTR_UC_GP_INT_OFFS);
+		gsi_writel(16,
+			   gsi_base + GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_OFFS);
+	}
 }
 
 static void gsi_configure_bck_prs_matrix(void *base)
 {
 	void __iomem *gsi_base = (void __iomem *)base;
+
 	/*
 	 * For now, these are default values. In the future, GSI FW image will
 	 * produce optimized back-pressure values based on the FW image.
@@ -2966,15 +3166,45 @@
 	},
 };
 
+static struct platform_device *pdev;
+
 /**
  * Module Init.
  */
 static int __init gsi_init(void)
 {
+	int ret;
+
 	pr_debug("gsi_init\n");
-	return platform_driver_register(&msm_gsi_driver);
+
+	ret = platform_driver_register(&msm_gsi_driver);
+	if (ret < 0)
+		goto out;
+
+	if (running_emulation) {
+		pdev = platform_device_register_simple("gsi", -1, NULL, 0);
+		if (IS_ERR(pdev)) {
+			ret = PTR_ERR(pdev);
+			platform_driver_unregister(&msm_gsi_driver);
+			goto out;
+		}
+	}
+
+out:
+	return ret;
 }
 
+/*
+ * Module exit.
+ */
+static void __exit gsi_exit(void)
+{
+	if (running_emulation && pdev)
+		platform_device_unregister(pdev);
+	platform_driver_unregister(&msm_gsi_driver);
+}
+
+module_exit(gsi_exit);
 arch_initcall(gsi_init);
 
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/msm/gsi/gsi.h b/drivers/platform/msm/gsi/gsi.h
index 7e10405..4a3f4ec 100644
--- a/drivers/platform/msm/gsi/gsi.h
+++ b/drivers/platform/msm/gsi/gsi.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -18,8 +18,16 @@
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
 #include <linux/msm_gsi.h>
+#include <linux/errno.h>
 #include <linux/ipc_logging.h>
 
+/*
+ * The following is for adding code (i.e. for EMULATION) not found on x86.
+ */
+#if IPA_EMULATION_COMPILE == 1
+# include "gsi_emulation_stubs.h"
+#endif
+
 #define GSI_CHAN_MAX      31
 #define GSI_EVT_RING_MAX  23
 #define GSI_NO_EVT_ERINDEX 31
@@ -129,6 +137,7 @@
 	bool allocated;
 	atomic_t poll_mode;
 	union __packed gsi_channel_scratch scratch;
+	union __packed gsi_channel_scratch restore_scratch;
 	struct gsi_chan_stats stats;
 	bool enable_dp_stats;
 	bool print_dp_stats;
@@ -204,6 +213,13 @@
 	struct completion gen_ee_cmd_compl;
 	void *ipc_logbuf;
 	void *ipc_logbuf_low;
+	/*
+	 * The following are used only on emulation systems.
+	 */
+	void __iomem *intcntrlr_base;
+	u32 intcntrlr_mem_size;
+	irq_handler_t intcntrlr_gsi_isr;
+	irq_handler_t intcntrlr_client_isr;
 };
 
 enum gsi_re_type {
diff --git a/drivers/platform/msm/gsi/gsi_emulation.c b/drivers/platform/msm/gsi/gsi_emulation.c
new file mode 100644
index 0000000..adaaaaa
--- /dev/null
+++ b/drivers/platform/msm/gsi/gsi_emulation.c
@@ -0,0 +1,233 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "gsi_emulation.h"
+
+/*
+ * *****************************************************************************
+ * The following is used to set up the EMULATION interrupt controller...
+ * *****************************************************************************
+ */
+int setup_emulator_cntrlr(
+	void __iomem *intcntrlr_base,
+	u32           intcntrlr_mem_size)
+{
+	uint32_t val, ver, intrCnt, rangeCnt, range;
+
+	val = gsi_emu_readl(intcntrlr_base + GE_INT_CTL_VER_CNT);
+
+	intrCnt  = val & 0xFFFF;
+	ver      = (val >> 16) & 0xFFFF;
+	rangeCnt = intrCnt / 32;
+
+	GSIDBG(
+	    "CTL_VER_CNT reg val(0x%x) intr cnt(%u) cntrlr ver(0x%x) rangeCnt(%u)\n",
+	    val, intrCnt, ver, rangeCnt);
+
+	/*
+	 * Verify the interrupt controller version
+	 */
+	if (ver == 0 || ver == 0xFFFF || ver < DEO_IC_INT_CTL_VER_MIN) {
+		GSIERR(
+		  "Error: invalid interrupt controller version 0x%x\n",
+		  ver);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	/*
+	 * Verify the interrupt count
+	 *
+	 * NOTE: intrCnt must be at least one block and a multiple of 32
+	 */
+	if ((intrCnt % 32) != 0) {
+		GSIERR(
+		  "Invalid interrupt count read from HW 0x%04x\n",
+		  intrCnt);
+		return -GSI_STATUS_ERROR;
+	}
+
+	/*
+	 * Calculate number of ranges used, each range handles 32 int lines
+	 */
+	if (rangeCnt > DEO_IC_MAX_RANGE_CNT) {
+		GSIERR(
+		  "SW interrupt limit(%u) passed, increase DEO_IC_MAX_RANGE_CNT(%u)\n",
+		  rangeCnt,
+		  DEO_IC_MAX_RANGE_CNT);
+		return -GSI_STATUS_ERROR;
+	}
+
+	/*
+	 * Take the last register offset minus the first register
+	 * offset (i.e. the range) and compare it to the interrupt
+	 * controller's dtsi-defined memory size.  The range must
+	 * fit within that size.
+	 */
+	val = GE_SOFT_INT_n(rangeCnt-1) - GE_INT_CTL_VER_CNT;
+	if (val > intcntrlr_mem_size) {
+		GSIERR(
+		    "Interrupt controller register range (%u) exceeds dtsi provisioned size (%u)\n",
+		    val, intcntrlr_mem_size);
+		return -GSI_STATUS_ERROR;
+	}
+
+	/*
+	 * The following will disable the emulator's interrupt controller,
+	 * so that we can configure it...
+	 */
+	GSIDBG("Writing GE_INT_MASTER_ENABLE\n");
+	gsi_emu_writel(
+		0x0,
+		intcntrlr_base + GE_INT_MASTER_ENABLE);
+
+	/*
+	 * Init register maps of all ranges
+	 */
+	for (range = 0; range < rangeCnt; range++) {
+		/*
+		 * Disable all int sources by setting all enable clear bits
+		 */
+		GSIDBG("Writing GE_INT_ENABLE_CLEAR_n(%u)\n", range);
+		gsi_emu_writel(
+		    0xFFFFFFFF,
+		    intcntrlr_base + GE_INT_ENABLE_CLEAR_n(range));
+
+		/*
+		 * Clear all raw statuses
+		 */
+		GSIDBG("Writing GE_INT_CLEAR_n(%u)\n", range);
+		gsi_emu_writel(
+		    0xFFFFFFFF,
+		    intcntrlr_base + GE_INT_CLEAR_n(range));
+
+		/*
+		 * Init all int types
+		 */
+		GSIDBG("Writing GE_INT_TYPE_n(%u)\n", range);
+		gsi_emu_writel(
+		    0x0,
+		    intcntrlr_base + GE_INT_TYPE_n(range));
+	}
+
+	/*
+	 * The following tells the interrupt controller to interrupt us
+	 * when it sees interrupts from ipa and/or gsi.
+	 *
+	 * Interrupts:
+	 * ===================================================================
+	 * DUT0                       [  63 :   16 ]
+	 * ipa_irq                                        [ 3 : 0 ] <---HERE
+	 * ipa_gsi_bam_irq                                [ 7 : 4 ] <---HERE
+	 * ipa_bam_apu_sec_error_irq                      [ 8 ]
+	 * ipa_bam_apu_non_sec_error_irq                  [ 9 ]
+	 * ipa_bam_xpu2_msa_intr                          [ 10 ]
+	 * ipa_vmidmt_nsgcfgirpt                          [ 11 ]
+	 * ipa_vmidmt_nsgirpt                             [ 12 ]
+	 * ipa_vmidmt_gcfgirpt                            [ 13 ]
+	 * ipa_vmidmt_girpt                               [ 14 ]
+	 * bam_xpu3_qad_non_secure_intr_sp                [ 15 ]
+	 */
+	GSIDBG("Writing GE_INT_ENABLE_n(0)\n");
+	gsi_emu_writel(
+	    0x00FF, /* See <---HERE above */
+	    intcntrlr_base + GE_INT_ENABLE_n(0));
+
+	/*
+	 * The following will enable the IC post config...
+	 */
+	GSIDBG("Writing GE_INT_MASTER_ENABLE\n");
+	gsi_emu_writel(
+	    0x1,
+	    intcntrlr_base + GE_INT_MASTER_ENABLE);
+
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * The following for EMULATION hard irq...
+ * *****************************************************************************
+ */
+irqreturn_t emulator_hard_irq_isr(
+	int   irq,
+	void *ctxt)
+{
+	struct gsi_ctx *gsi_ctx_ptr = (struct gsi_ctx *) ctxt;
+
+	uint32_t val;
+
+	val = gsi_emu_readl(gsi_ctx_ptr->intcntrlr_base + GE_INT_MASTER_STATUS);
+
+	/*
+	 * If bit zero is set, the interrupt is for us; return IRQ_NONE
+	 * when it's not set...
+	 */
+	if (!(val & 0x00000001))
+		return IRQ_NONE;
+
+	/*
+	 * The following will mask (ie. turn off) future interrupts from
+	 * the emulator's interrupt controller. It will stay this way until
+	 * we turn it back on, which will be done in the bottom half
+	 * (ie. emulator_soft_irq_isr)...
+	 */
+	gsi_emu_writel(
+		0x0,
+		gsi_ctx_ptr->intcntrlr_base + GE_INT_OUT_ENABLE);
+
+	return IRQ_WAKE_THREAD;
+}
+
+/*
+ * *****************************************************************************
+ * The following for EMULATION soft irq...
+ * *****************************************************************************
+ */
+irqreturn_t emulator_soft_irq_isr(
+	int   irq,
+	void *ctxt)
+{
+	struct gsi_ctx *gsi_ctx_ptr = (struct gsi_ctx *) ctxt;
+
+	irqreturn_t retVal = IRQ_HANDLED;
+	uint32_t	val;
+
+	val = gsi_emu_readl(gsi_ctx_ptr->intcntrlr_base + GE_IRQ_STATUS_n(0));
+
+	GSIDBG("Got irq(%d) with status(0x%08X)\n", irq, val);
+
+	if (val & 0xF0 && gsi_ctx_ptr->intcntrlr_gsi_isr) {
+		GSIDBG("Got gsi interrupt\n");
+		retVal = gsi_ctx_ptr->intcntrlr_gsi_isr(irq, ctxt);
+	}
+
+	if (val & 0x0F && gsi_ctx_ptr->intcntrlr_client_isr) {
+		GSIDBG("Got ipa interrupt\n");
+		retVal = gsi_ctx_ptr->intcntrlr_client_isr(irq, 0);
+	}
+
+	/*
+	 * The following will clear the interrupts...
+	 */
+	gsi_emu_writel(
+		0xFFFFFFFF,
+		gsi_ctx_ptr->intcntrlr_base + GE_INT_CLEAR_n(0));
+
+	/*
+	 * The following will unmask (ie. turn on) future interrupts from
+	 * the emulator's interrupt controller...
+	 */
+	gsi_emu_writel(
+		0x1,
+		gsi_ctx_ptr->intcntrlr_base + GE_INT_OUT_ENABLE);
+
+	return retVal;
+}
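/*
 * Illustrative sketch (not part of the patch above): emulator_soft_irq_isr()
 * demultiplexes the single emulator interrupt by testing GE_IRQ_STATUS_n(0):
 * bits [3:0] indicate an IPA interrupt, bits [7:4] a GSI interrupt, and both
 * can be set at once.  The model below reuses those masks; the sample status
 * values are made up for illustration.
 */
#include <stdio.h>

static void demux(unsigned int status)
{
	printf("status=0x%02x:", status);
	if (status & 0xF0)
		printf(" gsi_isr()");	/* forwarded via intcntrlr_gsi_isr */
	if (status & 0x0F)
		printf(" ipa_isr()");	/* forwarded via intcntrlr_client_isr */
	if (!(status & 0xFF))
		printf(" (nothing to do)");
	printf("\n");
}

int main(void)
{
	demux(0x10);	/* GSI only */
	demux(0x01);	/* IPA only */
	demux(0x11);	/* both sources in one controller interrupt */
	return 0;
}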
diff --git a/drivers/platform/msm/gsi/gsi_emulation.h b/drivers/platform/msm/gsi/gsi_emulation.h
new file mode 100644
index 0000000..246f9a8
--- /dev/null
+++ b/drivers/platform/msm/gsi/gsi_emulation.h
@@ -0,0 +1,192 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#if !defined(_GSI_EMULATION_H_)
+# define _GSI_EMULATION_H_
+
+# include <linux/interrupt.h>
+
+# include "gsi.h"
+# include "gsi_reg.h"
+# include "gsi_emulation_stubs.h"
+
+# define gsi_emu_readl(c)     ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+# define gsi_emu_writel(v, c) ({ __iowmb(); writel_relaxed((v), (c)); })
+
+# define CNTRLR_BASE 0
+
+/*
+ * The following file contains definitions and declarations that are
+ * germane only to the IPA emulation system, which is run from an X86
+ * environment.  Declarations for non-X86 (i.e. arm) are merely stubs
+ * to facilitate compile and link.
+ *
+ * Interrupt controller registers.
+ * Descriptions taken from the EMULATION interrupt controller SWI.
+ * - There is only one Master Enable register
+ * - Each group of 32 interrupt lines (range) is controlled by 8 registers,
+ *   which are consecutive in memory:
+ *      GE_INT_ENABLE_n
+ *      GE_INT_ENABLE_CLEAR_n
+ *      GE_INT_ENABLE_SET_n
+ *      GE_INT_TYPE_n
+ *      GE_IRQ_STATUS_n
+ *      GE_RAW_STATUS_n
+ *      GE_INT_CLEAR_n
+ *      GE_SOFT_INT_n
+ * - After the above 8 registers, there are the registers of the next
+ *   group (range) of 32 interrupt lines, and so on.
+ */
+
+/** @brief The interrupt controller version and interrupt count register.
+ *         Specifies interrupt controller version (upper 16 bits) and the
+ *         number of interrupt lines supported by HW (lower 16 bits).
+ */
+# define GE_INT_CTL_VER_CNT              \
+	(CNTRLR_BASE + 0x0000)
+
+/** @brief Enable or disable physical IRQ output signal to the system,
+ *         not affecting any status registers.
+ *
+ *         0x0 : DISABLE IRQ output disabled
+ *         0x1 : ENABLE  IRQ output enabled
+ */
+# define GE_INT_OUT_ENABLE               \
+	(CNTRLR_BASE + 0x0004)
+
+/** @brief The IRQ master enable register.
+ *         Bit #0: IRQ_ENABLE, set 0 to disable, 1 to enable.
+ */
+# define GE_INT_MASTER_ENABLE            \
+	(CNTRLR_BASE + 0x0008)
+
+# define GE_INT_MASTER_STATUS            \
+	(CNTRLR_BASE + 0x000C)
+
+/** @brief Each bit disables (bit=0, default) or enables (bit=1) the
+ *         corresponding interrupt source
+ */
+# define GE_INT_ENABLE_n(n)              \
+	(CNTRLR_BASE + 0x0010 + 0x20 * (n))
+
+/** @brief Write bit=1 to clear (to 0) the corresponding bit(s) in INT_ENABLE.
+ *         Does nothing for bit=0
+ */
+# define GE_INT_ENABLE_CLEAR_n(n)        \
+	(CNTRLR_BASE + 0x0014 + 0x20 * (n))
+
+/** @brief Write bit=1 to set (to 1) the corresponding bit(s) in INT_ENABLE.
+ *         Does nothing for bit=0
+ */
+# define GE_INT_ENABLE_SET_n(n)          \
+	(CNTRLR_BASE + 0x0018 + 0x20 * (n))
+
+/** @brief Select level (bit=0, default) or edge (bit=1) sensitive input
+ *         detection logic for each corresponding interrupt source
+ */
+# define GE_INT_TYPE_n(n)                \
+	(CNTRLR_BASE + 0x001C + 0x20 * (n))
+
+/** @brief Shows the interrupt sources captured in RAW_STATUS that have been
+ *         steered to irq_n by INT_SELECT. Interrupts must also be enabled by
+ *         INT_ENABLE and MASTER_ENABLE. Read only register.
+ *         Bit values: 1=active, 0=inactive
+ */
+# define GE_IRQ_STATUS_n(n)                      \
+	(CNTRLR_BASE + 0x0020 + 0x20 * (n))
+
+/** @brief Shows the interrupt sources that have been latched by the input
+ *         logic of the Interrupt Controller. Read only register.
+ *         Bit values: 1=active, 0=inactive
+ */
+# define GE_RAW_STATUS_n(n)                      \
+	(CNTRLR_BASE + 0x0024 + 0x20 * (n))
+
+/** @brief Write bit=1 to clear the corresponding bit(s) in RAW_STATUS.
+ *         Does nothing for bit=0
+ */
+# define GE_INT_CLEAR_n(n)               \
+	(CNTRLR_BASE + 0x0028 + 0x20 * (n))
+
+/** @brief Write bit=1 to set the corresponding bit(s) in RAW_STATUS.
+ *         Does nothing for bit=0.
+ *  @note  Only functional for edge detected interrupts
+ */
+# define GE_SOFT_INT_n(n)                        \
+	(CNTRLR_BASE + 0x002C + 0x20 * (n))
+
+/** @brief Maximal number of ranges in SW. Each range supports 32 interrupt
+ *         lines. If HW is extended considerably, increase this value
+ */
+# define DEO_IC_MAX_RANGE_CNT            8
+
+/** @brief Size of the registers of one range in memory, in bytes */
+# define DEO_IC_RANGE_MEM_SIZE           32  /* SWI: 8 registers, no gaps */
+
+/** @brief Minimal Interrupt controller HW version */
+# define DEO_IC_INT_CTL_VER_MIN          0x0102
+
+
+#if IPA_EMULATION_COMPILE == 1 /* declarations to follow */
+
+/*
+ * *****************************************************************************
+ * The following is used to set up the EMULATION interrupt controller...
+ * *****************************************************************************
+ */
+int setup_emulator_cntrlr(
+	void __iomem *intcntrlr_base,
+	u32           intcntrlr_mem_size);
+
+/*
+ * *****************************************************************************
+ * The following for EMULATION hard irq...
+ * *****************************************************************************
+ */
+irqreturn_t emulator_hard_irq_isr(
+	int   irq,
+	void *ctxt);
+
+/*
+ * *****************************************************************************
+ * The following is for the EMULATION soft irq...
+ * *****************************************************************************
+ */
+irqreturn_t emulator_soft_irq_isr(
+	int   irq,
+	void *ctxt);
+
+# else /* #if IPA_EMULATION_COMPILE != 1, then definitions to follow */
+
+static inline int setup_emulator_cntrlr(
+	void __iomem *intcntrlr_base,
+	u32           intcntrlr_mem_size)
+{
+	return 0;
+}
+
+static inline irqreturn_t emulator_hard_irq_isr(
+	int   irq,
+	void *ctxt)
+{
+	return IRQ_NONE;
+}
+
+static inline irqreturn_t emulator_soft_irq_isr(
+	int   irq,
+	void *ctxt)
+{
+	return IRQ_HANDLED;
+}
+
+# endif /* #if IPA_EMULATION_COMPILE == 1 */
+
+#endif /* #if !defined(_GSI_EMULATION_H_) */
diff --git a/drivers/platform/msm/gsi/gsi_emulation_stubs.h b/drivers/platform/msm/gsi/gsi_emulation_stubs.h
new file mode 100644
index 0000000..dd9d0df
--- /dev/null
+++ b/drivers/platform/msm/gsi/gsi_emulation_stubs.h
@@ -0,0 +1,20 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#if !defined(_GSI_EMULATION_STUBS_H_)
+# define _GSI_EMULATION_STUBS_H_
+
+# include <asm/barrier.h>
+# define __iormb()       rmb() /* used in gsi.h */
+# define __iowmb()       wmb() /* used in gsi.h */
+
+#endif /* #if !defined(_GSI_EMULATION_STUBS_H_) */
diff --git a/drivers/platform/msm/gsi/gsi_reg.h b/drivers/platform/msm/gsi/gsi_reg.h
index 7817613..add3876 100644
--- a/drivers/platform/msm/gsi/gsi_reg.h
+++ b/drivers/platform/msm/gsi/gsi_reg.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1124,6 +1124,8 @@
 #define GSI_EE_n_GSI_CH_k_QOS_RMSK 0x303
 #define GSI_EE_n_GSI_CH_k_QOS_MAXk 30
 #define GSI_EE_n_GSI_CH_k_QOS_MAXn 3
+#define GSI_EE_n_GSI_CH_k_QOS_USE_ESCAPE_BUF_ONLY_BMSK 0x400
+#define GSI_EE_n_GSI_CH_k_QOS_USE_ESCAPE_BUF_ONLY_SHFT 0xa
 #define GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK 0x200
 #define GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT 0x9
 #define GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK 0x100
diff --git a/drivers/platform/msm/ipa/Makefile b/drivers/platform/msm/ipa/Makefile
index 15ed471..857e17b 100644
--- a/drivers/platform/msm/ipa/Makefile
+++ b/drivers/platform/msm/ipa/Makefile
@@ -1,3 +1,9 @@
+ifdef CONFIG_X86
+ccflags-y += -DIPA_EMULATION_COMPILE=1
+else
+ccflags-y += -DIPA_EMULATION_COMPILE=0
+endif
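+
+# Note (editorial, not part of the original change): CONFIG_X86 is used here
+# as a proxy for the emulation target, so x86 builds compile the PCI/emulation
+# probe path in ipa_api.c while ARM targets keep the platform-device path.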
+
 obj-$(CONFIG_IPA) += ipa_v2/ ipa_clients/ ipa_common
 obj-$(CONFIG_IPA3) += ipa_v3/ ipa_clients/ ipa_common
 obj-$(CONFIG_IPA_UT) += test/
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index fdcf44d..3a75bdd 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -19,8 +19,16 @@
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/ipa_uc_offload.h>
+#include <linux/pci.h>
 #include "ipa_api.h"
 
+/*
+ * The following is for adding code (i.e. for EMULATION) that is not found on x86.
+ */
+#if IPA_EMULATION_COMPILE == 1
+# include "ipa_v3/ipa_emulation_stubs.h"
+#endif
+
 #define DRV_NAME "ipa"
 
 #define IPA_API_DISPATCH_RETURN(api, p...) \
@@ -96,6 +104,8 @@
 		} \
 	} while (0)
 
+static bool running_emulation = IPA_EMULATION_COMPILE;
+
 static enum ipa_hw_type ipa_api_hw_type;
 static struct ipa_api_controller *ipa_api_ctrl;
 
@@ -2916,6 +2926,57 @@
 	{}
 };
 
+/*********************************************************/
+/*                PCIe Version                           */
+/*********************************************************/
+
+static const struct of_device_id ipa_pci_drv_match[] = {
+	{ .compatible = "qcom,ipa", },
+	{}
+};
+
+/*
+ * Forward declarations of static functions required for PCI
+ * registration
+ *
+ * VENDOR and DEVICE should be defined in pci_ids.h
+ */
+static int ipa_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void ipa_pci_remove(struct pci_dev *pdev);
+static void ipa_pci_shutdown(struct pci_dev *pdev);
+static pci_ers_result_t ipa_pci_io_error_detected(struct pci_dev *dev,
+	pci_channel_state_t state);
+static pci_ers_result_t ipa_pci_io_slot_reset(struct pci_dev *dev);
+static void ipa_pci_io_resume(struct pci_dev *dev);
+
+#define LOCAL_VENDOR 0x17CB
+#define LOCAL_DEVICE 0x00ff
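+
+/* Editorial note: 0x17CB is the PCI vendor ID assigned to Qualcomm; the
+ * device ID used here is assumed to be specific to the IPA emulation card.
+ */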
+
+static const char ipa_pci_driver_name[] = "qcipav3";
+
+static const struct pci_device_id ipa_pci_tbl[] = {
+	{ PCI_DEVICE(LOCAL_VENDOR, LOCAL_DEVICE) },
+	{ 0, 0, 0, 0, 0, 0, 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, ipa_pci_tbl);
+
+/* PCI Error Recovery */
+static const struct pci_error_handlers ipa_pci_err_handler = {
+	.error_detected = ipa_pci_io_error_detected,
+	.slot_reset = ipa_pci_io_slot_reset,
+	.resume = ipa_pci_io_resume,
+};
+
+static struct pci_driver ipa_pci_driver = {
+	.name     = ipa_pci_driver_name,
+	.id_table = ipa_pci_tbl,
+	.probe    = ipa_pci_probe,
+	.remove   = ipa_pci_remove,
+	.shutdown = ipa_pci_shutdown,
+	.err_handler = &ipa_pci_err_handler
+};
+
 static int ipa_generic_plat_drv_probe(struct platform_device *pdev_p)
 {
 	int result;
@@ -3364,10 +3425,86 @@
 	},
 };
 
+/*********************************************************/
+/*                PCIe Version                           */
+/*********************************************************/
+
+static int ipa_pci_probe(
+	struct pci_dev             *pci_dev,
+	const struct pci_device_id *ent)
+{
+	int result;
+
+	if (!pci_dev || !ent) {
+		pr_err(
+		    "Bad arg: pci_dev (%pK) and/or ent (%pK)\n",
+		    pci_dev, ent);
+		return -EOPNOTSUPP;
+	}
+
+	if (!ipa_api_ctrl) {
+		ipa_api_ctrl = kzalloc(sizeof(*ipa_api_ctrl), GFP_KERNEL);
+		if (ipa_api_ctrl == NULL)
+			return -ENOMEM;
+		/* Get IPA HW Version */
+		result = of_property_read_u32(NULL,
+			"qcom,ipa-hw-ver", &ipa_api_hw_type);
+		if (result || ipa_api_hw_type == 0) {
+			pr_err("ipa: get resource failed for ipa-hw-ver!\n");
+			kfree(ipa_api_ctrl);
+			ipa_api_ctrl = NULL;
+			return -ENODEV;
+		}
+		pr_debug("ipa: ipa_api_hw_type = %d", ipa_api_hw_type);
+	}
+
+	/*
+	 * Call a reduced version of platform_probe appropriate for PCIe
+	 */
+	result = ipa3_pci_drv_probe(pci_dev, ipa_api_ctrl, ipa_pci_drv_match);
+
+	if (result && result != -EPROBE_DEFER)
+		pr_err("ipa: ipa3_pci_drv_probe failed\n");
+
+	if (running_emulation)
+		ipa_ut_module_init();
+
+	return result;
+}
+
+static void ipa_pci_remove(struct pci_dev *pci_dev)
+{
+	if (running_emulation)
+		ipa_ut_module_exit();
+}
+
+static void ipa_pci_shutdown(struct pci_dev *pci_dev)
+{
+}
+
+static pci_ers_result_t ipa_pci_io_error_detected(struct pci_dev *pci_dev,
+	pci_channel_state_t state)
+{
+	return 0;
+}
+
+static pci_ers_result_t ipa_pci_io_slot_reset(struct pci_dev *pci_dev)
+{
+	return 0;
+}
+
+static void ipa_pci_io_resume(struct pci_dev *pci_dev)
+{
+}
+
 static int __init ipa_module_init(void)
 {
 	pr_debug("IPA module init\n");
 
+	if (running_emulation) {
+		/* Register as a PCI device driver */
+		return pci_register_driver(&ipa_pci_driver);
+	}
 	/* Register as a platform device driver */
 	return platform_driver_register(&ipa_plat_drv);
 }
diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h
index b7edb6f..cbcb0ee 100644
--- a/drivers/platform/msm/ipa/ipa_api.h
+++ b/drivers/platform/msm/ipa/ipa_api.h
@@ -452,6 +452,10 @@
 int ipa3_plat_drv_probe(struct platform_device *pdev_p,
 	struct ipa_api_controller *api_ctrl,
 	const struct of_device_id *pdrv_match);
+int ipa3_pci_drv_probe(
+	struct pci_dev            *pci_dev,
+	struct ipa_api_controller *api_ctrl,
+	const struct of_device_id *pdrv_match);
 #else
 static inline int ipa3_plat_drv_probe(struct platform_device *pdev_p,
 	struct ipa_api_controller *api_ctrl,
@@ -459,6 +463,13 @@
 {
 	return -ENODEV;
 }
+static inline int ipa3_pci_drv_probe(
+	struct pci_dev            *pci_dev,
+	struct ipa_api_controller *api_ctrl,
+	const struct of_device_id *pdrv_match)
+{
+	return -ENODEV;
+}
 #endif /* (CONFIG_IPA3) */
 
 #endif /* _IPA_API_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_clients/Makefile b/drivers/platform/msm/ipa/ipa_clients/Makefile
index 738d88f..a213130 100644
--- a/drivers/platform/msm/ipa/ipa_clients/Makefile
+++ b/drivers/platform/msm/ipa/ipa_clients/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_IPA3) += ipa_usb.o odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o ipa_wdi3.o
+obj-$(CONFIG_IPA3) += ipa_usb.o odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o ipa_wdi3.o ipa_gsb.o
 obj-$(CONFIG_IPA) += odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o ipa_wdi3.o
 obj-$(CONFIG_ECM_IPA) += ecm_ipa.o
 obj-$(CONFIG_RNDIS_IPA) += rndis_ipa.o
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_gsb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_gsb.c
new file mode 100644
index 0000000..5812d8d
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_gsb.c
@@ -0,0 +1,1084 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/ioctl.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msm_ipa.h>
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/ipv6.h>
+#include <net/addrconf.h>
+#include <linux/ipa.h>
+#include <linux/cdev.h>
+#include <linux/ipa_odu_bridge.h>
+#include "../ipa_common_i.h"
+#ifdef CONFIG_IPA3
+#include "../ipa_v3/ipa_pm.h"
+#endif
+
+#define IPA_GSB_DRV_NAME "ipa_gsb"
+
+#define MAX_SUPPORTED_IFACE 5
+
+#define IPA_GSB_DBG(fmt, args...) \
+	do { \
+		pr_debug(IPA_GSB_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_GSB_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_GSB_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_GSB_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(IPA_GSB_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_GSB_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_GSB_ERR(fmt, args...) \
+	do { \
+		pr_err(IPA_GSB_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_GSB_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_GSB_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_GSB_MAX_MSG_LEN 512
+static char dbg_buff[IPA_GSB_MAX_MSG_LEN];
+
+#define IPA_GSB_SKB_HEADROOM 256
+#define IPA_GSB_SKB_DUMMY_HEADER 42
+#define IPA_GSB_AGGR_BYTE_LIMIT 14
+#define IPA_GSB_AGGR_TIME_LIMIT 1
+
+static struct dentry *dent;
+static struct dentry *dfile_stats;
+
+/**
+ * struct stats - driver statistics,
+ * @num_ul_packets: number of uplink packets
+ * @num_dl_packets: number of downlink packets
+ * @num_insufficient_headroom_packets: number of
+ *	packets with insufficient headroom
+ */
+struct stats {
+	u64 num_ul_packets;
+	u64 num_dl_packets;
+	u64 num_insufficient_headroom_packets;
+};
+
+/**
+ * struct ipa_gsb_mux_hdr - ipa gsb mux header,
+ * @iface_hdl: interface handle
+ * @qmap_id: qmap id
+ * @pkt_size: packet size
+ */
+struct ipa_gsb_mux_hdr {
+	u8 iface_hdl;
+	u8 qmap_id;
+	u16 pkt_size;
+};
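+
+/* Editorial note -- illustrative only: the mux header is exactly 4 bytes
+ * and the data path below converts it as a single 32-bit word (htonl() on
+ * transmit in ipa_bridge_tx_dp(), ntohl() in ipa_gsb_tx_dp_notify()), e.g.:
+ *
+ *	struct ipa_gsb_mux_hdr *mux_hdr =
+ *		(struct ipa_gsb_mux_hdr *)skb_push(skb, sizeof(*mux_hdr));
+ *	mux_hdr->iface_hdl = hdl;
+ *	*(u32 *)mux_hdr = htonl(*(u32 *)mux_hdr);
+ */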
+
+/**
+ * struct ipa_gsb_iface_info - GSB interface information
+ * @netdev_name: network interface name
+ * @device_ethaddr: network interface ethernet address
+ * @priv: client's private data, to be used in client's callbacks
+ * @tx_dp_notify: client callback for handling IPA ODU_PROD events
+ * @send_dl_skb: client callback for sending skb in downlink direction
+ * @iface_stats: statistics on packets transferred over the SW bridge
+ * @partial_hdr_hdl: handle for partial header
+ * @wakeup_request: client callback to request wakeup
+ * @is_connected: is the interface connected?
+ * @is_resumed: is the interface resumed?
+ * @iface_hdl: interface handle
+ */
+struct ipa_gsb_iface_info {
+	char netdev_name[IPA_RESOURCE_NAME_MAX];
+	u8 device_ethaddr[ETH_ALEN];
+	void *priv;
+	ipa_notify_cb tx_dp_notify;
+	int (*send_dl_skb)(void *priv, struct sk_buff *skb);
+	struct stats iface_stats;
+	uint32_t partial_hdr_hdl[IPA_IP_MAX];
+	void (*wakeup_request)(void *);
+	bool is_connected;
+	bool is_resumed;
+	u8 iface_hdl;
+};
+
+/**
+ * struct ipa_gsb_context - GSB driver context information
+ * @logbuf: buffer of ipc logging
+ * @logbuf_low: buffer of ipc logging (low priority)
+ * @lock: mutex lock
+ * @prod_hdl: handle for prod pipe
+ * @cons_hdl: handle for cons pipe
+ * @ipa_sys_desc_size: sys pipe desc size
+ * @num_iface: number of interfaces
+ * @iface_hdl: interface handles
+ * @num_connected_iface: number of connected interfaces
+ * @num_resumed_iface: number of resumed interfaces
+ * @iface: interface information
+ * @pm_hdl: IPA PM handle
+ */
+struct ipa_gsb_context {
+	void *logbuf;
+	void *logbuf_low;
+	struct mutex lock;
+	u32 prod_hdl;
+	u32 cons_hdl;
+	u32 ipa_sys_desc_size;
+	int num_iface;
+	bool iface_hdl[MAX_SUPPORTED_IFACE];
+	int num_connected_iface;
+	int num_resumed_iface;
+	struct ipa_gsb_iface_info *iface[MAX_SUPPORTED_IFACE];
+	u32 pm_hdl;
+};
+
+static struct ipa_gsb_context *ipa_gsb_ctx;
+
+#ifdef CONFIG_DEBUG_FS
+static ssize_t ipa_gsb_debugfs_stats(struct file *file,
+				  char __user *ubuf,
+				  size_t count,
+				  loff_t *ppos)
+{
+	int i, nbytes = 0;
+
+	for (i = 0; i < MAX_SUPPORTED_IFACE; i++)
+		if (ipa_gsb_ctx->iface[i] != NULL) {
+			nbytes += scnprintf(&dbg_buff[nbytes],
+				IPA_GSB_MAX_MSG_LEN - nbytes,
+				"netdev: %s\n",
+				ipa_gsb_ctx->iface[i]->netdev_name);
+
+			nbytes += scnprintf(&dbg_buff[nbytes],
+				IPA_GSB_MAX_MSG_LEN - nbytes,
+				"UL packets: %lld\n",
+				ipa_gsb_ctx->iface[i]->
+				iface_stats.num_ul_packets);
+
+			nbytes += scnprintf(&dbg_buff[nbytes],
+				IPA_GSB_MAX_MSG_LEN - nbytes,
+				"DL packets: %lld\n",
+				ipa_gsb_ctx->iface[i]->
+				iface_stats.num_dl_packets);
+
+			nbytes += scnprintf(&dbg_buff[nbytes],
+				IPA_GSB_MAX_MSG_LEN - nbytes,
+				"packets with insufficient headroom: %lld\n",
+				ipa_gsb_ctx->iface[i]->
+				iface_stats.num_insufficient_headroom_packets);
+		}
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static const struct file_operations ipa_gsb_stats_ops = {
+	.read = ipa_gsb_debugfs_stats,
+};
+
+static void ipa_gsb_debugfs_init(void)
+{
+	const mode_t read_only_mode = 00444;
+
+	dent = debugfs_create_dir("ipa_gsb", NULL);
+	if (IS_ERR(dent)) {
+		IPA_GSB_ERR("fail to create folder ipa_gsb\n");
+		return;
+	}
+
+	dfile_stats =
+		debugfs_create_file("stats", read_only_mode, dent,
+					NULL, &ipa_gsb_stats_ops);
+	if (!dfile_stats || IS_ERR(dfile_stats)) {
+		IPA_GSB_ERR("fail to create file stats\n");
+		goto fail;
+	}
+
+	return;
+
+fail:
+	debugfs_remove_recursive(dent);
+}
+
+static void ipa_gsb_debugfs_destroy(void)
+{
+	debugfs_remove_recursive(dent);
+}
+#else
+static void ipa_gsb_debugfs_init(void)
+{
+}
+
+static void ipa_gsb_debugfs_destroy(void)
+{
+}
+#endif
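+
+/* Editorial usage note (assuming debugfs is mounted at /sys/kernel/debug):
+ * the per-interface counters exposed above can be read with, e.g.,
+ *
+ *	cat /sys/kernel/debug/ipa_gsb/stats
+ */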
+
+static int ipa_gsb_driver_init(struct odu_bridge_params *params)
+{
+	if (!ipa_is_ready()) {
+		IPA_GSB_ERR("IPA is not ready\n");
+		return -EFAULT;
+	}
+
+	ipa_gsb_ctx = kzalloc(sizeof(*ipa_gsb_ctx),
+		GFP_KERNEL);
+
+	if (!ipa_gsb_ctx)
+		return -ENOMEM;
+
+	mutex_init(&ipa_gsb_ctx->lock);
+	ipa_gsb_debugfs_init();
+
+	return 0;
+}
+
+static int ipa_gsb_commit_partial_hdr(struct ipa_gsb_iface_info *iface_info)
+{
+	int i;
+	struct ipa_ioc_add_hdr *hdr;
+
+	if (!iface_info) {
+		IPA_GSB_ERR("invalid input\n");
+		return -EINVAL;
+	}
+
+	hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) +
+		2 * sizeof(struct ipa_hdr_add), GFP_KERNEL);
+	if (!hdr)
+		return -ENOMEM;
+
+	hdr->commit = 1;
+	hdr->num_hdrs = 2;
+
+	snprintf(hdr->hdr[0].name, sizeof(hdr->hdr[0].name),
+			 "%s_ipv4", iface_info->netdev_name);
+	snprintf(hdr->hdr[1].name, sizeof(hdr->hdr[1].name),
+			 "%s_ipv6", iface_info->netdev_name);
+	/*
+	 * partial header:
+	 * [hdl][QMAP ID][pkt size][Dummy Header][ETH header]
+	 */
+	for (i = IPA_IP_v4; i < IPA_IP_MAX; i++) {
+		/*
+		 * Optimization: add dummy header to reserve space
+		 * for rndis header, so we can do the skb_clone
+		 * instead of deep copy.
+		 */
+		hdr->hdr[i].hdr_len = ETH_HLEN +
+			sizeof(struct ipa_gsb_mux_hdr) +
+			IPA_GSB_SKB_DUMMY_HEADER;
+		hdr->hdr[i].type = IPA_HDR_L2_ETHERNET_II;
+		hdr->hdr[i].is_partial = 1;
+		hdr->hdr[i].is_eth2_ofst_valid = 1;
+		hdr->hdr[i].eth2_ofst = sizeof(struct ipa_gsb_mux_hdr) +
+			IPA_GSB_SKB_DUMMY_HEADER;
+		/* populate iface handle */
+		hdr->hdr[i].hdr[0] = iface_info->iface_hdl;
+		/* populate src ETH address */
+		memcpy(&hdr->hdr[i].hdr[10 + IPA_GSB_SKB_DUMMY_HEADER],
+			iface_info->device_ethaddr, 6);
+		/* populate Ethertype */
+		if (i == IPA_IP_v4)
+			*(u16 *)(hdr->hdr[i].hdr + 16 +
+				IPA_GSB_SKB_DUMMY_HEADER) = htons(ETH_P_IP);
+		else
+			*(u16 *)(hdr->hdr[i].hdr + 16 +
+				IPA_GSB_SKB_DUMMY_HEADER) = htons(ETH_P_IPV6);
+	}
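+
+	/* Editorial note -- worked example of the resulting layout, with
+	 * IPA_GSB_SKB_DUMMY_HEADER = 42 and a 4-byte mux header:
+	 * bytes 0..3   mux header (hdl/qmap_id/pkt_size),
+	 * bytes 4..45  dummy header (reserved space),
+	 * bytes 46..59 Ethernet header, so eth2_ofst = 4 + 42 = 46,
+	 * the source MAC lands at 46 + 6 = 10 + 42, and the Ethertype at
+	 * 46 + 12 = 16 + 42, matching the offsets written above.
+	 */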
+
+	if (ipa_add_hdr(hdr)) {
+		IPA_GSB_ERR("fail to add partial headers\n");
+		kfree(hdr);
+		return -EFAULT;
+	}
+
+	for (i = IPA_IP_v4; i < IPA_IP_MAX; i++)
+		iface_info->partial_hdr_hdl[i] =
+			hdr->hdr[i].hdr_hdl;
+
+	IPA_GSB_DBG("added partial hdr hdl for ipv4: %d\n",
+		iface_info->partial_hdr_hdl[IPA_IP_v4]);
+	IPA_GSB_DBG("added partial hdr hdl for ipv6: %d\n",
+		iface_info->partial_hdr_hdl[IPA_IP_v6]);
+
+	kfree(hdr);
+	return 0;
+}
+
+static void ipa_gsb_delete_partial_hdr(struct ipa_gsb_iface_info *iface_info)
+{
+	struct ipa_ioc_del_hdr *del_hdr;
+
+	del_hdr = kzalloc(sizeof(struct ipa_ioc_del_hdr) +
+		2 * sizeof(struct ipa_hdr_del), GFP_KERNEL);
+	if (!del_hdr)
+		return;
+
+	del_hdr->commit = 1;
+	del_hdr->num_hdls = 2;
+	del_hdr->hdl[IPA_IP_v4].hdl = iface_info->partial_hdr_hdl[IPA_IP_v4];
+	del_hdr->hdl[IPA_IP_v6].hdl = iface_info->partial_hdr_hdl[IPA_IP_v6];
+
+	if (ipa_del_hdr(del_hdr) != 0)
+		IPA_GSB_ERR("failed to delete partial hdr\n");
+
+	IPA_GSB_DBG("deleted partial hdr hdl for ipv4: %d\n",
+		iface_info->partial_hdr_hdl[IPA_IP_v4]);
+	IPA_GSB_DBG("deleted partial hdr hdl for ipv6: %d\n",
+		iface_info->partial_hdr_hdl[IPA_IP_v6]);
+
+	kfree(del_hdr);
+}
+
+static int ipa_gsb_reg_intf_props(struct ipa_gsb_iface_info *iface_info)
+{
+	struct ipa_tx_intf tx;
+	struct ipa_rx_intf rx;
+	struct ipa_ioc_tx_intf_prop tx_prop[2];
+	struct ipa_ioc_rx_intf_prop rx_prop[2];
+
+	/* populate tx prop */
+	tx.num_props = 2;
+	tx.prop = tx_prop;
+
+	memset(tx_prop, 0, sizeof(tx_prop));
+	tx_prop[0].ip = IPA_IP_v4;
+	tx_prop[0].dst_pipe = IPA_CLIENT_ODU_EMB_CONS;
+	tx_prop[0].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	snprintf(tx_prop[0].hdr_name, sizeof(tx_prop[0].hdr_name),
+			 "%s_ipv4", iface_info->netdev_name);
+
+	tx_prop[1].ip = IPA_IP_v6;
+	tx_prop[1].dst_pipe = IPA_CLIENT_ODU_EMB_CONS;
+	tx_prop[1].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	snprintf(tx_prop[1].hdr_name, sizeof(tx_prop[1].hdr_name),
+			 "%s_ipv6", iface_info->netdev_name);
+
+	/* populate rx prop */
+	rx.num_props = 2;
+	rx.prop = rx_prop;
+
+	memset(rx_prop, 0, sizeof(rx_prop));
+	rx_prop[0].ip = IPA_IP_v4;
+	rx_prop[0].src_pipe = IPA_CLIENT_ODU_PROD;
+	rx_prop[0].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	rx_prop[0].attrib.attrib_mask |= IPA_FLT_META_DATA;
+	rx_prop[0].attrib.meta_data = iface_info->iface_hdl;
+	rx_prop[0].attrib.meta_data_mask = 0xFF;
+
+	rx_prop[1].ip = IPA_IP_v6;
+	rx_prop[1].src_pipe = IPA_CLIENT_ODU_PROD;
+	rx_prop[1].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	rx_prop[1].attrib.attrib_mask |= IPA_FLT_META_DATA;
+	rx_prop[1].attrib.meta_data = iface_info->iface_hdl;
+	rx_prop[1].attrib.meta_data_mask = 0xFF;
+
+	if (ipa_register_intf(iface_info->netdev_name, &tx, &rx)) {
+		IPA_GSB_ERR("fail to add interface prop\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static void ipa_gsb_dereg_intf_props(struct ipa_gsb_iface_info *iface_info)
+{
+	if (ipa_deregister_intf(iface_info->netdev_name) != 0)
+		IPA_GSB_ERR("fail to dereg intf props\n");
+
+	IPA_GSB_DBG("deregistered iface props for %s\n",
+		iface_info->netdev_name);
+}
+
+static void ipa_gsb_pm_cb(void *user_data, enum ipa_pm_cb_event event)
+{
+	int i;
+
+	if (event != IPA_PM_REQUEST_WAKEUP) {
+		IPA_GSB_ERR("Unexpected event %d\n", event);
+		WARN_ON(1);
+		return;
+	}
+
+	IPA_GSB_DBG_LOW("wake up clients\n");
+	for (i = 0; i < MAX_SUPPORTED_IFACE; i++)
+		if (ipa_gsb_ctx->iface[i] != NULL)
+			ipa_gsb_ctx->iface[i]->wakeup_request(
+				ipa_gsb_ctx->iface[i]->priv);
+}
+
+static int ipa_gsb_register_pm(void)
+{
+	struct ipa_pm_register_params reg_params;
+	int ret;
+
+	memset(&reg_params, 0, sizeof(reg_params));
+	reg_params.name = "ipa_gsb";
+	reg_params.callback = ipa_gsb_pm_cb;
+	reg_params.user_data = NULL;
+	reg_params.group = IPA_PM_GROUP_DEFAULT;
+
+	ret = ipa_pm_register(&reg_params,
+		&ipa_gsb_ctx->pm_hdl);
+	if (ret) {
+		IPA_GSB_ERR("fail to register with PM %d\n", ret);
+		goto fail_pm_reg;
+	}
+	IPA_GSB_DBG("ipa pm hdl: %d\n", ipa_gsb_ctx->pm_hdl);
+
+	ret = ipa_pm_associate_ipa_cons_to_client(ipa_gsb_ctx->pm_hdl,
+		IPA_CLIENT_ODU_EMB_CONS);
+	if (ret) {
+		IPA_GSB_ERR("fail to associate cons with PM %d\n", ret);
+		goto fail_pm_cons;
+	}
+
+	return 0;
+
+fail_pm_cons:
+	ipa_pm_deregister(ipa_gsb_ctx->pm_hdl);
+	ipa_gsb_ctx->pm_hdl = ~0;
+fail_pm_reg:
+	return ret;
+}
+
+int ipa_bridge_init(struct ipa_bridge_init_params *params, u32 *hdl)
+{
+	int i, ret;
+	struct ipa_gsb_iface_info *new_intf;
+
+	if (!params || !params->wakeup_request || !hdl ||
+		!params->info.netdev_name || !params->info.tx_dp_notify ||
+		!params->info.send_dl_skb) {
+		IPA_GSB_ERR("NULL parameters\n");
+		return -EINVAL;
+	}
+
+	IPA_GSB_DBG("netdev_name: %s\n", params->info.netdev_name);
+
+	if (ipa_gsb_ctx == NULL) {
+		ret = ipa_gsb_driver_init(&params->info);
+		if (ret) {
+			IPA_GSB_ERR("fail to init ipa gsb driver\n");
+			return -EFAULT;
+		}
+		ipa_gsb_ctx->ipa_sys_desc_size =
+			params->info.ipa_desc_size;
+		IPA_GSB_DBG("desc size: %d\n", ipa_gsb_ctx->ipa_sys_desc_size);
+	}
+
+	mutex_lock(&ipa_gsb_ctx->lock);
+
+	if (params->info.ipa_desc_size != ipa_gsb_ctx->ipa_sys_desc_size) {
+		IPA_GSB_ERR("unmatch: orig desc size %d, new desc size %d\n",
+			ipa_gsb_ctx->ipa_sys_desc_size,
+			params->info.ipa_desc_size);
+		mutex_unlock(&ipa_gsb_ctx->lock);
+		return -EFAULT;
+	}
+
+	for (i = 0; i < MAX_SUPPORTED_IFACE; i++)
+		if (ipa_gsb_ctx->iface[i] != NULL &&
+			strnlen(ipa_gsb_ctx->iface[i]->netdev_name,
+					IPA_RESOURCE_NAME_MAX) ==
+			strnlen(params->info.netdev_name,
+					IPA_RESOURCE_NAME_MAX) &&
+			strcmp(ipa_gsb_ctx->iface[i]->netdev_name,
+				params->info.netdev_name) == 0) {
+			IPA_GSB_ERR("intf was added before.\n");
+			mutex_unlock(&ipa_gsb_ctx->lock);
+			return -EFAULT;
+		}
+
+	if (ipa_gsb_ctx->num_iface == MAX_SUPPORTED_IFACE) {
+		IPA_GSB_ERR("reached maximum supported interfaces");
+		mutex_unlock(&ipa_gsb_ctx->lock);
+		return -EFAULT;
+	}
+
+	for (i = 0; i < MAX_SUPPORTED_IFACE; i++)
+		if (ipa_gsb_ctx->iface_hdl[i] == false) {
+			ipa_gsb_ctx->iface_hdl[i] = true;
+			*hdl = i;
+			IPA_GSB_DBG("iface hdl: %d\n", *hdl);
+			break;
+		}
+
+	IPA_GSB_DBG("intf was not added before, proceed.\n");
+	new_intf = kzalloc(sizeof(*new_intf), GFP_KERNEL);
+	if (new_intf == NULL) {
+		ret = -ENOMEM;
+		goto fail_alloc_mem;
+	}
+
+	strlcpy(new_intf->netdev_name, params->info.netdev_name,
+		sizeof(new_intf->netdev_name));
+	new_intf->wakeup_request = params->wakeup_request;
+	new_intf->priv = params->info.priv;
+	new_intf->tx_dp_notify = params->info.tx_dp_notify;
+	new_intf->send_dl_skb = params->info.send_dl_skb;
+	new_intf->iface_hdl = *hdl;
+	memcpy(new_intf->device_ethaddr, params->info.device_ethaddr,
+		sizeof(new_intf->device_ethaddr));
+
+	if (ipa_gsb_commit_partial_hdr(new_intf) != 0) {
+		IPA_GSB_ERR("fail to commit partial hdrs\n");
+		ret = -EFAULT;
+		goto fail_partial_hdr;
+	}
+
+	if (ipa_gsb_reg_intf_props(new_intf) != 0) {
+		IPA_GSB_ERR("fail to register interface props\n");
+		ret = -EFAULT;
+		goto fail_reg_intf_props;
+	}
+
+	if (ipa_gsb_ctx->num_iface == 0) {
+		ret = ipa_gsb_register_pm();
+		if (ret) {
+			IPA_GSB_ERR("fail to register with IPA PM %d\n", ret);
+			ret = -EFAULT;
+			goto fail_register_pm;
+		}
+	}
+
+	ipa_gsb_ctx->iface[*hdl] = new_intf;
+	ipa_gsb_ctx->num_iface++;
+	IPA_GSB_DBG("num_iface %d\n", ipa_gsb_ctx->num_iface);
+	mutex_unlock(&ipa_gsb_ctx->lock);
+	return 0;
+
+fail_register_pm:
+	ipa_gsb_dereg_intf_props(new_intf);
+fail_reg_intf_props:
+	ipa_gsb_delete_partial_hdr(new_intf);
+fail_partial_hdr:
+	kfree(new_intf);
+fail_alloc_mem:
+	ipa_gsb_ctx->iface_hdl[*hdl] = false;
+	mutex_unlock(&ipa_gsb_ctx->lock);
+	return ret;
+}
+EXPORT_SYMBOL(ipa_bridge_init);
+
+static void ipa_gsb_deregister_pm(void)
+{
+	IPA_GSB_DBG("deregister ipa pm hdl: %d\n", ipa_gsb_ctx->pm_hdl);
+	ipa_pm_deactivate_sync(ipa_gsb_ctx->pm_hdl);
+	ipa_pm_deregister(ipa_gsb_ctx->pm_hdl);
+	ipa_gsb_ctx->pm_hdl = ~0;
+}
+
+int ipa_bridge_cleanup(u32 hdl)
+{
+	if (!ipa_gsb_ctx) {
+		IPA_GSB_ERR("ipa_gsb_ctx was not initialized\n");
+		return -EFAULT;
+	}
+
+	if (hdl >= MAX_SUPPORTED_IFACE) {
+		IPA_GSB_ERR("invalid hdl: %d\n", hdl);
+		return -EINVAL;
+	}
+
+	if (ipa_gsb_ctx->iface[hdl] == NULL) {
+		IPA_GSB_ERR("fail to find interface\n");
+		return -EFAULT;
+	}
+
+	IPA_GSB_DBG("client hdl: %d\n", hdl);
+	mutex_lock(&ipa_gsb_ctx->lock);
+
+	if (ipa_gsb_ctx->iface[hdl]->is_connected) {
+		IPA_GSB_ERR("cannot cleanup when iface is connected\n");
+		mutex_unlock(&ipa_gsb_ctx->lock);
+		return -EFAULT;
+	}
+
+	ipa_gsb_dereg_intf_props(ipa_gsb_ctx->iface[hdl]);
+	ipa_gsb_delete_partial_hdr(ipa_gsb_ctx->iface[hdl]);
+	kfree(ipa_gsb_ctx->iface[hdl]);
+	ipa_gsb_ctx->iface[hdl] = NULL;
+	ipa_gsb_ctx->iface_hdl[hdl] = false;
+	ipa_gsb_ctx->num_iface--;
+	IPA_GSB_DBG("num_iface %d\n", ipa_gsb_ctx->num_iface);
+
+	if (ipa_gsb_ctx->num_iface == 0) {
+		ipa_gsb_deregister_pm();
+		ipa_gsb_debugfs_destroy();
+		ipc_log_context_destroy(ipa_gsb_ctx->logbuf);
+		ipc_log_context_destroy(ipa_gsb_ctx->logbuf_low);
+		mutex_unlock(&ipa_gsb_ctx->lock);
+		kfree(ipa_gsb_ctx);
+		ipa_gsb_ctx = NULL;
+		return 0;
+	}
+
+	mutex_unlock(&ipa_gsb_ctx->lock);
+	return 0;
+}
+EXPORT_SYMBOL(ipa_bridge_cleanup);
+
+static void ipa_gsb_cons_cb(void *priv, enum ipa_dp_evt_type evt,
+	unsigned long data)
+{
+	struct sk_buff *skb;
+	struct sk_buff *skb2;
+	struct ipa_gsb_mux_hdr *mux_hdr;
+	u16 pkt_size, pad_byte;
+	u8 hdl;
+
+	if (evt != IPA_RECEIVE) {
+		IPA_GSB_ERR("unexpected event\n");
+		WARN_ON(1);
+		return;
+	}
+
+	skb = (struct sk_buff *)data;
+
+	while (skb->len) {
+		mux_hdr = (struct ipa_gsb_mux_hdr *)skb->data;
+		pkt_size = mux_hdr->pkt_size;
+		/* 4-byte padding */
+		pad_byte = ((pkt_size + sizeof(*mux_hdr) + ETH_HLEN +
+			3 + IPA_GSB_SKB_DUMMY_HEADER) & ~3) -
+			(pkt_size + sizeof(*mux_hdr) +
+			ETH_HLEN + IPA_GSB_SKB_DUMMY_HEADER);
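+		/* Editorial note -- worked example: the expression above
+		 * rounds (pkt_size + mux hdr + ETH_HLEN + dummy hdr) up to
+		 * the next multiple of 4 and keeps only the padding, e.g.
+		 * pkt_size = 101 gives 101 + 4 + 14 + 42 = 161, rounded up
+		 * to 164, so pad_byte = 3; an already aligned size gives 0.
+		 */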
+		hdl = mux_hdr->iface_hdl;
+		IPA_GSB_DBG_LOW("pkt_size: %d, pad_byte: %d, hdl: %d\n",
+			pkt_size, pad_byte, hdl);
+
+		/* remove 4-byte mux header AND dummy header */
+		skb_pull(skb, sizeof(*mux_hdr) + IPA_GSB_SKB_DUMMY_HEADER);
+
+		skb2 = skb_clone(skb, GFP_KERNEL);
+		if (!skb2) {
+			IPA_GSB_ERR("skb_clone failed\n");
+			WARN_ON(1);
+			break;
+		}
+		skb_trim(skb2, pkt_size + ETH_HLEN);
+		ipa_gsb_ctx->iface[hdl]->send_dl_skb(
+			ipa_gsb_ctx->iface[hdl]->priv, skb2);
+		ipa_gsb_ctx->iface[hdl]->iface_stats.num_dl_packets++;
+		skb_pull(skb, pkt_size + ETH_HLEN + pad_byte);
+	}
+	if (skb) {
+		dev_kfree_skb_any(skb);
+		skb = NULL;
+	}
+}
+
+static void ipa_gsb_tx_dp_notify(void *priv, enum ipa_dp_evt_type evt,
+		       unsigned long data)
+{
+	struct sk_buff *skb;
+	struct ipa_gsb_mux_hdr *mux_hdr;
+	u8 hdl;
+
+	skb = (struct sk_buff *)data;
+
+	if (evt != IPA_WRITE_DONE && evt != IPA_RECEIVE) {
+		IPA_GSB_ERR("unexpected event: %d\n", evt);
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	/* fetch iface handle from header */
+	mux_hdr = (struct ipa_gsb_mux_hdr *)skb->data;
+	/* change to host order */
+	*(u32 *)mux_hdr = ntohl(*(u32 *)mux_hdr);
+	hdl = mux_hdr->iface_hdl;
+	IPA_GSB_DBG_LOW("evt: %d, hdl in tx_dp_notify: %d\n", evt, hdl);
+
+	/* remove 4 byte mux header */
+	skb_pull(skb, sizeof(struct ipa_gsb_mux_hdr));
+	ipa_gsb_ctx->iface[hdl]->tx_dp_notify(
+		ipa_gsb_ctx->iface[hdl]->priv, evt,
+		(unsigned long)skb);
+}
+
+static int ipa_gsb_connect_sys_pipe(void)
+{
+	struct ipa_sys_connect_params prod_params;
+	struct ipa_sys_connect_params cons_params;
+	int res;
+
+	memset(&prod_params, 0, sizeof(prod_params));
+	memset(&cons_params, 0, sizeof(cons_params));
+
+	/* configure RX EP */
+	prod_params.client = IPA_CLIENT_ODU_PROD;
+	prod_params.ipa_ep_cfg.hdr.hdr_len =
+		ETH_HLEN + sizeof(struct ipa_gsb_mux_hdr);
+	prod_params.ipa_ep_cfg.nat.nat_en = IPA_SRC_NAT;
+	prod_params.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
+	prod_params.ipa_ep_cfg.hdr.hdr_ofst_metadata = 0;
+	prod_params.desc_fifo_sz = ipa_gsb_ctx->ipa_sys_desc_size;
+	prod_params.priv = NULL;
+	prod_params.notify = ipa_gsb_tx_dp_notify;
+	res = ipa_setup_sys_pipe(&prod_params,
+		&ipa_gsb_ctx->prod_hdl);
+	if (res) {
+		IPA_GSB_ERR("fail to setup prod sys pipe %d\n", res);
+		goto fail_prod;
+	}
+
+	/* configure TX EP */
+	cons_params.client = IPA_CLIENT_ODU_EMB_CONS;
+	cons_params.ipa_ep_cfg.hdr.hdr_len =
+		ETH_HLEN + sizeof(struct ipa_gsb_mux_hdr) +
+		IPA_GSB_SKB_DUMMY_HEADER;
+	cons_params.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
+	cons_params.ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 2;
+	cons_params.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2;
+	cons_params.ipa_ep_cfg.hdr_ext.hdr_little_endian = true;
+	cons_params.ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
+	/* setup aggregation */
+	cons_params.ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
+	cons_params.ipa_ep_cfg.aggr.aggr = IPA_GENERIC;
+	cons_params.ipa_ep_cfg.aggr.aggr_time_limit =
+		IPA_GSB_AGGR_TIME_LIMIT;
+	cons_params.ipa_ep_cfg.aggr.aggr_byte_limit =
+		IPA_GSB_AGGR_BYTE_LIMIT;
+	cons_params.desc_fifo_sz = ipa_gsb_ctx->ipa_sys_desc_size;
+	cons_params.priv = NULL;
+	cons_params.notify = ipa_gsb_cons_cb;
+	res = ipa_setup_sys_pipe(&cons_params,
+		&ipa_gsb_ctx->cons_hdl);
+	if (res) {
+		IPA_GSB_ERR("fail to setup cons sys pipe %d\n", res);
+		goto fail_cons;
+	}
+
+	IPA_GSB_DBG("prod_hdl = %d, cons_hdl = %d\n",
+		ipa_gsb_ctx->prod_hdl, ipa_gsb_ctx->cons_hdl);
+
+	return 0;
+
+fail_cons:
+	ipa_teardown_sys_pipe(ipa_gsb_ctx->prod_hdl);
+	ipa_gsb_ctx->prod_hdl = 0;
+fail_prod:
+	return res;
+}
+
+int ipa_bridge_connect(u32 hdl)
+{
+	int ret;
+
+	if (!ipa_gsb_ctx) {
+		IPA_GSB_ERR("ipa_gsb_ctx was not initialized\n");
+		return -EFAULT;
+	}
+
+	IPA_GSB_DBG("client hdl: %d\n", hdl);
+
+	mutex_lock(&ipa_gsb_ctx->lock);
+
+	if (ipa_gsb_ctx->iface[hdl]->is_connected) {
+		IPA_GSB_DBG("iface was already connected\n");
+		mutex_unlock(&ipa_gsb_ctx->lock);
+		return 0;
+	}
+
+	if (ipa_gsb_ctx->num_connected_iface == 0) {
+		ret = ipa_pm_activate_sync(ipa_gsb_ctx->pm_hdl);
+		if (ret) {
+			IPA_GSB_ERR("failed to activate ipa pm\n");
+			mutex_unlock(&ipa_gsb_ctx->lock);
+			return ret;
+		}
+		ret = ipa_gsb_connect_sys_pipe();
+		if (ret) {
+			IPA_GSB_ERR("fail to connect pipe\n");
+			mutex_unlock(&ipa_gsb_ctx->lock);
+			return ret;
+		}
+	}
+
+	/* connect = connect + resume */
+	ipa_gsb_ctx->iface[hdl]->is_connected = true;
+	ipa_gsb_ctx->iface[hdl]->is_resumed = true;
+
+	ipa_gsb_ctx->num_connected_iface++;
+	IPA_GSB_DBG("connected iface: %d\n",
+		ipa_gsb_ctx->num_connected_iface);
+	ipa_gsb_ctx->num_resumed_iface++;
+	IPA_GSB_DBG("num resumed iface: %d\n",
+		ipa_gsb_ctx->num_resumed_iface);
+
+	mutex_unlock(&ipa_gsb_ctx->lock);
+	return 0;
+}
+EXPORT_SYMBOL(ipa_bridge_connect);
+
+static int ipa_gsb_disconnect_sys_pipe(void)
+{
+	int ret;
+
+	IPA_GSB_DBG("prod_hdl = %d, cons_hdl = %d\n",
+		ipa_gsb_ctx->prod_hdl, ipa_gsb_ctx->cons_hdl);
+
+	ret = ipa_teardown_sys_pipe(ipa_gsb_ctx->prod_hdl);
+	if (ret) {
+		IPA_GSB_ERR("failed to tear down prod pipe\n");
+		return -EFAULT;
+	}
+	ipa_gsb_ctx->prod_hdl = 0;
+
+	ret = ipa_teardown_sys_pipe(ipa_gsb_ctx->cons_hdl);
+	if (ret) {
+		IPA_GSB_ERR("failed to tear down cons pipe\n");
+		return -EFAULT;
+	}
+	ipa_gsb_ctx->cons_hdl = 0;
+
+	return 0;
+}
+
+int ipa_bridge_disconnect(u32 hdl)
+{
+	int ret;
+
+	if (!ipa_gsb_ctx) {
+		IPA_GSB_ERR("ipa_gsb_ctx was not initialized\n");
+		return -EFAULT;
+	}
+
+	IPA_GSB_DBG("client hdl: %d\n", hdl);
+
+	mutex_lock(&ipa_gsb_ctx->lock);
+
+	if (!ipa_gsb_ctx->iface[hdl]->is_connected) {
+		IPA_GSB_DBG("iface was not connected\n");
+		mutex_unlock(&ipa_gsb_ctx->lock);
+		return 0;
+	}
+
+	if (ipa_gsb_ctx->num_connected_iface == 1) {
+		ret = ipa_gsb_disconnect_sys_pipe();
+		if (ret) {
+			IPA_GSB_ERR("fail to discon pipes\n");
+			mutex_unlock(&ipa_gsb_ctx->lock);
+			return -EFAULT;
+		}
+
+		ret = ipa_pm_deactivate_sync(ipa_gsb_ctx->pm_hdl);
+		if (ret) {
+			IPA_GSB_ERR("failed to deactivate ipa pm\n");
+			mutex_unlock(&ipa_gsb_ctx->lock);
+			return -EFAULT;
+		}
+	}
+
+	/* disconnect = suspend + disconnect */
+	ipa_gsb_ctx->iface[hdl]->is_connected = false;
+	ipa_gsb_ctx->num_connected_iface--;
+	IPA_GSB_DBG("connected iface: %d\n",
+		ipa_gsb_ctx->num_connected_iface);
+
+	if (ipa_gsb_ctx->iface[hdl]->is_resumed) {
+		ipa_gsb_ctx->iface[hdl]->is_resumed = false;
+		ipa_gsb_ctx->num_resumed_iface--;
+		IPA_GSB_DBG("num resumed iface: %d\n",
+			ipa_gsb_ctx->num_resumed_iface);
+	}
+
+	mutex_unlock(&ipa_gsb_ctx->lock);
+	return 0;
+}
+EXPORT_SYMBOL(ipa_bridge_disconnect);
+
+int ipa_bridge_resume(u32 hdl)
+{
+	int ret;
+
+	if (!ipa_gsb_ctx) {
+		IPA_GSB_ERR("ipa_gsb_ctx was not initialized\n");
+		return -EFAULT;
+	}
+
+	IPA_GSB_DBG_LOW("client hdl: %d\n", hdl);
+
+	if (!ipa_gsb_ctx->iface[hdl]->is_connected) {
+		IPA_GSB_ERR("iface is not connected\n");
+		return -EFAULT;
+	}
+
+	if (ipa_gsb_ctx->iface[hdl]->is_resumed) {
+		IPA_GSB_DBG_LOW("iface was already resumed\n");
+		return 0;
+	}
+
+	mutex_lock(&ipa_gsb_ctx->lock);
+
+	if (ipa_gsb_ctx->num_resumed_iface == 0) {
+		ret = ipa_pm_activate_sync(ipa_gsb_ctx->pm_hdl);
+		if (ret) {
+			IPA_GSB_ERR("fail to activate ipa pm\n");
+			mutex_unlock(&ipa_gsb_ctx->lock);
+			return ret;
+		}
+
+		ret = ipa_start_gsi_channel(
+			ipa_gsb_ctx->cons_hdl);
+		if (ret) {
+			IPA_GSB_ERR(
+				"fail to start con ep %d\n",
+				ret);
+			mutex_unlock(&ipa_gsb_ctx->lock);
+			return ret;
+		}
+	}
+
+	ipa_gsb_ctx->iface[hdl]->is_resumed = true;
+	ipa_gsb_ctx->num_resumed_iface++;
+	IPA_GSB_DBG_LOW("num resumed iface: %d\n",
+		ipa_gsb_ctx->num_resumed_iface);
+
+	mutex_unlock(&ipa_gsb_ctx->lock);
+	return 0;
+}
+EXPORT_SYMBOL(ipa_bridge_resume);
+
+int ipa_bridge_suspend(u32 hdl)
+{
+	int ret;
+
+	if (!ipa_gsb_ctx) {
+		IPA_GSB_ERR("ipa_gsb_ctx was not initialized\n");
+		return -EFAULT;
+	}
+
+	IPA_GSB_DBG_LOW("client hdl: %d\n", hdl);
+
+	if (!ipa_gsb_ctx->iface[hdl]->is_connected) {
+		IPA_GSB_ERR("iface is not connected\n");
+		return -EFAULT;
+	}
+
+	if (!ipa_gsb_ctx->iface[hdl]->is_resumed) {
+		IPA_GSB_DBG_LOW("iface was already suspended\n");
+		return 0;
+	}
+
+	mutex_lock(&ipa_gsb_ctx->lock);
+
+	if (ipa_gsb_ctx->num_resumed_iface == 1) {
+		ret = ipa_stop_gsi_channel(
+			ipa_gsb_ctx->cons_hdl);
+		if (ret) {
+			IPA_GSB_ERR(
+				"fail to stop cons ep %d\n",
+				ret);
+			mutex_unlock(&ipa_gsb_ctx->lock);
+			return ret;
+		}
+
+		ret = ipa_pm_deactivate_sync(ipa_gsb_ctx->pm_hdl);
+		if (ret) {
+			IPA_GSB_ERR("fail to deactivate ipa pm\n");
+			ipa_start_gsi_channel(ipa_gsb_ctx->cons_hdl);
+			mutex_unlock(&ipa_gsb_ctx->lock);
+			return ret;
+		}
+	}
+
+	ipa_gsb_ctx->iface[hdl]->is_resumed = false;
+	ipa_gsb_ctx->num_resumed_iface--;
+	IPA_GSB_DBG_LOW("num resumed iface: %d\n",
+		ipa_gsb_ctx->num_resumed_iface);
+
+	mutex_unlock(&ipa_gsb_ctx->lock);
+	return 0;
+}
+EXPORT_SYMBOL(ipa_bridge_suspend);
+
+int ipa_bridge_set_perf_profile(u32 hdl, u32 bandwidth)
+{
+	int ret;
+
+	IPA_GSB_DBG("client hdl: %d, BW: %d\n", hdl, bandwidth);
+
+	mutex_lock(&ipa_gsb_ctx->lock);
+
+	ret = ipa_pm_set_perf_profile(ipa_gsb_ctx->pm_hdl,
+		bandwidth);
+	if (ret)
+		IPA_GSB_ERR("fail to set perf profile\n");
+
+	mutex_unlock(&ipa_gsb_ctx->lock);
+	return ret;
+}
+EXPORT_SYMBOL(ipa_bridge_set_perf_profile);
+
+int ipa_bridge_tx_dp(u32 hdl, struct sk_buff *skb,
+	struct ipa_tx_meta *metadata)
+{
+	struct ipa_gsb_mux_hdr *mux_hdr;
+	struct sk_buff *skb2;
+	int ret;
+
+	IPA_GSB_DBG_LOW("client hdl: %d\n", hdl);
+
+	/* make sure skb has enough headroom */
+	if (unlikely(skb_headroom(skb) < sizeof(struct ipa_gsb_mux_hdr))) {
+		IPA_GSB_DBG_LOW("skb doesn't have enough headroom\n");
+		skb2 = skb_copy_expand(skb, sizeof(struct ipa_gsb_mux_hdr),
+			0, GFP_ATOMIC);
+		if (!skb2) {
+			dev_kfree_skb_any(skb);
+			return -ENOMEM;
+		}
+		dev_kfree_skb_any(skb);
+		skb = skb2;
+		ipa_gsb_ctx->iface[hdl]->iface_stats.
+			num_insufficient_headroom_packets++;
+	}
+
+	/* add 4 byte header for mux */
+	mux_hdr = (struct ipa_gsb_mux_hdr *)skb_push(skb,
+		sizeof(struct ipa_gsb_mux_hdr));
+	mux_hdr->iface_hdl = (u8)hdl;
+	/* change to network order */
+	*(u32 *)mux_hdr = htonl(*(u32 *)mux_hdr);
+
+	ret = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, metadata);
+	if (ret) {
+		IPA_GSB_ERR("tx dp failed %d\n", ret);
+		return -EFAULT;
+	}
+	ipa_gsb_ctx->iface[hdl]->iface_stats.num_ul_packets++;
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_bridge_tx_dp);
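+
+/* Editorial note -- illustrative client call sequence, not part of the
+ * original change. Only the fields actually referenced in this file are
+ * shown, and the assignments assume netdev_name in struct odu_bridge_params
+ * is a string pointer; callback and buffer names are hypothetical:
+ *
+ *	struct ipa_bridge_init_params params = { 0 };
+ *	u32 hdl;
+ *
+ *	params.info.netdev_name   = "bridge0";
+ *	params.info.priv          = client_ctx;
+ *	params.info.tx_dp_notify  = client_tx_dp_cb;
+ *	params.info.send_dl_skb   = client_send_dl_skb;
+ *	params.info.ipa_desc_size = desc_fifo_sz;
+ *	memcpy(params.info.device_ethaddr, dev_mac, ETH_ALEN);
+ *	params.wakeup_request     = client_wakeup_cb;
+ *
+ *	ipa_bridge_init(&params, &hdl);
+ *	ipa_bridge_connect(hdl);
+ *	ipa_bridge_tx_dp(hdl, skb, NULL);	(uplink data path)
+ *	ipa_bridge_suspend(hdl);		(later ipa_bridge_resume(hdl))
+ *	ipa_bridge_disconnect(hdl);
+ *	ipa_bridge_cleanup(hdl);
+ */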
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ipa gsb driver");
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
index 906e911..b4af0a09 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, 2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015, 2017-2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -24,13 +24,39 @@
 #endif
 
 #define IPA_MHI_DRV_NAME "ipa_mhi_client"
+
 #define IPA_MHI_DBG(fmt, args...) \
-	pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
-		 __func__, __LINE__, ## args)
+	do { \
+		pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_MHI_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+
 #define IPA_MHI_ERR(fmt, args...) \
-	pr_err(IPA_MHI_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+	do { \
+		pr_err(IPA_MHI_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+				IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+				IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
 #define IPA_MHI_FUNC_ENTRY() \
 	IPA_MHI_DBG("ENTRY\n")
+
 #define IPA_MHI_FUNC_EXIT() \
 	IPA_MHI_DBG("EXIT\n")
 
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
index ebf1d29..704308f 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
@@ -1336,7 +1336,12 @@
 	chan_params.chan_params.ring_base_addr =
 		params->xfer_ring_base_addr_iova;
 	chan_params.chan_params.ring_base_vaddr = NULL;
-	chan_params.chan_params.use_db_eng = GSI_CHAN_DB_MODE;
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+		chan_params.chan_params.use_db_eng = GSI_CHAN_DIRECT_MODE;
+	else
+		chan_params.chan_params.use_db_eng = GSI_CHAN_DB_MODE;
+	chan_params.chan_params.prefetch_mode =
+		ipa_get_ep_prefetch_mode(chan_params.client);
 	chan_params.chan_params.max_prefetch = GSI_ONE_PREFETCH_SEG;
 	if (params->dir == GSI_CHAN_DIR_FROM_GSI)
 		chan_params.chan_params.low_weight =
@@ -1847,12 +1852,15 @@
 	}
 
 	if (ipa_pm_is_used()) {
-		result = ipa_pm_set_perf_profile(
-			ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl,
-			params->max_supported_bandwidth_mbps);
-		if (result) {
-			IPA_USB_ERR("failed to set perf profile\n");
-			return result;
+		/* perf profile is not set on USB DPL pipe */
+		if (ttype != IPA_USB_TRANSPORT_DPL) {
+			result = ipa_pm_set_perf_profile(
+				ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl,
+				params->max_supported_bandwidth_mbps);
+			if (result) {
+				IPA_USB_ERR("failed to set perf profile\n");
+				return result;
+			}
 		}
 
 		result = ipa_pm_activate_sync(
@@ -2150,6 +2158,18 @@
 static void ipa_usb_debugfs_remove(void){}
 #endif /* CONFIG_DEBUG_FS */
 
+static int ipa_usb_set_lock_unlock(bool is_lock)
+{
+	IPA_USB_DBG("entry\n");
+	if (is_lock)
+		mutex_lock(&ipa3_usb_ctx->general_mutex);
+	else
+		mutex_unlock(&ipa3_usb_ctx->general_mutex);
+	IPA_USB_DBG("exit\n");
+
+	return 0;
+}
+
 
 
 int ipa_usb_xdci_connect(struct ipa_usb_xdci_chan_params *ul_chan_params,
@@ -2213,6 +2233,16 @@
 		goto connect_fail;
 	}
 
+	/*
+	 * Register for xdci lock/unlock callback with ipa core driver.
+	 * As per use case, only register for IPA_CONS end point for now.
+	 * If needed we can include the same for IPA_PROD ep.
+	 * For IPA_USB_DIAG/DPL config there will not be any UL ep.
+	 */
+	if (connect_params->teth_prot != IPA_USB_DIAG)
+		ipa3_register_lock_unlock_callback(&ipa_usb_set_lock_unlock,
+			ul_out_params->clnt_hdl);
+
 	IPA_USB_DBG_LOW("exit\n");
 	mutex_unlock(&ipa3_usb_ctx->general_mutex);
 	return 0;
@@ -2290,6 +2320,15 @@
 		}
 	}
 
+	/*
+	 * Deregister for xdci lock/unlock callback from ipa core driver.
+	 * As per use case, only deregister for IPA_CONS end point for now.
+	 * If needed we can include the same for IPA_PROD ep.
+	 * For IPA_USB_DIAG/DPL config there will not be any UL config.
+	 */
+	if (!IPA3_USB_IS_TTYPE_DPL(ttype))
+		ipa3_deregister_lock_unlock_callback(ul_clnt_hdl);
+
 	/* Change state to STOPPED */
 	if (!ipa3_usb_set_state(IPA_USB_STOPPED, false, ttype))
 		IPA_USB_ERR("failed to change state to stopped\n");
@@ -2913,6 +2952,7 @@
 	int i;
 	unsigned long flags;
 	int res;
+	struct ipa3_usb_pm_context *pm_ctx;
 
 	pr_debug("entry\n");
 	ipa3_usb_ctx = kzalloc(sizeof(struct ipa3_usb_context), GFP_KERNEL);
@@ -2932,19 +2972,13 @@
 	ipa3_usb_ctx->dl_data_pending = false;
 	mutex_init(&ipa3_usb_ctx->general_mutex);
 
-	if (ipa_pm_is_used()) {
-		struct ipa3_usb_pm_context *pm_ctx;
-
-		pm_ctx =
-			&ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_TETH].pm_ctx;
-		pm_ctx->hdl = ~0;
-		pm_ctx->remote_wakeup_work =
-			&ipa3_usb_notify_remote_wakeup_work;
-		pm_ctx = &ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_DPL].pm_ctx;
-		pm_ctx->hdl = ~0;
-		pm_ctx->remote_wakeup_work =
-			&ipa3_usb_dpl_notify_remote_wakeup_work;
-	}
+	/* init PM related members */
+	pm_ctx = &ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_TETH].pm_ctx;
+	pm_ctx->hdl = ~0;
+	pm_ctx->remote_wakeup_work = &ipa3_usb_notify_remote_wakeup_work;
+	pm_ctx = &ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_DPL].pm_ctx;
+	pm_ctx->hdl = ~0;
+	pm_ctx->remote_wakeup_work = &ipa3_usb_dpl_notify_remote_wakeup_work;
 
 	for (i = 0; i < IPA_USB_TRANSPORT_MAX; i++) {
 		ipa3_usb_ctx->ttype_ctx[i].rm_ctx.prod_valid = false;
diff --git a/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
index df546cd..3228410 100644
--- a/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
+++ b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1262,385 +1262,5 @@
 }
 EXPORT_SYMBOL(odu_bridge_cleanup);
 
-/* IPA Bridge implementation */
-#ifdef CONFIG_IPA3
-
-static void ipa_br_rm_notify(void *user_data, enum ipa_rm_event event,
-	unsigned long data)
-{
-	if (event == IPA_RM_RESOURCE_GRANTED)
-		complete(&odu_bridge_ctx->rm_comp);
-}
-
-static int ipa_br_request_prod(void)
-{
-	int res;
-
-	ODU_BRIDGE_FUNC_ENTRY();
-
-	reinit_completion(&odu_bridge_ctx->rm_comp);
-	ODU_BRIDGE_DBG("requesting odu prod\n");
-	res = ipa_rm_request_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
-	if (res) {
-		if (res != -EINPROGRESS) {
-			ODU_BRIDGE_ERR("failed to request prod %d\n", res);
-			return res;
-		}
-		wait_for_completion(&odu_bridge_ctx->rm_comp);
-	}
-
-	ODU_BRIDGE_FUNC_EXIT();
-	return 0;
-
-}
-
-static int ipa_br_release_prod(void)
-{
-	int res;
-
-	ODU_BRIDGE_FUNC_ENTRY();
-
-	reinit_completion(&odu_bridge_ctx->rm_comp);
-	ODU_BRIDGE_DBG("requesting odu prod\n");
-	res = ipa_rm_release_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
-	if (res) {
-		ODU_BRIDGE_ERR("failed to release prod %d\n", res);
-		return res;
-	}
-
-	ODU_BRIDGE_FUNC_EXIT();
-	return 0;
-
-}
-
-static int ipa_br_cons_request(void)
-{
-	ODU_BRIDGE_FUNC_ENTRY();
-	if (odu_bridge_ctx->is_suspended)
-		odu_bridge_ctx->wakeup_request(odu_bridge_ctx->priv);
-	ODU_BRIDGE_FUNC_EXIT();
-	return 0;
-}
-
-static int ipa_br_cons_release(void)
-{
-	ODU_BRIDGE_FUNC_ENTRY();
-	ODU_BRIDGE_FUNC_EXIT();
-	return 0;
-}
-
-static void ipa_br_pm_cb(void *p, enum ipa_pm_cb_event event)
-{
-	ODU_BRIDGE_FUNC_ENTRY();
-	if (event != IPA_PM_REQUEST_WAKEUP) {
-		ODU_BRIDGE_ERR("Unexpected event %d\n", event);
-		WARN_ON(1);
-		return;
-	}
-
-	if (odu_bridge_ctx->is_suspended)
-		odu_bridge_ctx->wakeup_request(odu_bridge_ctx->priv);
-	ODU_BRIDGE_FUNC_EXIT();
-}
-
-static int ipa_br_register_pm(void)
-{
-	struct ipa_pm_register_params reg_params;
-	int ret;
-
-	memset(&reg_params, 0, sizeof(reg_params));
-	reg_params.name = "ODU Bridge";
-	reg_params.callback = ipa_br_pm_cb;
-	reg_params.group = IPA_PM_GROUP_DEFAULT;
-
-	ret = ipa_pm_register(&reg_params,
-		&odu_bridge_ctx->pm_hdl);
-	if (ret) {
-		ODU_BRIDGE_ERR("fail to register with PM %d\n", ret);
-		goto fail_pm_reg;
-	}
-
-	ret = ipa_pm_associate_ipa_cons_to_client(odu_bridge_ctx->pm_hdl,
-		IPA_CLIENT_ODU_EMB_CONS);
-	if (ret) {
-		ODU_BRIDGE_ERR("fail to associate cons with PM %d\n", ret);
-		goto fail_pm_cons;
-	}
-
-	return 0;
-
-fail_pm_cons:
-	ipa_pm_deregister(odu_bridge_ctx->pm_hdl);
-	odu_bridge_ctx->pm_hdl = ~0;
-fail_pm_reg:
-	return ret;
-}
-
-static int ipa_br_create_rm_resources(void)
-{
-	int ret;
-	struct ipa_rm_create_params create_params;
-
-	/* create IPA RM resources for power management */
-	init_completion(&odu_bridge_ctx->rm_comp);
-	memset(&create_params, 0, sizeof(create_params));
-	create_params.name = IPA_RM_RESOURCE_ODU_ADAPT_PROD;
-	create_params.reg_params.user_data = odu_bridge_ctx;
-	create_params.reg_params.notify_cb = ipa_br_rm_notify;
-	create_params.floor_voltage = IPA_VOLTAGE_SVS;
-	ret = ipa_rm_create_resource(&create_params);
-	if (ret) {
-		ODU_BRIDGE_ERR("failed to create RM prod %d\n", ret);
-		goto fail_rm_prod;
-	}
-
-	ret = ipa_rm_add_dependency_sync(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
-		IPA_RM_RESOURCE_APPS_CONS);
-	if (ret) {
-		ODU_BRIDGE_ERR("failed to add ODU->APPS dependency %d\n", ret);
-		goto fail_add_dep;
-	}
-
-	memset(&create_params, 0, sizeof(create_params));
-	create_params.name = IPA_RM_RESOURCE_ODU_ADAPT_CONS;
-	create_params.request_resource = ipa_br_cons_request;
-	create_params.release_resource = ipa_br_cons_release;
-	create_params.floor_voltage = IPA_VOLTAGE_SVS;
-	ret = ipa_rm_create_resource(&create_params);
-	if (ret) {
-		ODU_BRIDGE_ERR("failed to create RM cons %d\n", ret);
-		goto fail_rm_cons;
-	}
-
-	return 0;
-
-fail_rm_cons:
-	ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
-		IPA_RM_RESOURCE_APPS_CONS);
-fail_add_dep:
-	ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
-fail_rm_prod:
-	return ret;
-}
-
-/* IPA Bridge API is the new API which will replaces old odu_bridge API */
-int ipa_bridge_init(struct ipa_bridge_init_params *params, u32 *hdl)
-{
-	int ret;
-
-	if (!params || !params->wakeup_request || !hdl) {
-		ODU_BRIDGE_ERR("NULL arg\n");
-		return -EINVAL;
-	}
-
-
-	ret = odu_bridge_init(&params->info);
-	if (ret)
-		return ret;
-
-	odu_bridge_ctx->wakeup_request = params->wakeup_request;
-
-	if (ipa_pm_is_used())
-		ret = ipa_br_register_pm();
-	else
-		ret = ipa_br_create_rm_resources();
-	if (ret) {
-		ODU_BRIDGE_ERR("fail to register woth RM/PM %d\n", ret);
-		goto fail_pm;
-	}
-
-	/* handle is ignored for now */
-	*hdl = 0;
-
-	return 0;
-
-fail_pm:
-	odu_bridge_cleanup();
-	return ret;
-}
-EXPORT_SYMBOL(ipa_bridge_init);
-
-int ipa_bridge_connect(u32 hdl)
-{
-	int ret;
-
-	if (!odu_bridge_ctx) {
-		ODU_BRIDGE_ERR("Not initialized\n");
-		return -EFAULT;
-	}
-
-	if (odu_bridge_ctx->is_connected) {
-		ODU_BRIDGE_ERR("already connected\n");
-		return -EFAULT;
-	}
-
-	if (ipa_pm_is_used())
-		ret = ipa_pm_activate_sync(odu_bridge_ctx->pm_hdl);
-	else
-		ret = ipa_br_request_prod();
-	if (ret)
-		return ret;
-
-	return odu_bridge_connect();
-}
-EXPORT_SYMBOL(ipa_bridge_connect);
-
-int ipa_bridge_set_perf_profile(u32 hdl, u32 bandwidth)
-{
-	struct ipa_rm_perf_profile profile = {0};
-	int ret;
-
-	if (ipa_pm_is_used())
-		return ipa_pm_set_perf_profile(odu_bridge_ctx->pm_hdl,
-			bandwidth);
-
-	profile.max_supported_bandwidth_mbps = bandwidth;
-	ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_ODU_ADAPT_PROD, &profile);
-	if (ret) {
-		ODU_BRIDGE_ERR("failed to set perf profile to prod %d\n", ret);
-		return ret;
-	}
-
-	ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_ODU_ADAPT_CONS, &profile);
-	if (ret) {
-		ODU_BRIDGE_ERR("failed to set perf profile to cons %d\n", ret);
-		return ret;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL(ipa_bridge_set_perf_profile);
-
-int ipa_bridge_disconnect(u32 hdl)
-{
-	int ret;
-
-	ret = odu_bridge_disconnect();
-	if (ret)
-		return ret;
-
-	if (ipa_pm_is_used())
-		ret = ipa_pm_deactivate_sync(odu_bridge_ctx->pm_hdl);
-	else
-		ret = ipa_br_release_prod();
-	if (ret)
-		return ret;
-
-	return 0;
-}
-EXPORT_SYMBOL(ipa_bridge_disconnect);
-
-int ipa_bridge_suspend(u32 hdl)
-{
-	int ret;
-
-	if (!odu_bridge_ctx) {
-		ODU_BRIDGE_ERR("Not initialized\n");
-		return -EFAULT;
-	}
-
-	if (!odu_bridge_ctx->is_connected) {
-		ODU_BRIDGE_ERR("bridge is  disconnected\n");
-		return -EFAULT;
-	}
-
-	if (odu_bridge_ctx->is_suspended) {
-		ODU_BRIDGE_ERR("bridge is already suspended\n");
-		return -EFAULT;
-	}
-
-	/* stop cons channel to prevent downlink data during suspend */
-	ret = ipa_stop_gsi_channel(odu_bridge_ctx->odu_emb_cons_hdl);
-	if (ret) {
-		ODU_BRIDGE_ERR("failed to stop CONS channel %d\n", ret);
-		return ret;
-	}
-
-	if (ipa_pm_is_used())
-		ret = ipa_pm_deactivate_sync(odu_bridge_ctx->pm_hdl);
-	else
-		ret = ipa_br_release_prod();
-	if (ret) {
-		ODU_BRIDGE_ERR("failed to release prod %d\n", ret);
-		ipa_start_gsi_channel(odu_bridge_ctx->odu_emb_cons_hdl);
-		return ret;
-	}
-	odu_bridge_ctx->is_suspended = true;
-
-	return 0;
-}
-EXPORT_SYMBOL(ipa_bridge_suspend);
-
-int ipa_bridge_resume(u32 hdl)
-{
-	int ret;
-
-	if (!odu_bridge_ctx) {
-		ODU_BRIDGE_ERR("Not initialized\n");
-		return -EFAULT;
-	}
-
-	if (!odu_bridge_ctx->is_connected) {
-		ODU_BRIDGE_ERR("bridge is  disconnected\n");
-		return -EFAULT;
-	}
-
-	if (!odu_bridge_ctx->is_suspended) {
-		ODU_BRIDGE_ERR("bridge is not suspended\n");
-		return -EFAULT;
-	}
-
-	if (ipa_pm_is_used())
-		ret = ipa_pm_activate_sync(odu_bridge_ctx->pm_hdl);
-	else
-		ret = ipa_br_request_prod();
-	if (ret)
-		return ret;
-
-	ret = ipa_start_gsi_channel(odu_bridge_ctx->odu_emb_cons_hdl);
-	if (ret) {
-		ODU_BRIDGE_ERR("failed to start CONS channel %d\n", ret);
-		return ret;
-	}
-	odu_bridge_ctx->is_suspended = false;
-
-	return 0;
-}
-EXPORT_SYMBOL(ipa_bridge_resume);
-
-int ipa_bridge_tx_dp(u32 hdl, struct sk_buff *skb,
-	struct ipa_tx_meta *metadata)
-{
-	return odu_bridge_tx_dp(skb, metadata);
-}
-EXPORT_SYMBOL(ipa_bridge_tx_dp);
-
-static void ipa_br_delete_rm_resources(void)
-{
-	ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
-		IPA_RM_RESOURCE_APPS_CONS);
-	ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
-	ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_CONS);
-}
-
-static void ipa_br_deregister_pm(void)
-{
-	ipa_pm_deactivate_sync(odu_bridge_ctx->pm_hdl);
-	ipa_pm_deregister(odu_bridge_ctx->pm_hdl);
-	odu_bridge_ctx->pm_hdl = ~0;
-}
-
-int ipa_bridge_cleanup(u32 hdl)
-{
-	if (ipa_pm_is_used())
-		ipa_br_deregister_pm();
-	else
-		ipa_br_delete_rm_resources();
-	return odu_bridge_cleanup();
-}
-EXPORT_SYMBOL(ipa_bridge_cleanup);
-
-#endif /* CONFIG_IPA3 */
-
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("ODU bridge driver");
diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h
index b8a517e..530aa54 100644
--- a/drivers/platform/msm/ipa/ipa_common_i.h
+++ b/drivers/platform/msm/ipa/ipa_common_i.h
@@ -15,6 +15,7 @@
 
 #ifndef _IPA_COMMON_I_H_
 #define _IPA_COMMON_I_H_
+#include <linux/errno.h>
 #include <linux/ipc_logging.h>
 #include <linux/ipa.h>
 #include <linux/ipa_uc_offload.h>
@@ -441,4 +442,7 @@
 		struct sg_table *in_sgt_ptr);
 int ipa_smmu_free_sgt(struct sg_table **out_sgt_ptr);
 
+int ipa_ut_module_init(void);
+void ipa_ut_module_exit(void);
+
 #endif /* _IPA_COMMON_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
index f062ed2..c2f7aae 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -89,6 +89,8 @@
 	__stringify(DEL_L2TP_VLAN_MAPPING),
 	__stringify(IPA_PER_CLIENT_STATS_CONNECT_EVENT),
 	__stringify(IPA_PER_CLIENT_STATS_DISCONNECT_EVENT),
+	__stringify(ADD_BRIDGE_VLAN_MAPPING),
+	__stringify(DEL_BRIDGE_VLAN_MAPPING),
 };
 
 const char *ipa_hdr_l2_type_name[] = {
@@ -432,6 +434,8 @@
 
 	list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
 			link) {
+		if (entry->cookie != IPA_HDR_COOKIE)
+			continue;
 		nbytes = scnprintf(
 			dbg_buff,
 			IPA_MAX_MSG_LEN,
@@ -606,6 +610,14 @@
 	if (attrib->protocol_eq_present)
 		pr_err("protocol:%d ", attrib->protocol_eq);
 
+	if (attrib->num_ihl_offset_range_16 >
+			IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS) {
+		IPAERR_RL("num_ihl_offset_range_16  Max %d passed value %d\n",
+			IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS,
+			attrib->num_ihl_offset_range_16);
+		return -EPERM;
+	}
+
 	for (i = 0; i < attrib->num_ihl_offset_range_16; i++) {
 		pr_err(
 			   "(ihl_ofst_range16: ofst:%u lo:%u hi:%u) ",
@@ -614,6 +626,12 @@
 			   attrib->ihl_offset_range_16[i].range_high);
 	}
 
+	if (attrib->num_offset_meq_32 > IPA_IPFLTR_NUM_MEQ_32_EQNS) {
+		IPAERR_RL("num_offset_meq_32  Max %d passed value %d\n",
+		IPA_IPFLTR_NUM_MEQ_32_EQNS, attrib->num_offset_meq_32);
+		return -EPERM;
+	}
+
 	for (i = 0; i < attrib->num_offset_meq_32; i++) {
 		pr_err(
 			   "(ofst_meq32: ofst:%u mask:0x%x val:0x%x) ",
@@ -635,6 +653,12 @@
 				attrib->ihl_offset_eq_16.value);
 	}
 
+	if (attrib->num_ihl_offset_meq_32 > IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS) {
+		IPAERR_RL("num_ihl_offset_meq_32  Max %d passed value %d\n",
+		IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS, attrib->num_ihl_offset_meq_32);
+		return -EPERM;
+	}
+
 	for (i = 0; i < attrib->num_ihl_offset_meq_32; i++) {
 		pr_err(
 				"(ihl_ofst_meq32: ofts:%d mask:0x%x val:0x%x) ",
@@ -643,6 +667,12 @@
 				attrib->ihl_offset_meq_32[i].value);
 	}
 
+	if (attrib->num_offset_meq_128 > IPA_IPFLTR_NUM_MEQ_128_EQNS) {
+		IPAERR_RL("num_offset_meq_128  Max %d passed value %d\n",
+		IPA_IPFLTR_NUM_MEQ_128_EQNS, attrib->num_offset_meq_128);
+		return -EPERM;
+	}
+
 	for (i = 0; i < attrib->num_offset_meq_128; i++) {
 		for (j = 0; j < 16; j++) {
 			addr[j] = attrib->offset_meq_128[i].value[j];
@@ -812,11 +842,14 @@
 	u32 rt_tbl_idx;
 	u32 bitmap;
 	bool eq;
+	int res = 0;
 
 	tbl = &ipa_ctx->glob_flt_tbl[ip];
 	mutex_lock(&ipa_ctx->lock);
 	i = 0;
 	list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+		if (entry->cookie != IPA_FLT_COOKIE)
+			continue;
 		if (entry->rule.eq_attrib_type) {
 			rt_tbl_idx = entry->rule.rt_tbl_idx;
 			bitmap = entry->rule.eq_attrib.rule_eq_bitmap;
@@ -835,10 +868,14 @@
 			i, entry->rule.action, rt_tbl_idx);
 		pr_err("attrib_mask:%08x retain_hdr:%d eq:%d ",
 			bitmap, entry->rule.retain_hdr, eq);
-		if (eq)
-			ipa_attrib_dump_eq(
+		if (eq) {
+			res = ipa_attrib_dump_eq(
 				&entry->rule.eq_attrib);
-		else
+			if (res) {
+				IPAERR_RL("failed read attrib eq\n");
+				goto bail;
+			}
+		} else
 			ipa_attrib_dump(
 				&entry->rule.attrib, ip);
 		i++;
@@ -848,6 +885,8 @@
 		tbl = &ipa_ctx->flt_tbl[j][ip];
 		i = 0;
 		list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+			if (entry->cookie != IPA_FLT_COOKIE)
+				continue;
 			if (entry->rule.eq_attrib_type) {
 				rt_tbl_idx = entry->rule.rt_tbl_idx;
 				bitmap = entry->rule.eq_attrib.rule_eq_bitmap;
@@ -867,18 +906,23 @@
 			pr_err("attrib_mask:%08x retain_hdr:%d ",
 				bitmap, entry->rule.retain_hdr);
 			pr_err("eq:%d ", eq);
-			if (eq)
-				ipa_attrib_dump_eq(
-					&entry->rule.eq_attrib);
-			else
+			if (eq) {
+				res = ipa_attrib_dump_eq(
+						&entry->rule.eq_attrib);
+				if (res) {
+					IPAERR_RL("failed read attrib eq\n");
+					goto bail;
+				}
+			} else
 				ipa_attrib_dump(
 					&entry->rule.attrib, ip);
 			i++;
 		}
 	}
+bail:
 	mutex_unlock(&ipa_ctx->lock);
 
-	return 0;
+	return res;
 }
 
 static ssize_t ipa_read_stats(struct file *file, char __user *ubuf,
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index 9f71d7b..ba98228 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -2115,8 +2115,10 @@
 			goto fail_dma_mapping;
 		}
 
+		spin_lock_bh(&sys->spinlock);
 		list_add_tail(&rx_pkt->link, &sys->head_desc_list);
 		rx_len_cached = ++sys->len;
+		spin_unlock_bh(&sys->spinlock);
 
 		ret = sps_transfer_one(sys->ep->ep_hdl,
 			rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0);
@@ -2130,8 +2132,10 @@
 	return;
 
 fail_sps_transfer:
+	spin_lock_bh(&sys->spinlock);
 	list_del(&rx_pkt->link);
 	rx_len_cached = --sys->len;
+	spin_unlock_bh(&sys->spinlock);
 	dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
 			sys->rx_buff_sz, DMA_FROM_DEVICE);
 fail_dma_mapping:
@@ -2171,8 +2175,10 @@
 			goto fail_dma_mapping;
 		}
 
+		spin_lock_bh(&sys->spinlock);
 		list_add_tail(&rx_pkt->link, &sys->head_desc_list);
 		rx_len_cached = ++sys->len;
+		spin_unlock_bh(&sys->spinlock);
 
 		ret = sps_transfer_one(sys->ep->ep_hdl,
 			rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0);
@@ -2185,9 +2191,11 @@
 
 	return;
 fail_sps_transfer:
+	spin_lock_bh(&sys->spinlock);
 	rx_len_cached = --sys->len;
 	list_del(&rx_pkt->link);
 	INIT_LIST_HEAD(&rx_pkt->link);
+	spin_unlock_bh(&sys->spinlock);
 	dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
 		sys->rx_buff_sz, DMA_FROM_DEVICE);
 fail_dma_mapping:
@@ -2219,7 +2227,9 @@
 		}
 
 		rx_pkt = sys->repl.cache[curr];
+		spin_lock_bh(&sys->spinlock);
 		list_add_tail(&rx_pkt->link, &sys->head_desc_list);
+		spin_unlock_bh(&sys->spinlock);
 
 		ret = sps_transfer_one(sys->ep->ep_hdl,
 			rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0);
@@ -2278,6 +2288,7 @@
 	u32 head;
 	u32 tail;
 
+	spin_lock_bh(&sys->spinlock);
 	list_for_each_entry_safe(rx_pkt, r,
 				 &sys->head_desc_list, link) {
 		list_del(&rx_pkt->link);
@@ -2295,6 +2306,7 @@
 		sys->free_skb(rx_pkt->data.skb);
 		kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
 	}
+	spin_unlock_bh(&sys->spinlock);
 
 	if (sys->repl.cache) {
 		head = atomic_read(&sys->repl.head_idx);
@@ -2976,8 +2988,10 @@
 	struct ipa_rx_pkt_wrapper *rx_pkt_expected;
 	struct sk_buff *rx_skb;
 
+	spin_lock_bh(&sys->spinlock);
 	if (unlikely(list_empty(&sys->head_desc_list))) {
 		WARN_ON(1);
+		spin_unlock_bh(&sys->spinlock);
 		return;
 	}
 	rx_pkt_expected = list_first_entry(&sys->head_desc_list,
@@ -2985,6 +2999,7 @@
 					   link);
 	list_del(&rx_pkt_expected->link);
 	sys->len--;
+	spin_unlock_bh(&sys->spinlock);
 	if (size)
 		rx_pkt_expected->len = size;
 	rx_skb = rx_pkt_expected->data.skb;
@@ -3005,8 +3020,10 @@
 	struct ipa_rx_pkt_wrapper *rx_pkt_expected;
 	struct sk_buff *rx_skb;
 
+	spin_lock_bh(&sys->spinlock);
 	if (unlikely(list_empty(&sys->head_desc_list))) {
 		WARN_ON(1);
+		spin_unlock_bh(&sys->spinlock);
 		return;
 	}
 	rx_pkt_expected = list_first_entry(&sys->head_desc_list,
@@ -3014,6 +3031,7 @@
 					   link);
 	list_del(&rx_pkt_expected->link);
 	sys->len--;
+	spin_unlock_bh(&sys->spinlock);
 
 	if (size)
 		rx_pkt_expected->len = size;
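
The locking change above brackets every list_add_tail()/list_del() on sys->head_desc_list, and every update of sys->len, with spin_lock_bh()/spin_unlock_bh(), since the descriptor list is reached from both the replenish path and the bottom-half receive path. A minimal sketch of that pattern follows, assuming the kernel's <linux/list.h> and <linux/spinlock.h>; the struct and field names mirror the patch context, while the two functions themselves are hypothetical and not part of the driver.

static void example_queue_rx_desc(struct ipa_sys_context *sys,
				  struct ipa_rx_pkt_wrapper *rx_pkt)
{
	/* BH-safe lock: the same list is walked from softirq context */
	spin_lock_bh(&sys->spinlock);
	list_add_tail(&rx_pkt->link, &sys->head_desc_list);
	sys->len++;
	spin_unlock_bh(&sys->spinlock);
}

static void example_unqueue_rx_desc(struct ipa_sys_context *sys,
				    struct ipa_rx_pkt_wrapper *rx_pkt)
{
	/* error and completion paths take the same lock before list_del() */
	spin_lock_bh(&sys->spinlock);
	list_del(&rx_pkt->link);
	sys->len--;
	spin_unlock_bh(&sys->spinlock);
}
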
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
index 84dce6f..50fe2a1 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
@@ -1495,8 +1495,16 @@
 			}
 		}
 	}
-	mutex_unlock(&ipa_ctx->lock);
 
+	/* commit the change to IPA-HW */
+	if (ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v4) ||
+		ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v6)) {
+		IPAERR_RL("fail to commit flt-rule\n");
+		WARN_ON_RATELIMIT_IPA(1);
+		mutex_unlock(&ipa_ctx->lock);
+		return -EPERM;
+	}
+	mutex_unlock(&ipa_ctx->lock);
 	return 0;
 }
 
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
index 6285130..61c3a71 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
@@ -1249,8 +1249,9 @@
 	struct ipa_hdr_offset_entry *off_next;
 	struct ipa_hdr_proc_ctx_offset_entry *ctx_off_entry;
 	struct ipa_hdr_proc_ctx_offset_entry *ctx_off_next;
-	int i, end = 0;
-	bool user_rule = false;
+	struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl;
+	struct ipa_hdr_proc_ctx_tbl *htbl_proc = &ipa_ctx->hdr_proc_ctx_tbl;
+	int i;
 
 	/*
 	 * issue a reset on the routing module since routing rules point to
@@ -1288,9 +1289,6 @@
 			return -EFAULT;
 		}
 
-		if (entry->ipacm_installed)
-			user_rule = true;
-
 		if (!user_only || entry->ipacm_installed) {
 			if (entry->is_hdr_proc_ctx) {
 				dma_unmap_single(ipa_ctx->pdev,
@@ -1298,9 +1296,15 @@
 					entry->hdr_len,
 					DMA_TO_DEVICE);
 				entry->proc_ctx = NULL;
+			} else {
+				/* move the offset entry to free list */
+				entry->offset_entry->ipacm_installed = 0;
+				list_move(&entry->offset_entry->link,
+				&htbl->head_free_offset_list[
+					entry->offset_entry->bin]);
 			}
 			list_del(&entry->link);
-			ipa_ctx->hdr_tbl.hdr_cnt--;
+			htbl->hdr_cnt--;
 			entry->ref_cnt = 0;
 			entry->cookie = 0;
 
@@ -1309,53 +1313,37 @@
 			kmem_cache_free(ipa_ctx->hdr_cache, entry);
 		}
 	}
-	for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
-		list_for_each_entry_safe(off_entry, off_next,
-					 &ipa_ctx->hdr_tbl.head_offset_list[i],
-					 link) {
 
-			/*
-			 * do not remove the default exception header which is
-			 * at offset 0
-			 */
-			if (off_entry->offset == 0)
-				continue;
-
-			if (!user_only ||
-					off_entry->ipacm_installed) {
+	/* only clean up offset_list and free_offset_list on global reset */
+	if (!user_only) {
+		for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
+			list_for_each_entry_safe(off_entry, off_next,
+				&ipa_ctx->hdr_tbl.head_offset_list[i],
+				link) {
+				/*
+				 * do not remove the default exception
+				 * header which is at offset 0
+				 */
+				if (off_entry->offset == 0)
+					continue;
 				list_del(&off_entry->link);
 				kmem_cache_free(ipa_ctx->hdr_offset_cache,
 					off_entry);
-			} else {
-				if (off_entry->offset +
-					ipa_hdr_bin_sz[off_entry->bin] > end) {
-					end = off_entry->offset +
-						ipa_hdr_bin_sz[off_entry->bin];
-					IPADBG("replace end = %d\n", end);
-				}
 			}
-		}
-		list_for_each_entry_safe(off_entry, off_next,
+			list_for_each_entry_safe(off_entry, off_next,
 				&ipa_ctx->hdr_tbl.head_free_offset_list[i],
 				link) {
-
-			if (!user_only ||
-					off_entry->ipacm_installed) {
 				list_del(&off_entry->link);
 				kmem_cache_free(ipa_ctx->hdr_offset_cache,
 					off_entry);
 			}
 		}
+		/* there is one header of size 8 */
+		ipa_ctx->hdr_tbl.end = 8;
+		ipa_ctx->hdr_tbl.hdr_cnt = 1;
 	}
 
-	IPADBG("hdr_tbl.end = %d\n", end);
-	if (user_rule) {
-		ipa_ctx->hdr_tbl.end = end;
-		IPADBG("hdr_tbl.end = %d\n", end);
-	}
 	IPADBG("reset hdr proc ctx\n");
-	user_rule = false;
-	end = 0;
 	list_for_each_entry_safe(
 		ctx_entry,
 		ctx_next,
@@ -1364,17 +1352,18 @@
 
 		if (ipa_id_find(ctx_entry->id) == NULL) {
 			mutex_unlock(&ipa_ctx->lock);
-			WARN_ON(1);
+			WARN_ON_RATELIMIT_IPA(1);
 			return -EFAULT;
 		}
 
-		if (entry->ipacm_installed)
-			user_rule = true;
-
 		if (!user_only ||
 				ctx_entry->ipacm_installed) {
+			/* move the offset entry to appropriate free list */
+			list_move(&ctx_entry->offset_entry->link,
+				&htbl_proc->head_free_offset_list[
+					ctx_entry->offset_entry->bin]);
 			list_del(&ctx_entry->link);
-			ipa_ctx->hdr_proc_ctx_tbl.proc_ctx_cnt--;
+			htbl_proc->proc_ctx_cnt--;
 			ctx_entry->ref_cnt = 0;
 			ctx_entry->cookie = 0;
 
@@ -1384,48 +1373,39 @@
 				ctx_entry);
 		}
 	}
-	for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
-		list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
+	/* only clean up offset_list and free_offset_list on global reset */
+	if (!user_only) {
+		for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
+			list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
 				&ipa_ctx->hdr_proc_ctx_tbl.head_offset_list[i],
 				link) {
-
-			if (!user_only ||
-					ctx_off_entry->ipacm_installed) {
 				list_del(&ctx_off_entry->link);
 				kmem_cache_free(
 					ipa_ctx->hdr_proc_ctx_offset_cache,
 					ctx_off_entry);
-			} else {
-				if (ctx_off_entry->offset +
-					ipa_hdr_bin_sz[ctx_off_entry->bin]
-					> end) {
-					end = ctx_off_entry->offset +
-					ipa_hdr_bin_sz[ctx_off_entry->bin];
-					IPADBG("replace hdr_proc as %d\n", end);
-				}
 			}
-		}
-		list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
-			    &ipa_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i],
-			    link) {
-
-			if (!user_only ||
-					ctx_off_entry->ipacm_installed) {
+			list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
+				&htbl_proc->head_free_offset_list[i], link) {
 				list_del(&ctx_off_entry->link);
 				kmem_cache_free(
 					ipa_ctx->hdr_proc_ctx_offset_cache,
 					ctx_off_entry);
 			}
 		}
+		ipa_ctx->hdr_proc_ctx_tbl.end = 0;
+		ipa_ctx->hdr_proc_ctx_tbl.proc_ctx_cnt = 0;
 	}
 
-	IPADBG("hdr_proc_tbl.end = %d\n", end);
-	if (user_rule) {
-		ipa_ctx->hdr_proc_ctx_tbl.end = end;
-		IPADBG("hdr_proc_tbl.end = %d\n", end);
+	/* commit the change to IPA-HW */
+	if (ipa_ctx->ctrl->ipa_commit_hdr()) {
+		IPAERR_RL("fail to commit hdr\n");
+		WARN_ON_RATELIMIT_IPA(1);
+		mutex_unlock(&ipa_ctx->lock);
+		return -EFAULT;
 	}
+
 	mutex_unlock(&ipa_ctx->lock);
-
 	return 0;
 }
 
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
index c043bad..15be5d6 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
@@ -327,6 +327,11 @@
 	size_t tmp;
 	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
 
+	if (!ipa_ctx->nat_mem.is_dev_init) {
+		IPAERR_RL("Nat table not initialized\n");
+		return -EPERM;
+	}
+
 	IPADBG("\n");
 	if (init->table_entries == 0) {
 		IPADBG("Table entries is zero\n");
@@ -576,6 +581,11 @@
 	int ret = 0;
 	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
 
+	if (!ipa_ctx->nat_mem.is_dev_init) {
+		IPAERR_RL("Nat table not initialized\n");
+		return -EPERM;
+	}
+
 	IPADBG("\n");
 	if (dma->entries <= 0) {
 		IPAERR_RL("Invalid number of commands %d\n",
@@ -762,6 +772,16 @@
 	int result;
 	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
 
+	if (!ipa_ctx->nat_mem.is_dev_init) {
+		IPAERR_RL("Nat table not initialized\n");
+		return -EPERM;
+	}
+
+	if (ipa_ctx->nat_mem.public_ip_addr) {
+		IPAERR_RL("Public IP addr still assigned, cannot delete\n");
+		return -EPERM;
+	}
+
 	IPADBG("\n");
 	if (ipa_ctx->nat_mem.is_tmp_mem) {
 		IPAERR("using temp memory during nat del\n");
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h
index 1f5d619..98f5574 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -281,7 +281,7 @@
 {
 }
 
-static int rmnet_ipa_reset_tethering_stats
+static inline int rmnet_ipa_reset_tethering_stats
 (
 	struct wan_ioctl_reset_tether_stats *data
 )
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
index 7710279..073409b 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
@@ -1445,6 +1445,15 @@
 			}
 		}
 	}
+
+	/* commit the change to IPA-HW */
+	if (ipa_ctx->ctrl->ipa_commit_rt(IPA_IP_v4) ||
+		ipa_ctx->ctrl->ipa_commit_rt(IPA_IP_v6)) {
+		IPAERR("fail to commit rt-rule\n");
+		WARN_ON_RATELIMIT_IPA(1);
+		mutex_unlock(&ipa_ctx->lock);
+		return -EPERM;
+	}
 	mutex_unlock(&ipa_ctx->lock);
 
 	return 0;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index d76c208..681b009 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -72,6 +72,9 @@
 
 #define INVALID_EP_MAPPING_INDEX (-1)
 
+#define IPA_IS_RAN_OUT_OF_EQ(__eq_array, __eq_index) \
+		(ARRAY_SIZE(__eq_array) <= (__eq_index))
+
 struct ipa_ep_confing {
 	bool valid;
 	int pipe_num;
@@ -119,6 +122,7 @@
 	[IPA_2_0][IPA_CLIENT_MHI_PROD]           = {true, 18},
 	[IPA_2_0][IPA_CLIENT_Q6_LAN_PROD]        = {true,  6},
 	[IPA_2_0][IPA_CLIENT_Q6_CMD_PROD]        = {true,  7},
+
 	[IPA_2_0][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD]
 						 = {true, 12},
 	[IPA_2_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD]
@@ -1409,6 +1413,11 @@
 		}
 
 		if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE) {
+			if (IPA_IS_RAN_OUT_OF_EQ(ipa_ihl_ofst_meq32,
+							ihl_ofst_meq32)) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
 			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
 				IPAERR("ran out of ihl_meq32 eq\n");
 				return -EPERM;
@@ -1426,6 +1435,11 @@
 		}
 
 		if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) {
+			if (IPA_IS_RAN_OUT_OF_EQ(ipa_ihl_ofst_meq32,
+							ihl_ofst_meq32)) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
 			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
 				IPAERR("ran out of ihl_meq32 eq\n");
 				return -EPERM;
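
The two hunks above guard the equation-allocation arrays with IPA_IS_RAN_OUT_OF_EQ() before indexing them, so a malformed attribute mask can no longer walk past the end of the table. Below is a self-contained sketch of the same guard, assuming <linux/kernel.h> for ARRAY_SIZE() and <linux/errno.h>; the example table and function are illustrative only, and only the macro is taken from the patch.

#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/errno.h>

#define IPA_IS_RAN_OUT_OF_EQ(__eq_array, __eq_index) \
		(ARRAY_SIZE(__eq_array) <= (__eq_index))

/* stand-in for the driver's fixed table of hardware equation slots */
static const int example_eq_slots[] = { 0, 1, 2, 3 };

static int example_claim_eq(unsigned int next_free_idx)
{
	/* fail the request instead of reading past the end of the table */
	if (IPA_IS_RAN_OUT_OF_EQ(example_eq_slots, next_free_idx))
		return -EPERM;

	return example_eq_slots[next_free_idx];
}
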
diff --git a/drivers/platform/msm/ipa/ipa_v3/Makefile b/drivers/platform/msm/ipa/ipa_v3/Makefile
index ae4dccf..ed78342 100644
--- a/drivers/platform/msm/ipa/ipa_v3/Makefile
+++ b/drivers/platform/msm/ipa/ipa_v3/Makefile
@@ -1,9 +1,19 @@
 obj-$(CONFIG_IPA3) += ipahal/
 
+ifdef CONFIG_X86
+ccflags-y += -DIPA_EMULATION_COMPILE=1
+else
+ccflags-y += -DIPA_EMULATION_COMPILE=0
+endif
+
 obj-$(CONFIG_IPA3) += ipat.o
 ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
 	ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o \
 	ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_uc_ntn.o \
 	ipa_hw_stats.o ipa_pm.o ipa_wdi3_i.o
 
+ifdef CONFIG_X86
+ipat-y += ipa_dtsi_replacement.o
+endif
+
 obj-$(CONFIG_RMNET_IPA3) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 9eb3c35..b46da99 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -35,6 +35,7 @@
 #include <linux/time.h>
 #include <linux/hashtable.h>
 #include <linux/jhash.h>
+#include <linux/pci.h>
 #include <soc/qcom/subsystem_restart.h>
 #include <soc/qcom/smem.h>
 #include <soc/qcom/scm.h>
@@ -57,169 +58,14 @@
 #define CREATE_TRACE_POINTS
 #include "ipa_trace.h"
 
-#define IPA_GPIO_IN_QUERY_CLK_IDX 0
-#define IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX 0
-#define IPA_GPIO_OUT_CLK_VOTE_IDX 1
-
-#define IPA_SUMMING_THRESHOLD (0x10)
-#define IPA_PIPE_MEM_START_OFST (0x0)
-#define IPA_PIPE_MEM_SIZE (0x0)
-#define IPA_MOBILE_AP_MODE(x) (x == IPA_MODE_MOBILE_AP_ETH || \
-			       x == IPA_MODE_MOBILE_AP_WAN || \
-			       x == IPA_MODE_MOBILE_AP_WLAN)
-#define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL)
-#define IPA_A5_MUX_HEADER_LENGTH (8)
-
-#define IPA_AGGR_MAX_STR_LENGTH (10)
-
-#define CLEANUP_TAG_PROCESS_TIMEOUT 500
-
-#define IPA_AGGR_STR_IN_BYTES(str) \
-	(strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)
-
-#define IPA_TRANSPORT_PROD_TIMEOUT_MSEC 100
-
-#define IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE 2048
-
-#define IPA3_ACTIVE_CLIENT_LOG_TYPE_EP 0
-#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE 1
-#define IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE 2
-#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL 3
-
-#define IPA_MHI_GSI_EVENT_RING_ID_START 10
-#define IPA_MHI_GSI_EVENT_RING_ID_END 12
-
-#define IPA_SMEM_SIZE (8 * 1024)
-
-#define IPA_GSI_CHANNEL_HALT_MIN_SLEEP 5000
-#define IPA_GSI_CHANNEL_HALT_MAX_SLEEP 10000
-#define IPA_GSI_CHANNEL_HALT_MAX_TRY 10
-
-/* round addresses for closes page per SMMU requirements */
-#define IPA_SMMU_ROUND_TO_PAGE(iova, pa, size, iova_p, pa_p, size_p) \
-	do { \
-		(iova_p) = rounddown((iova), PAGE_SIZE); \
-		(pa_p) = rounddown((pa), PAGE_SIZE); \
-		(size_p) = roundup((size) + (pa) - (pa_p), PAGE_SIZE); \
-	} while (0)
-
-
-/* The relative location in /lib/firmware where the FWs will reside */
-#define IPA_FWS_PATH "ipa/ipa_fws.elf"
+/*
+ * The following adds code (i.e. for EMULATION) not found on x86.
+ */
+#if IPA_EMULATION_COMPILE == 1
+# include "ipa_emulation_stubs.h"
+#endif
 
 #ifdef CONFIG_COMPAT
-#define IPA_IOC_ADD_HDR32 _IOWR(IPA_IOC_MAGIC, \
-					IPA_IOCTL_ADD_HDR, \
-					compat_uptr_t)
-#define IPA_IOC_DEL_HDR32 _IOWR(IPA_IOC_MAGIC, \
-					IPA_IOCTL_DEL_HDR, \
-					compat_uptr_t)
-#define IPA_IOC_ADD_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
-					IPA_IOCTL_ADD_RT_RULE, \
-					compat_uptr_t)
-#define IPA_IOC_DEL_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
-					IPA_IOCTL_DEL_RT_RULE, \
-					compat_uptr_t)
-#define IPA_IOC_ADD_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
-					IPA_IOCTL_ADD_FLT_RULE, \
-					compat_uptr_t)
-#define IPA_IOC_DEL_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
-					IPA_IOCTL_DEL_FLT_RULE, \
-					compat_uptr_t)
-#define IPA_IOC_GET_RT_TBL32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_GET_RT_TBL, \
-				compat_uptr_t)
-#define IPA_IOC_COPY_HDR32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_COPY_HDR, \
-				compat_uptr_t)
-#define IPA_IOC_QUERY_INTF32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_QUERY_INTF, \
-				compat_uptr_t)
-#define IPA_IOC_QUERY_INTF_TX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_QUERY_INTF_TX_PROPS, \
-				compat_uptr_t)
-#define IPA_IOC_QUERY_INTF_RX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
-					IPA_IOCTL_QUERY_INTF_RX_PROPS, \
-					compat_uptr_t)
-#define IPA_IOC_QUERY_INTF_EXT_PROPS32 _IOWR(IPA_IOC_MAGIC, \
-					IPA_IOCTL_QUERY_INTF_EXT_PROPS, \
-					compat_uptr_t)
-#define IPA_IOC_GET_HDR32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_GET_HDR, \
-				compat_uptr_t)
-#define IPA_IOC_ALLOC_NAT_MEM32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_ALLOC_NAT_MEM, \
-				compat_uptr_t)
-#define IPA_IOC_ALLOC_NAT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_ALLOC_NAT_TABLE, \
-				compat_uptr_t)
-#define IPA_IOC_ALLOC_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_ALLOC_IPV6CT_TABLE, \
-				compat_uptr_t)
-#define IPA_IOC_V4_INIT_NAT32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_V4_INIT_NAT, \
-				compat_uptr_t)
-#define IPA_IOC_INIT_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_INIT_IPV6CT_TABLE, \
-				compat_uptr_t)
-#define IPA_IOC_TABLE_DMA_CMD32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_TABLE_DMA_CMD, \
-				compat_uptr_t)
-#define IPA_IOC_V4_DEL_NAT32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_V4_DEL_NAT, \
-				compat_uptr_t)
-#define IPA_IOC_DEL_NAT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_DEL_NAT_TABLE, \
-				compat_uptr_t)
-#define IPA_IOC_DEL_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_DEL_IPV6CT_TABLE, \
-				compat_uptr_t)
-#define IPA_IOC_NAT_MODIFY_PDN32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_NAT_MODIFY_PDN, \
-				compat_uptr_t)
-#define IPA_IOC_GET_NAT_OFFSET32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_GET_NAT_OFFSET, \
-				compat_uptr_t)
-#define IPA_IOC_PULL_MSG32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_PULL_MSG, \
-				compat_uptr_t)
-#define IPA_IOC_RM_ADD_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_RM_ADD_DEPENDENCY, \
-				compat_uptr_t)
-#define IPA_IOC_RM_DEL_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_RM_DEL_DEPENDENCY, \
-				compat_uptr_t)
-#define IPA_IOC_GENERATE_FLT_EQ32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_GENERATE_FLT_EQ, \
-				compat_uptr_t)
-#define IPA_IOC_QUERY_RT_TBL_INDEX32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_QUERY_RT_TBL_INDEX, \
-				compat_uptr_t)
-#define IPA_IOC_WRITE_QMAPID32  _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_WRITE_QMAPID, \
-				compat_uptr_t)
-#define IPA_IOC_MDFY_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_MDFY_FLT_RULE, \
-				compat_uptr_t)
-#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD, \
-				compat_uptr_t)
-#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL, \
-				compat_uptr_t)
-#define IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32 _IOWR(IPA_IOC_MAGIC, \
-					IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED, \
-					compat_uptr_t)
-#define IPA_IOC_ADD_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_ADD_HDR_PROC_CTX, \
-				compat_uptr_t)
-#define IPA_IOC_DEL_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_DEL_HDR_PROC_CTX, \
-				compat_uptr_t)
-#define IPA_IOC_MDFY_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_MDFY_RT_RULE, \
-				compat_uptr_t)
-
 /**
  * struct ipa3_ioc_nat_alloc_mem32 - nat table memory allocation
  * properties
@@ -243,8 +89,7 @@
 	compat_size_t size;
 	compat_off_t offset;
 };
-
-#endif
+#endif /* #ifdef CONFIG_COMPAT */
 
 #define IPA_TZ_UNLOCK_ATTRIBUTE 0x0C0311
 #define TZ_MEM_PROTECT_REGION_ID 0x10
@@ -284,6 +129,7 @@
 static struct clk *ipa3_clk;
 
 struct ipa3_context *ipa3_ctx;
+
 static struct {
 	bool present[IPA_SMMU_CB_MAX];
 	bool arm_smmu;
@@ -653,10 +499,15 @@
 		return;
 	}
 
-	if (type != ADD_VLAN_IFACE &&
-	    type != DEL_VLAN_IFACE &&
-	    type != ADD_L2TP_VLAN_MAPPING &&
-		type != DEL_L2TP_VLAN_MAPPING) {
+	switch (type) {
+	case ADD_VLAN_IFACE:
+	case DEL_VLAN_IFACE:
+	case ADD_L2TP_VLAN_MAPPING:
+	case DEL_L2TP_VLAN_MAPPING:
+	case ADD_BRIDGE_VLAN_MAPPING:
+	case DEL_BRIDGE_VLAN_MAPPING:
+		break;
+	default:
 		IPAERR("Wrong type given. buff %pK type %d\n", buff, type);
 		return;
 	}
@@ -669,10 +520,17 @@
 	int retval;
 	struct ipa_ioc_vlan_iface_info *vlan_info;
 	struct ipa_ioc_l2tp_vlan_mapping_info *mapping_info;
+	struct ipa_ioc_bridge_vlan_mapping_info *bridge_vlan_info;
 	struct ipa_msg_meta msg_meta;
+	void *buff;
 
-	if (msg_type == ADD_VLAN_IFACE ||
-		msg_type == DEL_VLAN_IFACE) {
+	IPADBG("type %d\n", msg_type);
+
+	memset(&msg_meta, 0, sizeof(msg_meta));
+	msg_meta.msg_type = msg_type;
+
+	if ((msg_type == ADD_VLAN_IFACE) ||
+		(msg_type == DEL_VLAN_IFACE)) {
 		vlan_info = kzalloc(sizeof(struct ipa_ioc_vlan_iface_info),
 			GFP_KERNEL);
 		if (!vlan_info) {
@@ -686,18 +544,10 @@
 			return -EFAULT;
 		}
 
-		memset(&msg_meta, 0, sizeof(msg_meta));
-		msg_meta.msg_type = msg_type;
 		msg_meta.msg_len = sizeof(struct ipa_ioc_vlan_iface_info);
-		retval = ipa3_send_msg(&msg_meta, vlan_info,
-			ipa3_vlan_l2tp_msg_free_cb);
-		if (retval) {
-			IPAERR("ipa3_send_msg failed: %d\n", retval);
-			kfree(vlan_info);
-			return retval;
-		}
-	} else if (msg_type == ADD_L2TP_VLAN_MAPPING ||
-		msg_type == DEL_L2TP_VLAN_MAPPING) {
+		buff = vlan_info;
+	} else if ((msg_type == ADD_L2TP_VLAN_MAPPING) ||
+		(msg_type == DEL_L2TP_VLAN_MAPPING)) {
 		mapping_info = kzalloc(sizeof(struct
 			ipa_ioc_l2tp_vlan_mapping_info), GFP_KERNEL);
 		if (!mapping_info) {
@@ -712,22 +562,46 @@
 			return -EFAULT;
 		}
 
-		memset(&msg_meta, 0, sizeof(msg_meta));
-		msg_meta.msg_type = msg_type;
 		msg_meta.msg_len = sizeof(struct
 			ipa_ioc_l2tp_vlan_mapping_info);
-		retval = ipa3_send_msg(&msg_meta, mapping_info,
-			ipa3_vlan_l2tp_msg_free_cb);
-		if (retval) {
-			IPAERR("ipa3_send_msg failed: %d\n", retval);
-			kfree(mapping_info);
-			return retval;
+		buff = mapping_info;
+	} else if ((msg_type == ADD_BRIDGE_VLAN_MAPPING) ||
+		(msg_type == DEL_BRIDGE_VLAN_MAPPING)) {
+		bridge_vlan_info = kzalloc(
+			sizeof(struct ipa_ioc_bridge_vlan_mapping_info),
+			GFP_KERNEL);
+		if (!bridge_vlan_info) {
+			IPAERR("no memory\n");
+			return -ENOMEM;
 		}
+
+		if (copy_from_user((u8 *)bridge_vlan_info,
+			(void __user *)usr_param,
+			sizeof(struct ipa_ioc_bridge_vlan_mapping_info))) {
+			kfree(bridge_vlan_info);
+			IPAERR("copy from user failed\n");
+			return -EFAULT;
+		}
+
+		msg_meta.msg_len = sizeof(struct
+			ipa_ioc_bridge_vlan_mapping_info);
+		buff = bridge_vlan_info;
 	} else {
 		IPAERR("Unexpected event\n");
 		return -EFAULT;
 	}
 
+	retval = ipa3_send_msg(&msg_meta, buff,
+		ipa3_vlan_l2tp_msg_free_cb);
+	if (retval) {
+		IPAERR("ipa3_send_msg failed: %d, msg_type %d\n",
+			retval,
+			msg_type);
+		kfree(buff);
+		return retval;
+	}
+	IPADBG("exit\n");
+
 	return 0;
 }
 
@@ -1880,7 +1754,18 @@
 			break;
 		}
 		break;
-
+	case IPA_IOC_ADD_BRIDGE_VLAN_MAPPING:
+		if (ipa3_send_vlan_l2tp_msg(arg, ADD_BRIDGE_VLAN_MAPPING)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_DEL_BRIDGE_VLAN_MAPPING:
+		if (ipa3_send_vlan_l2tp_msg(arg, DEL_BRIDGE_VLAN_MAPPING)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
 	case IPA_IOC_ADD_L2TP_VLAN_MAPPING:
 		if (ipa3_send_vlan_l2tp_msg(arg, ADD_L2TP_VLAN_MAPPING)) {
 			retval = -EFAULT;
@@ -2587,9 +2472,12 @@
 		/* disable statuses for all modem controlled prod pipes */
 		if (IPA_CLIENT_IS_Q6_PROD(client_idx) ||
 			(ipa3_ctx->ep[ep_idx].valid &&
-			ipa3_ctx->ep[ep_idx].skip_ep_cfg)) {
+			ipa3_ctx->ep[ep_idx].skip_ep_cfg) ||
+			(ipa3_ctx->ep[ep_idx].client == IPA_CLIENT_APPS_WAN_PROD
+			&& ipa3_ctx->modem_cfg_emb_pipe_flt)) {
 			ipa_assert_on(num_descs >= ipa3_ctx->ipa_num_pipes);
 
+			ipa3_ctx->ep[ep_idx].status.status_en = false;
 			reg_write.skip_pipeline_clear = false;
 			reg_write.pipeline_clear_options =
 				IPAHAL_HPS_CLEAR;
@@ -2661,6 +2549,8 @@
 	 */
 	ipa3_q6_pipe_delay(false);
 
+	ipa3_set_usb_prod_pipe_delay();
+
 	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 	IPADBG_LOW("Exit with success\n");
 }
@@ -2735,6 +2625,16 @@
 	u32 *ipa_sram_mmio;
 	unsigned long phys_addr;
 
+	IPADBG(
+	    "ipa_wrapper_base(0x%08X) ipa_reg_base_ofst(0x%08X) IPA_SRAM_DIRECT_ACCESS_n(0x%08X) smem_restricted_bytes(0x%08X) smem_sz(0x%08X)\n",
+	    ipa3_ctx->ipa_wrapper_base,
+	    ipa3_ctx->ctrl->ipa_reg_base_ofst,
+	    ipahal_get_reg_n_ofst(
+		IPA_SRAM_DIRECT_ACCESS_n,
+		ipa3_ctx->smem_restricted_bytes / 4),
+	    ipa3_ctx->smem_restricted_bytes,
+	    ipa3_ctx->smem_sz);
+
 	phys_addr = ipa3_ctx->ipa_wrapper_base +
 		ipa3_ctx->ctrl->ipa_reg_base_ofst +
 		ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n,
@@ -3207,21 +3107,27 @@
 	}
 	IPADBG("Apps to IPA cmd pipe is connected\n");
 
+	IPADBG("Will initialize SRAM\n");
 	ipa3_ctx->ctrl->ipa_init_sram();
 	IPADBG("SRAM initialized\n");
 
+	IPADBG("Will initialize HDR\n");
 	ipa3_ctx->ctrl->ipa_init_hdr();
 	IPADBG("HDR initialized\n");
 
+	IPADBG("Will initialize V4 RT\n");
 	ipa3_ctx->ctrl->ipa_init_rt4();
 	IPADBG("V4 RT initialized\n");
 
+	IPADBG("Will initialize V6 RT\n");
 	ipa3_ctx->ctrl->ipa_init_rt6();
 	IPADBG("V6 RT initialized\n");
 
+	IPADBG("Will initialize V4 FLT\n");
 	ipa3_ctx->ctrl->ipa_init_flt4();
 	IPADBG("V4 FLT initialized\n");
 
+	IPADBG("Will initialize V6 FLT\n");
 	ipa3_ctx->ctrl->ipa_init_flt6();
 	IPADBG("V6 FLT initialized\n");
 
@@ -3947,16 +3853,18 @@
 {
 	u32 clk_rate;
 
-	if (!ipa3_ctx->enable_clock_scaling)
+	IPADBG_LOW("idx = %d\n", idx);
+
+	if (!ipa3_ctx->enable_clock_scaling) {
+		ipa3_ctx->ipa3_active_clients.bus_vote_idx = idx;
 		return 0;
+	}
 
 	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_NORMAL) {
 		IPAERR("not supported in this mode\n");
 		return 0;
 	}
 
-	IPADBG_LOW("idx = %d\n", idx);
-
 	if (idx <= 0 || idx >= ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases) {
 		IPAERR("bad voltage\n");
 		return -EINVAL;
@@ -4374,7 +4282,7 @@
 		IPAERR("uC panic handler failed %d\n", res);
 
 	if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) != 0)
-		ipahal_print_all_regs();
+		ipahal_print_all_regs(false);
 
 	return NOTIFY_DONE;
 }
@@ -4410,8 +4318,8 @@
 	int result;
 
 	result = gsi_configure_regs(ipa3_res.transport_mem_base,
-		ipa3_res.transport_mem_size,
-		ipa3_res.ipa_mem_base);
+				    ipa3_res.transport_mem_size,
+				    ipa3_res.ipa_mem_base);
 	if (result) {
 		IPAERR("Failed to configure GSI registers\n");
 		return -EINVAL;
@@ -4504,12 +4412,19 @@
 	if (ipa3_ctx->ipa_hw_type != IPA_HW_v4_0)
 		ipa3_proxy_clk_vote();
 
-	/* SMMU was already attached if used, safe to do allocations */
-	if (ipahal_init(ipa3_ctx->ipa_hw_type, ipa3_ctx->mmio,
-		ipa3_ctx->pdev)) {
-		IPAERR("fail to init ipahal\n");
-		result = -EFAULT;
-		goto fail_ipahal;
+	/*
+	 * SMMU was already attached if used, safe to do allocations
+	 *
+	 * NOTE WELL: On an emulation system, this allocation is done
+	 *            in ipa3_pre_init()
+	 */
+	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) {
+		if (ipahal_init(ipa3_ctx->ipa_hw_type, ipa3_ctx->mmio,
+				ipa3_ctx->pdev)) {
+			IPAERR("fail to init ipahal\n");
+			result = -EFAULT;
+			goto fail_ipahal;
+		}
 	}
 
 	result = ipa3_init_hw();
@@ -4644,9 +4559,18 @@
 	gsi_props.ver = ipa3_get_gsi_ver(resource_p->ipa_hw_type);
 	gsi_props.ee = resource_p->ee;
 	gsi_props.intr = GSI_INTR_IRQ;
-	gsi_props.irq = resource_p->transport_irq;
 	gsi_props.phys_addr = resource_p->transport_mem_base;
 	gsi_props.size = resource_p->transport_mem_size;
+	if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
+		gsi_props.irq = resource_p->emulator_irq;
+		gsi_props.emulator_intcntrlr_client_isr = ipa3_get_isr();
+		gsi_props.emulator_intcntrlr_addr =
+		    resource_p->emulator_intcntrlr_mem_base;
+		gsi_props.emulator_intcntrlr_size =
+		    resource_p->emulator_intcntrlr_mem_size;
+	} else {
+		gsi_props.irq = resource_p->transport_irq;
+	}
 	gsi_props.notify_cb = ipa_gsi_notify_cb;
 	gsi_props.req_clk_cb = NULL;
 	gsi_props.rel_clk_cb = NULL;
@@ -4714,12 +4638,12 @@
 
 	ipa3_register_panic_hdlr();
 
+	ipa3_debugfs_init();
+
 	mutex_lock(&ipa3_ctx->lock);
 	ipa3_ctx->ipa_initialization_complete = true;
 	mutex_unlock(&ipa3_ctx->lock);
 
-	ipa3_debugfs_init();
-
 	ipa3_trigger_ipa_ready_cbs();
 	complete_all(&ipa3_ctx->init_completion_obj);
 	pr_info("IPA driver initialization was successful.\n");
@@ -4732,15 +4656,16 @@
 	gsi_deregister_device(ipa3_ctx->gsi_dev_hdl, false);
 fail_register_device:
 	ipa3_destroy_flt_tbl_idrs();
-	ipa3_proxy_clk_unvote();
 fail_allok_pkt_init:
 	ipa3_nat_ipv6ct_destroy_devices();
 fail_nat_ipv6ct_init_dev:
 	ipa3_free_dma_task_for_gsi();
 fail_dma_task:
 fail_init_hw:
-	ipahal_destroy();
+	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION)
+		ipahal_destroy();
 fail_ipahal:
+	ipa3_proxy_clk_unvote();
 
 	return result;
 }
@@ -4749,10 +4674,24 @@
 {
 	int result;
 	const struct firmware *fw;
+	const char *path = IPA_FWS_PATH;
 
-	IPADBG("Manual FW loading process initiated\n");
+	if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
+		switch (ipa3_get_emulation_type()) {
+		case IPA_HW_v3_5_1:
+			path = IPA_FWS_PATH_3_5_1;
+			break;
+		case IPA_HW_v4_0:
+			path = IPA_FWS_PATH_4_0;
+			break;
+		default:
+			break;
+		}
+	}
 
-	result = request_firmware(&fw, IPA_FWS_PATH, ipa3_ctx->cdev.dev);
+	IPADBG("Manual FW loading (%s) process initiated\n", path);
+
+	result = request_firmware(&fw, path, ipa3_ctx->cdev.dev);
 	if (result < 0) {
 		IPAERR("request_firmware failed, error %d\n", result);
 		return result;
@@ -4764,7 +4703,13 @@
 
 	IPADBG("FWs are available for loading\n");
 
-	result = ipa3_load_fws(fw, ipa3_res.transport_mem_base);
+	if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
+		result = emulator_load_fws(fw,
+					   ipa3_res.transport_mem_base,
+					   ipa3_res.transport_mem_size);
+	} else {
+		result = ipa3_load_fws(fw, ipa3_res.transport_mem_base);
+	}
 	if (result) {
 		IPAERR("Manual IPA FWs loading has failed\n");
 		release_firmware(fw);
@@ -4783,6 +4728,7 @@
 	release_firmware(fw);
 
 	IPADBG("Manual FW loading process is complete\n");
+
 	return 0;
 }
 
@@ -4816,7 +4762,8 @@
 
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
 
-	if (ipa3_is_msm_device() || (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5))
+	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION &&
+	    (ipa3_is_msm_device() || (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5)))
 		result = ipa3_pil_load_ipa_fws();
 	else
 		result = ipa3_manual_load_ipa_fws();
@@ -5094,6 +5041,7 @@
 	ipa3_ctx->apply_rg10_wa = resource_p->apply_rg10_wa;
 	ipa3_ctx->gsi_ch20_wa = resource_p->gsi_ch20_wa;
 	ipa3_ctx->use_ipa_pm = resource_p->use_ipa_pm;
+	ipa3_ctx->wdi_over_pcie = resource_p->wdi_over_pcie;
 	ipa3_ctx->ipa3_active_clients_logging.log_rdy = false;
 	ipa3_ctx->ipa_config_is_mhi = resource_p->ipa_mhi_dynamic_config;
 	ipa3_ctx->mhi_evid_limits[0] = resource_p->mhi_evid_limits[0];
@@ -5153,7 +5101,8 @@
 		goto fail_init_mem_partition;
 	}
 
-	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL) {
+	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL &&
+	    ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) {
 		ipa3_ctx->ctrl->msm_bus_data_ptr =
 			msm_bus_cl_get_pdata(ipa3_ctx->master_pdev);
 		if (ipa3_ctx->ctrl->msm_bus_data_ptr == NULL) {
@@ -5203,6 +5152,28 @@
 		goto fail_remap;
 	}
 
+	IPADBG(
+	    "base(0x%x)+offset(0x%x)=(0x%x) mapped to (%pK) with len (0x%x)\n",
+	    resource_p->ipa_mem_base,
+	    ipa3_ctx->ctrl->ipa_reg_base_ofst,
+	    resource_p->ipa_mem_base + ipa3_ctx->ctrl->ipa_reg_base_ofst,
+	    ipa3_ctx->mmio,
+	    resource_p->ipa_mem_size);
+
+	/*
+	 * Emulation requires ipahal to be initialized early, before
+	 * FW download, hence the call here.
+	 */
+	if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
+		if (ipahal_init(ipa3_ctx->ipa_hw_type,
+				ipa3_ctx->mmio,
+				&(ipa3_ctx->master_pdev->dev))) {
+			IPAERR("fail to init ipahal\n");
+			result = -EFAULT;
+			goto fail_ipahal_init;
+		}
+	}
+
 	mutex_init(&ipa3_ctx->ipa3_active_clients.mutex);
 
 	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE");
@@ -5411,10 +5382,13 @@
 	 * We can't register the GSI driver yet, as it expects
 	 * the GSI FW to be up and running before the registration.
 	 *
-	 * For IPA3.0, the GSI configuration is done by the GSI driver.
+	 * For IPA3.0 and the emulation system, the GSI configuration
+	 * is done by the GSI driver.
+	 *
 	 * For IPA3.1 (and on), the GSI configuration is done by TZ.
 	 */
-	if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_0) {
+	if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_0 ||
+	    ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
 		result = ipa3_gsi_pre_fw_load_init();
 		if (result) {
 			IPAERR("gsi pre FW loading config failed\n");
@@ -5492,6 +5466,9 @@
 fail_create_transport_wq:
 	destroy_workqueue(ipa3_ctx->power_mgmt_wq);
 fail_init_hw:
+	if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION)
+		ipahal_destroy();
+fail_ipahal_init:
 	iounmap(ipa3_ctx->mmio);
 fail_remap:
 	ipa3_disable_clks();
@@ -5804,6 +5781,7 @@
 			&ipa_drv_res->ee);
 	if (result)
 		ipa_drv_res->ee = 0;
+	IPADBG(":ee = %u\n", ipa_drv_res->ee);
 
 	ipa_drv_res->apply_rg10_wa =
 		of_property_read_bool(pdev->dev.of_node,
@@ -5816,7 +5794,7 @@
 		of_property_read_bool(pdev->dev.of_node,
 		"qcom,do-not-use-ch-gsi-20");
 	IPADBG(": GSI CH 20 WA is = %s\n",
-		ipa_drv_res->apply_rg10_wa
+		ipa_drv_res->gsi_ch20_wa
 		? "Needed" : "Not needed");
 
 	elem_num = of_property_count_elems_of_size(pdev->dev.of_node,
@@ -5895,6 +5873,32 @@
 		return result;
 	}
 
+	ipa_drv_res->wdi_over_pcie =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,wlan-ce-db-over-pcie");
+	IPADBG("Is wdi_over_pcie ? (%s)\n",
+				ipa_drv_res->wdi_over_pcie ? "Yes" : "No");
+
+	/*
+	 * If we're on emulator, get its interrupt controller's mem
+	 * start and size
+	 */
+	if (ipa_drv_res->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
+		resource = platform_get_resource_byname(
+		    pdev, IORESOURCE_MEM, "intctrl-base");
+		if (!resource) {
+			IPAERR(":Can't find intctrl-base resource\n");
+			return -ENODEV;
+		}
+		ipa_drv_res->emulator_intcntrlr_mem_base =
+		    resource->start;
+		ipa_drv_res->emulator_intcntrlr_mem_size =
+		    resource_size(resource);
+		IPADBG(":using intctrl-base at 0x%x of size 0x%x\n",
+		       ipa_drv_res->emulator_intcntrlr_mem_base,
+		       ipa_drv_res->emulator_intcntrlr_mem_size);
+	}
+
 	return 0;
 }
 
@@ -6276,7 +6280,7 @@
 			iova_p, &pa_p, size_p);
 		ipa3_iommu_map(cb->mapping->domain,
 			iova_p, pa_p, size_p,
-			IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
+			IOMMU_READ | IOMMU_WRITE);
 	}
 
 
@@ -6689,5 +6693,216 @@
 	return 0;
 }
 
+/**************************************************************
+ *            PCIe Version
+ *************************************************************/
+
+int ipa3_pci_drv_probe(
+	struct pci_dev            *pci_dev,
+	struct ipa_api_controller *api_ctrl,
+	const struct of_device_id *pdrv_match)
+{
+	int result;
+	struct ipa3_plat_drv_res *ipa_drv_res;
+	u32 bar0_offset;
+	u32 mem_start;
+	u32 mem_end;
+	uint32_t bits;
+	uint32_t ipa_start, gsi_start, intctrl_start;
+	struct device *dev;
+	static struct platform_device platform_dev;
+
+	if (!pci_dev || !api_ctrl || !pdrv_match) {
+		IPAERR(
+		    "Bad arg: pci_dev (%pK) and/or api_ctrl (%pK) and/or pdrv_match (%pK)\n",
+		    pci_dev, api_ctrl, pdrv_match);
+		return -EOPNOTSUPP;
+	}
+
+	dev = &(pci_dev->dev);
+
+	IPADBG("IPA PCI driver probing started\n");
+
+	/*
+	 * Follow PCI driver flow here.
+	 * pci_enable_device:  Enables device and assigns resources
+	 * pci_request_region:  Makes BAR0 address region usable
+	 */
+	result = pci_enable_device(pci_dev);
+	if (result < 0) {
+		IPAERR("pci_enable_device() failed\n");
+		return -EOPNOTSUPP;
+	}
+
+	result = pci_request_region(pci_dev, 0, "IPA Memory");
+	if (result < 0) {
+		IPAERR("pci_request_region() failed\n");
+		pci_disable_device(pci_dev);
+		return -EOPNOTSUPP;
+	}
+
+	/*
+	 * When in the PCI/emulation environment, &platform_dev is
+	 * passed to get_ipa_dts_configuration(), but is unused, since
+	 * all usages of it in the function are replaced by the C
+	 * preprocessor with definitions from ipa_emulation_stubs.h.
+	 * Passing &platform_dev keeps static-analysis tools happy.
+	 */
+	if (get_ipa_dts_configuration(&platform_dev, &ipa3_res) != 0) {
+		IPAERR("get_ipa_dts_configuration() failed\n");
+		pci_release_region(pci_dev, 0);
+		pci_disable_device(pci_dev);
+		return -EOPNOTSUPP;
+	}
+
+	ipa_drv_res = &ipa3_res;
+
+	result =
+		of_property_read_u32(NULL, "emulator-bar0-offset",
+				     &bar0_offset);
+	if (result) {
+		IPAERR(":get resource failed for emulator-bar0-offset!\n");
+		pci_release_region(pci_dev, 0);
+		pci_disable_device(pci_dev);
+		return -ENODEV;
+	}
+	IPADBG(":using emulator-bar0-offset 0x%08X\n", bar0_offset);
+
+	ipa_start     = ipa_drv_res->ipa_mem_base;
+	gsi_start     = ipa_drv_res->transport_mem_base;
+	intctrl_start = ipa_drv_res->emulator_intcntrlr_mem_base;
+
+	/*
+	 * Where will we be interrupted?
+	 */
+	ipa_drv_res->emulator_irq = pci_dev->irq;
+	IPADBG(
+	    "EMULATION PCI_INTERRUPT_PIN(%u)\n",
+	    ipa_drv_res->emulator_irq);
+
+	/*
+	 * Set the ipa_mem_base to the PCI base address of BAR0
+	 */
+	mem_start = pci_resource_start(pci_dev, 0);
+	mem_end   = pci_resource_end(pci_dev, 0);
+
+	IPADBG("PCI START                = 0x%x\n", mem_start);
+	IPADBG("PCI END                  = 0x%x\n", mem_end);
+
+	ipa_drv_res->ipa_mem_base = mem_start + bar0_offset;
+
+	smmu_info.ipa_base = ipa_drv_res->ipa_mem_base;
+	smmu_info.ipa_size = ipa_drv_res->ipa_mem_size;
+
+	ipa_drv_res->transport_mem_base =
+	    ipa_drv_res->ipa_mem_base + (gsi_start - ipa_start);
+
+	ipa_drv_res->emulator_intcntrlr_mem_base =
+	    ipa_drv_res->ipa_mem_base + (intctrl_start - ipa_start);
+
+	IPADBG("ipa_mem_base                = 0x%x\n",
+	       ipa_drv_res->ipa_mem_base);
+	IPADBG("ipa_mem_size                = 0x%x\n",
+	       ipa_drv_res->ipa_mem_size);
+
+	IPADBG("transport_mem_base          = 0x%x\n",
+	       ipa_drv_res->transport_mem_base);
+	IPADBG("transport_mem_size          = 0x%x\n",
+	       ipa_drv_res->transport_mem_size);
+
+	IPADBG("emulator_intcntrlr_mem_base = 0x%x\n",
+	       ipa_drv_res->emulator_intcntrlr_mem_base);
+	IPADBG("emulator_intcntrlr_mem_size = 0x%x\n",
+	       ipa_drv_res->emulator_intcntrlr_mem_size);
+
+	result = ipa3_bind_api_controller(ipa_drv_res->ipa_hw_type, api_ctrl);
+	if (result != 0) {
+		IPAERR("ipa3_bind_api_controller() failed\n");
+		pci_release_region(pci_dev, 0);
+		pci_disable_device(pci_dev);
+		return result;
+	}
+
+	bits = (ipa_drv_res->use_64_bit_dma_mask) ? 64 : 32;
+
+	if (dma_set_mask(dev, DMA_BIT_MASK(bits)) != 0) {
+		IPAERR("dma_set_mask(%pK, %u) failed\n", dev, bits);
+		pci_release_region(pci_dev, 0);
+		pci_disable_device(pci_dev);
+		return -EOPNOTSUPP;
+	}
+
+	if (dma_set_coherent_mask(dev, DMA_BIT_MASK(bits)) != 0) {
+		IPAERR("dma_set_coherent_mask(%pK, %u) failed\n", dev, bits);
+		pci_release_region(pci_dev, 0);
+		pci_disable_device(pci_dev);
+		return -EOPNOTSUPP;
+	}
+
+	pci_set_master(pci_dev);
+
+	memset(&platform_dev, 0, sizeof(platform_dev));
+	platform_dev.dev = *dev;
+
+	/* Proceed to real initialization */
+	result = ipa3_pre_init(&ipa3_res, &platform_dev);
+	if (result) {
+		IPAERR("ipa3_init failed\n");
+		pci_clear_master(pci_dev);
+		pci_release_region(pci_dev, 0);
+		pci_disable_device(pci_dev);
+		return result;
+	}
+
+	return result;
+}
+
+/*
+ * The following returns transport register memory location and
+ * size...
+ */
+int ipa3_get_transport_info(
+	phys_addr_t *phys_addr_ptr,
+	unsigned long *size_ptr)
+{
+	if (!phys_addr_ptr || !size_ptr) {
+		IPAERR("Bad arg: phys_addr_ptr(%pK) and/or size_ptr(%pK)\n",
+		       phys_addr_ptr, size_ptr);
+		return -EINVAL;
+	}
+
+	*phys_addr_ptr = ipa3_res.transport_mem_base;
+	*size_ptr      = ipa3_res.transport_mem_size;
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa3_get_transport_info);
+
+static uint emulation_type = IPA_HW_v4_0;
+
+/*
+ * The following returns emulation type...
+ */
+uint ipa3_get_emulation_type(void)
+{
+	return emulation_type;
+}
+
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("IPA HW device driver");
+
+/*
+ * Module parameter. Invoke as follows:
+ *     insmod ipat.ko emulation_type=[13|14|...|N]
+ * Examples:
+ *   insmod ipat.ko emulation_type=13 # for IPA 3.5.1
+ *   insmod ipat.ko emulation_type=14 # for IPA 4.0
+ *
+ * NOTE: The emulation_type values need to come from: enum ipa_hw_type
+ *
+ */
+
+module_param(emulation_type, uint, 0000);
+MODULE_PARM_DESC(
+	emulation_type,
+	"IPA emulation type (Use 13 for IPA 3.5.1, 14 for IPA 4.0)");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index fe39440..5bcd49e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -635,6 +635,69 @@
 	return 0;
 }
 
+void ipa3_register_lock_unlock_callback(int (*client_cb)(bool is_lock),
+						u32 ipa_ep_idx)
+{
+	struct ipa3_ep_context *ep;
+
+	IPADBG("entry\n");
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (!ep->valid) {
+		IPAERR("Invalid EP\n");
+		return;
+	}
+
+	if (client_cb == NULL) {
+		IPAERR("Bad Param\n");
+		return;
+	}
+
+	ep->client_lock_unlock = client_cb;
+	IPADBG("exit\n");
+}
+
+void ipa3_deregister_lock_unlock_callback(u32 ipa_ep_idx)
+{
+	struct ipa3_ep_context *ep;
+
+	IPADBG("entry\n");
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (!ep->valid) {
+		IPAERR("Invalid EP\n");
+		return;
+	}
+
+	if (ep->client_lock_unlock == NULL) {
+		IPAERR("client_lock_unlock is already NULL\n");
+		return;
+	}
+
+	ep->client_lock_unlock = NULL;
+	IPADBG("exit\n");
+}
+
+static void client_lock_unlock_cb(u32 ipa_ep_idx, bool is_lock)
+{
+	struct ipa3_ep_context *ep;
+
+	IPADBG("entry\n");
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (!ep->valid) {
+		IPAERR("Invalid EP\n");
+		return;
+	}
+
+	if (ep->client_lock_unlock)
+		ep->client_lock_unlock(is_lock);
+
+	IPADBG("exit\n");
+}
 
 int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params,
 			     struct ipa_req_chan_out_params *out_params)
@@ -1259,6 +1322,46 @@
 	return result;
 }
 
+/*
+ * Set USB PROD pipe delay for MBIM/RMNET config.
+ * Clocks should be voted and locks should be taken
+ * before calling this API.
+ */
+
+void ipa3_set_usb_prod_pipe_delay(void)
+{
+	int result;
+	int pipe_idx;
+	struct ipa3_ep_context *ep;
+	struct ipa_ep_cfg_ctrl ep_ctrl;
+
+	memset(&ep_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+	ep_ctrl.ipa_ep_delay = true;
+
+	pipe_idx = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD);
+
+	if (pipe_idx == IPA_EP_NOT_ALLOCATED) {
+		IPAERR("client (%d) not valid\n", IPA_CLIENT_USB_PROD);
+		return;
+	}
+
+	ep = &ipa3_ctx->ep[pipe_idx];
+
+	/* Setting delay on USB_PROD with skip_ep_cfg */
+	client_lock_unlock_cb(pipe_idx, true);
+	if (ep->valid && ep->skip_ep_cfg) {
+		ep->ep_delay_set = ep_ctrl.ipa_ep_delay;
+		result = ipa3_cfg_ep_ctrl(pipe_idx, &ep_ctrl);
+		if (result)
+			IPAERR("client (ep: %d) failed result=%d\n",
+				pipe_idx, result);
+		else
+			IPADBG("client (ep: %d) success\n", pipe_idx);
+	}
+	client_lock_unlock_cb(pipe_idx, false);
+}
+
 void ipa3_xdci_ep_delay_rm(u32 clnt_hdl)
 {
 	struct ipa3_ep_context *ep;
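
The ipa3_register_lock_unlock_callback()/ipa3_deregister_lock_unlock_callback() pair added above lets a peripheral client hand the driver a hook that client_lock_unlock_cb() invokes around pipe reconfiguration such as ipa3_set_usb_prod_pipe_delay(). A hedged usage sketch follows, assuming <linux/mutex.h>; the my_client_* names and the mutex are hypothetical, and only the two registration calls and the callback signature come from the patch.

static DEFINE_MUTEX(my_client_ep_mutex);

/* invoked by the IPA driver: is_lock=true before the pipe is touched,
 * is_lock=false once the operation is done
 */
static int my_client_lock_unlock(bool is_lock)
{
	if (is_lock)
		mutex_lock(&my_client_ep_mutex);
	else
		mutex_unlock(&my_client_ep_mutex);

	return 0;
}

static void my_client_connect_done(u32 ipa_ep_idx)
{
	/* install the hook once the endpoint is set up and valid */
	ipa3_register_lock_unlock_callback(my_client_lock_unlock, ipa_ep_idx);
}

static void my_client_before_disconnect(u32 ipa_ep_idx)
{
	/* remove the hook before the endpoint is torn down */
	ipa3_deregister_lock_unlock_callback(ipa_ep_idx);
}
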
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index ee9c49c..10abdcf 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -73,6 +73,8 @@
 	__stringify(DEL_L2TP_VLAN_MAPPING),
 	__stringify(IPA_PER_CLIENT_STATS_CONNECT_EVENT),
 	__stringify(IPA_PER_CLIENT_STATS_DISCONNECT_EVENT),
+	__stringify(ADD_BRIDGE_VLAN_MAPPING),
+	__stringify(DEL_BRIDGE_VLAN_MAPPING),
 };
 
 const char *ipa3_hdr_l2_type_name[] = {
@@ -372,6 +374,8 @@
 
 	list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
 			link) {
+		if (entry->cookie != IPA_HDR_COOKIE)
+			continue;
 		nbytes = scnprintf(
 			dbg_buff,
 			IPA_MAX_MSG_LEN,
@@ -556,6 +560,12 @@
 	if (attrib->tc_eq_present)
 		pr_err("tc:%d ", attrib->tc_eq);
 
+	if (attrib->num_offset_meq_128 > IPA_IPFLTR_NUM_MEQ_128_EQNS) {
+		IPAERR_RL("num_offset_meq_128  Max %d passed value %d\n",
+		IPA_IPFLTR_NUM_MEQ_128_EQNS, attrib->num_offset_meq_128);
+		return -EPERM;
+	}
+
 	for (i = 0; i < attrib->num_offset_meq_128; i++) {
 		for (j = 0; j < 16; j++) {
 			addr[j] = attrib->offset_meq_128[i].value[j];
@@ -567,6 +577,12 @@
 			mask, addr);
 	}
 
+	if (attrib->num_offset_meq_32 > IPA_IPFLTR_NUM_MEQ_32_EQNS) {
+		IPAERR_RL("num_offset_meq_32  Max %d passed value %d\n",
+		IPA_IPFLTR_NUM_MEQ_32_EQNS, attrib->num_offset_meq_32);
+		return -EPERM;
+	}
+
 	for (i = 0; i < attrib->num_offset_meq_32; i++)
 		pr_err(
 			   "(ofst_meq32: ofst:%u mask:0x%x val:0x%x) ",
@@ -574,6 +590,12 @@
 			   attrib->offset_meq_32[i].mask,
 			   attrib->offset_meq_32[i].value);
 
+	if (attrib->num_ihl_offset_meq_32 > IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS) {
+		IPAERR_RL("num_ihl_offset_meq_32  Max %d passed value %d\n",
+		IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS, attrib->num_ihl_offset_meq_32);
+		return -EPERM;
+	}
+
 	for (i = 0; i < attrib->num_ihl_offset_meq_32; i++)
 		pr_err(
 			"(ihl_ofst_meq32: ofts:%d mask:0x%x val:0x%x) ",
@@ -588,6 +610,14 @@
 			attrib->metadata_meq32.mask,
 			attrib->metadata_meq32.value);
 
+	if (attrib->num_ihl_offset_range_16 >
+			IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS) {
+		IPAERR_RL("num_ihl_offset_range_16  Max %d passed value %d\n",
+			IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS,
+			attrib->num_ihl_offset_range_16);
+		return -EPERM;
+	}
+
 	for (i = 0; i < attrib->num_ihl_offset_range_16; i++)
 		pr_err(
 			   "(ihl_ofst_range16: ofst:%u lo:%u hi:%u) ",
@@ -780,7 +810,11 @@
 			pr_err("rule_id:%u prio:%u retain_hdr:%u ",
 				rules[rl].id, rules[rl].priority,
 				rules[rl].retain_hdr);
-			ipa3_attrib_dump_eq(&rules[rl].eq_attrib);
+			res = ipa3_attrib_dump_eq(&rules[rl].eq_attrib);
+			if (res) {
+				IPAERR_RL("failed read attrib eq\n");
+				goto bail;
+			}
 		}
 
 		pr_err("=== Routing Table %d = Non-Hashable Rules ===\n", tbl);
@@ -811,7 +845,11 @@
 			pr_err("rule_id:%u prio:%u retain_hdr:%u\n",
 				rules[rl].id, rules[rl].priority,
 				rules[rl].retain_hdr);
-			ipa3_attrib_dump_eq(&rules[rl].eq_attrib);
+			res = ipa3_attrib_dump_eq(&rules[rl].eq_attrib);
+			if (res) {
+				IPAERR_RL("failed read attrib eq\n");
+				goto bail;
+			}
 		}
 		pr_err("\n");
 	}
@@ -885,6 +923,7 @@
 	u32 rt_tbl_idx;
 	u32 bitmap;
 	bool eq;
+	int res = 0;
 
 	mutex_lock(&ipa3_ctx->lock);
 
@@ -894,6 +933,8 @@
 		tbl = &ipa3_ctx->flt_tbl[j][ip];
 		i = 0;
 		list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+			if (entry->cookie != IPA_FLT_COOKIE)
+				continue;
 			if (entry->rule.eq_attrib_type) {
 				rt_tbl_idx = entry->rule.rt_tbl_idx;
 				bitmap = entry->rule.eq_attrib.rule_eq_bitmap;
@@ -919,18 +960,23 @@
 				pr_err("pdn index %d, set metadata %d ",
 					entry->rule.pdn_idx,
 					entry->rule.set_metadata);
-			if (eq)
-				ipa3_attrib_dump_eq(
-					&entry->rule.eq_attrib);
-			else
+			if (eq) {
+				res = ipa3_attrib_dump_eq(
+						&entry->rule.eq_attrib);
+				if (res) {
+					IPAERR_RL("failed read attrib eq\n");
+					goto bail;
+				}
+			} else
 				ipa3_attrib_dump(
 					&entry->rule.attrib, ip);
 			i++;
 		}
 	}
+bail:
 	mutex_unlock(&ipa3_ctx->lock);
 
-	return 0;
+	return res;
 }
 
 static ssize_t ipa3_read_flt_hw(struct file *file, char __user *ubuf,
@@ -985,7 +1031,11 @@
 				pr_err("pdn: %u, set_metadata: %u ",
 					rules[rl].rule.pdn_idx,
 					rules[rl].rule.set_metadata);
-			ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib);
+			res = ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib);
+			if (res) {
+				IPAERR_RL("failed read attrib eq\n");
+				goto bail;
+			}
 		}
 
 		pr_err("=== Filtering Table ep:%d = Non-Hashable Rules ===\n",
@@ -1013,7 +1063,11 @@
 				pr_err("pdn: %u, set_metadata: %u ",
 					rules[rl].rule.pdn_idx,
 					rules[rl].rule.set_metadata);
-			ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib);
+			res = ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib);
+			if (res) {
+				IPAERR_RL("failed read attrib eq\n");
+				goto bail;
+			}
 		}
 		pr_err("\n");
 	}
@@ -1898,7 +1952,7 @@
 		size_t count, loff_t *ppos)
 {
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
-	ipahal_print_all_regs();
+	ipahal_print_all_regs(true);
 	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 
 	return 0;
@@ -2239,6 +2293,13 @@
 		goto fail;
 	}
 
+	file = debugfs_create_u32("clk_rate", IPA_READ_ONLY_MODE,
+		dent, &ipa3_ctx->curr_ipa_clk_rate);
+	if (!file) {
+		IPAERR("could not create clk_rate file\n");
+		goto fail;
+	}
+
 	ipa_debugfs_init_stats(dent);
 
 	return;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 9a0f44a..7603152 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -35,6 +35,9 @@
 #define IPA_GENERIC_AGGR_TIME_LIMIT 1
 #define IPA_GENERIC_AGGR_PKT_LIMIT 0
 
+#define IPA_GSB_AGGR_BYTE_LIMIT 14
+#define IPA_GSB_RX_BUFF_BASE_SZ 16384
+
 #define IPA_GENERIC_RX_BUFF_BASE_SZ 8192
 #define IPA_REAL_GENERIC_RX_BUFF_SZ(X) (SKB_DATA_ALIGN(\
 		(X) + NET_SKB_PAD) +\
@@ -646,7 +649,6 @@
 	atomic_set(&comp->cnt, 2);
 
 	sys = ipa3_ctx->ep[ep_idx].sys;
-	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
 
 	if (num_desc == 1) {
 		if (descr->callback || descr->user1)
@@ -685,7 +687,6 @@
 		kfree(comp);
 
 bail:
-	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 	return result;
 }
 
@@ -1063,6 +1064,13 @@
 		goto fail_gen2;
 	}
 
+	result = gsi_start_channel(ep->gsi_chan_hdl);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("gsi_start_channel failed res=%d ep=%d.\n", result,
+			ipa_ep_idx);
+		goto fail_gen2;
+	}
+
 	if (!ep->keep_ipa_awake)
 		IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
 
@@ -1112,11 +1120,6 @@
 		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
 
 	ipa3_disable_data_path(clnt_hdl);
-	if (ep->napi_enabled) {
-		do {
-			usleep_range(95, 105);
-		} while (atomic_read(&ep->sys->curr_polling_state));
-	}
 
 	if (IPA_CLIENT_IS_PROD(ep->client)) {
 		do {
@@ -1130,9 +1133,6 @@
 		} while (1);
 	}
 
-	if (IPA_CLIENT_IS_CONS(ep->client))
-		cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
-	flush_workqueue(ep->sys->wq);
 	/* channel stop might fail on timeout if IPA is busy */
 	for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
 		result = ipa3_stop_gsi_channel(clnt_hdl);
@@ -1140,7 +1140,7 @@
 			break;
 
 		if (result != -GSI_STATUS_AGAIN &&
-		    result != -GSI_STATUS_TIMED_OUT)
+			result != -GSI_STATUS_TIMED_OUT)
 			break;
 	}
 
@@ -1149,6 +1149,17 @@
 		ipa_assert();
 		return result;
 	}
+
+	if (ep->napi_enabled) {
+		do {
+			usleep_range(95, 105);
+		} while (atomic_read(&ep->sys->curr_polling_state));
+	}
+
+	if (IPA_CLIENT_IS_CONS(ep->client))
+		cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
+	flush_workqueue(ep->sys->wq);
+
 	result = ipa3_reset_gsi_channel(clnt_hdl);
 	if (result != GSI_STATUS_SUCCESS) {
 		IPAERR("Failed to reset chan: %d.\n", result);
@@ -2935,7 +2946,6 @@
 			INIT_DELAYED_WORK(&sys->replenish_rx_work,
 				ipa3_replenish_rx_work_func);
 			atomic_set(&sys->curr_polling_state, 0);
-			sys->rx_buff_sz = IPA_ODU_RX_BUFF_SZ;
 			sys->rx_pool_sz = in->desc_fifo_sz /
 				IPA_FIFO_ELEMENT_SIZE - 1;
 			if (sys->rx_pool_sz > IPA_ODU_RX_POOL_SZ)
@@ -2943,8 +2953,23 @@
 			sys->pyld_hdlr = ipa3_odu_rx_pyld_hdlr;
 			sys->get_skb = ipa3_get_skb_ipa_rx;
 			sys->free_skb = ipa3_free_skb_rx;
-			sys->free_rx_wrapper = ipa3_free_rx_wrapper;
-			sys->repl_hdlr = ipa3_replenish_rx_cache;
+			/* recycle skb for GSB use case */
+			if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+				sys->free_rx_wrapper =
+					ipa3_free_rx_wrapper;
+				sys->repl_hdlr =
+					ipa3_replenish_rx_cache;
+				/* Overwrite buffer size & aggr limit for GSB */
+				sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(
+					IPA_GSB_RX_BUFF_BASE_SZ);
+				in->ipa_ep_cfg.aggr.aggr_byte_limit =
+					IPA_GSB_AGGR_BYTE_LIMIT;
+			} else {
+				sys->free_rx_wrapper =
+					ipa3_free_rx_wrapper;
+				sys->repl_hdlr = ipa3_replenish_rx_cache;
+				sys->rx_buff_sz = IPA_ODU_RX_BUFF_SZ;
+			}
 		} else if (in->client ==
 				IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS) {
 			IPADBG("assigning policy to client:%d",
@@ -3683,8 +3708,13 @@
 	ep->gsi_mem_info.chan_ring_base_vaddr =
 		gsi_channel_props.ring_base_vaddr;
 
-	gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+		gsi_channel_props.use_db_eng = GSI_CHAN_DIRECT_MODE;
+	else
+		gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
 	gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
+	gsi_channel_props.prefetch_mode =
+		ipa_get_ep_prefetch_mode(ep->client);
 	if (ep->client == IPA_CLIENT_APPS_CMD_PROD)
 		gsi_channel_props.low_weight = IPA_GSI_MAX_CH_LOW_WEIGHT;
 	else
@@ -3718,15 +3748,11 @@
 		goto fail_write_channel_scratch;
 	}
 
-	result = gsi_start_channel(ep->gsi_chan_hdl);
-	if (result != GSI_STATUS_SUCCESS)
-		goto fail_start_channel;
 	if (ep->client == IPA_CLIENT_MEMCPY_DMA_SYNC_CONS)
 		gsi_config_channel_mode(ep->gsi_chan_hdl,
 				GSI_CHAN_MODE_POLL);
 	return 0;
 
-fail_start_channel:
 fail_write_channel_scratch:
 	if (gsi_dealloc_channel(ep->gsi_chan_hdl)
 		!= GSI_STATUS_SUCCESS) {
@@ -3923,7 +3949,12 @@
 		dma_alloc_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len,
 		&dma_addr, 0);
 	gsi_channel_props.ring_base_addr = dma_addr;
-	gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+		gsi_channel_props.use_db_eng = GSI_CHAN_DIRECT_MODE;
+	else
+		gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
+
 	gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
 	gsi_channel_props.low_weight = 1;
 	gsi_channel_props.err_cb = ipa_gsi_chan_err_cb;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dtsi_replacement.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dtsi_replacement.c
new file mode 100644
index 0000000..5fe2294
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dtsi_replacement.c
@@ -0,0 +1,765 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/msm_ipa.h>
+#include "ipa_i.h"
+#include "ipa_emulation_stubs.h"
+
+# undef strsame
+# define strsame(x, y) \
+	(!strcmp((x), (y)))
+
+/*
+ * The following enum values are used to index the tables below.
+ */
+enum dtsi_index_e {
+	DTSI_INDEX_3_5_1 = 0,
+	DTSI_INDEX_4_0   = 1,
+};
+
+struct dtsi_replacement_u32 {
+	char *key;
+	u32 value;
+};
+
+struct dtsi_replacement_u32_table {
+	struct dtsi_replacement_u32 *p_table;
+	u32 num_entries;
+};
+
+struct dtsi_replacement_bool {
+	char *key;
+	bool value;
+};
+
+struct dtsi_replacement_bool_table {
+	struct dtsi_replacement_bool *p_table;
+	u32 num_entries;
+};
+
+struct dtsi_replacement_u32_array {
+	char *key;
+	u32 *p_value;
+	u32 num_elements;
+};
+
+struct dtsi_replacement_u32_array_table {
+	struct dtsi_replacement_u32_array *p_table;
+	u32 num_entries;
+};
+
+struct dtsi_replacement_resource_table {
+	struct resource *p_table;
+	u32 num_entries;
+};
+
+/*
+ * Any of the data below with _4_0 in the name represent data taken
+ * from the 4.0 dtsi file.
+ *
+ * Any of the data below with _3_5_1 in the name represent data taken
+ * from the 3.5.1 dtsi file.
+ */
+static struct dtsi_replacement_bool ipa3_plat_drv_bool_4_0[] = {
+	{"qcom,use-ipa-tethering-bridge",       true},
+	{"qcom,modem-cfg-emb-pipe-flt",         true},
+	{"qcom,ipa-wdi2",                       true},
+	{"qcom,use-64-bit-dma-mask",            false},
+	{"qcom,bandwidth-vote-for-ipa",         false},
+	{"qcom,skip-uc-pipe-reset",             false},
+	{"qcom,tethered-flow-control",          true},
+	{"qcom,use-rg10-limitation-mitigation", false},
+	{"qcom,do-not-use-ch-gsi-20",           false},
+	{"qcom,use-ipa-pm",                     false},
+};
+
+static struct dtsi_replacement_bool ipa3_plat_drv_bool_3_5_1[] = {
+	{"qcom,use-ipa-tethering-bridge",       true},
+	{"qcom,modem-cfg-emb-pipe-flt",         true},
+	{"qcom,ipa-wdi2",                       true},
+	{"qcom,use-64-bit-dma-mask",            false},
+	{"qcom,bandwidth-vote-for-ipa",         true},
+	{"qcom,skip-uc-pipe-reset",             false},
+	{"qcom,tethered-flow-control",          false},
+	{"qcom,use-rg10-limitation-mitigation", false},
+	{"qcom,do-not-use-ch-gsi-20",           false},
+	{"qcom,use-ipa-pm",                     false},
+};
+
+static struct dtsi_replacement_bool_table
+ipa3_plat_drv_bool_table[] = {
+	{ ipa3_plat_drv_bool_3_5_1,
+	  ARRAY_SIZE(ipa3_plat_drv_bool_3_5_1) },
+	{ ipa3_plat_drv_bool_4_0,
+	  ARRAY_SIZE(ipa3_plat_drv_bool_4_0) },
+};
+
+static struct dtsi_replacement_u32 ipa3_plat_drv_u32_4_0[] = {
+	{"qcom,ipa-hw-ver",                     IPA_HW_v4_0},
+	{"qcom,ipa-hw-mode",                    3},
+	{"qcom,wan-rx-ring-size",               192},
+	{"qcom,lan-rx-ring-size",               192},
+	{"qcom,ee",                             0},
+	{"emulator-bar0-offset",                0x01C00000},
+};
+
+static struct dtsi_replacement_u32 ipa3_plat_drv_u32_3_5_1[] = {
+	{"qcom,ipa-hw-ver",                     IPA_HW_v3_5_1},
+	{"qcom,ipa-hw-mode",                    3},
+	{"qcom,wan-rx-ring-size",               192},
+	{"qcom,lan-rx-ring-size",               192},
+	{"qcom,ee",                             0},
+	{"emulator-bar0-offset",                0x01C00000},
+};
+
+static struct dtsi_replacement_u32_table ipa3_plat_drv_u32_table[] = {
+	{ ipa3_plat_drv_u32_3_5_1,
+	  ARRAY_SIZE(ipa3_plat_drv_u32_3_5_1) },
+	{ ipa3_plat_drv_u32_4_0,
+	  ARRAY_SIZE(ipa3_plat_drv_u32_4_0) },
+};
+
+static u32 mhi_event_ring_id_limits_array_4_0[] = {
+	9, 10
+};
+
+static u32 mhi_event_ring_id_limits_array_3_5_1[] = {
+	IPA_MHI_GSI_EVENT_RING_ID_START, IPA_MHI_GSI_EVENT_RING_ID_END
+};
+
+static u32 ipa_tz_unlock_reg_array_4_0[] = {
+	0x04043583c, 0x00001000
+};
+
+static u32 ipa_tz_unlock_reg_array_3_5_1[] = {
+	0x04043583c, 0x00001000
+};
+
+static u32 ipa_ram_mmap_array_4_0[] = {
+	0x00000280, 0x00000000, 0x00000000, 0x00000288, 0x00000078,
+	0x00004000, 0x00000308, 0x00000078, 0x00004000, 0x00000388,
+	0x00000078, 0x00004000, 0x00000408, 0x00000078, 0x00004000,
+	0x0000000F, 0x00000000, 0x00000007, 0x00000008, 0x0000000E,
+	0x00000488, 0x00000078, 0x00004000, 0x00000508, 0x00000078,
+	0x00004000, 0x0000000F, 0x00000000, 0x00000007, 0x00000008,
+	0x0000000E, 0x00000588, 0x00000078, 0x00004000, 0x00000608,
+	0x00000078, 0x00004000, 0x00000688, 0x00000140, 0x000007C8,
+	0x00000000, 0x00000800, 0x000007D0, 0x00000200, 0x000009D0,
+	0x00000200, 0x00000000, 0x00000000, 0x00000000, 0x000013F0,
+	0x0000100C, 0x000023FC, 0x00000000, 0x000023FC, 0x00000000,
+	0x000023FC, 0x00000000, 0x000023FC, 0x00000000, 0x00000080,
+	0x00000200, 0x00002800, 0x000023FC, 0x00000000, 0x000023FC,
+	0x00000000, 0x000023FC, 0x00000000, 0x000023FC, 0x00000000,
+	0x00002400, 0x00000400, 0x00000BD8, 0x00000050, 0x00000C30,
+	0x00000060, 0x00000C90, 0x00000140, 0x00000DD0, 0x00000180,
+	0x00000F50, 0x00000180, 0x000010D0, 0x00000180, 0x00001250,
+	0x00000180, 0x000013D0, 0x00000020
+};
+
+static u32 ipa_ram_mmap_array_3_5_1[] = {
+	0x00000280, 0x00000000, 0x00000000, 0x00000288, 0x00000078,
+	0x00004000, 0x00000308, 0x00000078, 0x00004000, 0x00000388,
+	0x00000078, 0x00004000, 0x00000408, 0x00000078, 0x00004000,
+	0x0000000F, 0x00000000, 0x00000007, 0x00000008, 0x0000000E,
+	0x00000488, 0x00000078, 0x00004000, 0x00000508, 0x00000078,
+	0x00004000, 0x0000000F, 0x00000000, 0x00000007, 0x00000008,
+	0x0000000E, 0x00000588, 0x00000078, 0x00004000, 0x00000608,
+	0x00000078, 0x00004000, 0x00000688, 0x00000140, 0x000007C8,
+	0x00000000, 0x00000800, 0x000007D0, 0x00000200, 0x000009D0,
+	0x00000200, 0x00000000, 0x00000000, 0x00000000, 0x00000BD8,
+	0x00001024, 0x00002000, 0x00000000, 0x00002000, 0x00000000,
+	0x00002000, 0x00000000, 0x00002000, 0x00000000, 0x00000080,
+	0x00000200, 0x00002000, 0x00002000, 0x00000000, 0x00002000,
+	0x00000000, 0x00002000, 0x00000000, 0x00002000, 0x00000000,
+	0x00001C00, 0x00000400
+};
+
+struct dtsi_replacement_u32_array ipa3_plat_drv_u32_array_4_0[] = {
+	{"qcom,mhi-event-ring-id-limits",
+	 mhi_event_ring_id_limits_array_4_0,
+	 ARRAY_SIZE(mhi_event_ring_id_limits_array_4_0) },
+	{"qcom,ipa-tz-unlock-reg",
+	 ipa_tz_unlock_reg_array_4_0,
+	 ARRAY_SIZE(ipa_tz_unlock_reg_array_4_0) },
+	{"qcom,ipa-ram-mmap",
+	 ipa_ram_mmap_array_4_0,
+	 ARRAY_SIZE(ipa_ram_mmap_array_4_0) },
+};
+
+struct dtsi_replacement_u32_array ipa3_plat_drv_u32_array_3_5_1[] = {
+	{"qcom,mhi-event-ring-id-limits",
+	 mhi_event_ring_id_limits_array_3_5_1,
+	 ARRAY_SIZE(mhi_event_ring_id_limits_array_3_5_1) },
+	{"qcom,ipa-tz-unlock-reg",
+	 ipa_tz_unlock_reg_array_3_5_1,
+	 ARRAY_SIZE(ipa_tz_unlock_reg_array_3_5_1) },
+	{"qcom,ipa-ram-mmap",
+	 ipa_ram_mmap_array_3_5_1,
+	 ARRAY_SIZE(ipa_ram_mmap_array_3_5_1) },
+};
+
+struct dtsi_replacement_u32_array_table
+ipa3_plat_drv_u32_array_table[] = {
+	{ ipa3_plat_drv_u32_array_3_5_1,
+	  ARRAY_SIZE(ipa3_plat_drv_u32_array_3_5_1) },
+	{ ipa3_plat_drv_u32_array_4_0,
+	  ARRAY_SIZE(ipa3_plat_drv_u32_array_4_0) },
+};
+
+#define INTCTRL_OFFSET       0x083C0000
+#define INTCTRL_SIZE         0x00000110
+
+#define IPA_BASE_OFFSET_4_0  0x01e00000
+#define IPA_BASE_SIZE_4_0    0x00034000
+#define GSI_BASE_OFFSET_4_0  0x01e04000
+#define GSI_BASE_SIZE_4_0    0x00028000
+
+struct resource ipa3_plat_drv_resource_4_0[] = {
+	/*
+	 * PLEASE NOTE WELL: The following offset values
+	 * ("ipa-base", "gsi-base", and "intctrl-base") are used to
+	 * calculate offsets relative to the PCI BAR0 address provided
+	 * by the PCI probe.  After their use to calculate the
+	 * offsets, they are not used again, since PCI ultimately
+	 * dictates where things live.
+	 */
+	{
+		IPA_BASE_OFFSET_4_0,
+		(IPA_BASE_OFFSET_4_0 + IPA_BASE_SIZE_4_0),
+		"ipa-base",
+		IORESOURCE_MEM,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	{
+		GSI_BASE_OFFSET_4_0,
+		(GSI_BASE_OFFSET_4_0 + GSI_BASE_SIZE_4_0),
+		"gsi-base",
+		IORESOURCE_MEM,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	/*
+	 * The following entry is germane only to the emulator
+	 * environment.  It is needed to locate the emulator's PCI
+	 * interrupt controller...
+	 */
+	{
+		INTCTRL_OFFSET,
+		(INTCTRL_OFFSET + INTCTRL_SIZE),
+		"intctrl-base",
+		IORESOURCE_MEM,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	{
+		IPA_PIPE_MEM_START_OFST,
+		(IPA_PIPE_MEM_START_OFST + IPA_PIPE_MEM_SIZE),
+		"ipa-pipe-mem",
+		IORESOURCE_MEM,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	{
+		0,
+		0,
+		"gsi-irq",
+		IORESOURCE_IRQ,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	{
+		0,
+		0,
+		"ipa-irq",
+		IORESOURCE_IRQ,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+};
+
+#define IPA_BASE_OFFSET_3_5_1  0x01e00000
+#define IPA_BASE_SIZE_3_5_1    0x00034000
+#define GSI_BASE_OFFSET_3_5_1  0x01e04000
+#define GSI_BASE_SIZE_3_5_1    0x0002c000
+
+struct resource ipa3_plat_drv_resource_3_5_1[] = {
+	/*
+	 * PLEASE NOTE WELL: The following offset values
+	 * ("ipa-base", "gsi-base", and "intctrl-base") are used to
+	 * calculate offsets relative to the PCI BAR0 address provided
+	 * by the PCI probe.  After their use to calculate the
+	 * offsets, they are not used again, since PCI ultimately
+	 * dictates where things live.
+	 */
+	{
+		IPA_BASE_OFFSET_3_5_1,
+		(IPA_BASE_OFFSET_3_5_1 + IPA_BASE_SIZE_3_5_1),
+		"ipa-base",
+		IORESOURCE_MEM,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	{
+		GSI_BASE_OFFSET_3_5_1,
+		(GSI_BASE_OFFSET_3_5_1 + GSI_BASE_SIZE_3_5_1),
+		"gsi-base",
+		IORESOURCE_MEM,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	/*
+	 * The following entry is germane only to the emulator
+	 * environment.  It is needed to locate the emulator's PCI
+	 * interrupt controller...
+	 */
+	{
+		INTCTRL_OFFSET,
+		(INTCTRL_OFFSET + INTCTRL_SIZE),
+		"intctrl-base",
+		IORESOURCE_MEM,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	{
+		IPA_PIPE_MEM_START_OFST,
+		(IPA_PIPE_MEM_START_OFST + IPA_PIPE_MEM_SIZE),
+		"ipa-pipe-mem",
+		IORESOURCE_MEM,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	{
+		0,
+		0,
+		"gsi-irq",
+		IORESOURCE_IRQ,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	{
+		0,
+		0,
+		"ipa-irq",
+		IORESOURCE_IRQ,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+};
+
+struct dtsi_replacement_resource_table
+ipa3_plat_drv_resource_table[] = {
+	{ ipa3_plat_drv_resource_3_5_1,
+	  ARRAY_SIZE(ipa3_plat_drv_resource_3_5_1) },
+	{ ipa3_plat_drv_resource_4_0,
+	  ARRAY_SIZE(ipa3_plat_drv_resource_4_0) },
+};
+
+/*
+ * The following code uses the data above...
+ */
+static u32 emulator_type_to_index(void)
+{
+	/*
+	 * Use the input parameter to the IPA driver loadable module,
+	 * which specifies the type of hardware the driver is running
+	 * on.
+	 */
+	u32 index = DTSI_INDEX_4_0;
+	uint emulation_type = ipa3_get_emulation_type();
+
+	switch (emulation_type) {
+	case IPA_HW_v3_5_1:
+		index = DTSI_INDEX_3_5_1;
+		break;
+	case IPA_HW_v4_0:
+		index = DTSI_INDEX_4_0;
+		break;
+	default:
+		break;
+	}
+
+	IPADBG("emulation_type(%u) emulation_index(%u)\n",
+	       emulation_type, index);
+
+	return index;
+}
+
+/* From include/linux/of.h */
+/**
+ * emulator_of_property_read_bool - Find from a property
+ * @np:         device node from which the property value is to be read.
+ * @propname:   name of the property to be searched.
+ *
+ * Search for a property in a device node.
+ * Returns true if the property exists, false otherwise.
+ */
+bool emulator_of_property_read_bool(
+	const struct device_node *np,
+	const char *propname)
+{
+	u16 i;
+	u32 index;
+	struct dtsi_replacement_bool *ipa3_plat_drv_boolP;
+
+	/*
+	 * Get the index for the type of hardware we're running on.
+	 * This is used as a table index.
+	 */
+	index = emulator_type_to_index();
+	if (index >= ARRAY_SIZE(ipa3_plat_drv_bool_table)) {
+		IPADBG(
+		    "Did not find ipa3_plat_drv_bool_table for index %u\n",
+		    index);
+		return false;
+	}
+
+	ipa3_plat_drv_boolP =
+	    ipa3_plat_drv_bool_table[index].p_table;
+
+	for (i = 0;
+	     i < ipa3_plat_drv_bool_table[index].num_entries;
+	     i++) {
+		if (strsame(ipa3_plat_drv_boolP[i].key, propname)) {
+			IPADBG(
+			    "Found value %u for propname %s index %u\n",
+			    ipa3_plat_drv_boolP[i].value,
+			    propname,
+			    index);
+			return ipa3_plat_drv_boolP[i].value;
+		}
+	}
+
+	IPADBG("Did not find match for propname %s index %u\n",
+	       propname,
+	       index);
+
+	return false;
+}
+
+/* From include/linux/of.h */
+int emulator_of_property_read_u32(
+	const struct device_node *np,
+	const char *propname,
+	u32 *out_value)
+{
+	u16 i;
+	u32 index;
+	struct dtsi_replacement_u32 *ipa3_plat_drv_u32P;
+
+	/*
+	 * Get the index for the type of hardware we're running on.
+	 * This is used as a table index.
+	 */
+	index = emulator_type_to_index();
+	if (index >= ARRAY_SIZE(ipa3_plat_drv_u32_table)) {
+		IPADBG(
+		    "Did not find ipa3_plat_drv_u32_table for index %u\n",
+		    index);
+		return false;
+	}
+
+	ipa3_plat_drv_u32P =
+	    ipa3_plat_drv_u32_table[index].p_table;
+
+	for (i = 0;
+	     i < ipa3_plat_drv_u32_table[index].num_entries;
+	     i++) {
+		if (strsame(ipa3_plat_drv_u32P[i].key, propname)) {
+			*out_value = ipa3_plat_drv_u32P[i].value;
+			IPADBG(
+			    "Found value %u for propname %s index %u\n",
+			    ipa3_plat_drv_u32P[i].value,
+			    propname,
+			    index);
+			return 0;
+		}
+	}
+
+	IPADBG("Did not find match for propname %s index %u\n",
+	       propname,
+	       index);
+
+	return -EINVAL;
+}
+
+/* From include/linux/of.h */
+/**
+ * emulator_of_property_read_u32_array - Find and read an array of 32
+ * bit integers from a property.
+ *
+ * @np:         device node from which the property value is to be read.
+ * @propname:   name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz:         number of array elements to read
+ *
+ * Search for a property in a device node and read 32-bit value(s) from
+ * it. Returns 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_values is modified only if a valid u32 value can be decoded.
+ */
+int emulator_of_property_read_u32_array(
+	const struct device_node *np,
+	const char *propname,
+	u32 *out_values,
+	size_t sz)
+{
+	u16 i;
+	u32 index;
+	struct dtsi_replacement_u32_array *u32_arrayP;
+
+	/*
+	 * Get the index for the type of hardware we're running on.
+	 * This is used as a table index.
+	 */
+	index = emulator_type_to_index();
+	if (index >= ARRAY_SIZE(ipa3_plat_drv_u32_array_table)) {
+		IPADBG(
+		    "Did not find ipa3_plat_drv_u32_array_table for index %u\n",
+		    index);
+		return false;
+	}
+
+	u32_arrayP =
+		ipa3_plat_drv_u32_array_table[index].p_table;
+	for (i = 0;
+	     i < ipa3_plat_drv_u32_array_table[index].num_entries;
+	     i++) {
+		if (strsame(
+			u32_arrayP[i].key, propname)) {
+			u32 num_elements =
+			    u32_arrayP[i].num_elements;
+			u32 *p_element =
+			    &u32_arrayP[i].p_value[0];
+			size_t j = 0;
+
+			if (num_elements > sz) {
+				IPAERR(
+				    "Found array of %u values for propname %s; only room for %u elements in copy buffer\n",
+				    num_elements,
+				    propname,
+				    (unsigned int) sz);
+				return -EOVERFLOW;
+			}
+
+			while (j++ < num_elements)
+				*out_values++ = *p_element++;
+
+			IPADBG(
+			    "Found array of values starting with %u for propname %s index %u\n",
+			    u32_arrayP[i].p_value[0],
+			    propname,
+			    index);
+
+			return 0;
+		}
+	}
+
+	IPADBG("Did not find match for propname %s index %u\n",
+	       propname,
+	       index);
+
+	return -EINVAL;
+}
+
+/* From drivers/base/platform.c */
+/**
+ * emulator_platform_get_resource_byname - get a resource for a device by name
+ * @dev: platform device
+ * @type: resource type
+ * @name: resource name
+ */
+struct resource *emulator_platform_get_resource_byname(
+	struct platform_device *dev,
+	unsigned int type,
+	const char *name)
+{
+	u16 i;
+	u32 index;
+	struct resource *ipa3_plat_drv_resourceP;
+
+	/*
+	 * Get the index for the type of hardware we're running on.
+	 * This is used as a table index.
+	 */
+	index = emulator_type_to_index();
+	if (index >= ARRAY_SIZE(ipa3_plat_drv_resource_table)) {
+		IPADBG(
+		    "Did not find ipa3_plat_drv_resource_table for index %u\n",
+		    index);
+		return false;
+	}
+
+	ipa3_plat_drv_resourceP =
+		ipa3_plat_drv_resource_table[index].p_table;
+	for (i = 0;
+	     i < ipa3_plat_drv_resource_table[index].num_entries;
+	     i++) {
+		struct resource *r = &ipa3_plat_drv_resourceP[i];
+
+		if (type == resource_type(r) && strsame(r->name, name)) {
+			IPADBG(
+			    "Found start 0x%x size %u for name %s index %u\n",
+			    (unsigned int) (r->start),
+			    (unsigned int) (resource_size(r)),
+			    name,
+			    index);
+			return r;
+		}
+	}
+
+	IPADBG("Did not find match for name %s index %u\n",
+	       name,
+	       index);
+
+	return NULL;
+}
+
+/* From drivers/of/base.c */
+/**
+ * emulator_of_property_count_elems_of_size - Count the number of
+ * elements in a property
+ *
+ * @np:         device node from which the property value is to
+ *              be read. Not used.
+ * @propname:   name of the property to be searched.
+ * @elem_size:  size of the individual element
+ *
+ * Search for a property and count the number of elements of size
+ * elem_size in it. Returns number of elements on success, -EINVAL if
+ * the property does not exist or its length does not match a multiple
+ * of elem_size and -ENODATA if the property does not have a value.
+ */
+int emulator_of_property_count_elems_of_size(
+	const struct device_node *np,
+	const char *propname,
+	int elem_size)
+{
+	u32 index;
+
+	/*
+	 * Get the index for the type of hardware we're running on.
+	 * This is used as a table index.
+	 */
+	index = emulator_type_to_index();
+
+	/*
+	 * Use elem_size to determine which table to search for the
+	 * specified property name
+	 */
+	if (elem_size == sizeof(u32)) {
+		u16 i;
+		struct dtsi_replacement_u32_array *u32_arrayP;
+
+		if (index >= ARRAY_SIZE(ipa3_plat_drv_u32_array_table)) {
+			IPADBG(
+			    "Did not find ipa3_plat_drv_u32_array_table for index %u\n",
+			    index);
+			return false;
+		}
+
+		u32_arrayP =
+			ipa3_plat_drv_u32_array_table[index].p_table;
+
+		for (i = 0;
+		     i < ipa3_plat_drv_u32_array_table[index].num_entries;
+		     i++) {
+			if (strsame(u32_arrayP[i].key, propname)) {
+				if (u32_arrayP[i].p_value == NULL) {
+					IPADBG(
+					    "Found no elements for propname %s index %u\n",
+					    propname,
+					    index);
+					return -ENODATA;
+				}
+
+				IPADBG(
+				    "Found %u elements for propname %s index %u\n",
+				    u32_arrayP[i].num_elements,
+				    propname,
+				    index);
+
+				return u32_arrayP[i].num_elements;
+			}
+		}
+
+		IPADBG(
+		    "Found no match in table with elem_size %d for propname %s index %u\n",
+		    elem_size,
+		    propname,
+		    index);
+
+		return -EINVAL;
+	}
+
+	IPAERR(
+	    "Found no tables with element size %u to search for propname %s index %u\n",
+	    elem_size,
+	    propname,
+	    index);
+
+	return -EINVAL;
+}
+
+int emulator_of_property_read_variable_u32_array(
+	const struct device_node *np,
+	const char *propname,
+	u32 *out_values,
+	size_t sz_min,
+	size_t sz_max)
+{
+	return emulator_of_property_read_u32_array(
+	    np, propname, out_values, sz_max);
+}
+
+resource_size_t emulator_resource_size(const struct resource *res)
+{
+	return res->end - res->start;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_emulation_stubs.h b/drivers/platform/msm/ipa/ipa_v3/ipa_emulation_stubs.h
new file mode 100644
index 0000000..cf4c7c9
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_emulation_stubs.h
@@ -0,0 +1,125 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#if !defined(_IPA_EMULATION_STUBS_H_)
+# define _IPA_EMULATION_STUBS_H_
+
+# define clk_get(x, y) ((struct clk *) -(MAX_ERRNO+1))
+# define clk_put(x)                do { } while (0)
+# define clk_prepare(x)            do { } while (0)
+# define clk_enable(x)             do { } while (0)
+# define clk_set_rate(x, y)        do { } while (0)
+# define clk_disable_unprepare(x)  do { } while (0)
+
+# define outer_flush_range(x, y)
+# define __flush_dcache_area(x, y)
+# define __cpuc_flush_dcache_area(x, y) __flush_dcache_area(x, y)
+
+/* Point several API calls to these new EMULATION functions */
+# define of_property_read_bool(np, propname)	 \
+	emulator_of_property_read_bool(NULL, propname)
+# define of_property_read_u32(np, propname, out_value)   \
+	emulator_of_property_read_u32(NULL, propname, out_value)
+# define of_property_read_u32_array(np, propname, out_values, sz)	\
+	emulator_of_property_read_u32_array(NULL, propname, out_values, sz)
+# define platform_get_resource_byname(dev, type, name) \
+	emulator_platform_get_resource_byname(NULL, type, name)
+# define of_property_count_elems_of_size(np, propname, elem_size) \
+	emulator_of_property_count_elems_of_size(NULL, propname, elem_size)
+# define of_property_read_variable_u32_array( \
+	np, propname, out_values, sz_min, sz_max) \
+	emulator_of_property_read_variable_u32_array( \
+		NULL, propname, out_values, sz_min, sz_max)
+# define resource_size(res) \
+	emulator_resource_size(res)
+
+/**
+ * emulator_of_property_read_bool - Find from a property
+ * @np:         device node used to find the property value. (not used)
+ * @propname:   name of the property to be searched.
+ *
+ * Search for a property in a device node.
+ * Returns true if the property exists, false otherwise.
+ */
+bool emulator_of_property_read_bool(
+	const struct device_node *np,
+	const char *propname);
+
+int emulator_of_property_read_u32(
+	const struct device_node *np,
+	const char *propname,
+	u32 *out_value);
+
+/**
+ * emulator_of_property_read_u32_array - Find and read an array of 32
+ * bit integers from a property.
+ *
+ * @np:         device node used to find the property value. (not used)
+ * @propname:   name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz:         number of array elements to read
+ *
+ * Search for a property in a device node and read 32-bit value(s) from
+ * it. Returns 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_values is modified only if a valid u32 value can be decoded.
+ */
+int emulator_of_property_read_u32_array(
+	const struct device_node *np,
+	const char *propname,
+	u32 *out_values,
+	size_t sz);
+
+/**
+ * emulator_platform_get_resource_byname - get a resource for a device
+ * by name
+ *
+ * @dev: platform device
+ * @type: resource type
+ * @name: resource name
+ */
+struct resource *emulator_platform_get_resource_byname(
+	struct platform_device *dev,
+	unsigned int type,
+	const char *name);
+
+/**
+ * emulator_of_property_count_elems_of_size - Count the number of
+ * elements in a property
+ *
+ * @np:         device node used to find the property value. (not used)
+ * @propname:   name of the property to be searched.
+ * @elem_size:  size of the individual element
+ *
+ * Search for a property and count the number of elements of size
+ * elem_size in it. Returns number of elements on success, -EINVAL if
+ * the property does not exist or its length does not match a multiple
+ * of elem_size and -ENODATA if the property does not have a value.
+ */
+int emulator_of_property_count_elems_of_size(
+	const struct device_node *np,
+	const char *propname,
+	int elem_size);
+
+int emulator_of_property_read_variable_u32_array(
+	const struct device_node *np,
+	const char *propname,
+	u32 *out_values,
+	size_t sz_min,
+	size_t sz_max);
+
+resource_size_t emulator_resource_size(
+	const struct resource *res);
+
+#endif /* #if !defined(_IPA_EMULATION_STUBS_H_) */
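
ipa_emulation_stubs.h reroutes the usual DT/platform calls to the emulator_* implementations purely with preprocessor macros, so the rest of the driver compiles unchanged on the emulator. Below is a stripped-down illustration of that macro-redirection idea under an assumed build flag; read_chip_id()/emu_read_chip_id() and EMULATION_BUILD are invented names for the sketch, not part of the driver.

#include <stdio.h>

#ifdef EMULATION_BUILD
static unsigned int emu_read_chip_id(void)
{
	return 0x40;	/* canned value supplied by the emulation layer */
}
/* Point the normal API name at the emulator stub, the same way the
 * header reroutes of_property_read_*() and resource_size(). */
# define read_chip_id() emu_read_chip_id()
#else
static unsigned int read_chip_id(void)
{
	return 0x35;	/* pretend this queried real hardware */
}
#endif

int main(void)
{
	printf("chip id: 0x%x\n", read_chip_id());
	return 0;
}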
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index 29fd547..b090151 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -61,8 +61,10 @@
 	gen_params.rule = (const struct ipa_flt_rule *)&entry->rule;
 
 	res = ipahal_flt_generate_hw_rule(&gen_params, &entry->hw_len, buf);
-	if (res)
+	if (res) {
 		IPAERR_RL("failed to generate flt h/w rule\n");
+		return res;
+	}
 
 	return 0;
 }
@@ -1012,41 +1014,12 @@
 		goto error;
 	}
 
+	if (__ipa_validate_flt_rule(&frule->rule, &rt_tbl, ip))
+		goto error;
+
 	if (entry->rt_tbl)
 		entry->rt_tbl->ref_cnt--;
 
-	if (frule->rule.action != IPA_PASS_TO_EXCEPTION) {
-		if (!frule->rule.eq_attrib_type) {
-			if (!frule->rule.rt_tbl_hdl) {
-				IPAERR_RL("invalid RT tbl\n");
-				goto error;
-			}
-
-			rt_tbl = ipa3_id_find(frule->rule.rt_tbl_hdl);
-			if (rt_tbl == NULL) {
-				IPAERR_RL("RT tbl not found\n");
-				goto error;
-			}
-
-			if (rt_tbl->cookie != IPA_RT_TBL_COOKIE) {
-				IPAERR_RL("RT table cookie is invalid\n");
-				goto error;
-			}
-		} else {
-			if (frule->rule.rt_tbl_idx > ((ip == IPA_IP_v4) ?
-				IPA_MEM_PART(v4_modem_rt_index_hi) :
-				IPA_MEM_PART(v6_modem_rt_index_hi))) {
-				IPAERR_RL("invalid RT tbl\n");
-				goto error;
-			}
-		}
-	} else {
-		if (frule->rule.rt_tbl_idx > 0) {
-			IPAERR_RL("invalid RT tbl\n");
-			goto error;
-		}
-	}
-
 	entry->rule = frule->rule;
 	entry->rt_tbl = rt_tbl;
 	if (entry->rt_tbl)
@@ -1445,8 +1418,16 @@
 			}
 		}
 	}
-	mutex_unlock(&ipa3_ctx->lock);
 
+	/* commit the change to IPA-HW */
+	if (ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4) ||
+		ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6)) {
+		IPAERR("fail to commit flt-rule\n");
+		WARN_ON_RATELIMIT_IPA(1);
+		mutex_unlock(&ipa3_ctx->lock);
+		return -EPERM;
+	}
+	mutex_unlock(&ipa3_ctx->lock);
 	return 0;
 }
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index cecbef0..4eb8edb 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -996,8 +996,9 @@
 	struct ipa_hdr_offset_entry *off_next;
 	struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_entry;
 	struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_next;
-	int i, end = 0;
-	bool user_rule = false;
+	struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
+	struct ipa3_hdr_proc_ctx_tbl *htbl_proc = &ipa3_ctx->hdr_proc_ctx_tbl;
+	int i;
 
 	/*
 	 * issue a reset on the routing module since routing rules point to
@@ -1035,9 +1036,6 @@
 			return -EFAULT;
 		}
 
-		if (entry->ipacm_installed)
-			user_rule = true;
-
 		if (!user_only || entry->ipacm_installed) {
 			if (entry->is_hdr_proc_ctx) {
 				dma_unmap_single(ipa3_ctx->pdev,
@@ -1045,9 +1043,15 @@
 					entry->hdr_len,
 					DMA_TO_DEVICE);
 				entry->proc_ctx = NULL;
+			} else {
+				/* move the offset entry to free list */
+				entry->offset_entry->ipacm_installed = 0;
+				list_move(&entry->offset_entry->link,
+				&htbl->head_free_offset_list[
+					entry->offset_entry->bin]);
 			}
 			list_del(&entry->link);
-			ipa3_ctx->hdr_tbl.hdr_cnt--;
+			htbl->hdr_cnt--;
 			entry->ref_cnt = 0;
 			entry->cookie = 0;
 
@@ -1056,53 +1060,37 @@
 			kmem_cache_free(ipa3_ctx->hdr_cache, entry);
 		}
 	}
-	for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
-		list_for_each_entry_safe(off_entry, off_next,
+
+	/* only clean up offset_list and free_offset_list on global reset */
+	if (!user_only) {
+		for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
+			list_for_each_entry_safe(off_entry, off_next,
 					 &ipa3_ctx->hdr_tbl.head_offset_list[i],
 					 link) {
-
-			/*
-			 * do not remove the default exception header which is
-			 * at offset 0
-			 */
-			if (off_entry->offset == 0)
-				continue;
-
-			if (!user_only ||
-					off_entry->ipacm_installed) {
+				/**
+				 * do not remove the default exception
+				 * header which is at offset 0
+				 */
+				if (off_entry->offset == 0)
+					continue;
 				list_del(&off_entry->link);
 				kmem_cache_free(ipa3_ctx->hdr_offset_cache,
 					off_entry);
-			} else {
-				if (off_entry->offset +
-					ipa_hdr_bin_sz[off_entry->bin] > end) {
-					end = off_entry->offset +
-						ipa_hdr_bin_sz[off_entry->bin];
-					IPADBG("replace end = %d\n", end);
-				}
 			}
-		}
-		list_for_each_entry_safe(off_entry, off_next,
+			list_for_each_entry_safe(off_entry, off_next,
 				&ipa3_ctx->hdr_tbl.head_free_offset_list[i],
 				link) {
-
-			if (!user_only ||
-					off_entry->ipacm_installed) {
 				list_del(&off_entry->link);
 				kmem_cache_free(ipa3_ctx->hdr_offset_cache,
 					off_entry);
 			}
 		}
+		/* there is one header of size 8 */
+		ipa3_ctx->hdr_tbl.end = 8;
+		ipa3_ctx->hdr_tbl.hdr_cnt = 1;
 	}
 
-	IPADBG("hdr_tbl.end = %d\n", end);
-	if (user_rule) {
-		ipa3_ctx->hdr_tbl.end = end;
-		IPADBG("hdr_tbl.end = %d\n", end);
-	}
 	IPADBG("reset hdr proc ctx\n");
-	user_rule = false;
-	end = 0;
 	list_for_each_entry_safe(
 		ctx_entry,
 		ctx_next,
@@ -1115,13 +1103,14 @@
 			return -EFAULT;
 		}
 
-		if (entry->ipacm_installed)
-			user_rule = true;
-
 		if (!user_only ||
 				ctx_entry->ipacm_installed) {
+			/* move the offset entry to appropriate free list */
+			list_move(&ctx_entry->offset_entry->link,
+				&htbl_proc->head_free_offset_list[
+					ctx_entry->offset_entry->bin]);
 			list_del(&ctx_entry->link);
-			ipa3_ctx->hdr_proc_ctx_tbl.proc_ctx_cnt--;
+			htbl_proc->proc_ctx_cnt--;
 			ctx_entry->ref_cnt = 0;
 			ctx_entry->cookie = 0;
 
@@ -1131,48 +1120,39 @@
 				ctx_entry);
 		}
 	}
-	for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
-		list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
+	/* only clean up offset_list and free_offset_list on global reset */
+	if (!user_only) {
+		for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
+			list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
 				&ipa3_ctx->hdr_proc_ctx_tbl.head_offset_list[i],
 				link) {
-
-			if (!user_only ||
-					ctx_off_entry->ipacm_installed) {
 				list_del(&ctx_off_entry->link);
 				kmem_cache_free(
 					ipa3_ctx->hdr_proc_ctx_offset_cache,
 					ctx_off_entry);
-			} else {
-				if (ctx_off_entry->offset +
-					ipa_hdr_bin_sz[ctx_off_entry->bin]
-					> end) {
-					end = ctx_off_entry->offset +
-					ipa_hdr_bin_sz[ctx_off_entry->bin];
-					IPADBG("replace hdr_proc as %d\n", end);
-				}
 			}
-		}
-		list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
-			&ipa3_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i],
-			link) {
-
-			if (!user_only ||
-					ctx_off_entry->ipacm_installed) {
+			list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
+				&ipa3_ctx->hdr_proc_ctx_tbl.
+				head_free_offset_list[i], link) {
 				list_del(&ctx_off_entry->link);
 				kmem_cache_free(
 					ipa3_ctx->hdr_proc_ctx_offset_cache,
 					ctx_off_entry);
 			}
 		}
+		ipa3_ctx->hdr_proc_ctx_tbl.end = 0;
+		ipa3_ctx->hdr_proc_ctx_tbl.proc_ctx_cnt = 0;
 	}
 
-	IPADBG("hdr_proc_tbl.end = %d\n", end);
-	if (user_rule) {
-		ipa3_ctx->hdr_proc_ctx_tbl.end = end;
-		IPADBG("hdr_proc_tbl.end = %d\n", end);
+	/* commit the change to IPA-HW */
+	if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+		IPAERR("fail to commit hdr\n");
+		WARN_ON_RATELIMIT_IPA(1);
+		mutex_unlock(&ipa3_ctx->lock);
+		return -EFAULT;
 	}
+
 	mutex_unlock(&ipa3_ctx->lock);
-
 	return 0;
 }
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 56b5740..43f4c74 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -203,6 +203,188 @@
 #define IPA_WDI_TX_DB_RES			7
 #define IPA_WDI_MAX_RES				8
 
+#ifdef CONFIG_ARM64
+/* Outer caches unsupported on ARM64 platforms */
+# define outer_flush_range(x, y)
+# define __cpuc_flush_dcache_area __flush_dcache_area
+#endif
+
+#define IPA_GPIO_IN_QUERY_CLK_IDX 0
+#define IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX 0
+#define IPA_GPIO_OUT_CLK_VOTE_IDX 1
+
+#define IPA_SUMMING_THRESHOLD (0x10)
+#define IPA_PIPE_MEM_START_OFST (0x0)
+#define IPA_PIPE_MEM_SIZE (0x0)
+#define IPA_MOBILE_AP_MODE(x) (x == IPA_MODE_MOBILE_AP_ETH || \
+			       x == IPA_MODE_MOBILE_AP_WAN || \
+			       x == IPA_MODE_MOBILE_AP_WLAN)
+#define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL)
+#define IPA_A5_MUX_HEADER_LENGTH (8)
+
+#define IPA_AGGR_MAX_STR_LENGTH (10)
+
+#define CLEANUP_TAG_PROCESS_TIMEOUT 500
+
+#define IPA_AGGR_STR_IN_BYTES(str) \
+	(strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)
+
+#define IPA_TRANSPORT_PROD_TIMEOUT_MSEC 100
+
+#define IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE 2048
+
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_EP 0
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE 1
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE 2
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL 3
+
+#define IPA_MHI_GSI_EVENT_RING_ID_START 10
+#define IPA_MHI_GSI_EVENT_RING_ID_END 12
+
+#define IPA_SMEM_SIZE (8 * 1024)
+
+#define IPA_GSI_CHANNEL_HALT_MIN_SLEEP 5000
+#define IPA_GSI_CHANNEL_HALT_MAX_SLEEP 10000
+#define IPA_GSI_CHANNEL_HALT_MAX_TRY 10
+
+/* round addresses to the closest page per SMMU requirements */
+#define IPA_SMMU_ROUND_TO_PAGE(iova, pa, size, iova_p, pa_p, size_p) \
+	do { \
+		(iova_p) = rounddown((iova), PAGE_SIZE); \
+		(pa_p) = rounddown((pa), PAGE_SIZE); \
+		(size_p) = roundup((size) + (pa) - (pa_p), PAGE_SIZE); \
+	} while (0)
+
+
+/* The relative location in /lib/firmware where the FWs will reside */
+#define IPA_FWS_PATH         "ipa/ipa_fws.elf"
+/*
+ * The following paths are used when building the system for the
+ * emulation environment or when IPA_EMULATION_COMPILE == 1.
+ *
+ * As new hardware platforms are added into the emulation environment,
+ * please add the appropriate paths here for their firmwares.
+ */
+#define IPA_FWS_PATH_4_0     "ipa/4.0/ipa_fws.elf"
+#define IPA_FWS_PATH_3_5_1   "ipa/3.5.1/ipa_fws.elf"
+
+#ifdef CONFIG_COMPAT
+#define IPA_IOC_ADD_HDR32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_HDR, \
+					compat_uptr_t)
+#define IPA_IOC_DEL_HDR32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_HDR, \
+					compat_uptr_t)
+#define IPA_IOC_ADD_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_RT_RULE, \
+					compat_uptr_t)
+#define IPA_IOC_DEL_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_RT_RULE, \
+					compat_uptr_t)
+#define IPA_IOC_ADD_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_FLT_RULE, \
+					compat_uptr_t)
+#define IPA_IOC_DEL_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_FLT_RULE, \
+					compat_uptr_t)
+#define IPA_IOC_GET_RT_TBL32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_RT_TBL, \
+				compat_uptr_t)
+#define IPA_IOC_COPY_HDR32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_COPY_HDR, \
+				compat_uptr_t)
+#define IPA_IOC_QUERY_INTF32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_INTF, \
+				compat_uptr_t)
+#define IPA_IOC_QUERY_INTF_TX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_INTF_TX_PROPS, \
+				compat_uptr_t)
+#define IPA_IOC_QUERY_INTF_RX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_QUERY_INTF_RX_PROPS, \
+					compat_uptr_t)
+#define IPA_IOC_QUERY_INTF_EXT_PROPS32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_QUERY_INTF_EXT_PROPS, \
+					compat_uptr_t)
+#define IPA_IOC_GET_HDR32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_HDR, \
+				compat_uptr_t)
+#define IPA_IOC_ALLOC_NAT_MEM32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ALLOC_NAT_MEM, \
+				compat_uptr_t)
+#define IPA_IOC_ALLOC_NAT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ALLOC_NAT_TABLE, \
+				compat_uptr_t)
+#define IPA_IOC_ALLOC_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ALLOC_IPV6CT_TABLE, \
+				compat_uptr_t)
+#define IPA_IOC_V4_INIT_NAT32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_V4_INIT_NAT, \
+				compat_uptr_t)
+#define IPA_IOC_INIT_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_INIT_IPV6CT_TABLE, \
+				compat_uptr_t)
+#define IPA_IOC_TABLE_DMA_CMD32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_TABLE_DMA_CMD, \
+				compat_uptr_t)
+#define IPA_IOC_V4_DEL_NAT32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_V4_DEL_NAT, \
+				compat_uptr_t)
+#define IPA_IOC_DEL_NAT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_DEL_NAT_TABLE, \
+				compat_uptr_t)
+#define IPA_IOC_DEL_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_DEL_IPV6CT_TABLE, \
+				compat_uptr_t)
+#define IPA_IOC_NAT_MODIFY_PDN32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NAT_MODIFY_PDN, \
+				compat_uptr_t)
+#define IPA_IOC_GET_NAT_OFFSET32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_NAT_OFFSET, \
+				compat_uptr_t)
+#define IPA_IOC_PULL_MSG32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_PULL_MSG, \
+				compat_uptr_t)
+#define IPA_IOC_RM_ADD_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_RM_ADD_DEPENDENCY, \
+				compat_uptr_t)
+#define IPA_IOC_RM_DEL_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_RM_DEL_DEPENDENCY, \
+				compat_uptr_t)
+#define IPA_IOC_GENERATE_FLT_EQ32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GENERATE_FLT_EQ, \
+				compat_uptr_t)
+#define IPA_IOC_QUERY_RT_TBL_INDEX32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_RT_TBL_INDEX, \
+				compat_uptr_t)
+#define IPA_IOC_WRITE_QMAPID32  _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_WRITE_QMAPID, \
+				compat_uptr_t)
+#define IPA_IOC_MDFY_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_MDFY_FLT_RULE, \
+				compat_uptr_t)
+#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD, \
+				compat_uptr_t)
+#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL, \
+				compat_uptr_t)
+#define IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED, \
+					compat_uptr_t)
+#define IPA_IOC_ADD_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ADD_HDR_PROC_CTX, \
+				compat_uptr_t)
+#define IPA_IOC_DEL_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_DEL_HDR_PROC_CTX, \
+				compat_uptr_t)
+#define IPA_IOC_MDFY_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_MDFY_RT_RULE, \
+				compat_uptr_t)
+#endif /* #ifdef CONFIG_COMPAT */
+
+#define IPA_TZ_UNLOCK_ATTRIBUTE 0x0C0311
+#define TZ_MEM_PROTECT_REGION_ID 0x10
+
 struct ipa3_active_client_htable_entry {
 	struct hlist_node list;
 	char id_string[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN];
@@ -558,6 +740,8 @@
  * @qmi_request_sent: Indicates whether QMI request to enable clear data path
  *					request is sent or not.
  * @napi_enabled: when true, IPA call client callback to start polling
+ * @client_lock_unlock: callback function to take mutex lock/unlock for USB
+ *				clients
  */
 struct ipa3_ep_context {
 	int valid;
@@ -590,6 +774,8 @@
 	u32 eot_in_poll_err;
 	bool ep_delay_set;
 
+	int (*client_lock_unlock)(bool is_lock);
+
 	/* sys MUST be the last element of this struct */
 	struct ipa3_sys_context *sys;
 };
@@ -912,11 +1098,13 @@
  * @IPA_HW_Normal: Regular IPA hardware
  * @IPA_HW_Virtual: IPA hardware supporting virtual memory allocation
  * @IPA_HW_PCIE: IPA hardware supporting memory allocation over PCIE Bridge
+ * @IPA_HW_Emulation: IPA emulation hardware
  */
 enum ipa3_hw_mode {
-	IPA_HW_MODE_NORMAL  = 0,
-	IPA_HW_MODE_VIRTUAL = 1,
-	IPA_HW_MODE_PCIE    = 2
+	IPA_HW_MODE_NORMAL    = 0,
+	IPA_HW_MODE_VIRTUAL   = 1,
+	IPA_HW_MODE_PCIE      = 2,
+	IPA_HW_MODE_EMULATION = 3,
 };
 
 enum ipa3_config_this_ep {
@@ -1207,6 +1395,7 @@
  * @mode: IPA operating mode
  * @mmio: iomem
  * @ipa_wrapper_base: IPA wrapper base address
+ * @ipa_wrapper_size: size of the memory pointed to by ipa_wrapper_base
  * @hdr_tbl: IPA header table
  * @hdr_proc_ctx_tbl: IPA processing context table
  * @rt_tbl_set: list of routing tables each of which is a list of rules
@@ -1415,6 +1604,7 @@
 	struct mutex ipa_cne_evt_lock;
 	bool use_ipa_pm;
 	bool vlan_mode_iface[IPA_VLAN_IF_MAX];
+	bool wdi_over_pcie;
 };
 
 struct ipa3_plat_drv_res {
@@ -1423,6 +1613,9 @@
 	u32 ipa_mem_size;
 	u32 transport_mem_base;
 	u32 transport_mem_size;
+	u32 emulator_intcntrlr_mem_base;
+	u32 emulator_intcntrlr_mem_size;
+	u32 emulator_irq;
 	u32 ipa_irq;
 	u32 transport_irq;
 	u32 ipa_pipe_mem_start_ofst;
@@ -1446,6 +1639,7 @@
 	struct ipa_tz_unlock_reg_info *ipa_tz_unlock_reg;
 	bool use_ipa_pm;
 	struct ipa_pm_init_params pm_init;
+	bool wdi_over_pcie;
 };
 
 /**
@@ -1726,6 +1920,9 @@
 int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id);
 
 void ipa3_xdci_ep_delay_rm(u32 clnt_hdl);
+void ipa3_register_lock_unlock_callback(int (*client_cb)(bool), u32 ipa_ep_idx);
+void ipa3_deregister_lock_unlock_callback(u32 ipa_ep_idx);
+void ipa3_set_usb_prod_pipe_delay(void);
 
 int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
 	bool should_force_clear, u32 qmi_req_id, bool is_dpl);
@@ -2063,6 +2260,8 @@
  */
 int ipa3_get_ep_mapping(enum ipa_client_type client);
 
+enum gsi_prefetch_mode ipa_get_ep_prefetch_mode(enum ipa_client_type client);
+
 bool ipa3_is_ready(void);
 
 void ipa3_proxy_clk_vote(void);
@@ -2355,6 +2554,10 @@
 void ipa3_inc_acquire_wakelock(void);
 void ipa3_dec_release_wakelock(void);
 int ipa3_load_fws(const struct firmware *firmware, phys_addr_t gsi_mem_base);
+int emulator_load_fws(
+	const struct firmware *firmware,
+	u32 transport_mem_base,
+	u32 transport_mem_size);
 int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data);
 const char *ipa_hw_error_str(enum ipa3_hw_errors err_type);
 int ipa_gsi_ch20_wa(void);
@@ -2381,4 +2584,9 @@
 void ipa3_init_imm_cmd_desc(struct ipa3_desc *desc,
 	struct ipahal_imm_cmd_pyld *cmd_pyld);
 int ipa3_is_vlan_mode(enum ipa_vlan_ifaces iface, bool *res);
+uint ipa3_get_emulation_type(void);
+int ipa3_get_transport_info(
+	phys_addr_t *phys_addr_ptr,
+	unsigned long *size_ptr);
+irq_handler_t ipa3_get_isr(void);
 #endif /* _IPA3_I_H_ */
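
Among the ipa_i.h additions is IPA_SMMU_ROUND_TO_PAGE, which aligns an IOVA/PA pair down to a page boundary and grows the size so the original range stays covered by whole pages. A quick worked example of that arithmetic in plain user-space C, assuming a 4 KiB page; the sample addresses are made up for illustration.

#include <stdio.h>

#define PAGE_SIZE	0x1000UL
#define ROUNDDOWN(x)	((x) & ~(PAGE_SIZE - 1))
#define ROUNDUP(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long iova = 0x00012678, pa = 0x00032678, size = 0x100;

	/* Align both addresses down to a page boundary... */
	unsigned long iova_p = ROUNDDOWN(iova);	/* 0x12000 */
	unsigned long pa_p   = ROUNDDOWN(pa);	/* 0x32000 */
	/* ...and grow the size so the original [pa, pa + size) range is
	 * still covered: roundup(0x100 + 0x678) = 0x1000 */
	unsigned long size_p = ROUNDUP(size + pa - pa_p);

	printf("iova_p=0x%lx pa_p=0x%lx size_p=0x%lx\n",
	       iova_p, pa_p, size_p);
	return 0;
}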
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
index d69d6ae..46b7434 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -339,6 +339,12 @@
 	ipa3_dec_client_disable_clks_no_block(&log_info);
 	return IRQ_HANDLED;
 }
+
+irq_handler_t ipa3_get_isr(void)
+{
+	return ipa3_isr;
+}
+
 /**
 * ipa3_add_interrupt_handler() - Adds handler to an interrupt type
 * @interrupt:		Interrupt type
@@ -486,21 +492,35 @@
 		return -ENOMEM;
 	}
 
-	res = request_irq(ipa_irq, (irq_handler_t) ipa3_isr,
-				IRQF_TRIGGER_RISING, "ipa", ipa_dev);
-	if (res) {
-		IPAERR("fail to register IPA IRQ handler irq=%d\n", ipa_irq);
-		return -ENODEV;
+	/*
+	 * NOTE:
+	 *
+	 *  We only register an ISR on non-emulator (i.e. real UE)
+	 *  systems.
+	 *
+	 *  On the emulator, emulator_soft_irq_isr() calls ipa3_isr,
+	 *  so no ISR is registered here; instead, we pass the address
+	 *  of ipa3_isr to the GSI layer, where emulator interrupts are
+	 *  handled...
+	 */
+	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) {
+		res = request_irq(ipa_irq, (irq_handler_t) ipa3_isr,
+					IRQF_TRIGGER_RISING, "ipa", ipa_dev);
+		if (res) {
+			IPAERR(
+			    "fail to register IPA IRQ handler irq=%d\n",
+			    ipa_irq);
+			return -ENODEV;
+		}
+		IPADBG("IPA IRQ handler irq=%d registered\n", ipa_irq);
+
+		res = enable_irq_wake(ipa_irq);
+		if (res)
+			IPAERR("fail to enable IPA IRQ wakeup irq=%d res=%d\n",
+				   ipa_irq, res);
+		else
+			IPADBG("IPA IRQ wakeup enabled irq=%d\n", ipa_irq);
 	}
-	IPADBG("IPA IRQ handler irq=%d registered\n", ipa_irq);
-
-	res = enable_irq_wake(ipa_irq);
-	if (res)
-		IPAERR("fail to enable IPA IRQ wakeup irq=%d res=%d\n",
-				ipa_irq, res);
-	else
-		IPADBG("IPA IRQ wakeup enabled irq=%d\n", ipa_irq);
-
 	spin_lock_init(&suspend_wa_lock);
 	return 0;
 }
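
The ipa_interrupts.c change skips request_irq() when running in IPA_HW_MODE_EMULATION and instead exposes the handler through ipa3_get_isr(), so the emulator's interrupt path can invoke it directly. The sketch below reduces that policy to standalone C; hw_mode, register_hw_irq() and the handler type are stand-ins, not kernel APIs.

#include <stdio.h>

enum hw_mode { HW_MODE_NORMAL, HW_MODE_EMULATION };

typedef void (*isr_fn)(void);

static void ipa_isr(void)
{
	printf("IRQ handled\n");
}

static int register_hw_irq(isr_fn fn)
{
	/* placeholder for request_irq() on a real system */
	fn();
	return 0;
}

/* Emulation keeps a pointer instead, mirroring ipa3_get_isr(). */
static isr_fn registered_isr;

static int interrupts_init(enum hw_mode mode)
{
	if (mode != HW_MODE_EMULATION)
		return register_hw_irq(ipa_isr);

	registered_isr = ipa_isr;	/* handed to the emulator later */
	return 0;
}

int main(void)
{
	interrupts_init(HW_MODE_EMULATION);
	if (registered_isr)
		registered_isr();	/* emulator-side dispatch */
	return 0;
}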
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
index 82cd8187..dd97038 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
@@ -56,9 +56,9 @@
 
 
 #define IPA_MHI_FUNC_ENTRY() \
-	IPA_MHI_DBG_LOW("ENTRY\n")
+	IPA_MHI_DBG("ENTRY\n")
 #define IPA_MHI_FUNC_EXIT() \
-	IPA_MHI_DBG_LOW("EXIT\n")
+	IPA_MHI_DBG("EXIT\n")
 
 #define IPA_MHI_MAX_UL_CHANNELS 1
 #define IPA_MHI_MAX_DL_CHANNELS 1
@@ -198,6 +198,7 @@
 	union __packed gsi_channel_scratch ch_scratch;
 	struct ipa3_ep_context *ep;
 	const struct ipa_gsi_ep_config *ep_cfg;
+	bool burst_mode_enabled = false;
 
 	IPA_MHI_FUNC_ENTRY();
 
@@ -280,8 +281,21 @@
 	ch_props.ring_len = params->ch_ctx_host->rlen;
 	ch_props.ring_base_addr = IPA_MHI_HOST_ADDR_COND(
 			params->ch_ctx_host->rbase);
-	ch_props.use_db_eng = GSI_CHAN_DB_MODE;
+
+	if (params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_DEFAULT ||
+		params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_ENABLE) {
+		burst_mode_enabled = true;
+	}
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0 &&
+		!burst_mode_enabled)
+		ch_props.use_db_eng = GSI_CHAN_DIRECT_MODE;
+	else
+		ch_props.use_db_eng = GSI_CHAN_DB_MODE;
+
 	ch_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
+	ch_props.prefetch_mode =
+		ipa_get_ep_prefetch_mode(client);
 	ch_props.low_weight = 1;
 	ch_props.err_cb = params->ch_err_cb;
 	ch_props.chan_user_data = params->channel;
@@ -307,9 +321,9 @@
 		ch_scratch.mhi.outstanding_threshold = 0;
 	}
 	ch_scratch.mhi.oob_mod_threshold = 4;
-	if (params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_DEFAULT ||
-		params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_ENABLE) {
-		ch_scratch.mhi.burst_mode_enabled = true;
+
+	if (burst_mode_enabled) {
+		ch_scratch.mhi.burst_mode_enabled = burst_mode_enabled;
 		ch_scratch.mhi.polling_configuration =
 			ipa3_mhi_get_ch_poll_cfg(client, params->ch_ctx_host,
 				(ch_props.ring_len / ch_props.re_size));
@@ -549,6 +563,7 @@
 	int res;
 	int ipa_ep_idx;
 	struct ipa3_ep_context *ep;
+	union __packed gsi_channel_scratch gsi_ch_scratch;
 
 	IPA_MHI_FUNC_ENTRY();
 
@@ -563,11 +578,34 @@
 		/*
 		 * set polling mode bit to DB mode before
 		 * resuming the channel
+		 *
+		 * For MHI-->IPA pipes:
+		 * when resuming due to transition to M0,
+		 * set the polling mode bit to 0.
+		 * In other cases, restore its value from
+		 * when the channel was stopped.
+		 * Here, after a successful resume the client moves to M0,
+		 * so by default the polling mode bit is set to 0.
+		 *
+		 * For IPA-->MHI pipe:
+		 * always restore the polling mode bit.
 		 */
-		res = gsi_write_channel_scratch(
-			ep->gsi_chan_hdl, ch_scratch);
+
+		res = gsi_read_channel_scratch(
+			ep->gsi_chan_hdl, &gsi_ch_scratch);
 		if (res) {
-			IPA_MHI_ERR("write ch scratch fail %d\n"
+			IPA_MHI_ERR("Read ch scratch fail %d\n"
+				, res);
+			return res;
+		}
+
+		if (IPA_CLIENT_IS_PROD(client))
+			gsi_ch_scratch.mhi.polling_mode = false;
+
+		res = gsi_write_channel_scratch(
+			ep->gsi_chan_hdl, gsi_ch_scratch);
+		if (res) {
+			IPA_MHI_ERR("Write ch scratch fail %d\n"
 				, res);
 			return res;
 		}
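
The MHI resume path now performs a read-modify-write of the GSI channel scratch: it reads the saved scratch, clears the polling-mode bit only for MHI-to-IPA (producer) pipes, and writes the result back rather than overwriting the whole scratch. A compact model of that policy follows; the scratch struct and the read/write helpers are toy stand-ins for the GSI API, not the real gsi_read/write_channel_scratch() signatures.

#include <stdbool.h>
#include <stdio.h>

struct ch_scratch {
	bool polling_mode;
	/* other fields elided */
};

static struct ch_scratch saved = { .polling_mode = true };

static int read_scratch(struct ch_scratch *s)  { *s = saved; return 0; }
static int write_scratch(struct ch_scratch s)  { saved = s;  return 0; }

static int resume_channel(bool is_producer)
{
	struct ch_scratch s;

	if (read_scratch(&s))
		return -1;

	/* Producers resume into M0, so force doorbell (non-polling)
	 * mode; consumers keep whatever was saved at stop time. */
	if (is_producer)
		s.polling_mode = false;

	return write_scratch(s);
}

int main(void)
{
	resume_channel(true);
	printf("polling_mode after resume: %d\n", saved.polling_mode);
	return 0;
}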
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
index b48f2c4..7065e2c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
@@ -1258,6 +1258,11 @@
 		goto bail;
 	}
 
+	if (!ipa3_ctx->nat_mem.dev.is_dev_init) {
+		IPAERR_RL("NAT hasn't been initialized\n");
+		return -EPERM;
+	}
+
 	for (cnt = 0; cnt < dma->entries; ++cnt) {
 		result = ipa3_table_validate_table_dma_one(&dma->dma[cnt]);
 		if (result) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h
index 0772dde..47a03f9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h
@@ -16,7 +16,7 @@
 #include <linux/msm_ipa.h>
 
 /* internal to ipa */
-#define IPA_PM_MAX_CLIENTS 12 /* actual max is value -1 since we start from 1*/
+#define IPA_PM_MAX_CLIENTS 32 /* actual max is value -1 since we start from 1*/
 #define IPA_PM_MAX_EX_CL 64
 #define IPA_PM_THRESHOLD_MAX 5
 #define IPA_PM_EXCEPTION_MAX 2
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
index 3351a33..3210a70 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -399,7 +399,7 @@
 
 static inline void ipa3_q6_handshake_complete(bool ssr_bootup) { }
 
-static inline int ipa3_wwan_set_modem_perf_profile(int throughput)
+static inline int ipa3_wwan_set_modem_perf_profile(int throughput);
 static inline int ipa3_qmi_enable_per_client_stats(
 	struct ipa_enable_per_client_stats_req_msg_v01 *req,
 	struct ipa_enable_per_client_stats_resp_msg_v01 *resp)
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index 7861896..736c0fb 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -1577,6 +1577,15 @@
 			}
 		}
 	}
+
+	/* commit the change to IPA-HW */
+	if (ipa3_ctx->ctrl->ipa3_commit_rt(IPA_IP_v4) ||
+		ipa3_ctx->ctrl->ipa3_commit_rt(IPA_IP_v6)) {
+		IPAERR("fail to commit rt-rule\n");
+		WARN_ON_RATELIMIT_IPA(1);
+		mutex_unlock(&ipa3_ctx->lock);
+		return -EPERM;
+	}
 	mutex_unlock(&ipa3_ctx->lock);
 
 	return 0;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
index f5ef141..e746229 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -818,6 +818,11 @@
 {
 	u32 opcode;
 
+	if (ipa3_ctx->ipa_hw_type > IPA_HW_v4_0) {
+		IPADBG_LOW("not supported past IPA v4.0\n");
+		return 0;
+	}
+
 	/*
 	 * If the uC interface has not been initialized yet,
 	 * don't notify the uC on the enable/disable
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
index f4068bf..2401166 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
@@ -508,6 +508,8 @@
 		result = -EFAULT;
 		goto fail_smmu_map_dl;
 	}
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 	IPADBG("client %d (ep: %d) connected\n", in->dl.client,
 		ipa_ep_idx_dl);
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index 13d2511..3cbc0c6 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -756,16 +756,21 @@
 		return -EINVAL;
 	}
 
-	if (IPA_CLIENT_IS_CONS(in->sys.client)) {
-		if (in->u.dl.comp_ring_base_pa % IPA_WDI_RING_ALIGNMENT ||
-			in->u.dl.ce_ring_base_pa % IPA_WDI_RING_ALIGNMENT) {
-			IPAERR("alignment failure on TX\n");
-			return -EINVAL;
-		}
-	} else {
-		if (in->u.ul.rdy_ring_base_pa % IPA_WDI_RING_ALIGNMENT) {
-			IPAERR("alignment failure on RX\n");
-			return -EINVAL;
+	if (!in->smmu_enabled) {
+		if (IPA_CLIENT_IS_CONS(in->sys.client)) {
+			if (in->u.dl.comp_ring_base_pa %
+				IPA_WDI_RING_ALIGNMENT ||
+				in->u.dl.ce_ring_base_pa %
+				IPA_WDI_RING_ALIGNMENT) {
+				IPAERR("alignment failure on TX\n");
+					return -EINVAL;
+			}
+		} else {
+			if (in->u.ul.rdy_ring_base_pa %
+				IPA_WDI_RING_ALIGNMENT) {
+				IPAERR("alignment failure on RX\n");
+				return -EINVAL;
+			}
 		}
 	}
 
@@ -795,43 +800,73 @@
 			cmd.size = sizeof(*tx_2);
 		else
 			cmd.size = sizeof(*tx);
-		IPADBG("comp_ring_base_pa=0x%pa\n",
-				&in->u.dl.comp_ring_base_pa);
-		IPADBG("comp_ring_size=%d\n", in->u.dl.comp_ring_size);
-		IPADBG("ce_ring_base_pa=0x%pa\n", &in->u.dl.ce_ring_base_pa);
-		IPADBG("ce_ring_size=%d\n", in->u.dl.ce_ring_size);
-		IPADBG("ce_ring_doorbell_pa=0x%pa\n",
-				&in->u.dl.ce_door_bell_pa);
-		IPADBG("num_tx_buffers=%d\n", in->u.dl.num_tx_buffers);
+		if (in->smmu_enabled) {
+			IPADBG("comp_ring_size=%d\n",
+				in->u.dl_smmu.comp_ring_size);
+			IPADBG("ce_ring_size=%d\n", in->u.dl_smmu.ce_ring_size);
+			IPADBG("ce_ring_doorbell_pa=0x%pa\n",
+					&in->u.dl_smmu.ce_door_bell_pa);
+			IPADBG("num_tx_buffers=%d\n",
+				in->u.dl_smmu.num_tx_buffers);
+		} else {
+			IPADBG("comp_ring_base_pa=0x%pa\n",
+					&in->u.dl.comp_ring_base_pa);
+			IPADBG("comp_ring_size=%d\n", in->u.dl.comp_ring_size);
+			IPADBG("ce_ring_base_pa=0x%pa\n",
+				&in->u.dl.ce_ring_base_pa);
+			IPADBG("ce_ring_size=%d\n", in->u.dl.ce_ring_size);
+			IPADBG("ce_ring_doorbell_pa=0x%pa\n",
+					&in->u.dl.ce_door_bell_pa);
+			IPADBG("num_tx_buffers=%d\n", in->u.dl.num_tx_buffers);
+		}
 	} else {
 		if (ipa3_ctx->ipa_wdi2)
 			cmd.size = sizeof(*rx_2);
 		else
 			cmd.size = sizeof(*rx);
-		IPADBG("rx_ring_base_pa=0x%pa\n",
-			&in->u.ul.rdy_ring_base_pa);
-		IPADBG("rx_ring_size=%d\n",
-			in->u.ul.rdy_ring_size);
-		IPADBG("rx_ring_rp_pa=0x%pa\n",
-			&in->u.ul.rdy_ring_rp_pa);
-		IPADBG("rx_comp_ring_base_pa=0x%pa\n",
-			&in->u.ul.rdy_comp_ring_base_pa);
-		IPADBG("rx_comp_ring_size=%d\n",
-			in->u.ul.rdy_comp_ring_size);
-		IPADBG("rx_comp_ring_wp_pa=0x%pa\n",
-			&in->u.ul.rdy_comp_ring_wp_pa);
-		ipa3_ctx->uc_ctx.rdy_ring_base_pa =
-			in->u.ul.rdy_ring_base_pa;
-		ipa3_ctx->uc_ctx.rdy_ring_rp_pa =
-			in->u.ul.rdy_ring_rp_pa;
-		ipa3_ctx->uc_ctx.rdy_ring_size =
-			in->u.ul.rdy_ring_size;
-		ipa3_ctx->uc_ctx.rdy_comp_ring_base_pa =
-			in->u.ul.rdy_comp_ring_base_pa;
-		ipa3_ctx->uc_ctx.rdy_comp_ring_wp_pa =
-			in->u.ul.rdy_comp_ring_wp_pa;
-		ipa3_ctx->uc_ctx.rdy_comp_ring_size =
-			in->u.ul.rdy_comp_ring_size;
+		if (in->smmu_enabled) {
+			IPADBG("rx_ring_size=%d\n",
+				in->u.ul_smmu.rdy_ring_size);
+			IPADBG("rx_ring_rp_pa=0x%pa\n",
+				&in->u.ul_smmu.rdy_ring_rp_pa);
+			IPADBG("rx_comp_ring_size=%d\n",
+				in->u.ul_smmu.rdy_comp_ring_size);
+			IPADBG("rx_comp_ring_wp_pa=0x%pa\n",
+				&in->u.ul_smmu.rdy_comp_ring_wp_pa);
+			ipa3_ctx->uc_ctx.rdy_ring_rp_pa =
+				in->u.ul_smmu.rdy_ring_rp_pa;
+			ipa3_ctx->uc_ctx.rdy_ring_size =
+				in->u.ul_smmu.rdy_ring_size;
+			ipa3_ctx->uc_ctx.rdy_comp_ring_wp_pa =
+				in->u.ul_smmu.rdy_comp_ring_wp_pa;
+			ipa3_ctx->uc_ctx.rdy_comp_ring_size =
+				in->u.ul_smmu.rdy_comp_ring_size;
+		} else {
+			IPADBG("rx_ring_base_pa=0x%pa\n",
+				&in->u.ul.rdy_ring_base_pa);
+			IPADBG("rx_ring_size=%d\n",
+				in->u.ul.rdy_ring_size);
+			IPADBG("rx_ring_rp_pa=0x%pa\n",
+				&in->u.ul.rdy_ring_rp_pa);
+			IPADBG("rx_comp_ring_base_pa=0x%pa\n",
+				&in->u.ul.rdy_comp_ring_base_pa);
+			IPADBG("rx_comp_ring_size=%d\n",
+				in->u.ul.rdy_comp_ring_size);
+			IPADBG("rx_comp_ring_wp_pa=0x%pa\n",
+				&in->u.ul.rdy_comp_ring_wp_pa);
+			ipa3_ctx->uc_ctx.rdy_ring_base_pa =
+				in->u.ul.rdy_ring_base_pa;
+			ipa3_ctx->uc_ctx.rdy_ring_rp_pa =
+				in->u.ul.rdy_ring_rp_pa;
+			ipa3_ctx->uc_ctx.rdy_ring_size =
+				in->u.ul.rdy_ring_size;
+			ipa3_ctx->uc_ctx.rdy_comp_ring_base_pa =
+				in->u.ul.rdy_comp_ring_base_pa;
+			ipa3_ctx->uc_ctx.rdy_comp_ring_wp_pa =
+				in->u.ul.rdy_comp_ring_wp_pa;
+			ipa3_ctx->uc_ctx.rdy_comp_ring_size =
+				in->u.ul.rdy_comp_ring_size;
+		}
 	}
 
 	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
@@ -945,10 +980,11 @@
 			tx->comp_ring_size = len;
 			len = in->smmu_enabled ? in->u.dl_smmu.ce_ring_size :
 				in->u.dl.ce_ring_size;
-			IPADBG("TX CE ring smmu_en=%d ring_size=%d %d\n",
+			IPADBG("TX CE ring smmu_en=%d ring_size=%d %d 0x%lx\n",
 					in->smmu_enabled,
 					in->u.dl_smmu.ce_ring_size,
-					in->u.dl.ce_ring_size);
+					in->u.dl.ce_ring_size,
+					va);
 			if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES,
 						in->smmu_enabled,
 						in->u.dl.ce_ring_base_pa,
@@ -975,8 +1011,19 @@
 				result = -ENOMEM;
 				goto uc_timeout;
 			}
-			tx->ce_ring_doorbell_pa = va;
-			tx->num_tx_buffers = in->u.dl.num_tx_buffers;
+
+			IPADBG("CE doorbell pa: 0x%pa va:0x%lx\n", &pa, va);
+			IPADBG("Is wdi_over_pcie ? (%s)\n",
+				ipa3_ctx->wdi_over_pcie ? "Yes":"No");
+
+			if (ipa3_ctx->wdi_over_pcie)
+				tx->ce_ring_doorbell_pa = pa;
+			else
+				tx->ce_ring_doorbell_pa = va;
+
+			tx->num_tx_buffers = in->smmu_enabled ?
+				in->u.dl_smmu.num_tx_buffers :
+				in->u.dl.num_tx_buffers;
 			tx->ipa_pipe_number = ipa_ep_idx;
 		}
 		out->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base +
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 5d6d3cd..b7a561e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -24,6 +24,13 @@
 #include "ipahal/ipahal_hw_stats.h"
 #include "../ipa_rm_i.h"
 
+/*
+ * The following is for adding code (i.e. for EMULATION) not found on x86.
+ */
+#if IPA_EMULATION_COMPILE == 1
+# include "ipa_emulation_stubs.h"
+#endif
+
 #define IPA_V3_0_CLK_RATE_SVS2 (37.5 * 1000 * 1000UL)
 #define IPA_V3_0_CLK_RATE_SVS (75 * 1000 * 1000UL)
 #define IPA_V3_0_CLK_RATE_NOMINAL (150 * 1000 * 1000UL)
@@ -964,12 +971,12 @@
 	/* IPA_3_5_1 */
 	[IPA_3_5_1][IPA_CLIENT_WLAN1_PROD]          = {
 			true, IPA_v3_5_GROUP_UL_DL, true,
-			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
 			QMB_MASTER_SELECT_DDR,
 			{ 7, 1, 8, 16, IPA_EE_UC } },
 	[IPA_3_5_1][IPA_CLIENT_USB_PROD]            = {
 			true, IPA_v3_5_GROUP_UL_DL, true,
-			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
 			QMB_MASTER_SELECT_DDR,
 			{ 0, 0, 8, 16, IPA_EE_AP } },
 	[IPA_3_5_1][IPA_CLIENT_APPS_LAN_PROD] = {
@@ -979,7 +986,7 @@
 			{ 8, 7, 8, 16, IPA_EE_AP } },
 	[IPA_3_5_1][IPA_CLIENT_APPS_WAN_PROD] = {
 			true, IPA_v3_5_GROUP_UL_DL, true,
-			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
 			QMB_MASTER_SELECT_DDR,
 			{ 2, 3, 16, 32, IPA_EE_AP } },
 	[IPA_3_5_1][IPA_CLIENT_APPS_CMD_PROD]		= {
@@ -1119,86 +1126,86 @@
 			true,
 			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
 			QMB_MASTER_SELECT_DDR,
-			{ 6, 2, 8, 16, IPA_EE_UC } },
+			{ 6, 2, 8, 16, IPA_EE_UC, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0][IPA_CLIENT_USB_PROD]            = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
 			QMB_MASTER_SELECT_DDR,
-			{ 0, 8, 8, 16, IPA_EE_AP } },
+			{ 0, 8, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0][IPA_CLIENT_APPS_LAN_PROD]   = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 8, 10, 8, 16, IPA_EE_AP } },
+			{ 8, 10, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0][IPA_CLIENT_APPS_WAN_PROD] = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
 			QMB_MASTER_SELECT_DDR,
-			{ 2, 3, 16, 32, IPA_EE_AP } },
+			{ 2, 3, 16, 32, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0][IPA_CLIENT_APPS_CMD_PROD]	  = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR,
-			{ 5, 4, 20, 24, IPA_EE_AP } },
+			{ 5, 4, 20, 24, IPA_EE_AP, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0][IPA_CLIENT_ODU_PROD]            = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
 			QMB_MASTER_SELECT_DDR,
-			{ 1, 0, 8, 16, IPA_EE_AP } },
+			{ 1, 0, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0][IPA_CLIENT_ETHERNET_PROD]	  = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
 			QMB_MASTER_SELECT_DDR,
-			{ 9, 0, 8, 16, IPA_EE_UC } },
+			{ 9, 0, 8, 16, IPA_EE_UC, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0][IPA_CLIENT_Q6_WAN_PROD]         = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 3, 0, 16, 32, IPA_EE_Q6 } },
+			{ 3, 0, 16, 32, IPA_EE_Q6, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0][IPA_CLIENT_Q6_CMD_PROD]	  = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 4, 1, 20, 24, IPA_EE_Q6 } },
+			{ 4, 1, 20, 24, IPA_EE_Q6, GSI_USE_PREFETCH_BUFS } },
 	/* Only for test purpose */
 	[IPA_4_0][IPA_CLIENT_TEST_PROD]           = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{0, 8, 8, 16, IPA_EE_AP } },
+			{0, 8, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0][IPA_CLIENT_TEST1_PROD]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{0, 8, 8, 16, IPA_EE_AP } },
+			{0, 8, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0][IPA_CLIENT_TEST2_PROD]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 1, 0, 8, 16, IPA_EE_AP } },
+			{ 1, 0, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0][IPA_CLIENT_TEST3_PROD]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 7, 9, 8, 16, IPA_EE_AP } },
+			{ 7, 9, 8, 16, IPA_EE_AP, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0][IPA_CLIENT_TEST4_PROD]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{8, 10, 8, 16, IPA_EE_AP } },
+			{8, 10, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 
 
 	[IPA_4_0][IPA_CLIENT_WLAN1_CONS]          = {
@@ -1206,73 +1213,73 @@
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 18, 3, 6, 9, IPA_EE_UC } },
+			{ 18, 3, 6, 9, IPA_EE_UC, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0][IPA_CLIENT_WLAN2_CONS]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 20, 13, 9, 9, IPA_EE_AP } },
+			{ 20, 13, 9, 9, IPA_EE_AP, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0][IPA_CLIENT_WLAN3_CONS]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 21, 14, 9, 9, IPA_EE_AP } },
+			{ 21, 14, 9, 9, IPA_EE_AP, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0][IPA_CLIENT_USB_CONS]            = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 19, 12, 9, 9, IPA_EE_AP } },
+			{ 19, 12, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0][IPA_CLIENT_USB_DPL_CONS]        = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 15, 7, 5, 5, IPA_EE_AP } },
+			{ 15, 7, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0][IPA_CLIENT_APPS_LAN_CONS]       = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 10, 5, 9, 9, IPA_EE_AP } },
+			{ 10, 5, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0][IPA_CLIENT_APPS_WAN_CONS]       = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 11, 6, 9, 9, IPA_EE_AP } },
+			{ 11, 6, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0][IPA_CLIENT_ODU_EMB_CONS]        = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 17, 1, 17, 17, IPA_EE_AP } },
+			{ 17, 1, 17, 17, IPA_EE_AP, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0][IPA_CLIENT_ETHERNET_CONS]	  = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 22, 1, 17, 17, IPA_EE_UC } },
+			{ 22, 1, 17, 17, IPA_EE_UC, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0][IPA_CLIENT_Q6_LAN_CONS]         = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 14, 4, 9, 9, IPA_EE_Q6 } },
+			{ 14, 4, 9, 9, IPA_EE_Q6, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0][IPA_CLIENT_Q6_WAN_CONS]         = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 13, 3, 9, 9, IPA_EE_Q6 } },
+			{ 13, 3, 9, 9, IPA_EE_Q6, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 16, 5, 9, 9, IPA_EE_Q6 } },
+			{ 16, 5, 9, 9, IPA_EE_Q6, GSI_USE_PREFETCH_BUFS } },
 	/* Only for test purpose */
 	/* MBIM aggregation test pipes should have the same QMB as USB_CONS */
 	[IPA_4_0][IPA_CLIENT_TEST_CONS]           = {
@@ -1280,38 +1287,38 @@
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 11, 6, 9, 9, IPA_EE_AP } },
+			{ 11, 6, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0][IPA_CLIENT_TEST1_CONS]           = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 11, 6, 9, 9, IPA_EE_AP } },
+			{ 11, 6, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0][IPA_CLIENT_TEST2_CONS]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 12, 2, 5, 5, IPA_EE_AP } },
+			{ 12, 2, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0][IPA_CLIENT_TEST3_CONS]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 19, 12, 9, 9, IPA_EE_AP } },
+			{ 19, 12, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0][IPA_CLIENT_TEST4_CONS]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 21, 14, 9, 9, IPA_EE_AP } },
+			{ 21, 14, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	/* Dummy consumer (pipe 31) is used in L2TP rt rule */
 	[IPA_4_0][IPA_CLIENT_DUMMY_CONS]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 31, 31, 8, 8, IPA_EE_AP } },
+			{ 31, 31, 8, 8, IPA_EE_AP, GSI_USE_PREFETCH_BUFS } },
 
 	/* IPA_4_0_MHI */
 	[IPA_4_0_MHI][IPA_CLIENT_APPS_WAN_PROD]   = {
@@ -1319,74 +1326,74 @@
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 2, 3, 16, 32, IPA_EE_AP } },
+			{ 2, 3, 16, 32, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0_MHI][IPA_CLIENT_APPS_CMD_PROD]	  = {
 			true, IPA_v4_0_MHI_GROUP_DDR,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR,
-			{ 5, 4, 20, 24, IPA_EE_AP } },
+			{ 5, 4, 20, 24, IPA_EE_AP, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0_MHI][IPA_CLIENT_MHI_PROD]            = {
 			true, IPA_v4_0_MHI_GROUP_PCIE,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_PCIE,
-			{ 1, 0, 8, 16, IPA_EE_AP } },
+			{ 1, 0, 8, 16, IPA_EE_AP, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0_MHI][IPA_CLIENT_Q6_WAN_PROD]         = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 3, 0, 16, 32, IPA_EE_Q6 } },
+			{ 3, 0, 16, 32, IPA_EE_Q6, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0_MHI][IPA_CLIENT_Q6_CMD_PROD]	  = {
 			true, IPA_v4_0_MHI_GROUP_PCIE,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 4, 1, 20, 24, IPA_EE_Q6 } },
+			{ 4, 1, 20, 24, IPA_EE_Q6, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = {
 			true, IPA_v4_0_MHI_GROUP_DMA,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR,
-			{ 7, 9, 8, 16, IPA_EE_AP } },
+			{ 7, 9, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = {
 			true, IPA_v4_0_MHI_GROUP_DMA,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR,
-			{ 8, 10, 8, 16, IPA_EE_AP } },
+			{ 8, 10, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	/* Only for test purpose */
 	[IPA_4_0_MHI][IPA_CLIENT_TEST_PROD]           = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{0, 8, 8, 16, IPA_EE_AP } },
+			{0, 8, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0_MHI][IPA_CLIENT_TEST1_PROD]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{0, 8, 8, 16, IPA_EE_AP } },
+			{0, 8, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0_MHI][IPA_CLIENT_TEST2_PROD]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 1, 0, 8, 16, IPA_EE_AP } },
+			{ 1, 0, 8, 16, IPA_EE_AP, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0_MHI][IPA_CLIENT_TEST3_PROD]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{7, 9, 8, 16, IPA_EE_AP } },
+			{7, 9, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0_MHI][IPA_CLIENT_TEST4_PROD]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 8, 10, 8, 16, IPA_EE_AP } },
+			{ 8, 10, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 
 
 	[IPA_4_0_MHI][IPA_CLIENT_APPS_LAN_CONS]       = {
@@ -1394,87 +1401,87 @@
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 10, 5, 9, 9, IPA_EE_AP } },
+			{ 10, 5, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0_MHI][IPA_CLIENT_APPS_WAN_CONS]       = {
 			true, IPA_v4_0_MHI_GROUP_DDR,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 11, 6, 9, 9, IPA_EE_AP } },
+			{ 11, 6, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0_MHI][IPA_CLIENT_MHI_CONS]            = {
 			true, IPA_v4_0_MHI_GROUP_PCIE,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
-			{ 17, 1, 17, 17, IPA_EE_AP } },
+			{ 17, 1, 17, 17, IPA_EE_AP, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0_MHI][IPA_CLIENT_Q6_LAN_CONS]         = {
 			true, IPA_v4_0_MHI_GROUP_DDR,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 14, 4, 9, 9, IPA_EE_Q6 } },
+			{ 14, 4, 9, 9, IPA_EE_Q6, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0_MHI][IPA_CLIENT_Q6_WAN_CONS]         = {
 			true, IPA_v4_0_MHI_GROUP_DDR,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 13, 3, 9, 9, IPA_EE_Q6 } },
+			{ 13, 3, 9, 9, IPA_EE_Q6, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = {
 			true, IPA_v4_0_MHI_GROUP_DMA,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
-			{ 20, 13, 9, 9, IPA_EE_AP } },
+			{ 20, 13, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = {
 			true, IPA_v4_0_MHI_GROUP_DMA,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
-			{ 21, 14, 9, 9, IPA_EE_AP } },
+			{ 21, 14, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0_MHI][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 16, 5, 9, 9, IPA_EE_Q6 } },
+			{ 16, 5, 9, 9, IPA_EE_Q6, GSI_USE_PREFETCH_BUFS } },
 	/* Only for test purpose */
 	[IPA_4_0_MHI][IPA_CLIENT_TEST_CONS]           = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
-			{ 11, 6, 9, 9, IPA_EE_AP } },
+			{ 11, 6, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0_MHI][IPA_CLIENT_TEST1_CONS]           = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
-			{ 11, 6, 9, 9, IPA_EE_AP } },
+			{ 11, 6, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0_MHI][IPA_CLIENT_TEST2_CONS]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 12, 2, 5, 5, IPA_EE_AP } },
+			{ 12, 2, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0_MHI][IPA_CLIENT_TEST3_CONS]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
-			{ 19, 12, 9, 9, IPA_EE_AP } },
+			{ 19, 12, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0_MHI][IPA_CLIENT_TEST4_CONS]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
-			{ 21, 14, 9, 9, IPA_EE_AP } },
+			{ 21, 14, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	/* Dummy consumer (pipe 31) is used in L2TP rt rule */
 	[IPA_4_0_MHI][IPA_CLIENT_DUMMY_CONS]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 31, 31, 8, 8, IPA_EE_AP } },
+			{ 31, 31, 8, 8, IPA_EE_AP, GSI_USE_PREFETCH_BUFS } },
 };
 
 /**
@@ -1965,6 +1972,29 @@
 		IPADBG("ipa_qmb_select_by_address_cons_en = %d\n",
 				comp_cfg.ipa_qmb_select_by_address_cons_en);
 	}
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		ipahal_read_reg_fields(IPA_COMP_CFG, &comp_cfg);
+		IPADBG("Before comp config\n");
+		IPADBG("gsi_multi_inorder_rd_dis = %d\n",
+			comp_cfg.gsi_multi_inorder_rd_dis);
+
+		IPADBG("gsi_multi_inorder_wr_dis = %d\n",
+			comp_cfg.gsi_multi_inorder_wr_dis);
+
+		comp_cfg.gsi_multi_inorder_rd_dis = true;
+		comp_cfg.gsi_multi_inorder_wr_dis = true;
+
+		ipahal_write_reg_fields(IPA_COMP_CFG, &comp_cfg);
+
+		ipahal_read_reg_fields(IPA_COMP_CFG, &comp_cfg);
+		IPADBG("After comp config\n");
+		IPADBG("gsi_multi_inorder_rd_dis = %d\n",
+			comp_cfg.gsi_multi_inorder_rd_dis);
+
+		IPADBG("gsi_multi_inorder_wr_dis = %d\n",
+			comp_cfg.gsi_multi_inorder_wr_dis);
+	}
 }
 
 /**
@@ -1984,10 +2014,13 @@
 	max_writes.qmb_1_max_writes = 2;
 
 	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5) {
-		max_writes.qmb_1_max_writes = 4;
 		max_reads.qmb_1_max_reads = 12;
+		max_writes.qmb_1_max_writes = 4;
 	}
 
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+		max_reads.qmb_0_max_reads = 12;
+
 	ipahal_write_reg_fields(IPA_QSB_MAX_WRITES, &max_writes);
 	ipahal_write_reg_fields(IPA_QSB_MAX_READS, &max_reads);
 }
@@ -2123,6 +2156,28 @@
 	return ipa_ep_idx;
 }
 
+
+/**
+ * ipa_get_ep_prefetch_mode() - provide the endpoint's prefetch mode
+ * @client: client type
+ *
+ * Return value: prefetch_mode
+ */
+enum gsi_prefetch_mode ipa_get_ep_prefetch_mode(enum ipa_client_type client)
+{
+	enum gsi_prefetch_mode prefetch_mode;
+
+	if (client >= IPA_CLIENT_MAX || client < 0) {
+		IPAERR_RL("Bad client number: client =%d\n", client);
+		return -IPA_EP_NOT_ALLOCATED;
+	}
+
+	prefetch_mode = ipa3_ep_mapping[ipa3_get_hw_type_index()][client].
+		ipa_gsi_ep_info.prefetch_mode;
+
+	return prefetch_mode;
+}
+
 /**
  * ipa3_get_gsi_ep_info() - provide gsi ep information
  * @client: IPA client value
@@ -2322,6 +2377,32 @@
 }
 
 /**
+ * ipa3_get_client_by_pipe() - return client type relative to pipe
+ * index
+ * @pipe_idx: IPA end-point number
+ *
+ * Return value: client type
+ */
+static enum ipa_client_type ipa3_get_client_by_pipe(int pipe_idx)
+{
+	int j = 0;
+
+	for (j = 0; j < IPA_CLIENT_MAX; j++) {
+		const struct ipa_ep_configuration *iec_ptr =
+			&(ipa3_ep_mapping[ipa3_get_hw_type_index()][j]);
+		if (iec_ptr->valid &&
+		    iec_ptr->ipa_gsi_ep_info.ipa_ep_num == pipe_idx)
+			break;
+	}
+
+	if (j == IPA_CLIENT_MAX)
+		IPADBG("ipa3_get_client_by_pipe(%d) can't find client\n",
+		       pipe_idx);
+
+	return j;
+}
+
+/**
  * ipa_init_ep_flt_bitmap() - Initialize the bitmap
  * that represents the End-points that supports filtering
  */
@@ -4870,7 +4951,8 @@
 	}
 
 	/* move resource group configuration from HLOS to TZ */
-	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1) {
+	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION &&
+	    ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1) {
 		IPAERR("skip configuring ipa_rx_hps_clients from HLOS\n");
 		return;
 	}
@@ -5179,6 +5261,77 @@
 }
 
 /**
+ * emulator_load_single_fw() - load firmware into emulator's memory
+ *
+ * @firmware: Structure which contains the FW data from the user space.
+ * @phdr: ELF program header
+ * @fw_base: memory location to which firmware should get loaded
+ * @offset_from_base: offset to start relative to fw_base
+ *
+ * Return value: 0 on success, negative otherwise
+ */
+static int emulator_load_single_fw(
+	const struct firmware   *firmware,
+	const struct elf32_phdr *phdr,
+	void __iomem            *fw_base,
+	uint32_t                offset_from_base)
+{
+	int index;
+	uint32_t ofb;
+	const uint32_t *elf_data_ptr;
+
+	IPADBG("firmware(%pK) phdr(%pK) fw_base(%pK) offset_from_base(0x%x)\n",
+	       firmware, phdr, fw_base, offset_from_base);
+
+	if (phdr->p_offset > firmware->size) {
+		IPAERR("Invalid ELF: offset=%u is beyond elf_size=%zu\n",
+			phdr->p_offset, firmware->size);
+		return -EINVAL;
+	}
+	if ((firmware->size - phdr->p_offset) < phdr->p_filesz) {
+		IPAERR("Invalid ELF: offset=%u filesz=%u elf_size=%zu\n",
+			phdr->p_offset, phdr->p_filesz, firmware->size);
+		return -EINVAL;
+	}
+
+	if (phdr->p_memsz % sizeof(uint32_t)) {
+		IPAERR("FW mem size %u doesn't align to 32bit\n",
+			phdr->p_memsz);
+		return -EFAULT;
+	}
+
+	if (phdr->p_filesz > phdr->p_memsz) {
+		IPAERR("FW image too big src_size=%u dst_size=%u\n",
+			phdr->p_filesz, phdr->p_memsz);
+		return -EFAULT;
+	}
+
+	IPADBG("ELF: p_memsz(0x%x) p_filesz(0x%x) p_filesz/4(0x%x)\n",
+	       (uint32_t) phdr->p_memsz,
+	       (uint32_t) phdr->p_filesz,
+	       (uint32_t) (phdr->p_filesz/sizeof(uint32_t)));
+
+	/* Set the entire region to 0s */
+	ofb = offset_from_base;
+	for (index = 0; index < phdr->p_memsz/sizeof(uint32_t); index++) {
+		writel_relaxed(0, fw_base + ofb);
+		ofb += sizeof(uint32_t);
+	}
+
+	elf_data_ptr = (uint32_t *)(firmware->data + phdr->p_offset);
+
+	/* Write the FW */
+	ofb = offset_from_base;
+	for (index = 0; index < phdr->p_filesz/sizeof(uint32_t); index++) {
+		writel_relaxed(*elf_data_ptr, fw_base + ofb);
+		elf_data_ptr++;
+		ofb += sizeof(uint32_t);
+	}
+
+	return 0;
+}
+
+/**
  * ipa3_load_fws() - Load the IPAv3 FWs into IPA&GSI SRAM.
  *
  * @firmware: Structure which contains the FW data from the user space.
@@ -5286,6 +5439,240 @@
 	return 0;
 }
 
+/*
+ * The following is needed for the EMULATION system. On a non-emulation
+ * system (i.e. the real UE), this functionality is done in the
+ * TZ...
+ */
+#define IPA_SPARE_REG_1_VAL (0xC0000805)
+
+static void ipa_gsi_setup_reg(void)
+{
+	u32 reg_val, start;
+	int i;
+	const struct ipa_gsi_ep_config *gsi_ep_info_cfg;
+	enum ipa_client_type type;
+
+	IPADBG("Setting up registers in preparation for firmware download\n");
+
+	/* enable GSI interface */
+	ipahal_write_reg(IPA_GSI_CONF, 1);
+
+	/*
+	 * Before configuring the FIFOs need to unset bit 30 in the
+	 * spare register
+	 */
+	ipahal_write_reg(IPA_SPARE_REG_1,
+			 (IPA_SPARE_REG_1_VAL & (~(1 << 30))));
+
+	/* setup IPA_ENDP_GSI_CFG_TLV_n reg */
+	start = 0;
+	ipa3_ctx->ipa_num_pipes = ipa3_get_num_pipes();
+	IPADBG("ipa_num_pipes=%u\n", ipa3_ctx->ipa_num_pipes);
+
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		type = ipa3_get_client_by_pipe(i);
+		gsi_ep_info_cfg = ipa3_get_gsi_ep_info(type);
+		IPADBG("for ep %d client is %d\n", i, type);
+		if (!gsi_ep_info_cfg)
+			continue;
+		IPADBG("Config is true\n");
+		reg_val = (gsi_ep_info_cfg->ipa_if_tlv << 16) + start;
+		start += gsi_ep_info_cfg->ipa_if_tlv;
+		ipahal_write_reg_n(IPA_ENDP_GSI_CFG_TLV_n, i, reg_val);
+	}
+
+	/* setup IPA_ENDP_GSI_CFG_AOS_n reg */
+	start = 0;
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		type = ipa3_get_client_by_pipe(i);
+		gsi_ep_info_cfg = ipa3_get_gsi_ep_info(type);
+		if (!gsi_ep_info_cfg)
+			continue;
+		reg_val = (gsi_ep_info_cfg->ipa_if_aos << 16) + start;
+		start += gsi_ep_info_cfg->ipa_if_aos;
+		ipahal_write_reg_n(IPA_ENDP_GSI_CFG_AOS_n, i, reg_val);
+	}
+
+	/* setup IPA_ENDP_GSI_CFG1_n reg */
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		type = ipa3_get_client_by_pipe(i);
+		gsi_ep_info_cfg = ipa3_get_gsi_ep_info(type);
+		if (!gsi_ep_info_cfg)
+			continue;
+		reg_val = (1 << 16) +
+			((u32)gsi_ep_info_cfg->ipa_gsi_chan_num << 8) +
+			gsi_ep_info_cfg->ee;
+		ipahal_write_reg_n(IPA_ENDP_GSI_CFG1_n, i, reg_val);
+	}
+
+	/*
+	 * Setup IPA_ENDP_GSI_CFG2_n reg: this register must be setup
+	 * as last one
+	 */
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		type = ipa3_get_client_by_pipe(i);
+		gsi_ep_info_cfg = ipa3_get_gsi_ep_info(type);
+		if (!gsi_ep_info_cfg)
+			continue;
+		reg_val = 1 << 31;
+		ipahal_write_reg_n(IPA_ENDP_GSI_CFG2_n, i, reg_val);
+		reg_val = 0;
+		ipahal_write_reg_n(IPA_ENDP_GSI_CFG2_n, i, reg_val);
+	}
+
+	/*
+	 * After configuring the FIFOs need to set bit 30 in the spare
+	 * register
+	 */
+	ipahal_write_reg(IPA_SPARE_REG_1,
+			 (IPA_SPARE_REG_1_VAL | (1 << 30)));
+}
+
+/**
+ * emulator_load_fws() - Load the IPAv3 FWs into IPA&GSI SRAM.
+ *
+ * @firmware: Structure which contains the FW data from the user space.
+ *
+ * Return value: 0 on success, negative otherwise
+ *
+ */
+int emulator_load_fws(
+	const struct firmware *firmware,
+	u32 transport_mem_base,
+	u32 transport_mem_size)
+{
+	const struct elf32_hdr *ehdr;
+	const struct elf32_phdr *phdr;
+	void __iomem *gsi_base;
+	uint32_t hps_seq_offs, dps_seq_offs;
+	unsigned long gsi_offset;
+	int rc;
+
+	IPADBG("Loading firmware(%pK)\n", firmware);
+
+	if (!firmware) {
+		IPAERR("firmware pointer passed to function is NULL\n");
+		return -EINVAL;
+	}
+
+	/* One program header per FW image: GSI, DPS and HPS */
+	if (firmware->size < (sizeof(*ehdr) + 3 * sizeof(*phdr))) {
+		IPAERR(
+		    "Missing ELF and Program headers firmware size=%zu\n",
+		    firmware->size);
+		return -EINVAL;
+	}
+
+	ehdr = (struct elf32_hdr *) firmware->data;
+
+	ipa_assert_on(!ehdr);
+
+	if (ehdr->e_phnum != 3) {
+		IPAERR("Unexpected number of ELF program headers\n");
+		return -EINVAL;
+	}
+
+	hps_seq_offs = ipahal_get_reg_ofst(IPA_HPS_SEQUENCER_FIRST);
+	dps_seq_offs = ipahal_get_reg_ofst(IPA_DPS_SEQUENCER_FIRST);
+
+	/*
+	 * Each ELF program header represents a FW image and contains:
+	 *  p_vaddr : The starting address to which the FW needs to be loaded.
+	 *  p_memsz : The size of the IRAM (where the image is loaded)
+	 *  p_filesz: The size of the FW image embedded inside the ELF
+	 *  p_offset: Absolute offset to the image from the head of the ELF
+	 *
+	 * NOTE WELL: On the emulation platform, the p_vaddr address
+	 *            is not relevant and is unused.  This is because
+	 *            on the emulation platform, the registers'
+	 *            address location is mutable, since it's mapped
+	 *            in via a PCIe probe.  Given this, it is the
+	 *            mapped address info that's used while p_vaddr is
+	 *            ignored.
+	 */
+	phdr = (struct elf32_phdr *)(firmware->data + sizeof(*ehdr));
+
+	phdr += 2;
+
+	/*
+	 * Attempt to load IPA HPS FW image
+	 */
+	if (phdr->p_memsz > ipahal_get_hps_img_mem_size()) {
+		IPAERR("Invalid IPA HPS img size memsz=%d hps_mem_size=%u\n",
+		       phdr->p_memsz, ipahal_get_hps_img_mem_size());
+		return -EINVAL;
+	}
+	IPADBG("Loading HPS FW\n");
+	rc = emulator_load_single_fw(
+	    firmware, phdr, ipa3_ctx->mmio, hps_seq_offs);
+	if (rc)
+		return rc;
+	IPADBG("Loading HPS FW complete\n");
+
+	--phdr;
+
+	/*
+	 * Attempt to load IPA DPS FW image
+	 */
+	if (phdr->p_memsz > ipahal_get_dps_img_mem_size()) {
+		IPAERR("Invalid IPA DPS img size memsz=%d dps_mem_size=%u\n",
+		       phdr->p_memsz, ipahal_get_dps_img_mem_size());
+		return -EINVAL;
+	}
+	IPADBG("Loading DPS FW\n");
+	rc = emulator_load_single_fw(
+	    firmware, phdr, ipa3_ctx->mmio, dps_seq_offs);
+	if (rc)
+		return rc;
+	IPADBG("Loading DPS FW complete\n");
+
+	/*
+	 * Run gsi register setup which is normally done in TZ on
+	 * non-EMULATION systems...
+	 */
+	ipa_gsi_setup_reg();
+
+	/*
+	 * Map to the GSI base...
+	 */
+	gsi_base = ioremap_nocache(transport_mem_base, transport_mem_size);
+
+	IPADBG("GSI base(0x%x) mapped to (%pK) with len (0x%x)\n",
+	       transport_mem_base,
+	       gsi_base,
+	       transport_mem_size);
+
+	if (!gsi_base) {
+		IPAERR("ioremap_nocache failed\n");
+		return -EFAULT;
+	}
+
+	--phdr;
+
+	/*
+	 * Attempt to load GSI FW image
+	 */
+	if (phdr->p_memsz > transport_mem_size) {
+		IPAERR(
+		    "Invalid GSI FW img size memsz=%d transport_mem_size=%u\n",
+		    phdr->p_memsz, transport_mem_size);
+		return -EINVAL;
+	}
+	IPADBG("Loading GSI FW\n");
+	gsi_get_inst_ram_offset_and_size(&gsi_offset, NULL);
+	rc = emulator_load_single_fw(
+	    firmware, phdr, gsi_base, (uint32_t) gsi_offset);
+	iounmap(gsi_base);
+	if (rc)
+		return rc;
+	IPADBG("Loading GSI FW complete\n");
+
+	IPADBG("IPA FWs (GSI FW, DPS and HPS) loaded successfully\n");
+
+	return 0;
+}
+
 /**
  * ipa3_is_msm_device() - Is the running device a MSM or MDM?
  *  Determine according to IPA version
@@ -5379,4 +5766,3 @@
 	desc->len = cmd_pyld->len;
 	desc->type = IPA_IMM_CMD_DESC;
 }
-
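
The emulator_load_fws() routine added above walks the firmware ELF in a fixed order: the three program headers sit directly after the ELF header, with phdr[2] carrying the HPS image, phdr[1] the DPS image and phdr[0] the GSI image, while p_vaddr is ignored on the emulation platform. Below is a minimal user-space sketch (not driver code) that prints the fields emulator_load_single_fw() consumes; the file name and helper names are illustrative only.

#include <elf.h>
#include <stdio.h>
#include <stdlib.h>

/* Print the fields emulator_load_single_fw() actually consumes */
static void dump_phdr(const char *tag, const Elf32_Phdr *ph)
{
	printf("%s: p_offset=0x%x p_filesz=0x%x p_memsz=0x%x\n",
	       tag, (unsigned)ph->p_offset, (unsigned)ph->p_filesz,
	       (unsigned)ph->p_memsz);
}

int main(int argc, char **argv)
{
	Elf32_Ehdr ehdr;
	Elf32_Phdr phdr[3];
	FILE *fp;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <ipa_fws.elf>\n", argv[0]);
		return EXIT_FAILURE;
	}
	fp = fopen(argv[1], "rb");
	if (!fp)
		return EXIT_FAILURE;
	/*
	 * The driver expects exactly three program headers (GSI, DPS, HPS)
	 * located directly after the ELF header, so a sequential read works.
	 */
	if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1 || ehdr.e_phnum != 3 ||
	    fread(phdr, sizeof(phdr[0]), 3, fp) != 3) {
		fclose(fp);
		return EXIT_FAILURE;
	}
	dump_phdr("GSI", &phdr[0]);
	dump_phdr("DPS", &phdr[1]);
	dump_phdr("HPS", &phdr[2]);
	fclose(fp);
	return EXIT_SUCCESS;
}
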
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
index 1254fe3..530adef 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,6 +10,7 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/errno.h>
 #include <linux/ipc_logging.h>
 #include <linux/debugfs.h>
 #include <linux/ipa.h>
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
index 26b7f0f..d46f13c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
@@ -55,6 +55,14 @@
 			" %s:%d " fmt, ## args); \
 	} while (0)
 
+#define IPAHAL_DBG_REG_IPC_ONLY(fmt, args...) \
+	do { \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			" %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			" %s:%d " fmt, ## args); \
+	} while (0)
+
 #define IPAHAL_ERR_RL(fmt, args...) \
 		do { \
 			pr_err_ratelimited_ipa(IPAHAL_DRV_NAME " %s:%d " fmt, \
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index ce59488..2dbea95 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -133,6 +133,11 @@
 	__stringify(IPA_FEC_ATTR_EE_n),
 	__stringify(IPA_MBIM_DEAGGR_FEC_ATTR_EE_n),
 	__stringify(IPA_GEN_DEAGGR_FEC_ATTR_EE_n),
+	__stringify(IPA_GSI_CONF),
+	__stringify(IPA_ENDP_GSI_CFG1_n),
+	__stringify(IPA_ENDP_GSI_CFG2_n),
+	__stringify(IPA_ENDP_GSI_CFG_AOS_n),
+	__stringify(IPA_ENDP_GSI_CFG_TLV_n),
 };
 
 static void ipareg_construct_dummy(enum ipahal_reg_name reg,
@@ -1971,6 +1976,21 @@
 	[IPA_HW_v3_5][IPA_HPS_FTCH_ARB_QUEUE_WEIGHT] = {
 		ipareg_construct_hps_queue_weights,
 		ipareg_parse_hps_queue_weights, 0x000005a4, 0, 0, 0, 0},
+	[IPA_HW_v3_5][IPA_GSI_CONF] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00002790, 0x0, 0, 0, 0 },
+	[IPA_HW_v3_5][IPA_ENDP_GSI_CFG1_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00002794, 0x4, 0, 0, 0 },
+	[IPA_HW_v3_5][IPA_ENDP_GSI_CFG2_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00002A2C, 0x4, 0, 0, 0 },
+	[IPA_HW_v3_5][IPA_ENDP_GSI_CFG_AOS_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000029A8, 0x4, 0, 0, 0 },
+	[IPA_HW_v3_5][IPA_ENDP_GSI_CFG_TLV_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00002924, 0x4, 0, 0, 0 },
 
 	/* IPAv4.0 */
 	[IPA_HW_v4_0][IPA_IRQ_SUSPEND_INFO_EE_n] = {
@@ -2217,7 +2237,7 @@
 		0x00000CC0, 0x70, 10, 23, 1},
 };
 
-int ipahal_print_all_regs(void)
+int ipahal_print_all_regs(bool print_to_dmesg)
 {
 	int i, j;
 
@@ -2236,9 +2256,16 @@
 
 		j = ipahal_reg_objs[ipahal_ctx->hw_type][i].n_start;
 
-		if (j == ipahal_reg_objs[ipahal_ctx->hw_type][i].n_end)
-			IPAHAL_DBG_REG("%s=0x%x\n", ipahal_reg_name_str(i),
-				ipahal_read_reg_n(i, j));
+		if (j == ipahal_reg_objs[ipahal_ctx->hw_type][i].n_end) {
+			if (print_to_dmesg)
+				IPAHAL_DBG_REG("%s=0x%x\n",
+					ipahal_reg_name_str(i),
+					ipahal_read_reg_n(i, j));
+			else
+				IPAHAL_DBG_REG_IPC_ONLY("%s=0x%x\n",
+					ipahal_reg_name_str(i),
+					ipahal_read_reg_n(i, j));
+		}
 
 		for (; j < ipahal_reg_objs[ipahal_ctx->hw_type][i].n_end; j++)
 			IPAHAL_DBG_REG("%s_%u=0x%x\n", ipahal_reg_name_str(i),
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
index 7e8e8ba..fdf4fd1 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
@@ -134,6 +134,11 @@
 	IPA_FEC_ATTR_EE_n,
 	IPA_MBIM_DEAGGR_FEC_ATTR_EE_n,
 	IPA_GEN_DEAGGR_FEC_ATTR_EE_n,
+	IPA_GSI_CONF,
+	IPA_ENDP_GSI_CFG1_n,
+	IPA_ENDP_GSI_CFG2_n,
+	IPA_ENDP_GSI_CFG_AOS_n,
+	IPA_ENDP_GSI_CFG_TLV_n,
 	IPA_REG_MAX,
 };
 
@@ -519,7 +524,7 @@
 };
 
 
-int ipahal_print_all_regs(void);
+int ipahal_print_all_regs(bool print_to_dmesg);
 
 /*
  * ipahal_reg_name_str() - returns string that represent the register
@@ -641,4 +646,3 @@
 	struct ipahal_reg_valmask *valmask);
 
 #endif /* _IPAHAL_REG_H_ */
-
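
ipa_gsi_setup_reg() in ipa_utils.c above programs the IPA_ENDP_GSI_CFG_TLV_n and IPA_ENDP_GSI_CFG_AOS_n registers listed here by packing each pipe's FIFO slice size into bits 31:16 and the running start offset into bits 15:0. A standalone sketch of that packing, using made-up slice sizes rather than a real pipe map:

#include <stdint.h>
#include <stdio.h>

/*
 * Pack one pipe's slice the way ipa_gsi_setup_reg() does:
 * slice size in bits 31:16, running start offset in bits 15:0.
 */
static uint32_t pack_fifo_cfg(uint32_t size, uint32_t *start)
{
	uint32_t reg_val = (size << 16) + *start;

	*start += size;
	return reg_val;
}

int main(void)
{
	uint32_t tlv_sizes[] = { 8, 8, 16 };	/* made-up per-pipe sizes */
	uint32_t start = 0;
	unsigned int i;

	for (i = 0; i < sizeof(tlv_sizes) / sizeof(tlv_sizes[0]); i++)
		printf("pipe %u: IPA_ENDP_GSI_CFG_TLV_n = 0x%08x\n",
		       i, pack_fifo_cfg(tlv_sizes[i], &start));
	/* Prints 0x00080000, 0x00080008, 0x00100010 */
	return 0;
}

With sizes 8, 8 and 16 each pipe's slice begins exactly where the previous one ended, which is why the register must be written in pipe order with a single accumulating start offset.
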
diff --git a/drivers/platform/msm/ipa/test/Makefile b/drivers/platform/msm/ipa/test/Makefile
index 82bee5d..e18e534 100644
--- a/drivers/platform/msm/ipa/test/Makefile
+++ b/drivers/platform/msm/ipa/test/Makefile
@@ -1,2 +1,8 @@
+ifdef CONFIG_X86
+ccflags-y += -DIPA_EMULATION_COMPILE=1
+else
+ccflags-y += -DIPA_EMULATION_COMPILE=0
+endif
+
 obj-$(CONFIG_IPA_UT) += ipa_ut_mod.o
 ipa_ut_mod-y := ipa_ut_framework.o ipa_test_example.o ipa_test_mhi.o ipa_test_dma.o ipa_test_hw_stats.o ipa_pm_ut.o
diff --git a/drivers/platform/msm/ipa/test/ipa_pm_ut.c b/drivers/platform/msm/ipa/test/ipa_pm_ut.c
index e07040a..83b705d 100644
--- a/drivers/platform/msm/ipa/test/ipa_pm_ut.c
+++ b/drivers/platform/msm/ipa/test/ipa_pm_ut.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -23,18 +23,32 @@
 
 static int ipa_pm_ut_setup(void **ppriv)
 {
+	int i;
 
 	IPA_UT_DBG("Start Setup\n");
 
 	/* decrement UT vote */
 	IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IPA_UT");
 
+	/* decouple PM from RPM */
+	ipa3_ctx->enable_clock_scaling = false;
+
+	if (ipa3_ctx->use_ipa_pm) {
+		for (i = 0; i < IPA_PM_MAX_CLIENTS; i++) {
+			ipa_pm_deactivate_sync(i);
+			ipa_pm_deregister(i);
+		}
+
+		ipa_pm_destroy();
+	}
+
 	return 0;
 }
 
 static int ipa_pm_ut_teardown(void *priv)
 {
 	IPA_UT_DBG("Start Teardown\n");
+	IPA_UT_ERR("WARNING: IPA_PM HAS BEEN DESTROYED, REBOOT TO RE_INIT\n");
 
 	/* undo UT vote */
 	IPA_ACTIVE_CLIENTS_INC_SPECIAL("IPA_UT");
@@ -106,7 +120,7 @@
 	struct callback_param user_data;
 
 	struct ipa_pm_init_params init_params = {
-		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.threshold_size = 2,
 		.default_threshold = {600, 1000}
 	};
 
@@ -223,7 +237,7 @@
 	struct callback_param user_data;
 
 	struct ipa_pm_init_params init_params = {
-		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.threshold_size = 2,
 		.default_threshold = {600, 1000}
 	};
 
@@ -327,7 +341,7 @@
 	struct callback_param user_data;
 
 	struct ipa_pm_init_params init_params = {
-		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.threshold_size = 2,
 		.default_threshold = {600, 1000}
 	};
 
@@ -436,7 +450,7 @@
 
 
 	struct ipa_pm_init_params init_params = {
-		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.threshold_size = 2,
 		.default_threshold = {600, 1000}
 	};
 
@@ -648,7 +662,7 @@
 	struct callback_param user_data;
 
 	struct ipa_pm_init_params init_params = {
-		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.threshold_size = 2,
 		.default_threshold = {600, 1000}
 	};
 
@@ -797,7 +811,7 @@
 	struct callback_param user_data;
 
 	struct ipa_pm_init_params init_params = {
-		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.threshold_size = 2,
 		.default_threshold = {600, 1000}
 	};
 
@@ -885,7 +899,7 @@
 	unsigned long flags;
 
 	struct ipa_pm_init_params init_params = {
-		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.threshold_size = 2,
 		.default_threshold = {600, 1000}
 	};
 
@@ -957,7 +971,7 @@
 	int i, hdl_USB, hdl_WLAN, vote;
 
 	struct ipa_pm_init_params init_params = {
-		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.threshold_size = 2,
 		.default_threshold = {600, 1000}
 	};
 
@@ -1095,7 +1109,7 @@
 	int hdl_USB, hdl_WLAN, vote, idx;
 
 	struct ipa_pm_init_params init_params = {
-		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.threshold_size = 2,
 		.default_threshold = {600, 1000}
 	};
 
@@ -1210,7 +1224,7 @@
 	int hdl_USB, hdl_WLAN, hdl_MODEM, vote, idx;
 
 	struct ipa_pm_init_params init_params = {
-		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.threshold_size = 2,
 		.default_threshold = {600, 1000}
 	};
 
@@ -1377,7 +1391,7 @@
 	int hdl_USB, hdl_WLAN, hdl_MODEM, vote, idx;
 
 	struct ipa_pm_init_params init_params = {
-		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.threshold_size = 2,
 		.default_threshold = {600, 1000}
 	};
 
@@ -1542,7 +1556,7 @@
 	};
 
 	struct ipa_pm_init_params init_params = {
-		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.threshold_size = 2,
 		.default_threshold = {600, 1000},
 		.exception_size = 1,
 		.exceptions[0] = exceptions,
diff --git a/drivers/platform/msm/ipa/test/ipa_test_mhi.c b/drivers/platform/msm/ipa/test/ipa_test_mhi.c
index 212557c..7f65956 100644
--- a/drivers/platform/msm/ipa/test/ipa_test_mhi.c
+++ b/drivers/platform/msm/ipa/test/ipa_test_mhi.c
@@ -327,6 +327,8 @@
 	u32 prod_hdl;
 	u32 cons_hdl;
 	u32 test_prod_hdl;
+	phys_addr_t transport_phys_addr;
+	unsigned long transport_size;
 };
 
 static struct ipa_test_mhi_context *test_mhi_ctx;
@@ -780,11 +782,6 @@
 
 	IPA_UT_DBG("Start Setup\n");
 
-	if (!gsi_ctx) {
-		IPA_UT_ERR("No GSI ctx\n");
-		return -EINVAL;
-	}
-
 	if (!ipa3_ctx) {
 		IPA_UT_ERR("No IPA ctx\n");
 		return -EINVAL;
@@ -797,11 +794,20 @@
 		return -ENOMEM;
 	}
 
-	test_mhi_ctx->gsi_mmio = ioremap_nocache(gsi_ctx->per.phys_addr,
-		gsi_ctx->per.size);
-	if (!test_mhi_ctx) {
+	rc = ipa3_get_transport_info(&test_mhi_ctx->transport_phys_addr,
+				     &test_mhi_ctx->transport_size);
+	if (rc != 0) {
+		IPA_UT_ERR("ipa3_get_transport_info() failed\n");
+		rc = -EFAULT;
+		goto fail_free_ctx;
+	}
+
+	test_mhi_ctx->gsi_mmio =
+	    ioremap_nocache(test_mhi_ctx->transport_phys_addr,
+			    test_mhi_ctx->transport_size);
+	if (!test_mhi_ctx->gsi_mmio) {
 		IPA_UT_ERR("failed to remap GSI HW size=%lu\n",
-			gsi_ctx->per.size);
+			   test_mhi_ctx->transport_size);
 		rc = -EFAULT;
 		goto fail_free_ctx;
 	}
@@ -1385,7 +1391,7 @@
 		/* write value to event ring doorbell */
 		IPA_UT_LOG("DB to event 0x%llx: base %pa ofst 0x%x\n",
 			p_events[event_ring_index].wp,
-			&(gsi_ctx->per.phys_addr),
+			&(test_mhi_ctx->transport_phys_addr),
 			GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(
 			event_ring_index + ipa3_ctx->mhi_evid_limits[0], 0));
 		iowrite32(p_events[event_ring_index].wp,
@@ -1432,7 +1438,7 @@
 				IPA_UT_LOG(
 					"DB to channel 0x%llx: base %pa ofst 0x%x\n"
 					, p_channels[channel_idx].wp
-					, &(gsi_ctx->per.phys_addr)
+					, &(test_mhi_ctx->transport_phys_addr)
 					, GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(
 						channel_idx, 0));
 				iowrite32(p_channels[channel_idx].wp,
@@ -3324,4 +3330,3 @@
 		ipa_mhi_test_in_loop_channel_reset_ipa_holb,
 		true, IPA_HW_v3_0, IPA_HW_MAX),
 } IPA_UT_DEFINE_SUITE_END(mhi);
-
diff --git a/drivers/platform/msm/ipa/test/ipa_ut_framework.c b/drivers/platform/msm/ipa/test/ipa_ut_framework.c
index bcbcd87..dad3ec9 100644
--- a/drivers/platform/msm/ipa/test/ipa_ut_framework.c
+++ b/drivers/platform/msm/ipa/test/ipa_ut_framework.c
@@ -1036,9 +1036,10 @@
  * If IPA driver already ready, continue initialization immediately.
  * if not, wait for IPA ready notification by IPA driver context
  */
-static int __init ipa_ut_module_init(void)
+int __init ipa_ut_module_init(void)
 {
-	int ret;
+	int ret = 0;
+	bool init_framework = true;
 
 	IPA_UT_INFO("Loading IPA test module...\n");
 
@@ -1050,14 +1051,34 @@
 	mutex_init(&ipa_ut_ctx->lock);
 
 	if (!ipa_is_ready()) {
+		init_framework = false;
+
 		IPA_UT_DBG("IPA driver not ready, registering callback\n");
+
 		ret = ipa_register_ipa_ready_cb(ipa_ut_ipa_ready_cb, NULL);
 
 		/*
-		 * If we received -EEXIST, IPA has initialized. So we need
-		 * to continue the initing process.
+		 * If the call to ipa_register_ipa_ready_cb() above
+		 * returns 0, this means that we've succeeded in
+		 * queuing up a future call to ipa_ut_framework_init()
+		 * and that the call to it will be made once the IPA
+		 * becomes ready.  If this is the case, the call to
+		 * ipa_ut_framework_init() below need not be made.
+		 *
+		 * If the call to ipa_register_ipa_ready_cb() above
+		 * returns -EEXIST, it means that during the call to
+		 * ipa_register_ipa_ready_cb(), the IPA has become
+		 * ready, and hence, no indirect call to
+		 * ipa_ut_framework_init() will be made, so we need to
+		 * call it ourselves below.
+		 *
+		 * If the call to ipa_register_ipa_ready_cb() above
+		 * returns something other than 0 or -EEXIST, that's a
+		 * hard error.
 		 */
-		if (ret != -EEXIST) {
+		if (ret == -EEXIST) {
+			init_framework = true;
+		} else {
 			if (ret) {
 				IPA_UT_ERR("IPA CB reg failed - %d\n", ret);
 				kfree(ipa_ut_ctx);
@@ -1067,12 +1088,15 @@
 		}
 	}
 
-	ret = ipa_ut_framework_init();
-	if (ret) {
-		IPA_UT_ERR("framework init failed\n");
-		kfree(ipa_ut_ctx);
-		ipa_ut_ctx = NULL;
+	if (init_framework) {
+		ret = ipa_ut_framework_init();
+		if (ret) {
+			IPA_UT_ERR("framework init failed\n");
+			kfree(ipa_ut_ctx);
+			ipa_ut_ctx = NULL;
+		}
 	}
+
 	return ret;
 }
 
@@ -1081,7 +1105,7 @@
  *
  * Destroys the Framework and removes its context
  */
-static void ipa_ut_module_exit(void)
+void ipa_ut_module_exit(void)
 {
 	IPA_UT_DBG("Entry\n");
 
@@ -1093,8 +1117,9 @@
 	ipa_ut_ctx = NULL;
 }
 
+#if IPA_EMULATION_COMPILE == 0 /* On real UE, we have a module */
 module_init(ipa_ut_module_init);
 module_exit(ipa_ut_module_exit);
-
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("IPA Unit Test module");
+#endif
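
The ipa_register_ipa_ready_cb() handling spelled out in the comment above reduces to a three-way split on the return value: 0 means the framework init will run later from the ready callback, -EEXIST means the IPA became ready in the meantime so init must run now, and anything else is a hard error. A condensed sketch of that pattern, where my_ready_cb() and my_framework_init() are hypothetical stand-ins for the real callbacks:

#include <linux/errno.h>
#include <linux/ipa.h>
#include <linux/types.h>

static void my_ready_cb(void *user_data);	/* hypothetical callback */
static int my_framework_init(void);		/* hypothetical init routine */

static int my_module_init(void)
{
	bool init_now = true;
	int ret = 0;

	if (!ipa_is_ready()) {
		init_now = false;
		ret = ipa_register_ipa_ready_cb(my_ready_cb, NULL);
		if (ret == -EEXIST)
			init_now = true;	/* IPA became ready meanwhile */
		else if (ret)
			return ret;		/* hard registration error */
	}

	return init_now ? my_framework_init() : 0;
}
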
diff --git a/drivers/platform/msm/mhi_dev/mhi.c b/drivers/platform/msm/mhi_dev/mhi.c
index 1c16e5a..645385e 100644
--- a/drivers/platform/msm/mhi_dev/mhi.c
+++ b/drivers/platform/msm/mhi_dev/mhi.c
@@ -824,13 +824,13 @@
 }
 EXPORT_SYMBOL(mhi_dev_send_ee_event);
 
-static void mhi_dev_trigger_cb(void)
+static void mhi_dev_trigger_cb(enum mhi_client_channel ch_id)
 {
 	struct mhi_dev_ready_cb_info *info;
 	enum mhi_ctrl_info state_data;
 
 	list_for_each_entry(info, &mhi_ctx->client_cb_list, list)
-		if (info->cb) {
+		if (info->cb && info->cb_data.channel == ch_id) {
 			mhi_ctrl_state_info(info->cb_data.channel, &state_data);
 			info->cb_data.ctrl_info = state_data;
 			info->cb(&info->cb_data);
@@ -1021,10 +1021,9 @@
 		if (rc)
 			pr_err("Error sending command completion event\n");
 
-		/* Trigger callback to clients */
-		mhi_dev_trigger_cb();
-
 		mhi_update_state_info(ch_id, MHI_STATE_CONNECTED);
+		/* Trigger callback to clients */
+		mhi_dev_trigger_cb(ch_id);
 		if (ch_id == MHI_CLIENT_MBIM_OUT)
 			kobject_uevent_env(&mhi_ctx->dev->kobj,
 						KOBJ_CHANGE, connected);
@@ -1418,16 +1417,20 @@
 	union mhi_dev_ring_element_type *el;
 	int rc = 0;
 	struct mhi_req *req = (struct mhi_req *)mreq;
-	struct mhi_req *local_req = NULL;
 	union mhi_dev_ring_element_type *compl_ev = NULL;
 	struct mhi_dev *mhi = NULL;
 	unsigned long flags;
+	size_t transfer_len;
+	u32 snd_cmpl;
+	uint32_t rd_offset;
 
 	client = req->client;
 	ch = client->channel;
 	mhi = ch->ring->mhi_dev;
 	el = req->el;
-	local_req = req;
+	transfer_len = req->len;
+	snd_cmpl = req->snd_cmpl;
+	rd_offset = req->rd_offset;
 	ch->curr_ereq->context = ch;
 
 	dma_unmap_single(&mhi_ctx->pdev->dev, req->dma,
@@ -1441,14 +1444,13 @@
 		compl_ev->evt_tr_comp.chid = ch->ch_id;
 		compl_ev->evt_tr_comp.type =
 				MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT;
-		compl_ev->evt_tr_comp.len = el->tre.len;
+		compl_ev->evt_tr_comp.len = transfer_len;
 		compl_ev->evt_tr_comp.code = MHI_CMD_COMPL_CODE_EOT;
 		compl_ev->evt_tr_comp.ptr = ch->ring->ring_ctx->generic.rbase +
-				local_req->rd_offset * TR_RING_ELEMENT_SZ;
+						rd_offset * TR_RING_ELEMENT_SZ;
 		ch->curr_ereq->num_events++;
 
-		if (ch->curr_ereq->num_events >= MAX_TR_EVENTS ||
-				local_req->snd_cmpl){
+		if (ch->curr_ereq->num_events >= MAX_TR_EVENTS || snd_cmpl) {
 			mhi_log(MHI_MSG_VERBOSE,
 					"num of tr events %d for ch %d\n",
 					ch->curr_ereq->num_events, ch->ch_id);
@@ -1570,6 +1572,12 @@
 {
 	struct mhi_dev *mhi = dev_id;
 
+	if (!atomic_read(&mhi->mhi_dev_wake)) {
+		pm_stay_awake(mhi->dev);
+		atomic_set(&mhi->mhi_dev_wake, 1);
+		mhi_log(MHI_MSG_VERBOSE, "acquiring mhi wakelock in ISR\n");
+	}
+
 	disable_irq_nosync(mhi->mhi_irq);
 	schedule_work(&mhi->chdb_ctrl_work);
 	mhi_log(MHI_MSG_VERBOSE, "mhi irq triggered\n");
@@ -1881,6 +1889,7 @@
 			(struct mhi_dev_client_cb_reason *cb))
 {
 	int rc = 0;
+	int i = 0;
 	struct mhi_dev_channel *ch;
 	struct platform_device *pdev;
 
@@ -1904,6 +1913,38 @@
 		goto exit;
 	}
 
+	/* Pre allocate event requests */
+	ch->ereqs = kcalloc(MHI_MAX_EVT_REQ, sizeof(*ch->ereqs), GFP_KERNEL);
+	if (!ch->ereqs) {
+		rc = -ENOMEM;
+		goto free_client;
+	}
+	/* pre allocate buffers to queue transfer completion events */
+	ch->tr_events = kcalloc(MHI_MAX_EVT_REQ,
+				MAX_TR_EVENTS * sizeof(*ch->tr_events),
+				GFP_KERNEL);
+	if (!ch->tr_events) {
+		rc = -ENOMEM;
+		goto free_ereqs;
+	}
+
+	/*
+	 * Organize the above allocated event request block and
+	 * completion event block into linked lists. Each event
+	 * request includes a pointer to a block of MAX_TR_EVENTS
+	 * completion events.
+	 */
+	INIT_LIST_HEAD(&mhi_ctx->ch[chan_id].event_req_buffers);
+	for (i = 0; i < MHI_MAX_EVT_REQ; ++i) {
+		ch->ereqs[i].tr_events = ch->tr_events + i * MAX_TR_EVENTS;
+		list_add_tail(&ch->ereqs[i].list,
+				&mhi_ctx->ch[chan_id].event_req_buffers);
+	}
+	mhi_ctx->ch[chan_id].curr_ereq =
+		container_of(mhi_ctx->ch[chan_id].event_req_buffers.next,
+				struct event_req, list);
+	list_del_init(&mhi_ctx->ch[chan_id].curr_ereq->list);
+
 	ch->active_client = (*handle_client);
 	(*handle_client)->channel = ch;
 	(*handle_client)->event_trigger = mhi_dev_client_cb_reason;
@@ -1916,6 +1957,13 @@
 	else if (ch->state == MHI_DEV_CH_STOPPED)
 		ch->state = MHI_DEV_CH_PENDING_START;
 
+	goto exit;
+
+free_ereqs:
+	kfree(ch->ereqs);
+	ch->ereqs = NULL;
+free_client:
+	kfree(*handle_client);
 exit:
 	mutex_unlock(&ch->ch_lock);
 	return rc;
@@ -1949,14 +1997,12 @@
 			mhi_log(MHI_MSG_ERROR,
 				"Trying to close an active channel (%d)\n",
 				ch->ch_id);
-			mutex_unlock(&ch->ch_lock);
 			rc = -EAGAIN;
 			goto exit;
 		} else if (ch->tre_loc) {
 			mhi_log(MHI_MSG_ERROR,
 				"Trying to close channel (%d) when a TRE is active",
 				ch->ch_id);
-			mutex_unlock(&ch->ch_lock);
 			rc = -EAGAIN;
 			goto exit;
 		}
@@ -1964,6 +2010,10 @@
 
 	ch->state = MHI_DEV_CH_CLOSED;
 	ch->active_client = NULL;
+	kfree(ch->ereqs);
+	kfree(ch->tr_events);
+	ch->ereqs = NULL;
+	ch->tr_events = NULL;
 	kfree(handle);
 exit:
 	mutex_unlock(&ch->ch_lock);
@@ -2401,8 +2451,10 @@
 		return;
 	}
 
-	if (mhi_ctx->config_iatu || mhi_ctx->mhi_int)
+	if (mhi_ctx->config_iatu || mhi_ctx->mhi_int) {
+		mhi_ctx->mhi_int_en = true;
 		enable_irq(mhi_ctx->mhi_irq);
+	}
 
 	mhi_update_state_info(MHI_DEV_UEVENT_CTRL, MHI_STATE_CONFIGURED);
 }
@@ -2452,8 +2504,10 @@
 	 * If channel is open during registration, no callback is issued.
 	 * Instead return -EEXIST to notify the client. Clients request
 	 * is added to the list to notify future state change notification.
+	 * Channel struct may not be allocated yet if this function is called
+	 * early during boot - add an explicit check for non-null "ch".
 	 */
-	if (mhi_ctx->ch[channel].state == MHI_DEV_CH_STARTED) {
+	if (mhi_ctx->ch && (mhi_ctx->ch[channel].state == MHI_DEV_CH_STARTED)) {
 		mutex_unlock(&mhi_ctx->mhi_lock);
 		return -EEXIST;
 	}
@@ -2666,40 +2720,8 @@
 	if (!mhi->ch)
 		return -ENOMEM;
 
-
-	for (i = 0; i < mhi->cfg.channels; i++) {
+	for (i = 0; i < mhi->cfg.channels; i++)
 		mutex_init(&mhi->ch[i].ch_lock);
-		if (i == MHI_CLIENT_IP_SW_4_OUT || i == MHI_CLIENT_IP_SW_4_IN) {
-			int nreq = 0;
-
-			INIT_LIST_HEAD(&mhi->ch[i].event_req_buffers);
-			while (nreq < MHI_MAX_EVT_REQ) {
-				struct event_req *ereq;
-				/* Pre allocate event requests */
-				ereq = kzalloc(sizeof(struct event_req),
-						GFP_KERNEL);
-				if (!ereq)
-					return -ENOMEM;
-
-				/* pre allocate buffers to queue
-				 * transfer completion events
-				 */
-				ereq->tr_events = kzalloc(RING_ELEMENT_TYPE_SZ*
-						MAX_TR_EVENTS, GFP_KERNEL);
-				if (!ereq->tr_events) {
-					kfree(ereq);
-					return -ENOMEM;
-				}
-				list_add_tail(&ereq->list,
-						&mhi->ch[i].event_req_buffers);
-				nreq++;
-			}
-			mhi->ch[i].curr_ereq =
-				container_of(mhi->ch[i].event_req_buffers.next,
-						struct event_req, list);
-			list_del_init(&mhi->ch[i].curr_ereq->list);
-		}
-	}
 
 	spin_lock_init(&mhi->lock);
 	mhi->mmio_backup = devm_kzalloc(&pdev->dev,
@@ -2832,8 +2854,6 @@
 
 	INIT_LIST_HEAD(&mhi_ctx->event_ring_list);
 	INIT_LIST_HEAD(&mhi_ctx->process_ring_list);
-	INIT_LIST_HEAD(&mhi_ctx->client_cb_list);
-	mutex_init(&mhi_ctx->mhi_lock);
 	mutex_init(&mhi_ctx->mhi_event_lock);
 	mutex_init(&mhi_ctx->mhi_write_test);
 
@@ -2983,6 +3003,14 @@
 			dev_err(&pdev->dev,
 				"Failed to create IPC logging context\n");
 		}
+		/*
+		 * The below list and mutex should be initialized
+		 * before calling mhi_uci_init to avoid a crash in
+		 * mhi_register_state_cb when accessing these.
+		 */
+		INIT_LIST_HEAD(&mhi_ctx->client_cb_list);
+		mutex_init(&mhi_ctx->mhi_lock);
+
 		mhi_uci_init();
 		mhi_update_state_info(MHI_DEV_UEVENT_CTRL,
 						MHI_STATE_CONFIGURED);
diff --git a/drivers/platform/msm/mhi_dev/mhi.h b/drivers/platform/msm/mhi_dev/mhi.h
index b9120fc..3559021 100644
--- a/drivers/platform/msm/mhi_dev/mhi.h
+++ b/drivers/platform/msm/mhi_dev/mhi.h
@@ -484,7 +484,13 @@
 	struct mutex			ch_lock;
 	/* client which the current inbound/outbound message is for */
 	struct mhi_dev_client		*active_client;
-
+	/*
+	 * Pointer to event request structs used to temporarily store
+	 * completion events and metadata before sending them to the host
+	 */
+	struct event_req		*ereqs;
+	/* Pointer to completion event buffers */
+	union mhi_dev_ring_element_type *tr_events;
 	struct list_head		event_req_buffers;
 	struct event_req		*curr_ereq;
 
@@ -605,6 +611,8 @@
 
 	/*Register for interrupt */
 	bool				mhi_int;
+	bool				mhi_int_en;
+
 	/* Registered client callback list */
 	struct list_head		client_cb_list;
 
@@ -702,7 +710,9 @@
 	MHI_CLIENT_SMCT_IN = 45,
 	MHI_CLIENT_IP_SW_4_OUT  = 46,
 	MHI_CLIENT_IP_SW_4_IN  = 47,
-	MHI_MAX_SOFTWARE_CHANNELS = 48,
+	MHI_CLIENT_ADB_OUT = 48,
+	MHI_CLIENT_ADB_IN = 49,
+	MHI_MAX_SOFTWARE_CHANNELS,
 	MHI_CLIENT_TEST_OUT = 60,
 	MHI_CLIENT_TEST_IN = 61,
 	MHI_CLIENT_RESERVED_1_LOWER = 62,
@@ -712,6 +722,7 @@
 	MHI_CLIENT_RESERVED_2_LOWER = 102,
 	MHI_CLIENT_RESERVED_2_UPPER = 127,
 	MHI_MAX_CHANNELS = 102,
+	MHI_CLIENT_INVALID = 0xFFFFFFFF
 };
 
 /* Use ID 0 for legacy /dev/mhi_ctrl. Channel 0 is used for internal only */
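
The ereqs/tr_events members introduced above are filled from two flat kcalloc() calls in mhi_dev_open_channel(): event request i points at elements [i * MAX_TR_EVENTS, (i + 1) * MAX_TR_EVENTS) of the single completion-event buffer, and every request is chained onto the channel's free list. A small user-space sketch of that slicing, with toy sizes and types standing in for the driver's structures:

#include <stdio.h>
#include <stdlib.h>

#define NUM_REQS	4	/* stands in for MHI_MAX_EVT_REQ */
#define EVTS_PER_REQ	8	/* stands in for MAX_TR_EVENTS */

struct toy_event { unsigned int code; };

struct toy_event_req {
	struct toy_event *tr_events;	/* this request's slice */
};

int main(void)
{
	struct toy_event_req *ereqs;
	struct toy_event *tr_events;
	unsigned int i;

	/* Two flat allocations, mirroring the two kcalloc() calls */
	ereqs = calloc(NUM_REQS, sizeof(*ereqs));
	tr_events = calloc(NUM_REQS, EVTS_PER_REQ * sizeof(*tr_events));
	if (!ereqs || !tr_events)
		return EXIT_FAILURE;

	/* Each request gets a contiguous EVTS_PER_REQ-sized slice */
	for (i = 0; i < NUM_REQS; i++)
		ereqs[i].tr_events = tr_events + i * EVTS_PER_REQ;

	printf("req 1 slice starts at offset %td events\n",
	       ereqs[1].tr_events - tr_events);	/* prints 8 */

	free(tr_events);
	free(ereqs);
	return EXIT_SUCCESS;
}
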
diff --git a/drivers/platform/msm/mhi_dev/mhi_sm.c b/drivers/platform/msm/mhi_dev/mhi_sm.c
index 1200a36..917b258 100644
--- a/drivers/platform/msm/mhi_dev/mhi_sm.c
+++ b/drivers/platform/msm/mhi_dev/mhi_sm.c
@@ -19,6 +19,7 @@
 #include <linux/ipa_mhi.h>
 #include "mhi_hwio.h"
 #include "mhi_sm.h"
+#include <linux/interrupt.h>
 
 #define MHI_SM_DBG(fmt, args...) \
 	mhi_log(MHI_MSG_DBG, fmt, ##args)
@@ -397,15 +398,18 @@
 		break;
 	case EP_PCIE_EVENT_PM_D3_HOT:
 		res = ((curr_mstate == MHI_DEV_M3_STATE ||
-			curr_mstate == MHI_DEV_READY_STATE) &&
+			curr_mstate == MHI_DEV_READY_STATE ||
+			curr_mstate == MHI_DEV_RESET_STATE) &&
 			curr_dstate != MHI_SM_EP_PCIE_LINK_DISABLE);
 		break;
 	case EP_PCIE_EVENT_PM_D3_COLD:
 		res = (curr_dstate == MHI_SM_EP_PCIE_D3_HOT_STATE ||
-			curr_dstate == MHI_SM_EP_PCIE_D3_COLD_STATE);
+			curr_dstate == MHI_SM_EP_PCIE_D3_COLD_STATE ||
+			curr_dstate == MHI_SM_EP_PCIE_D0_STATE);
 		break;
 	case EP_PCIE_EVENT_PM_RST_DEAST:
 		res = (curr_dstate == MHI_SM_EP_PCIE_D0_STATE ||
+			curr_dstate == MHI_SM_EP_PCIE_D3_HOT_STATE ||
 			curr_dstate == MHI_SM_EP_PCIE_D3_COLD_STATE);
 		break;
 	case EP_PCIE_EVENT_PM_D0:
@@ -775,6 +779,7 @@
 	struct mhi_sm_ep_pcie_event *chg_event = container_of(work,
 		struct mhi_sm_ep_pcie_event, work);
 	enum ep_pcie_event pcie_event = chg_event->event;
+	unsigned long flags;
 
 	MHI_SM_FUNC_ENTRY();
 
@@ -846,6 +851,15 @@
 
 		mhi_dev_restore_mmio(mhi_sm_ctx->mhi_dev);
 
+		spin_lock_irqsave(&mhi_sm_ctx->mhi_dev->lock, flags);
+		if ((mhi_sm_ctx->mhi_dev->mhi_int) &&
+				(!mhi_sm_ctx->mhi_dev->mhi_int_en)) {
+			enable_irq(mhi_sm_ctx->mhi_dev->mhi_irq);
+			mhi_sm_ctx->mhi_dev->mhi_int_en = true;
+			MHI_SM_DBG("Enable MHI IRQ during PCIe DEAST");
+		}
+		spin_unlock_irqrestore(&mhi_sm_ctx->mhi_dev->lock, flags);
+
 		res = ep_pcie_enable_endpoint(mhi_sm_ctx->mhi_dev->phandle,
 			EP_PCIE_OPT_ENUM);
 		if (res) {
@@ -1141,6 +1155,7 @@
 {
 	struct mhi_sm_ep_pcie_event *dstate_change_evt;
 	enum ep_pcie_event event;
+	unsigned long flags;
 
 	MHI_SM_FUNC_ENTRY();
 
@@ -1173,6 +1188,16 @@
 		break;
 	case EP_PCIE_EVENT_PM_D3_HOT:
 		mhi_sm_ctx->stats.d3_hot_event_cnt++;
+
+		spin_lock_irqsave(&mhi_sm_ctx->mhi_dev->lock, flags);
+		if ((mhi_sm_ctx->mhi_dev->mhi_int) &&
+				(mhi_sm_ctx->mhi_dev->mhi_int_en)) {
+			disable_irq(mhi_sm_ctx->mhi_dev->mhi_irq);
+			mhi_sm_ctx->mhi_dev->mhi_int_en = false;
+			MHI_SM_DBG("Disable MHI IRQ during D3 HOT");
+		}
+		spin_unlock_irqrestore(&mhi_sm_ctx->mhi_dev->lock, flags);
+
 		mhi_dev_backup_mmio(mhi_sm_ctx->mhi_dev);
 		break;
 	case EP_PCIE_EVENT_PM_RST_DEAST:
@@ -1180,6 +1205,15 @@
 		break;
 	case EP_PCIE_EVENT_PM_D0:
 		mhi_sm_ctx->stats.d0_event_cnt++;
+
+		spin_lock_irqsave(&mhi_sm_ctx->mhi_dev->lock, flags);
+		if ((mhi_sm_ctx->mhi_dev->mhi_int) &&
+				(!mhi_sm_ctx->mhi_dev->mhi_int_en)) {
+			enable_irq(mhi_sm_ctx->mhi_dev->mhi_irq);
+			mhi_sm_ctx->mhi_dev->mhi_int_en = true;
+			MHI_SM_DBG("Enable MHI IRQ during D0");
+		}
+		spin_unlock_irqrestore(&mhi_sm_ctx->mhi_dev->lock, flags);
 		break;
 	case EP_PCIE_EVENT_LINKDOWN:
 		mhi_sm_ctx->stats.linkdown_event_cnt++;
diff --git a/drivers/platform/msm/mhi_dev/mhi_uci.c b/drivers/platform/msm/mhi_dev/mhi_uci.c
index ed02d0d..52d324e 100644
--- a/drivers/platform/msm/mhi_dev/mhi_uci.c
+++ b/drivers/platform/msm/mhi_dev/mhi_uci.c
@@ -29,15 +29,18 @@
 #include <uapi/linux/mhi.h>
 #include "mhi.h"
 
-#define MHI_DEV_NODE_NAME_LEN		13
-#define MHI_MAX_NR_OF_CLIENTS		23
 #define MHI_SOFTWARE_CLIENT_START	0
 #define MHI_SOFTWARE_CLIENT_LIMIT	(MHI_MAX_SOFTWARE_CHANNELS/2)
 #define MHI_UCI_IPC_LOG_PAGES		(100)
 
-#define MAX_NR_TRBS_PER_CHAN		1
+/* Max number of MHI write request structures (used in async writes) */
+#define MAX_UCI_WR_REQ			10
+#define MAX_NR_TRBS_PER_CHAN		9
 #define MHI_QTI_IFACE_ID		4
-#define DEVICE_NAME "mhi"
+#define DEVICE_NAME			"mhi"
+#define MAX_DEVICE_NAME_SIZE		80
+
+#define MHI_UCI_ASYNC_READ_TIMEOUT	msecs_to_jiffies(100)
 
 enum uci_dbg_level {
 	UCI_DBG_VERBOSE = 0x0,
@@ -70,7 +73,158 @@
 	u32 nr_trbs;
 	/* direction of the channel, see enum mhi_chan_dir */
 	enum mhi_chan_dir dir;
-	u32 uci_ownership;
+	/* whether to register an MHI channel state change callback */
+	bool register_cb;
+	/* Name of char device */
+	char *device_name;
+};
+
+/* UCI channel attributes table */
+static const struct chan_attr uci_chan_attr_table[] = {
+	{
+		MHI_CLIENT_LOOPBACK_OUT,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_OUT,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_LOOPBACK_IN,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_IN,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_SAHARA_OUT,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_OUT,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_SAHARA_IN,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_IN,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_EFS_OUT,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_OUT,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_EFS_IN,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_IN,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_MBIM_OUT,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_OUT,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_MBIM_IN,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_IN,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_QMI_OUT,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_OUT,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_QMI_IN,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_IN,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_IP_CTRL_0_OUT,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_OUT,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_IP_CTRL_0_IN,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_IN,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_IP_CTRL_1_OUT,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_OUT,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_IP_CTRL_1_IN,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_IN,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_DUN_OUT,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_OUT,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_DUN_IN,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_IN,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_ADB_OUT,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_OUT,
+		true,
+		NULL
+	},
+	{
+		MHI_CLIENT_ADB_IN,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_IN,
+		true,
+		"android_adb"
+	},
 };
 
 struct uci_ctrl {
@@ -87,6 +241,8 @@
 	u32 in_chan;
 	struct mhi_dev_client *out_handle;
 	struct mhi_dev_client *in_handle;
+	const struct chan_attr *in_chan_attr;
+	const struct chan_attr *out_chan_attr;
 	wait_queue_head_t read_wq;
 	wait_queue_head_t write_wq;
 	atomic_t read_data_ready;
@@ -101,10 +257,16 @@
 	struct mhi_uci_ctxt_t *uci_ctxt;
 	struct mutex in_chan_lock;
 	struct mutex out_chan_lock;
+	spinlock_t wr_req_lock;
+	unsigned int f_flags;
+	struct mhi_req *wreqs;
+	struct list_head wr_req_list;
+	struct completion read_done;
+	int (*send)(struct uci_client*, void*, u32);
+	int (*read)(struct uci_client*, struct mhi_req*, int*);
 };
 
 struct mhi_uci_ctxt_t {
-	struct chan_attr chan_attrib[MHI_MAX_SOFTWARE_CHANNELS];
 	struct uci_client client_handles[MHI_SOFTWARE_CLIENT_LIMIT];
 	struct uci_ctrl ctrl_handle;
 	void (*event_notifier)(struct mhi_dev_client_cb_reason *cb);
@@ -119,6 +281,7 @@
 };
 
 #define CHAN_TO_CLIENT(_CHAN_NR) (_CHAN_NR / 2)
+#define CLIENT_TO_CHAN(_CLIENT_NR) (_CLIENT_NR * 2)
 
 #define uci_log(_msg_lvl, _msg, ...) do { \
 	if (_msg_lvl >= mhi_uci_msg_lvl) { \
@@ -156,7 +319,7 @@
 {
 	int rc = 0;
 	u32 i, j;
-	struct chan_attr *chan_attributes;
+	const struct chan_attr *in_chan_attr;
 	size_t buf_size;
 	void *data_loc;
 
@@ -169,10 +332,18 @@
 		return -EINVAL;
 	}
 
-	chan_attributes = &uci_ctxt.chan_attrib[chan];
-	buf_size = chan_attributes->max_packet_size;
+	in_chan_attr = client_handle->in_chan_attr;
+	if (!in_chan_attr) {
+		uci_log(UCI_DBG_ERROR, "Null channel attributes for chan %d\n",
+				client_handle->in_chan);
+		return -EINVAL;
+	}
 
-	for (i = 0; i < (chan_attributes->nr_trbs); i++) {
+	/* Init the completion event for read */
+	init_completion(&client_handle->read_done);
+
+	buf_size = in_chan_attr->max_packet_size;
+	for (i = 0; i < (in_chan_attr->nr_trbs); i++) {
 		data_loc = kmalloc(buf_size, GFP_KERNEL);
 		if (!data_loc) {
 			rc = -ENOMEM;
@@ -191,48 +362,129 @@
 	return rc;
 }
 
-static int mhi_uci_send_packet(struct mhi_dev_client **client_handle, void *buf,
-		u32 size, u32 is_uspace_buf)
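+/* Async write completion: free the data buffer and recycle the request */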
+static void mhi_uci_write_completion_cb(void *req)
 {
-	void *data_loc = NULL;
-	uintptr_t memcpy_result = 0;
-	u32 data_inserted_so_far = 0;
+	struct mhi_req *ureq = req;
 	struct uci_client *uci_handle;
+	unsigned long flags;
+
+	uci_handle = (struct uci_client *)ureq->context;
+	kfree(ureq->buf);
+	ureq->buf = NULL;
+
+	spin_lock_irqsave(&uci_handle->wr_req_lock, flags);
+	list_add_tail(&ureq->list, &uci_handle->wr_req_list);
+	spin_unlock_irqrestore(&uci_handle->wr_req_lock, flags);
+}
+
+static void mhi_uci_read_completion_cb(void *req)
+{
+	struct mhi_req *ureq = req;
+	struct uci_client *uci_handle;
+
+	uci_handle = (struct uci_client *)ureq->context;
+	complete(&uci_handle->read_done);
+}
+
+static int mhi_uci_send_sync(struct uci_client *uci_handle,
+			void *data_loc, u32 size)
+{
 	struct mhi_req ureq;
+	int ret_val;
 
-
-	uci_handle = container_of(client_handle, struct uci_client,
-					out_handle);
-
-	if (!client_handle || !buf ||
-		!size || !uci_handle)
-		return -EINVAL;
-
-	if (is_uspace_buf) {
-		data_loc = kmalloc(size, GFP_KERNEL);
-		if (!data_loc) {
-			uci_log(UCI_DBG_ERROR,
-				"Failed to allocate memory 0x%x\n",
-				size);
-			return -ENOMEM;
-		}
-		memcpy_result = copy_from_user(data_loc, buf, size);
-		if (memcpy_result)
-			goto error_memcpy;
-	} else {
-		data_loc = buf;
-	}
-	ureq.client = *client_handle;
+	ureq.client = uci_handle->out_handle;
 	ureq.buf = data_loc;
 	ureq.len = size;
 	ureq.chan = uci_handle->out_chan;
 	ureq.mode = IPA_DMA_SYNC;
 
-	data_inserted_so_far = mhi_dev_write_channel(&ureq);
+	ret_val = mhi_dev_write_channel(&ureq);
+
+	kfree(data_loc);
+	return ret_val;
+}
+
+static int mhi_uci_send_async(struct uci_client *uci_handle,
+			void *data_loc, u32 size)
+{
+	int bytes_to_write;
+	struct mhi_req *ureq;
+
+	uci_log(UCI_DBG_VERBOSE,
+		"Got async write for ch %d of size %d\n",
+		uci_handle->out_chan, size);
+
+	spin_lock_irq(&uci_handle->wr_req_lock);
+	if (list_empty(&uci_handle->wr_req_list)) {
+		uci_log(UCI_DBG_ERROR, "Write request pool empty\n");
+		spin_unlock_irq(&uci_handle->wr_req_lock);
+		return -ENOMEM;
+	}
+	ureq = container_of(uci_handle->wr_req_list.next,
+						struct mhi_req, list);
+	list_del_init(&ureq->list);
+	spin_unlock_irq(&uci_handle->wr_req_lock);
+
+	ureq->client = uci_handle->out_handle;
+	ureq->context = uci_handle;
+	ureq->buf = data_loc;
+	ureq->len = size;
+	ureq->chan = uci_handle->out_chan;
+	ureq->mode = IPA_DMA_ASYNC;
+	ureq->client_cb = mhi_uci_write_completion_cb;
+	ureq->snd_cmpl = 1;
+
+	bytes_to_write = mhi_dev_write_channel(ureq);
+	if (bytes_to_write != size)
+		goto error_async_transfer;
+
+	return bytes_to_write;
+
+error_async_transfer:
+	kfree(data_loc);
+	ureq->buf = NULL;
+	spin_lock_irq(&uci_handle->wr_req_lock);
+	list_add_tail(&ureq->list, &uci_handle->wr_req_list);
+	spin_unlock_irq(&uci_handle->wr_req_lock);
+
+	return bytes_to_write;
+}
+
+static int mhi_uci_send_packet(struct mhi_dev_client **client_handle,
+		const char __user *buf, u32 size)
+{
+	void *data_loc;
+	unsigned long memcpy_result;
+	struct uci_client *uci_handle;
+
+	if (!client_handle || !buf || !size)
+		return -EINVAL;
+
+	if (size > TRB_MAX_DATA_SIZE) {
+		uci_log(UCI_DBG_ERROR,
+			"Too big write size: %d, max supported size is %d\n",
+			size, TRB_MAX_DATA_SIZE);
+		return -EFBIG;
+	}
+
+	uci_handle = container_of(client_handle, struct uci_client,
+					out_handle);
+	data_loc = kmalloc(size, GFP_KERNEL);
+	if (!data_loc) {
+		uci_log(UCI_DBG_ERROR,
+		"Failed to allocate kernel buf for user requested size 0x%x\n",
+			size);
+		return -ENOMEM;
+	}
+	memcpy_result = copy_from_user(data_loc, buf, size);
+	if (memcpy_result)
+		goto error_memcpy;
+
+	return uci_handle->send(uci_handle, data_loc, size);
 
 error_memcpy:
 	kfree(data_loc);
-	return data_inserted_so_far;
+	return -EFAULT;
 }
 
 static unsigned int mhi_uci_ctrl_poll(struct file *file, poll_table *wait)
@@ -290,6 +542,119 @@
 	return mask;
 }
 
+static int mhi_uci_alloc_write_reqs(struct uci_client *client)
+{
+	int i;
+
+	client->wreqs = kcalloc(MAX_UCI_WR_REQ,
+				sizeof(struct mhi_req),
+				GFP_KERNEL);
+	if (!client->wreqs) {
+		uci_log(UCI_DBG_ERROR, "Write reqs alloc failed\n");
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&client->wr_req_list);
+	for (i = 0; i < MAX_UCI_WR_REQ; ++i)
+		list_add_tail(&client->wreqs[i].list, &client->wr_req_list);
+
+	uci_log(UCI_DBG_INFO,
+		"UCI write reqs allocation successful\n");
+	return 0;
+}
+
+static int mhi_uci_read_async(struct uci_client *uci_handle,
+			struct mhi_req *ureq, int *bytes_avail)
+{
+	int ret_val = 0;
+	unsigned long compl_ret;
+
+	uci_log(UCI_DBG_VERBOSE,
+		"Async read for ch %d\n", uci_handle->in_chan);
+
+	ureq->mode = IPA_DMA_ASYNC;
+	ureq->client_cb = mhi_uci_read_completion_cb;
+	ureq->snd_cmpl = 1;
+	ureq->context = uci_handle;
+
+	reinit_completion(&uci_handle->read_done);
+
+	*bytes_avail = mhi_dev_read_channel(ureq);
+	uci_log(UCI_DBG_VERBOSE, "buf_size = 0x%x bytes_read = 0x%x\n",
+		ureq->len, *bytes_avail);
+	if (*bytes_avail < 0) {
+		uci_log(UCI_DBG_ERROR, "Failed to read channel ret %d\n",
+			*bytes_avail);
+		return -EIO;
+	}
+
+	if (*bytes_avail > 0) {
+		uci_log(UCI_DBG_VERBOSE,
+			"Waiting for async read completion!\n");
+		compl_ret =
+			wait_for_completion_interruptible_timeout(
+			&uci_handle->read_done,
+			MHI_UCI_ASYNC_READ_TIMEOUT);
+
+		if (compl_ret == -ERESTARTSYS) {
+			uci_log(UCI_DBG_ERROR, "Exit signal caught\n");
+			return compl_ret;
+		} else if (compl_ret == 0) {
+			uci_log(UCI_DBG_ERROR, "Read timed out for ch %d\n",
+				uci_handle->in_chan);
+			return -EIO;
+		}
+		uci_log(UCI_DBG_VERBOSE,
+			"Woke up, read completed on ch %d\n", ureq->chan);
+
+		uci_handle->pkt_loc = (void *)ureq->buf;
+		uci_handle->pkt_size = ureq->actual_len;
+
+		uci_log(UCI_DBG_VERBOSE,
+			"Got pkt of sz 0x%x at adr %pK, ch %d\n",
+			uci_handle->pkt_size,
+			ureq->buf, ureq->chan);
+	} else {
+		uci_handle->pkt_loc = NULL;
+		uci_handle->pkt_size = 0;
+	}
+
+	return ret_val;
+}
+
+static int mhi_uci_read_sync(struct uci_client *uci_handle,
+			struct mhi_req *ureq, int *bytes_avail)
+{
+	int ret_val = 0;
+
+	ureq->mode = IPA_DMA_SYNC;
+	*bytes_avail = mhi_dev_read_channel(ureq);
+
+	uci_log(UCI_DBG_VERBOSE, "buf_size = 0x%x bytes_read = 0x%x\n",
+		ureq->len, *bytes_avail);
+
+	if (*bytes_avail < 0) {
+		uci_log(UCI_DBG_ERROR, "Failed to read channel ret %d\n",
+			*bytes_avail);
+		return -EIO;
+	}
+
+	if (*bytes_avail > 0) {
+		uci_handle->pkt_loc = (void *)ureq->buf;
+		uci_handle->pkt_size = ureq->actual_len;
+
+		uci_log(UCI_DBG_VERBOSE,
+			"Got pkt of sz 0x%x at adr %pK, ch %d\n",
+			uci_handle->pkt_size,
+			ureq->buf, ureq->chan);
+	} else {
+		uci_handle->pkt_loc = NULL;
+		uci_handle->pkt_size = 0;
+	}
+
+	return ret_val;
+}
+
 static int open_client_mhi_channels(struct uci_client *uci_client)
 {
 	int rc = 0;
@@ -300,16 +665,27 @@
 			uci_client->in_chan);
 	mutex_lock(&uci_client->out_chan_lock);
 	mutex_lock(&uci_client->in_chan_lock);
+
+	/* Allocate write requests for async operations */
+	if (!(uci_client->f_flags & O_SYNC)) {
+		rc = mhi_uci_alloc_write_reqs(uci_client);
+		if (rc)
+			goto handle_not_rdy_err;
+		uci_client->send = mhi_uci_send_async;
+		uci_client->read = mhi_uci_read_async;
+	} else {
+		uci_client->send = mhi_uci_send_sync;
+		uci_client->read = mhi_uci_read_sync;
+	}
+
 	uci_log(UCI_DBG_DBG,
 			"Initializing inbound chan %d.\n",
 			uci_client->in_chan);
-
 	rc = mhi_init_read_chan(uci_client, uci_client->in_chan);
-	if (rc < 0) {
+	if (rc < 0)
 		uci_log(UCI_DBG_ERROR,
 			"Failed to init inbound 0x%x, ret 0x%x\n",
 			uci_client->in_chan, rc);
-	}
 
 	rc = mhi_dev_open_channel(uci_client->out_chan,
 			&uci_client->out_handle,
@@ -320,7 +696,6 @@
 	rc = mhi_dev_open_channel(uci_client->in_chan,
 			&uci_client->in_handle,
 			uci_ctxt.event_notifier);
-
 	if (rc < 0) {
 		uci_log(UCI_DBG_ERROR,
 			"Failed to open chan %d, ret 0x%x\n",
@@ -375,6 +750,7 @@
 			return -ENOMEM;
 		}
 		uci_handle->uci_ctxt = &uci_ctxt;
+		uci_handle->f_flags = file_handle->f_flags;
 		if (!atomic_read(&uci_handle->mhi_chans_open)) {
 			uci_log(UCI_DBG_INFO,
 				"Opening channels client %d\n",
@@ -397,20 +773,11 @@
 		struct file *file_handle)
 {
 	struct uci_client *uci_handle = file_handle->private_data;
-	struct mhi_uci_ctxt_t *uci_ctxt;
-	u32 nr_in_bufs = 0;
 	int rc = 0;
-	int in_chan = 0;
-	u32 buf_size = 0;
 
 	if (!uci_handle)
 		return -EINVAL;
 
-	uci_ctxt = uci_handle->uci_ctxt;
-	in_chan = iminor(mhi_inode) + 1;
-	nr_in_bufs = uci_ctxt->chan_attrib[in_chan].nr_trbs;
-	buf_size = uci_ctxt->chan_attrib[in_chan].max_packet_size;
-
 	if (atomic_sub_return(1, &uci_handle->ref_count) == 0) {
 		uci_log(UCI_DBG_DBG,
 				"Last client left, closing channel 0x%x\n",
@@ -418,6 +785,8 @@
 		if (atomic_read(&uci_handle->mhi_chans_open)) {
 			atomic_set(&uci_handle->mhi_chans_open, 0);
 
+			if (!(uci_handle->f_flags & O_SYNC))
+				kfree(uci_handle->wreqs);
 			mutex_lock(&uci_handle->out_chan_lock);
 			rc = mhi_dev_close_channel(uci_handle->out_handle);
 			wake_up(&uci_handle->write_wq);
@@ -553,7 +922,6 @@
 	struct mutex *mutex;
 	ssize_t bytes_copied = 0;
 	u32 addr_offset = 0;
-	void *local_buf = NULL;
 	struct mhi_req ureq;
 
 	if (!file || !ubuf || !uspace_buf_size ||
@@ -569,44 +937,19 @@
 	ureq.client = client_handle;
 	ureq.buf = uci_handle->in_buf_list[0].addr;
 	ureq.len = uci_handle->in_buf_list[0].buf_size;
-	ureq.mode = IPA_DMA_SYNC;
+
 
 	uci_log(UCI_DBG_VERBOSE, "Client attempted read on chan %d\n",
 			ureq.chan);
 	do {
 		if (!uci_handle->pkt_loc &&
-				!atomic_read(&uci_ctxt.mhi_disabled)) {
-
-			bytes_avail = mhi_dev_read_channel(&ureq);
-
-			uci_log(UCI_DBG_VERBOSE,
-				"reading from mhi_core local_buf = %p",
-				local_buf);
-			uci_log(UCI_DBG_VERBOSE,
-					"buf_size = 0x%x bytes_read = 0x%x\n",
-					 ureq.len, bytes_avail);
-
-			if (bytes_avail < 0) {
-				uci_log(UCI_DBG_ERROR,
-				"Failed to read channel ret %d\n",
-					bytes_avail);
-				ret_val =  -EIO;
+			!atomic_read(&uci_ctxt.mhi_disabled)) {
+			ret_val = uci_handle->read(uci_handle, &ureq,
+							&bytes_avail);
+			if (ret_val)
 				goto error;
-			}
-
-			if (bytes_avail > 0) {
-				uci_handle->pkt_loc = (void *) ureq.buf;
-				uci_handle->pkt_size = ureq.actual_len;
-
+			if (bytes_avail > 0)
 				*bytes_pending = (loff_t)uci_handle->pkt_size;
-				uci_log(UCI_DBG_VERBOSE,
-					"Got pkt of sz 0x%x at adr %p, ch %d\n",
-					uci_handle->pkt_size,
-					ureq.buf, ureq.chan);
-			} else {
-				uci_handle->pkt_loc = 0;
-				uci_handle->pkt_size = 0;
-			}
 		}
 		if (bytes_avail == 0) {
 
@@ -615,7 +958,10 @@
 				"No data read_data_ready %d, chan %d\n",
 				atomic_read(&uci_handle->read_data_ready),
 				ureq.chan);
-
+			if (uci_handle->f_flags & (O_NONBLOCK | O_NDELAY)) {
+				ret_val = -EAGAIN;
+				goto error;
+			}
 			ret_val = wait_event_interruptible(uci_handle->read_wq,
 				(!mhi_dev_channel_isempty(client_handle)));
 
@@ -719,10 +1065,10 @@
 	mutex_lock(&uci_handle->out_chan_lock);
 	while (!ret_val) {
 		ret_val = mhi_uci_send_packet(&uci_handle->out_handle,
-				(void *)buf, count, 1);
+						buf, count);
 		if (ret_val < 0) {
 			uci_log(UCI_DBG_ERROR,
-				"Error while writing data to MHI, chan %d, buf %p, size %d\n",
+				"Error while writing data to MHI, chan %d, buf %pK, size %d\n",
 				chan, (void *)buf, count);
 			ret_val = -EIO;
 			break;
@@ -732,6 +1078,8 @@
 				"No descriptors available, did we poll, chan %d?\n",
 				chan);
 			mutex_unlock(&uci_handle->out_chan_lock);
+			if (uci_handle->f_flags & (O_NONBLOCK | O_NDELAY))
+				return -EAGAIN;
 			ret_val = wait_event_interruptible(uci_handle->write_wq,
 				!mhi_dev_channel_isempty(
 					uci_handle->out_handle));
@@ -748,60 +1096,6 @@
 	return ret_val;
 }
 
-static int uci_init_client_attributes(struct mhi_uci_ctxt_t *uci_ctxt)
-{
-	u32 i = 0;
-	u32 data_size = TRB_MAX_DATA_SIZE;
-	u32 index = 0;
-	struct uci_client *client;
-	struct chan_attr *chan_attrib = NULL;
-
-	for (i = 0; i < ARRAY_SIZE(uci_ctxt->chan_attrib); i++) {
-		chan_attrib = &uci_ctxt->chan_attrib[i];
-		switch (i) {
-		case MHI_CLIENT_LOOPBACK_OUT:
-		case MHI_CLIENT_LOOPBACK_IN:
-		case MHI_CLIENT_SAHARA_OUT:
-		case MHI_CLIENT_SAHARA_IN:
-		case MHI_CLIENT_EFS_OUT:
-		case MHI_CLIENT_EFS_IN:
-		case MHI_CLIENT_MBIM_OUT:
-		case MHI_CLIENT_MBIM_IN:
-		case MHI_CLIENT_QMI_OUT:
-		case MHI_CLIENT_QMI_IN:
-		case MHI_CLIENT_IP_CTRL_0_OUT:
-		case MHI_CLIENT_IP_CTRL_0_IN:
-		case MHI_CLIENT_IP_CTRL_1_OUT:
-		case MHI_CLIENT_IP_CTRL_1_IN:
-		case MHI_CLIENT_DUN_OUT:
-		case MHI_CLIENT_DUN_IN:
-			chan_attrib->uci_ownership = 1;
-			break;
-		default:
-			chan_attrib->uci_ownership = 0;
-			break;
-		}
-		if (chan_attrib->uci_ownership) {
-			chan_attrib->chan_id = i;
-			chan_attrib->max_packet_size = data_size;
-			index = CHAN_TO_CLIENT(i);
-			client = &uci_ctxt->client_handles[index];
-			chan_attrib->nr_trbs = 9;
-			client->in_buf_list =
-			      kmalloc(sizeof(struct mhi_dev_iov) *
-					      chan_attrib->nr_trbs,
-					GFP_KERNEL);
-			if (client->in_buf_list == NULL)
-				return -ENOMEM;
-		}
-		if (i % 2 == 0)
-			chan_attrib->dir = MHI_DIR_OUT;
-		else
-			chan_attrib->dir = MHI_DIR_IN;
-	}
-	return 0;
-}
-
 void uci_ctrl_update(struct mhi_dev_client_cb_reason *reason)
 {
 	struct uci_ctrl *uci_ctrl_handle = NULL;
@@ -846,12 +1140,11 @@
 {
 	init_waitqueue_head(&mhi_client->read_wq);
 	init_waitqueue_head(&mhi_client->write_wq);
-	mhi_client->out_chan = index * 2 + 1;
-	mhi_client->in_chan = index * 2;
 	mhi_client->client_index = index;
 
 	mutex_init(&mhi_client->in_chan_lock);
 	mutex_init(&mhi_client->out_chan_lock);
+	spin_lock_init(&mhi_client->wr_req_lock);
 
 	uci_log(UCI_DBG_DBG, "Registering chan %d.\n", mhi_client->out_chan);
 	return 0;
@@ -921,6 +1214,111 @@
 #endif
 };
 
+static int uci_device_create(struct uci_client *client)
+{
+	int r;
+	int n;
+	ssize_t dst_size;
+	unsigned int client_index;
+	static char device_name[MAX_DEVICE_NAME_SIZE];
+
+	client_index = CHAN_TO_CLIENT(client->out_chan);
+	if (uci_ctxt.client_handles[client_index].dev)
+		return -EEXIST;
+
+	cdev_init(&uci_ctxt.cdev[client_index], &mhi_uci_client_fops);
+	uci_ctxt.cdev[client_index].owner = THIS_MODULE;
+	r = cdev_add(&uci_ctxt.cdev[client_index],
+		uci_ctxt.start_ctrl_nr + client_index, 1);
+	if (IS_ERR_VALUE(r)) {
+		uci_log(UCI_DBG_ERROR,
+			"Failed to add cdev for client %d, ret 0x%x\n",
+			client_index, r);
+		return r;
+	}
+	if (!client->in_chan_attr->device_name) {
+		n = snprintf(device_name, sizeof(device_name),
+			DEVICE_NAME "_pipe_%d", CLIENT_TO_CHAN(client_index));
+		if (n >= sizeof(device_name)) {
+			uci_log(UCI_DBG_ERROR, "Device name buf too short\n");
+			r = -E2BIG;
+			goto error;
+		}
+	} else {
+		dst_size = strscpy(device_name,
+				client->in_chan_attr->device_name,
+				sizeof(device_name));
+		if (dst_size <= 0) {
+			uci_log(UCI_DBG_ERROR, "Device name buf too short\n");
+			r = dst_size;
+			goto error;
+		}
+	}
+
+	uci_ctxt.client_handles[client_index].dev =
+		device_create(uci_ctxt.mhi_uci_class, NULL,
+				uci_ctxt.start_ctrl_nr + client_index,
+				NULL, device_name);
+	if (IS_ERR(uci_ctxt.client_handles[client_index].dev)) {
+		uci_log(UCI_DBG_ERROR,
+			"Failed to create device for client %d\n",
+			client_index);
+		r = -EIO;
+		goto error;
+	}
+
+	uci_log(UCI_DBG_INFO,
+		"Created device with class 0x%pK and ctrl number %d\n",
+		uci_ctxt.mhi_uci_class,
+		uci_ctxt.start_ctrl_nr + client_index);
+
+	return 0;
+
+error:
+	cdev_del(&uci_ctxt.cdev[client_index]);
+	return r;
+}
+
+static void mhi_uci_client_cb(struct mhi_dev_client_cb_data *cb_data)
+{
+	struct uci_client *client = cb_data->user_data;
+
+	uci_log(UCI_DBG_VERBOSE, " Rcvd MHI cb for channel %d, state %d\n",
+		cb_data->channel, cb_data->ctrl_info);
+
+	if (cb_data->ctrl_info == MHI_STATE_CONNECTED)
+		uci_device_create(client);
+}
+
+static int uci_init_client_attributes(struct mhi_uci_ctxt_t *uci_ctxt)
+{
+	u32 i;
+	u32 index;
+	struct uci_client *client;
+	const struct chan_attr *chan_attrib;
+
+	for (i = 0; i < ARRAY_SIZE(uci_chan_attr_table); i += 2) {
+		chan_attrib = &uci_chan_attr_table[i];
+		index = CHAN_TO_CLIENT(chan_attrib->chan_id);
+		client = &uci_ctxt->client_handles[index];
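+		/* Table entries come in OUT/IN pairs; ++ selects the IN attr */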
+		client->out_chan_attr = chan_attrib;
+		client->in_chan_attr = ++chan_attrib;
+		client->in_chan = index * 2;
+		client->out_chan = index * 2 + 1;
+		client->in_buf_list =
+			kcalloc(chan_attrib->nr_trbs,
+			sizeof(struct mhi_dev_iov),
+			GFP_KERNEL);
+		if (!client->in_buf_list)
+			return -ENOMEM;
+		/* Register callback with MHI if requested */
+		if (client->out_chan_attr->register_cb)
+			mhi_register_state_cb(mhi_uci_client_cb, client,
+						client->out_chan);
+	}
+	return 0;
+}
+
 int mhi_uci_init(void)
 {
 	u32 i = 0;
@@ -949,16 +1347,14 @@
 	uci_log(UCI_DBG_INFO, "Registering for MHI events.\n");
 
 	for (i = 0; i < MHI_SOFTWARE_CLIENT_LIMIT; i++) {
-		if (uci_ctxt.chan_attrib[i * 2].uci_ownership) {
-			mhi_client = &uci_ctxt.client_handles[i];
-
-			r = mhi_register_client(mhi_client, i);
-
-			if (r) {
-				uci_log(UCI_DBG_CRITICAL,
-					"Failed to reg client %d ret %d\n",
-					r, i);
-			}
+		mhi_client = &uci_ctxt.client_handles[i];
+		if (!mhi_client->in_chan_attr)
+			continue;
+		r = mhi_register_client(mhi_client, i);
+		if (r) {
+			uci_log(UCI_DBG_CRITICAL,
+				"Failed to reg client %d ret %d\n",
+				r, i);
 		}
 	}
 
@@ -992,30 +1388,19 @@
 
 	uci_log(UCI_DBG_INFO, "Setting up device nodes.\n");
 	for (i = 0; i < MHI_SOFTWARE_CLIENT_LIMIT; i++) {
-		if (uci_ctxt.chan_attrib[i*2].uci_ownership) {
-			cdev_init(&uci_ctxt.cdev[i], &mhi_uci_client_fops);
-			uci_ctxt.cdev[i].owner = THIS_MODULE;
-			r = cdev_add(&uci_ctxt.cdev[i],
-					uci_ctxt.start_ctrl_nr + i, 1);
-			if (IS_ERR_VALUE(r)) {
-				uci_log(UCI_DBG_ERROR,
-					"Failed to add cdev %d, ret 0x%x\n",
-					i, r);
-				goto failed_char_add;
-			}
-
-			uci_ctxt.client_handles[i].dev =
-				device_create(uci_ctxt.mhi_uci_class, NULL,
-						uci_ctxt.start_ctrl_nr + i,
-						NULL, DEVICE_NAME "_pipe_%d",
-						i * 2);
-			if (IS_ERR(uci_ctxt.client_handles[i].dev)) {
-				uci_log(UCI_DBG_ERROR,
-						"Failed to add cdev %d\n", i);
-				cdev_del(&uci_ctxt.cdev[i]);
-				goto failed_device_create;
-			}
-		}
+		mhi_client = &uci_ctxt.client_handles[i];
+		if (!mhi_client->in_chan_attr)
+			continue;
+		/*
+		 * Delay device node creation until the callback for
+		 * this client's channels is called by the MHI driver,
+		 * if one is registered.
+		 */
+		if (mhi_client->in_chan_attr->register_cb)
+			continue;
+		ret_val = uci_device_create(mhi_client);
+		if (ret_val)
+			goto failed_device_create;
 	}
 
 	/* Control node */
@@ -1052,7 +1437,6 @@
 
 	return 0;
 
-failed_char_add:
 failed_device_create:
 	while (--i >= 0) {
 		cdev_del(&uci_ctxt.cdev[i]);
diff --git a/drivers/platform/msm/qcom-geni-se.c b/drivers/platform/msm/qcom-geni-se.c
index 6ce2161..267ed8d 100644
--- a/drivers/platform/msm/qcom-geni-se.c
+++ b/drivers/platform/msm/qcom-geni-se.c
@@ -317,7 +317,9 @@
 
 static int geni_se_select_dma_mode(void __iomem *base)
 {
+	int proto = get_se_proto(base);
 	unsigned int geni_dma_mode = 0;
+	unsigned int common_geni_m_irq_en;
 
 	geni_write_reg(0, base, SE_GSI_EVENT_EN);
 	geni_write_reg(0xFFFFFFFF, base, SE_GENI_M_IRQ_CLEAR);
@@ -326,6 +328,12 @@
 	geni_write_reg(0xFFFFFFFF, base, SE_DMA_RX_IRQ_CLR);
 	geni_write_reg(0xFFFFFFFF, base, SE_IRQ_EN);
 
+	common_geni_m_irq_en = geni_read_reg(base, SE_GENI_M_IRQ_EN);
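+	/* Mask TX/RX FIFO watermark IRQs for non-UART protocols in DMA mode */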
+	if (proto != UART)
+		common_geni_m_irq_en &=
+			~(M_TX_FIFO_WATERMARK_EN | M_RX_FIFO_WATERMARK_EN);
+
+	geni_write_reg(common_geni_m_irq_en, base, SE_GENI_M_IRQ_EN);
 	geni_dma_mode = geni_read_reg(base, SE_GENI_DMA_MODE_EN);
 	geni_dma_mode |= GENI_DMA_MODE_EN;
 	geni_write_reg(geni_dma_mode, base, SE_GENI_DMA_MODE_EN);
@@ -667,9 +675,9 @@
 						geni_se_dev->cur_ab,
 						geni_se_dev->cur_ib);
 	GENI_SE_DBG(geni_se_dev->log_ctx, false, NULL,
-		    "%s: %lu:%lu (%lu:%lu) %d\n", __func__,
-		    geni_se_dev->cur_ab, geni_se_dev->cur_ib,
-		    rsc->ab, rsc->ib, bus_bw_update);
+			"%s: %s: cur_ab_ib(%lu:%lu) req_ab_ib(%lu:%lu) %d\n",
+			__func__, dev_name(rsc->ctrl_dev), geni_se_dev->cur_ab,
+			geni_se_dev->cur_ib, rsc->ab, rsc->ib, bus_bw_update);
 	mutex_unlock(&geni_se_dev->geni_dev_lock);
 	return ret;
 }
@@ -765,9 +773,9 @@
 						geni_se_dev->cur_ab,
 						geni_se_dev->cur_ib);
 	GENI_SE_DBG(geni_se_dev->log_ctx, false, NULL,
-		    "%s: %lu:%lu (%lu:%lu) %d\n", __func__,
-		    geni_se_dev->cur_ab, geni_se_dev->cur_ib,
-		    rsc->ab, rsc->ib, bus_bw_update);
+			"%s: %s: cur_ab_ib(%lu:%lu) req_ab_ib(%lu:%lu) %d\n",
+			__func__, dev_name(rsc->ctrl_dev), geni_se_dev->cur_ab,
+			geni_se_dev->cur_ib, rsc->ab, rsc->ib, bus_bw_update);
 	mutex_unlock(&geni_se_dev->geni_dev_lock);
 	return ret;
 }
diff --git a/drivers/platform/msm/usb_bam.c b/drivers/platform/msm/usb_bam.c
index 9374bc8..aafb5c8 100644
--- a/drivers/platform/msm/usb_bam.c
+++ b/drivers/platform/msm/usb_bam.c
@@ -1119,7 +1119,7 @@
 					&ctx->usb_bam_connections[idx];
 	unsigned long peer_bam_handle;
 
-	ret = sps_phy2h(pipe_connect->dst_phy_addr, &peer_bam_handle);
+	ret = sps_phy2h(pipe_connect->src_phy_addr, &peer_bam_handle);
 	if (ret) {
 		log_event_err("%s: sps_phy2h failed (src BAM) %d\n",
 				__func__, ret);
diff --git a/drivers/platform/x86/asus-wireless.c b/drivers/platform/x86/asus-wireless.c
index 1871602..c098328 100644
--- a/drivers/platform/x86/asus-wireless.c
+++ b/drivers/platform/x86/asus-wireless.c
@@ -145,8 +145,10 @@
 {
 	struct asus_wireless_data *data = acpi_driver_data(adev);
 
-	if (data->wq)
+	if (data->wq) {
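+		/* Unregister the LED before destroying the workqueue it uses */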
+		devm_led_classdev_unregister(&adev->dev, &data->led);
 		destroy_workqueue(data->wq);
+	}
 	return 0;
 }
 
diff --git a/drivers/power/supply/bq2415x_charger.c b/drivers/power/supply/bq2415x_charger.c
index 73e2f0b..c4770a9 100644
--- a/drivers/power/supply/bq2415x_charger.c
+++ b/drivers/power/supply/bq2415x_charger.c
@@ -1569,6 +1569,11 @@
 		acpi_id =
 			acpi_match_device(client->dev.driver->acpi_match_table,
 					  &client->dev);
+		if (!acpi_id) {
+			dev_err(&client->dev, "failed to match device name\n");
+			ret = -ENODEV;
+			goto error_1;
+		}
 		name = kasprintf(GFP_KERNEL, "%s-%d", acpi_id->id, num);
 	}
 	if (!name) {
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 3dd1722..8281c41 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -331,6 +331,14 @@
 	POWER_SUPPLY_ATTR(allow_hvdcp3),
 	POWER_SUPPLY_ATTR(hvdcp_opti_allowed),
 	POWER_SUPPLY_ATTR(max_pulse_allowed),
+	POWER_SUPPLY_ATTR(ignore_false_negative_isense),
+	POWER_SUPPLY_ATTR(battery_info),
+	POWER_SUPPLY_ATTR(battery_info_id),
+	POWER_SUPPLY_ATTR(enable_jeita_detection),
+	POWER_SUPPLY_ATTR(esr_actual),
+	POWER_SUPPLY_ATTR(esr_nominal),
+	POWER_SUPPLY_ATTR(soh),
+	POWER_SUPPLY_ATTR(qc_opti_disable),
 	/* Local extensions of type int64_t */
 	POWER_SUPPLY_ATTR(charge_counter_ext),
 	/* Properties of type `const char *' */
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 275b982..e8d91ae 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -78,6 +78,7 @@
 	struct class		qcom_batt_class;
 	struct wakeup_source	*pl_ws;
 	struct notifier_block	nb;
+	bool			pl_disable;
 };
 
 struct pl_data *the_chip;
@@ -88,6 +89,7 @@
 
 enum {
 	AICL_RERUN_WA_BIT	= BIT(0),
+	FORCE_INOV_DISABLE_BIT	= BIT(1),
 };
 
 static int debug_mask;
@@ -848,6 +850,12 @@
 		chip->pl_settled_ua = 0;
 	}
 
+	/* notify parallel state change */
+	if (chip->pl_psy && (chip->pl_disable != pl_disable)) {
+		power_supply_changed(chip->pl_psy);
+		chip->pl_disable = (bool)pl_disable;
+	}
+
 	pl_dbg(chip, PR_PARALLEL, "parallel charging %s\n",
 		   pl_disable ? "disabled" : "enabled");
 
@@ -916,7 +924,8 @@
 	chip->pl_mode = pval.intval;
 
 	/* Disable autonomous voltage increments for USBIN-USBIN */
-	if (IS_USBIN(chip->pl_mode)) {
+	if (IS_USBIN(chip->pl_mode)
+			&& (chip->wa_flags & FORCE_INOV_DISABLE_BIT)) {
 		if (!chip->hvdcp_hw_inov_dis_votable)
 			chip->hvdcp_hw_inov_dis_votable =
 					find_votable("HVDCP_HW_INOV_DIS");
@@ -1041,7 +1050,6 @@
 	else
 		vote(chip->pl_enable_votable_indirect, USBIN_I_VOTER, true, 0);
 
-	rerun_election(chip->fcc_votable);
 
 	if (IS_USBIN(chip->pl_mode)) {
 		/*
@@ -1200,7 +1208,9 @@
 	switch (smb_version) {
 	case PMI8998_SUBTYPE:
 	case PM660_SUBTYPE:
-		chip->wa_flags = AICL_RERUN_WA_BIT;
+		chip->wa_flags = AICL_RERUN_WA_BIT | FORCE_INOV_DISABLE_BIT;
+		break;
+	case PMI632_SUBTYPE:
 		break;
 	default:
 		break;
@@ -1301,6 +1311,7 @@
 		goto unreg_notifier;
 	}
 
+	chip->pl_disable = true;
 	chip->qcom_batt_class.name = "qcom-battery",
 	chip->qcom_batt_class.owner = THIS_MODULE,
 	chip->qcom_batt_class.class_attrs = pl_attributes;
diff --git a/drivers/power/supply/qcom/fg-alg.c b/drivers/power/supply/qcom/fg-alg.c
index f1ce5b3..f3f2c66 100644
--- a/drivers/power/supply/qcom/fg-alg.c
+++ b/drivers/power/supply/qcom/fg-alg.c
@@ -16,11 +16,38 @@
 #include <linux/kernel.h>
 #include <linux/mutex.h>
 #include <linux/power_supply.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
 #include "fg-alg.h"
 
 #define FULL_SOC_RAW		255
+#define FULL_BATT_SOC		GENMASK(31, 0)
 #define CAPACITY_DELTA_DECIPCT	500
 
+#define CENTI_ICORRECT_C0	105
+#define CENTI_ICORRECT_C1	20
+
+#define HOURS_TO_SECONDS	3600
+#define OCV_SLOPE_UV		10869
+#define MILLI_UNIT		1000
+#define MICRO_UNIT		1000000
+#define NANO_UNIT		1000000000
+
+#define DEFAULT_TTF_RUN_PERIOD_MS	10000
+#define DEFAULT_TTF_ITERM_DELTA_MA	200
+
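+/* Natural log lookup table: x and ln(x) are both scaled by 1000 */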
+static const struct ttf_pt ttf_ln_table[] = {
+	{ 1000,		0 },
+	{ 2000,		693 },
+	{ 4000,		1386 },
+	{ 6000,		1792 },
+	{ 8000,		2079 },
+	{ 16000,	2773 },
+	{ 32000,	3466 },
+	{ 64000,	4159 },
+	{ 128000,	4852 },
+};
+
 /* Cycle counter APIs */
 
 /**
@@ -164,13 +191,13 @@
 }
 
 /**
- * get_cycle_count -
+ * get_bucket_cycle_count -
  * @counter: Cycle counter object
  *
  * Returns the cycle counter for a SOC bucket.
  *
  */
-int get_cycle_count(struct cycle_counter *counter)
+static int get_bucket_cycle_count(struct cycle_counter *counter)
 {
 	int count;
 
@@ -187,6 +214,68 @@
 }
 
 /**
+ * get_cycle_counts -
+ * @counter: Cycle counter object
+ * @buf: Pointer used to return the bucket cycle counts as a formatted string
+ *
+ * Get the cycle counts for all buckets as a space-separated string
+ */
+int get_cycle_counts(struct cycle_counter *counter, const char **buf)
+{
+	int i, rc, len = 0;
+
+	for (i = 1; i <= BUCKET_COUNT; i++) {
+		counter->id = i;
+		rc = get_bucket_cycle_count(counter);
+		if (rc < 0) {
+			pr_err("Couldn't get cycle count rc=%d\n", rc);
+			return rc;
+		}
+
+		if (sizeof(counter->str_buf) - len < 8) {
+			pr_err("Invalid length %d\n", len);
+			return -EINVAL;
+		}
+
+		len += snprintf(counter->str_buf+len, 8, "%d ", rc);
+	}
+
+	counter->str_buf[len] = '\0';
+	*buf = counter->str_buf;
+	return 0;
+}
+
+/**
+ * get_cycle_count -
+ * @counter: Cycle counter object
+ * @count: Average cycle count returned to the caller
+ *
+ * Get average cycle count for all buckets
+ */
+int get_cycle_count(struct cycle_counter *counter, int *count)
+{
+	int i, rc, temp = 0;
+
+	for (i = 1; i <= BUCKET_COUNT; i++) {
+		counter->id = i;
+		rc = get_bucket_cycle_count(counter);
+		if (rc < 0) {
+			pr_err("Couldn't get cycle count rc=%d\n", rc);
+			return rc;
+		}
+
+		temp += rc;
+	}
+
+	/*
+	 * Normalize the counter across each bucket so that we can get
+	 * the overall charge cycle count.
+	 */
+	*count = temp / BUCKET_COUNT;
+	return 0;
+}
+
+/**
  * cycle_count_init -
  * @counter: Cycle counter object
  *
@@ -289,7 +378,7 @@
  */
 static int cap_learning_process_full_data(struct cap_learning *cl)
 {
-	int rc, cc_soc_sw, cc_soc_delta_pct;
+	int rc, cc_soc_sw, cc_soc_delta_centi_pct;
 	int64_t delta_cap_uah;
 
 	rc = cl->get_cc_soc(cl->data, &cc_soc_sw);
@@ -298,20 +387,21 @@
 		return rc;
 	}
 
-	cc_soc_delta_pct =
-		div64_s64((int64_t)(cc_soc_sw - cl->init_cc_soc_sw) * 100,
+	cc_soc_delta_centi_pct =
+		div64_s64((int64_t)(cc_soc_sw - cl->init_cc_soc_sw) * 10000,
 			cl->cc_soc_max);
 
 	/* If the delta is < 50%, then skip processing full data */
-	if (cc_soc_delta_pct < 50) {
-		pr_err("cc_soc_delta_pct: %d\n", cc_soc_delta_pct);
+	if (cc_soc_delta_centi_pct < 5000) {
+		pr_err("cc_soc_delta_centi_pct: %d\n", cc_soc_delta_centi_pct);
 		return -ERANGE;
 	}
 
-	delta_cap_uah = div64_s64(cl->learned_cap_uah * cc_soc_delta_pct, 100);
+	delta_cap_uah = div64_s64(cl->learned_cap_uah * cc_soc_delta_centi_pct,
+				 10000);
 	cl->final_cap_uah = cl->init_cap_uah + delta_cap_uah;
-	pr_debug("Current cc_soc=%d cc_soc_delta_pct=%d total_cap_uah=%lld\n",
-		cc_soc_sw, cc_soc_delta_pct, cl->final_cap_uah);
+	pr_debug("Current cc_soc=%d cc_soc_delta_centi_pct=%d total_cap_uah=%lld\n",
+		cc_soc_sw, cc_soc_delta_centi_pct, cl->final_cap_uah);
 	return 0;
 }
 
@@ -339,8 +429,8 @@
 		return -EINVAL;
 	}
 
-	cl->init_cap_uah = div64_s64(cl->learned_cap_uah * batt_soc_msb,
-					FULL_SOC_RAW);
+	cl->init_cap_uah = div64_s64(cl->learned_cap_uah * batt_soc,
+					FULL_BATT_SOC);
 
 	if (cl->prime_cc_soc) {
 		/*
@@ -606,3 +696,557 @@
 	mutex_init(&cl->lock);
 	return 0;
 }
+
+/* Time-to-full/empty algorithm helper functions */
+
+static void ttf_circ_buf_add(struct ttf_circ_buf *buf, int val)
+{
+	buf->arr[buf->head] = val;
+	buf->head = (buf->head + 1) % ARRAY_SIZE(buf->arr);
+	buf->size = min(++buf->size, (int)ARRAY_SIZE(buf->arr));
+}
+
+static void ttf_circ_buf_clr(struct ttf_circ_buf *buf)
+{
+	buf->size = 0;
+	buf->head = 0;
+	memset(buf->arr, 0, sizeof(buf->arr));
+}
+
+static int cmp_int(const void *a, const void *b)
+{
+	return *(int *)a - *(int *)b;
+}
+
+static int ttf_circ_buf_median(struct ttf_circ_buf *buf, int *median)
+{
+	int *temp;
+
+	if (buf->size == 0)
+		return -ENODATA;
+
+	if (buf->size == 1) {
+		*median = buf->arr[0];
+		return 0;
+	}
+
+	temp = kmalloc_array(buf->size, sizeof(*temp), GFP_KERNEL);
+	if (!temp)
+		return -ENOMEM;
+
+	memcpy(temp, buf->arr, buf->size * sizeof(*temp));
+	sort(temp, buf->size, sizeof(*temp), cmp_int, NULL);
+
+	if (buf->size % 2)
+		*median = temp[buf->size / 2];
+	else
+		*median = (temp[buf->size / 2 - 1] + temp[buf->size / 2]) / 2;
+
+	kfree(temp);
+	return 0;
+}
+
+static int ttf_lerp(const struct ttf_pt *pts, size_t tablesize,
+						s32 input, s32 *output)
+{
+	int i;
+	s64 temp;
+
+	if (pts == NULL) {
+		pr_err("Table is NULL\n");
+		return -EINVAL;
+	}
+
+	if (tablesize < 1) {
+		pr_err("Table has no entries\n");
+		return -ENOENT;
+	}
+
+	if (tablesize == 1) {
+		*output = pts[0].y;
+		return 0;
+	}
+
+	if (pts[0].x > pts[1].x) {
+		pr_err("Table is not in ascending order\n");
+		return -EINVAL;
+	}
+
+	if (input <= pts[0].x) {
+		*output = pts[0].y;
+		return 0;
+	}
+
+	if (input >= pts[tablesize - 1].x) {
+		*output = pts[tablesize - 1].y;
+		return 0;
+	}
+
+	for (i = 1; i < tablesize; i++) {
+		if (input >= pts[i].x)
+			continue;
+
+		temp = ((s64)pts[i].y - pts[i - 1].y) *
+						((s64)input - pts[i - 1].x);
+		temp = div_s64(temp, pts[i].x - pts[i - 1].x);
+		*output = temp + pts[i - 1].y;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int get_time_to_full_locked(struct ttf *ttf, int *val)
+{
+	int rc, ibatt_avg, vbatt_avg, rbatt = 0, msoc = 0, act_cap_mah = 0,
+		i_cc2cv = 0, soc_cc2cv, tau, divisor, iterm = 0, ttf_mode = 0,
+		i, soc_per_step, msoc_this_step, msoc_next_step,
+		ibatt_this_step, t_predicted_this_step, ttf_slope,
+		t_predicted_cv, t_predicted = 0, charge_type = 0,
+		float_volt_uv = 0, valid = 0, charge_status = 0;
+	s64 delta_ms;
+
+	rc = ttf->get_ttf_param(ttf->data, TTF_VALID, &valid);
+	if (rc < 0) {
+		pr_err("failed to get ttf_valid rc=%d\n", rc);
+		return rc;
+	}
+
+	if (!valid) {
+		*val = -EINVAL;
+		return 0;
+	}
+
+	rc =  ttf->get_ttf_param(ttf->data, TTF_CHG_STATUS, &charge_status);
+	if (rc < 0) {
+		pr_err("failed to get charge-status rc=%d\n", rc);
+		return rc;
+	}
+
+	if (charge_status != POWER_SUPPLY_STATUS_CHARGING) {
+		*val = -EINVAL;
+		return 0;
+	}
+
+	rc = ttf->get_ttf_param(ttf->data, TTF_MSOC, &msoc);
+	if (rc < 0) {
+		pr_err("failed to get msoc rc=%d\n", rc);
+		return rc;
+	}
+	pr_debug("TTF: msoc=%d\n", msoc);
+
+	/* the battery is considered full if the SOC is 100% */
+	if (msoc >= 100) {
+		*val = 0;
+		return 0;
+	}
+
+	rc = ttf->get_ttf_param(ttf->data, TTF_MODE, &ttf_mode);
+
+	/* when switching TTF algorithms the TTF needs to be reset */
+	if (ttf->mode != ttf_mode) {
+		ttf_circ_buf_clr(&ttf->ibatt);
+		ttf_circ_buf_clr(&ttf->vbatt);
+		ttf->last_ttf = 0;
+		ttf->last_ms = 0;
+		ttf->mode = ttf_mode;
+	}
+
+	/* at least 10 samples are required to produce a stable IBATT */
+	if (ttf->ibatt.size < MAX_TTF_SAMPLES) {
+		*val = -1;
+		return 0;
+	}
+
+	rc = ttf_circ_buf_median(&ttf->ibatt, &ibatt_avg);
+	if (rc < 0) {
+		pr_err("failed to get IBATT AVG rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = ttf_circ_buf_median(&ttf->vbatt, &vbatt_avg);
+	if (rc < 0) {
+		pr_err("failed to get VBATT AVG rc=%d\n", rc);
+		return rc;
+	}
+
+	ibatt_avg = -ibatt_avg / MILLI_UNIT;
+	vbatt_avg /= MILLI_UNIT;
+
+	rc = ttf->get_ttf_param(ttf->data, TTF_ITERM, &iterm);
+	if (rc < 0) {
+		pr_err("failed to get iterm rc=%d\n", rc);
+		return rc;
+	}
+	/* clamp ibatt_avg to iterm */
+	if (ibatt_avg < abs(iterm))
+		ibatt_avg = abs(iterm);
+
+	rc =  ttf->get_ttf_param(ttf->data, TTF_RBATT, &rbatt);
+	if (rc < 0) {
+		pr_err("failed to get battery resistance rc=%d\n", rc);
+		return rc;
+	}
+	rbatt /= MILLI_UNIT;
+
+	rc =  ttf->get_ttf_param(ttf->data, TTF_FCC, &act_cap_mah);
+	if (rc < 0) {
+		pr_err("failed to get ACT_BATT_CAP rc=%d\n", rc);
+		return rc;
+	}
+
+	pr_debug(" TTF: ibatt_avg=%d vbatt_avg=%d rbatt=%d act_cap_mah=%d\n",
+				ibatt_avg, vbatt_avg, rbatt, act_cap_mah);
+
+	rc =  ttf->get_ttf_param(ttf->data, TTF_VFLOAT, &float_volt_uv);
+	if (rc < 0) {
+		pr_err("failed to get float_volt_uv rc=%d\n", rc);
+		return rc;
+	}
+
+	rc =  ttf->get_ttf_param(ttf->data, TTF_CHG_TYPE, &charge_type);
+	if (rc < 0) {
+		pr_err("failed to get charge_type rc=%d\n", rc);
+		return rc;
+	}
+	/* estimated battery current at the CC to CV transition */
+	switch (ttf->mode) {
+	case TTF_MODE_NORMAL:
+		i_cc2cv = ibatt_avg * vbatt_avg /
+			max(MILLI_UNIT, float_volt_uv / MILLI_UNIT);
+		break;
+	case TTF_MODE_QNOVO:
+		i_cc2cv = min(
+			ttf->cc_step.arr[MAX_CC_STEPS - 1] / MILLI_UNIT,
+			ibatt_avg * vbatt_avg /
+			max(MILLI_UNIT, float_volt_uv / MILLI_UNIT));
+		break;
+	default:
+		pr_err("TTF mode %d is not supported\n", ttf->mode);
+		break;
+	}
+	pr_debug("TTF: i_cc2cv=%d\n", i_cc2cv);
+
+	/* if we are already in CV state then we can skip estimating CC */
+	if (charge_type == POWER_SUPPLY_CHARGE_TYPE_TAPER)
+		goto cv_estimate;
+
+	/* estimated SOC at the CC to CV transition */
+	soc_cc2cv = DIV_ROUND_CLOSEST(rbatt * i_cc2cv, OCV_SLOPE_UV);
+	soc_cc2cv = 100 - soc_cc2cv;
+	pr_debug("TTF: soc_cc2cv=%d\n", soc_cc2cv);
+
+	switch (ttf->mode) {
+	case TTF_MODE_NORMAL:
+		if (soc_cc2cv - msoc <= 0)
+			goto cv_estimate;
+
+		divisor = max(100, (ibatt_avg + i_cc2cv) / 2 * 100);
+		t_predicted = div_s64((s64)act_cap_mah * (soc_cc2cv - msoc) *
+						HOURS_TO_SECONDS, divisor);
+		break;
+	case TTF_MODE_QNOVO:
+		soc_per_step = 100 / MAX_CC_STEPS;
+		for (i = msoc / soc_per_step; i < MAX_CC_STEPS - 1; ++i) {
+			msoc_next_step = (i + 1) * soc_per_step;
+			if (i == msoc / soc_per_step)
+				msoc_this_step = msoc;
+			else
+				msoc_this_step = i * soc_per_step;
+
+			/* scale ibatt by 85% to account for discharge pulses */
+			ibatt_this_step = min(
+					ttf->cc_step.arr[i] / MILLI_UNIT,
+					ibatt_avg) * 85 / 100;
+			divisor = max(100, ibatt_this_step * 100);
+			t_predicted_this_step = div_s64((s64)act_cap_mah *
+					(msoc_next_step - msoc_this_step) *
+					HOURS_TO_SECONDS, divisor);
+			t_predicted += t_predicted_this_step;
+			pr_debug("TTF: [%d, %d] ma=%d t=%d\n",
+				msoc_this_step, msoc_next_step,
+				ibatt_this_step, t_predicted_this_step);
+		}
+		break;
+	default:
+		pr_err("TTF mode %d is not supported\n", ttf->mode);
+		break;
+	}
+
+cv_estimate:
+	pr_debug("TTF: t_predicted_cc=%d\n", t_predicted);
+
+	iterm = max(100, abs(iterm) + ttf->iterm_delta);
+	pr_debug("TTF: iterm=%d\n", iterm);
+
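+	/* tau = charge current / iterm (x1000), then ln(tau) via the table */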
+	if (charge_type == POWER_SUPPLY_CHARGE_TYPE_TAPER)
+		tau = max(MILLI_UNIT, ibatt_avg * MILLI_UNIT / iterm);
+	else
+		tau = max(MILLI_UNIT, i_cc2cv * MILLI_UNIT / iterm);
+
+	rc = ttf_lerp(ttf_ln_table, ARRAY_SIZE(ttf_ln_table), tau, &tau);
+	if (rc < 0) {
+		pr_err("failed to interpolate tau rc=%d\n", rc);
+		return rc;
+	}
+
+	/* tau is scaled linearly from 95% to 100% SOC */
+	if (msoc >= 95)
+		tau = tau * 2 * (100 - msoc) / 10;
+
+	pr_debug("TTF: tau=%d\n", tau);
+	t_predicted_cv = div_s64((s64)act_cap_mah * rbatt * tau *
+						HOURS_TO_SECONDS, NANO_UNIT);
+	pr_debug("TTF: t_predicted_cv=%d\n", t_predicted_cv);
+	t_predicted += t_predicted_cv;
+
+	pr_debug("TTF: t_predicted_prefilter=%d\n", t_predicted);
+	if (ttf->last_ms != 0) {
+		delta_ms = ktime_ms_delta(ktime_get_boottime(),
+					  ms_to_ktime(ttf->last_ms));
+		if (delta_ms > 10000) {
+			ttf_slope = div64_s64(
+				((s64)t_predicted - ttf->last_ttf) *
+				MICRO_UNIT, delta_ms);
+			if (ttf_slope > -100)
+				ttf_slope = -100;
+			else if (ttf_slope < -2000)
+				ttf_slope = -2000;
+
+			t_predicted = div_s64(
+				(s64)ttf_slope * delta_ms, MICRO_UNIT) +
+				ttf->last_ttf;
+			pr_debug("TTF: ttf_slope=%d\n", ttf_slope);
+		} else {
+			t_predicted = ttf->last_ttf;
+		}
+	}
+
+	/* clamp the ttf to 0 */
+	if (t_predicted < 0)
+		t_predicted = 0;
+
+	pr_debug("TTF: t_predicted_postfilter=%d\n", t_predicted);
+	*val = t_predicted;
+	return 0;
+}
+
+/**
+ * ttf_get_time_to_full -
+ * @ttf: ttf object
+ * @val: Average time to full returned to the caller
+ *
+ * Get the average time to fully charge the battery based on the current
+ * SOC, rbatt, battery voltage, charge current, etc.
+ */
+int ttf_get_time_to_full(struct ttf *ttf, int *val)
+{
+	int rc;
+
+	mutex_lock(&ttf->lock);
+	rc = get_time_to_full_locked(ttf, val);
+	mutex_unlock(&ttf->lock);
+
+	return rc;
+}
+
+static void ttf_work(struct work_struct *work)
+{
+	struct ttf *ttf = container_of(work,
+				struct ttf, ttf_work.work);
+	int rc, ibatt_now, vbatt_now, ttf_now, charge_status;
+	ktime_t ktime_now;
+
+	mutex_lock(&ttf->lock);
+	rc =  ttf->get_ttf_param(ttf->data, TTF_CHG_STATUS, &charge_status);
+	if (rc < 0) {
+		pr_err("failed to get charge_status rc=%d\n", rc);
+		goto end_work;
+	}
+	if (charge_status != POWER_SUPPLY_STATUS_CHARGING &&
+			charge_status != POWER_SUPPLY_STATUS_DISCHARGING)
+		goto end_work;
+
+	rc =  ttf->get_ttf_param(ttf->data, TTF_IBAT, &ibatt_now);
+	if (rc < 0) {
+		pr_err("failed to get battery current, rc=%d\n", rc);
+		goto end_work;
+	}
+
+	rc =  ttf->get_ttf_param(ttf->data, TTF_VBAT, &vbatt_now);
+	if (rc < 0) {
+		pr_err("failed to get battery voltage, rc=%d\n", rc);
+		goto end_work;
+	}
+
+	ttf_circ_buf_add(&ttf->ibatt, ibatt_now);
+	ttf_circ_buf_add(&ttf->vbatt, vbatt_now);
+
+	if (charge_status == POWER_SUPPLY_STATUS_CHARGING) {
+		rc = get_time_to_full_locked(ttf, &ttf_now);
+		if (rc < 0) {
+			pr_err("failed to get ttf, rc=%d\n", rc);
+			goto end_work;
+		}
+
+		/* keep the wake lock and prime the IBATT and VBATT buffers */
+		if (ttf_now < 0) {
+			/* delay for one FG cycle */
+			schedule_delayed_work(&ttf->ttf_work,
+					msecs_to_jiffies(1000));
+			mutex_unlock(&ttf->lock);
+			return;
+		}
+
+		/* update the TTF reference point every minute */
+		ktime_now = ktime_get_boottime();
+		if (ktime_ms_delta(ktime_now,
+				   ms_to_ktime(ttf->last_ms)) > 60000 ||
+				   ttf->last_ms == 0) {
+			ttf->last_ttf = ttf_now;
+			ttf->last_ms = ktime_to_ms(ktime_now);
+		}
+	}
+
+	/* recurse every 10 seconds */
+	schedule_delayed_work(&ttf->ttf_work, msecs_to_jiffies(ttf->period_ms));
+end_work:
+	ttf->awake_voter(ttf->data, false);
+	mutex_unlock(&ttf->lock);
+}
+
+/**
+ * ttf_get_time_to_empty -
+ * @ttf: ttf object
+ * @val: Average time to empty returned to the caller
+ *
+ * Get the average time to empty the battery based on the current SOC
+ * and the average battery current.
+ */
+int ttf_get_time_to_empty(struct ttf *ttf, int *val)
+{
+	int rc, ibatt_avg, msoc, act_cap_mah, divisor, valid = 0,
+		charge_status = 0;
+
+	rc = ttf->get_ttf_param(ttf->data, TTF_VALID, &valid);
+	if (rc < 0) {
+		pr_err("failed to get ttf_valid rc=%d\n", rc);
+		return rc;
+	}
+
+	if (!valid) {
+		*val = -EINVAL;
+		return 0;
+	}
+
+	rc =  ttf->get_ttf_param(ttf->data, TTF_CHG_STATUS, &charge_status);
+	if (rc < 0) {
+		pr_err("failed to get charge-status rc=%d\n", rc);
+		return rc;
+	}
+
+	if (charge_status == POWER_SUPPLY_STATUS_CHARGING) {
+		*val = -EINVAL;
+		return 0;
+	}
+
+	rc = ttf_circ_buf_median(&ttf->ibatt, &ibatt_avg);
+	if (rc < 0) {
+		/* try to get instantaneous current */
+		rc = ttf->get_ttf_param(ttf->data, TTF_IBAT, &ibatt_avg);
+		if (rc < 0) {
+			pr_err("failed to get battery current, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	ibatt_avg /= MILLI_UNIT;
+	/* clamp ibatt_avg to 100mA */
+	if (ibatt_avg < 100)
+		ibatt_avg = 100;
+
+	rc = ttf->get_ttf_param(ttf->data, TTF_MSOC, &msoc);
+	if (rc < 0) {
+		pr_err("Error in getting capacity, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = ttf->get_ttf_param(ttf->data, TTF_FCC, &act_cap_mah);
+	if (rc < 0) {
+		pr_err("Error in getting ACT_BATT_CAP, rc=%d\n", rc);
+		return rc;
+	}
+
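+	/* Scale ibatt_avg by (1.05 + 0.002 * msoc), i.e. 1.05x to 1.25x */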
+	divisor = CENTI_ICORRECT_C0 * 100 + CENTI_ICORRECT_C1 * msoc;
+	divisor = ibatt_avg * divisor / 100;
+	divisor = max(100, divisor);
+	*val = act_cap_mah * msoc * HOURS_TO_SECONDS / divisor;
+
+	pr_debug("TTF: ibatt_avg=%d msoc=%d act_cap_mah=%d TTE=%d\n",
+			ibatt_avg, msoc, act_cap_mah, *val);
+
+	return 0;
+}
+
+/**
+ * ttf_update -
+ * @ttf: ttf object
+ * @input_present: Indicator for input presence
+ *
+ * Called by FG/QG driver when there is a state change (Charging status, SOC)
+ *
+ */
+void ttf_update(struct ttf *ttf, bool input_present)
+{
+	int delay_ms;
+
+	if (ttf->input_present == input_present)
+		return;
+
+	ttf->input_present = input_present;
+	if (input_present)
+		/* wait 35 seconds for the input to settle */
+		delay_ms = 35000;
+	else
+		/* wait 5 seconds for current to settle during discharge */
+		delay_ms = 5000;
+
+	ttf->awake_voter(ttf->data, true);
+	cancel_delayed_work_sync(&ttf->ttf_work);
+	mutex_lock(&ttf->lock);
+	ttf_circ_buf_clr(&ttf->ibatt);
+	ttf_circ_buf_clr(&ttf->vbatt);
+	ttf->last_ttf = 0;
+	ttf->last_ms = 0;
+	mutex_unlock(&ttf->lock);
+	schedule_delayed_work(&ttf->ttf_work, msecs_to_jiffies(delay_ms));
+}
+
+/**
+ * ttf_tte_init -
+ * @ttf: Time to full object
+ *
+ * FG/QG drivers must call this during probe to validate the required
+ * parameters after allocating the ttf object.
+ *
+ */
+int ttf_tte_init(struct ttf *ttf)
+{
+	if (!ttf)
+		return -ENODEV;
+
+	if (!ttf->awake_voter || !ttf->get_ttf_param) {
+		pr_err("Insufficient functions for supporting ttf\n");
+		return -EINVAL;
+	}
+
+	if (!ttf->iterm_delta)
+		ttf->iterm_delta = DEFAULT_TTF_ITERM_DELTA_MA;
+	if (!ttf->period_ms)
+		ttf->period_ms = DEFAULT_TTF_RUN_PERIOD_MS;
+
+	mutex_init(&ttf->lock);
+	INIT_DELAYED_WORK(&ttf->ttf_work, ttf_work);
+
+	return 0;
+}
diff --git a/drivers/power/supply/qcom/fg-alg.h b/drivers/power/supply/qcom/fg-alg.h
index 0853ad9..22e9c2b 100644
--- a/drivers/power/supply/qcom/fg-alg.h
+++ b/drivers/power/supply/qcom/fg-alg.h
@@ -15,12 +15,15 @@
 
 #define BUCKET_COUNT		8
 #define BUCKET_SOC_PCT		(256 / BUCKET_COUNT)
+#define MAX_CC_STEPS		20
+#define MAX_TTF_SAMPLES		10
 
 struct cycle_counter {
 	void		*data;
 	bool		started[BUCKET_COUNT];
 	u16		count[BUCKET_COUNT];
 	u8		last_soc[BUCKET_COUNT];
+	char		str_buf[BUCKET_COUNT * 8];
 	int		id;
 	int		last_bucket;
 	struct mutex	lock;
@@ -57,11 +60,64 @@
 	int (*prime_cc_soc)(void *data, u32 cc_soc_sw);
 };
 
+enum ttf_mode {
+	TTF_MODE_NORMAL = 0,
+	TTF_MODE_QNOVO,
+};
+
+enum ttf_param {
+	TTF_MSOC = 0,
+	TTF_VBAT,
+	TTF_IBAT,
+	TTF_FCC,
+	TTF_MODE,
+	TTF_ITERM,
+	TTF_RBATT,
+	TTF_VFLOAT,
+	TTF_CHG_TYPE,
+	TTF_CHG_STATUS,
+	TTF_VALID,
+};
+
+struct ttf_circ_buf {
+	int	arr[MAX_TTF_SAMPLES];
+	int	size;
+	int	head;
+};
+
+struct ttf_cc_step_data {
+	int arr[MAX_CC_STEPS];
+	int sel;
+};
+
+struct ttf_pt {
+	s32 x;
+	s32 y;
+};
+
+struct ttf {
+	void			*data;
+	struct ttf_circ_buf	ibatt;
+	struct ttf_circ_buf	vbatt;
+	struct ttf_cc_step_data	cc_step;
+	struct mutex		lock;
+	int			mode;
+	int			last_ttf;
+	int			input_present;
+	int			iterm_delta;
+	int			period_ms;
+	s64			last_ms;
+	struct delayed_work	ttf_work;
+	int (*get_ttf_param)(void *data, enum ttf_param, int *val);
+	int (*awake_voter)(void *data, bool vote);
+};
+
 int restore_cycle_count(struct cycle_counter *counter);
 void clear_cycle_count(struct cycle_counter *counter);
 void cycle_count_update(struct cycle_counter *counter, int batt_soc,
 		int charge_status, bool charge_done, bool input_present);
-int get_cycle_count(struct cycle_counter *counter);
+int get_cycle_count(struct cycle_counter *counter, int *count);
+int get_cycle_counts(struct cycle_counter *counter, const char **buf);
 int cycle_count_init(struct cycle_counter *counter);
 void cap_learning_abort(struct cap_learning *cl);
 void cap_learning_update(struct cap_learning *cl, int batt_temp,
@@ -70,5 +126,9 @@
 int cap_learning_init(struct cap_learning *cl);
 int cap_learning_post_profile_init(struct cap_learning *cl,
 		int64_t nom_cap_uah);
+void ttf_update(struct ttf *ttf, bool input_present);
+int ttf_get_time_to_empty(struct ttf *ttf, int *val);
+int ttf_get_time_to_full(struct ttf *ttf, int *val);
+int ttf_tte_init(struct ttf *ttf);
 
 #endif
diff --git a/drivers/power/supply/qcom/qg-battery-profile.c b/drivers/power/supply/qcom/qg-battery-profile.c
index 36edd76..00a4533 100644
--- a/drivers/power/supply/qcom/qg-battery-profile.c
+++ b/drivers/power/supply/qcom/qg-battery-profile.c
@@ -398,10 +398,8 @@
 {
 	u8 table_index = charging ? TABLE_SOC_OCV1 : TABLE_SOC_OCV2;
 
-	if (!the_battery || !the_battery->profile) {
-		pr_err("Battery profile not loaded\n");
+	if (!the_battery || !the_battery->profile)
 		return -ENODEV;
-	}
 
 	*soc = interpolate_soc(&the_battery->profile[table_index],
 				batt_temp, UV_TO_DECIUV(ocv_uv));
@@ -416,10 +414,8 @@
 	u8 table_index = charging ? TABLE_FCC1 : TABLE_FCC2;
 	u32 fcc_mah;
 
-	if (!the_battery || !the_battery->profile) {
-		pr_err("Battery profile not loaded\n");
+	if (!the_battery || !the_battery->profile)
 		return -ENODEV;
-	}
 
 	fcc_mah = interpolate_single_row_lut(
 				&the_battery->profile[table_index],
diff --git a/drivers/power/supply/qcom/qg-core.h b/drivers/power/supply/qcom/qg-core.h
index 05ca452..f21b2a8 100644
--- a/drivers/power/supply/qcom/qg-core.h
+++ b/drivers/power/supply/qcom/qg-core.h
@@ -14,6 +14,7 @@
 
 #include <linux/kernel.h>
 #include "fg-alg.h"
+#include "qg-defs.h"
 
 struct qg_batt_props {
 	const char		*batt_type_str;
@@ -50,10 +51,25 @@
 	int			rbat_conn_mohm;
 	int			ignore_shutdown_soc_secs;
 	int			cold_temp_threshold;
+	int			esr_qual_i_ua;
+	int			esr_qual_v_uv;
+	int			esr_disable_soc;
 	bool			hold_soc_while_full;
 	bool			linearize_soc;
 	bool			cl_disable;
 	bool			cl_feedback_on;
+	bool			esr_disable;
+	bool			esr_discharge_enable;
+	bool			qg_ext_sense;
+};
+
+struct qg_esr_data {
+	u32			pre_esr_v;
+	u32			pre_esr_i;
+	u32			post_esr_v;
+	u32			post_esr_i;
+	u32			esr;
+	bool			valid;
 };
 
 struct qpnp_qg {
@@ -87,6 +103,7 @@
 	struct power_supply	*batt_psy;
 	struct power_supply	*usb_psy;
 	struct power_supply	*parallel_psy;
+	struct qg_esr_data	esr_data[QG_MAX_ESR_COUNT];
 
 	/* status variable */
 	u32			*debug_mask;
@@ -102,10 +119,18 @@
 	bool			charge_full;
 	int			charge_status;
 	int			charge_type;
+	int			chg_iterm_ma;
 	int			next_wakeup_ms;
+	int			esr_actual;
+	int			esr_nominal;
+	int			soh;
+	int			soc_reporting_ready;
+	u32			fifo_done_count;
 	u32			wa_flags;
 	u32			seq_no;
 	u32			charge_counter_uah;
+	u32			esr_avg;
+	u32			esr_last;
 	ktime_t			last_user_update_time;
 	ktime_t			last_fifo_update_time;
 
@@ -116,6 +141,7 @@
 	int			pon_soc;
 	int			batt_soc;
 	int			cc_soc;
+	int			full_soc;
 	struct alarm		alarm_timer;
 	u32			sdam_data[SDAM_MAX];
 
@@ -126,7 +152,14 @@
 	struct cap_learning	*cl;
 	/* charge counter */
 	struct cycle_counter	*counter;
-	char			counter_buf[BUCKET_COUNT * 8];
+	/* ttf */
+	struct ttf		*ttf;
+};
+
+struct ocv_all {
+	u32 ocv_uv;
+	u32 ocv_raw;
+	char ocv_type[20];
 };
 
 enum ocv_type {
@@ -134,6 +167,7 @@
 	S3_GOOD_OCV,
 	S3_LAST_OCV,
 	SDAM_PON_OCV,
+	PON_OCV_MAX,
 };
 
 enum debug_mask {
@@ -148,6 +182,7 @@
 	QG_DEBUG_BUS_READ	= BIT(8),
 	QG_DEBUG_BUS_WRITE	= BIT(9),
 	QG_DEBUG_ALG_CL		= BIT(10),
+	QG_DEBUG_ESR		= BIT(11),
 };
 
 enum qg_irq {
diff --git a/drivers/power/supply/qcom/qg-defs.h b/drivers/power/supply/qcom/qg-defs.h
index 2061208..997ff70 100644
--- a/drivers/power/supply/qcom/qg-defs.h
+++ b/drivers/power/supply/qcom/qg-defs.h
@@ -34,6 +34,7 @@
 #define GOOD_OCV_VOTER			"GOOD_OCV_VOTER"
 #define PROFILE_IRQ_DISABLE		"NO_PROFILE_IRQ_DISABLE"
 #define QG_INIT_STATE_IRQ_DISABLE	"QG_INIT_STATE_IRQ_DISABLE"
+#define TTF_AWAKE_VOTER			"TTF_AWAKE_VOTER"
 
 #define V_RAW_TO_UV(V_RAW)		div_u64(194637ULL * (u64)V_RAW, 1000)
 #define I_RAW_TO_UA(I_RAW)		div_s64(152588LL * (s64)I_RAW, 1000)
@@ -44,6 +45,9 @@
 #define UV_TO_DECIUV(a)			(a / 100)
 #define DECIUV_TO_UV(a)			(a * 100)
 
+#define QG_MAX_ESR_COUNT		10
+#define QG_MIN_ESR_COUNT		2
+
 #define CAP(min, max, value)			\
 		((min > value) ? min : ((value > max) ? max : value))
 
diff --git a/drivers/power/supply/qcom/qg-reg.h b/drivers/power/supply/qcom/qg-reg.h
index 66f9be1..e0f400d 100644
--- a/drivers/power/supply/qcom/qg-reg.h
+++ b/drivers/power/supply/qcom/qg-reg.h
@@ -17,7 +17,9 @@
 #define QG_TYPE					0x0D
 
 #define QG_STATUS1_REG				0x08
+#define QG_OK_BIT				BIT(7)
 #define BATTERY_PRESENT_BIT			BIT(0)
+#define ESR_MEAS_DONE_BIT			BIT(4)
 
 #define QG_STATUS2_REG				0x09
 #define GOOD_OCV_BIT				BIT(1)
@@ -25,9 +27,13 @@
 #define QG_STATUS3_REG				0x0A
 #define COUNT_FIFO_RT_MASK			GENMASK(3, 0)
 
+#define QG_STATUS4_REG				0x0B
+#define ESR_MEAS_IN_PROGRESS_BIT		BIT(4)
+
 #define QG_INT_RT_STS_REG			0x10
 #define FIFO_UPDATE_DONE_RT_STS_BIT		BIT(3)
 #define VBAT_LOW_INT_RT_STS_BIT			BIT(1)
+#define BATTERY_MISSING_INT_RT_STS_BIT		BIT(0)
 
 #define QG_INT_LATCHED_STS_REG			0x18
 #define FIFO_UPDATE_DONE_INT_LAT_STS_BIT	BIT(3)
@@ -60,11 +66,25 @@
 #define QG_S3_ENTRY_IBAT_THRESHOLD_REG		0x5E
 #define QG_S3_EXIT_IBAT_THRESHOLD_REG		0x5F
 
+#define QG_S5_OCV_VALIDATE_MEAS_CTL1_REG	0x60
+#define ALLOW_S5_BIT				BIT(7)
+
+#define QG_S7_PON_OCV_MEAS_CTL1_REG		0x64
+#define ADC_CONV_DLY_MASK			GENMASK(3, 0)
+
+#define QG_ESR_MEAS_TRIG_REG			0x68
+#define HW_ESR_MEAS_START_BIT			BIT(0)
+
 #define QG_S7_PON_OCV_V_DATA0_REG		0x70
 #define QG_S7_PON_OCV_I_DATA0_REG		0x72
 #define QG_S3_GOOD_OCV_V_DATA0_REG		0x74
 #define QG_S3_GOOD_OCV_I_DATA0_REG		0x76
 
+#define QG_PRE_ESR_V_DATA0_REG			0x78
+#define QG_PRE_ESR_I_DATA0_REG			0x7A
+#define QG_POST_ESR_V_DATA0_REG			0x7C
+#define QG_POST_ESR_I_DATA0_REG			0x7E
+
 #define QG_V_ACCUM_DATA0_RT_REG			0x88
 #define QG_I_ACCUM_DATA0_RT_REG			0x8B
 #define QG_ACCUM_CNT_RT_REG			0x8E
@@ -80,15 +100,22 @@
 #define QG_LAST_S3_SLEEP_V_DATA0_REG		0xCC
 
 /* SDAM offsets */
-#define QG_SDAM_VALID_OFFSET			0x46
-#define QG_SDAM_SOC_OFFSET			0x47
-#define QG_SDAM_TEMP_OFFSET			0x48
-#define QG_SDAM_RBAT_OFFSET			0x4A
-#define QG_SDAM_OCV_OFFSET			0x4C
-#define QG_SDAM_IBAT_OFFSET			0x50
-#define QG_SDAM_TIME_OFFSET			0x54
-#define QG_SDAM_CYCLE_COUNT_OFFSET		0x58
-#define QG_SDAM_LEARNED_CAPACITY_OFFSET		0x68
-#define QG_SDAM_PON_OCV_OFFSET			0x7C
+#define QG_SDAM_VALID_OFFSET			0x46 /* 1-byte 0x46 */
+#define QG_SDAM_SOC_OFFSET			0x47 /* 1-byte 0x47 */
+#define QG_SDAM_TEMP_OFFSET			0x48 /* 2-byte 0x48-0x49 */
+#define QG_SDAM_RBAT_OFFSET			0x4A /* 2-byte 0x4A-0x4B */
+#define QG_SDAM_OCV_OFFSET			0x4C /* 4-byte 0x4C-0x4F */
+#define QG_SDAM_IBAT_OFFSET			0x50 /* 4-byte 0x50-0x53 */
+#define QG_SDAM_TIME_OFFSET			0x54 /* 4-byte 0x54-0x57 */
+#define QG_SDAM_CYCLE_COUNT_OFFSET		0x58 /* 16-byte 0x58-0x67 */
+#define QG_SDAM_LEARNED_CAPACITY_OFFSET		0x68 /* 2-byte 0x68-0x69 */
+#define QG_SDAM_ESR_CHARGE_DELTA_OFFSET		0x6A /* 4-byte 0x6A-0x6D */
+#define QG_SDAM_ESR_DISCHARGE_DELTA_OFFSET	0x6E /* 4-byte 0x6E-0x71 */
+#define QG_SDAM_ESR_CHARGE_SF_OFFSET		0x72 /* 2-byte 0x72-0x73 */
+#define QG_SDAM_ESR_DISCHARGE_SF_OFFSET		0x74 /* 2-byte 0x74-0x75 */
+#define QG_SDAM_MAX_OFFSET			0xA4
+
+/* Below offset is used by PBS */
+#define QG_SDAM_PON_OCV_OFFSET			0xBC /* 2-byte 0xBC-0xBD */
 
 #endif
diff --git a/drivers/power/supply/qcom/qg-sdam.c b/drivers/power/supply/qcom/qg-sdam.c
index 7bc4afa..a7cb97e 100644
--- a/drivers/power/supply/qcom/qg-sdam.c
+++ b/drivers/power/supply/qcom/qg-sdam.c
@@ -68,6 +68,26 @@
 		.offset = QG_SDAM_PON_OCV_OFFSET,
 		.length = 2,
 	},
+	[SDAM_ESR_CHARGE_DELTA] = {
+		.name	= "SDAM_ESR_CHARGE_DELTA",
+		.offset = QG_SDAM_ESR_CHARGE_DELTA_OFFSET,
+		.length = 4,
+	},
+	[SDAM_ESR_DISCHARGE_DELTA] = {
+		.name	= "SDAM_ESR_DISCHARGE_DELTA",
+		.offset = QG_SDAM_ESR_DISCHARGE_DELTA_OFFSET,
+		.length = 4,
+	},
+	[SDAM_ESR_CHARGE_SF] = {
+		.name	= "SDAM_ESR_CHARGE_SF",
+		.offset = QG_SDAM_ESR_CHARGE_SF_OFFSET,
+		.length = 2,
+	},
+	[SDAM_ESR_DISCHARGE_SF] = {
+		.name	= "SDAM_ESR_DISCHARGE_SF",
+		.offset = QG_SDAM_ESR_DISCHARGE_SF_OFFSET,
+		.length = 2,
+	},
 };
 
 int qg_sdam_write(u8 param, u32 data)
@@ -91,7 +111,7 @@
 	length = sdam_info[param].length;
 	rc = regmap_bulk_write(chip->regmap, offset, (u8 *)&data, length);
 	if (rc < 0)
-		pr_err("Failed to write offset=%0x4x param=%d value=%d\n",
+		pr_err("Failed to write offset=%04x param=%d value=%d\n",
 					offset, param, data);
 	else
 		pr_debug("QG SDAM write param=%s value=%d\n",
@@ -117,11 +137,12 @@
 		return -EINVAL;
 	}
 
+	*data = 0;
 	offset = chip->sdam_base + sdam_info[param].offset;
 	length = sdam_info[param].length;
 	rc = regmap_raw_read(chip->regmap, offset, (u8 *)data, length);
 	if (rc < 0)
-		pr_err("Failed to read offset=%0x4x param=%d\n",
+		pr_err("Failed to read offset=%04x param=%d\n",
 					offset, param);
 	else
 		pr_debug("QG SDAM read param=%s value=%d\n",
@@ -143,11 +164,11 @@
 	offset = chip->sdam_base + offset;
 	rc = regmap_bulk_write(chip->regmap, offset, data, (size_t)length);
 	if (rc < 0) {
-		pr_err("Failed to write offset=%0x4x value=%d\n",
+		pr_err("Failed to write offset=%04x value=%d\n",
 					offset, *data);
 	} else {
 		for (i = 0; i < length; i++)
-			pr_debug("QG SDAM write offset=%0x4x value=%d\n",
+			pr_debug("QG SDAM write offset=%04x value=%d\n",
 					offset++, data[i]);
 	}
 
@@ -167,10 +188,10 @@
 	offset = chip->sdam_base + offset;
 	rc = regmap_raw_read(chip->regmap, offset, (u8 *)data, (size_t)length);
 	if (rc < 0) {
-		pr_err("Failed to read offset=%0x4x\n", offset);
+		pr_err("Failed to read offset=%04x\n", offset);
 	} else {
 		for (i = 0; i < length; i++)
-			pr_debug("QG SDAM read offset=%0x4x value=%d\n",
+			pr_debug("QG SDAM read offset=%04x value=%d\n",
 					offset++, data[i]);
 	}
 
diff --git a/drivers/power/supply/qcom/qg-sdam.h b/drivers/power/supply/qcom/qg-sdam.h
index 10e684f..45218a8 100644
--- a/drivers/power/supply/qcom/qg-sdam.h
+++ b/drivers/power/supply/qcom/qg-sdam.h
@@ -24,6 +24,10 @@
 	SDAM_IBAT_UA,
 	SDAM_TIME_SEC,
 	SDAM_PON_OCV_UV,
+	SDAM_ESR_CHARGE_DELTA,
+	SDAM_ESR_DISCHARGE_DELTA,
+	SDAM_ESR_CHARGE_SF,
+	SDAM_ESR_DISCHARGE_SF,
 	SDAM_MAX,
 };
 
diff --git a/drivers/power/supply/qcom/qg-util.c b/drivers/power/supply/qcom/qg-util.c
index d354799..824d914 100644
--- a/drivers/power/supply/qcom/qg-util.c
+++ b/drivers/power/supply/qcom/qg-util.c
@@ -111,6 +111,22 @@
 	return rc;
 }
 
+int qg_read_raw_data(struct qpnp_qg *chip, int addr, u32 *data)
+{
+	int rc;
+	u8 reg[2] = {0};
+
+	rc = qg_read(chip, chip->qg_base + addr, &reg[0], 2);
+	if (rc < 0) {
+		pr_err("Failed to read QG addr %d rc=%d\n", addr, rc);
+		return rc;
+	}
+
+	*data = reg[0] | (reg[1] << 8);
+
+	return rc;
+}
+
 int get_fifo_length(struct qpnp_qg *chip, u32 *fifo_length, bool rt)
 {
 	int rc;
diff --git a/drivers/power/supply/qcom/qg-util.h b/drivers/power/supply/qcom/qg-util.h
index 385c9e0..bb17afb 100644
--- a/drivers/power/supply/qcom/qg-util.h
+++ b/drivers/power/supply/qcom/qg-util.h
@@ -15,6 +15,7 @@
 int qg_read(struct qpnp_qg *chip, u32 addr, u8 *val, int len);
 int qg_write(struct qpnp_qg *chip, u32 addr, u8 *val, int len);
 int qg_masked_write(struct qpnp_qg *chip, int addr, u32 mask, u32 val);
+int qg_read_raw_data(struct qpnp_qg *chip, int addr, u32 *data);
 int get_fifo_length(struct qpnp_qg *chip, u32 *fifo_length, bool rt);
 int get_sample_count(struct qpnp_qg *chip, u32 *sample_count);
 int get_sample_interval(struct qpnp_qg *chip, u32 *sample_interval);
diff --git a/drivers/power/supply/qcom/qpnp-fg.c b/drivers/power/supply/qcom/qpnp-fg.c
index 015da41..deccb20 100644
--- a/drivers/power/supply/qcom/qpnp-fg.c
+++ b/drivers/power/supply/qcom/qpnp-fg.c
@@ -33,6 +33,7 @@
 #include <linux/ktime.h>
 #include <linux/power_supply.h>
 #include <linux/of_batterydata.h>
+#include <linux/spinlock.h>
 #include <linux/string_helpers.h>
 #include <linux/alarmtimer.h>
 #include <linux/qpnp/qpnp-revid.h>
@@ -72,6 +73,7 @@
 
 #define QPNP_FG_DEV_NAME "qcom,qpnp-fg"
 #define MEM_IF_TIMEOUT_MS	5000
+#define FG_CYCLE_MS		1500
 #define BUCKET_COUNT		8
 #define BUCKET_SOC_PCT		(256 / BUCKET_COUNT)
 
@@ -108,6 +110,7 @@
 	PMI8950		= 17,
 	PMI8996		= 19,
 	PMI8937		= 55,
+	PMI8940		= 64,
 };
 
 enum wa_flags {
@@ -150,6 +153,8 @@
 	int			min_temp;
 	int			max_temp;
 	int			vbat_est_thr_uv;
+	int			max_cap_limit;
+	int			min_cap_limit;
 };
 
 struct fg_rslow_data {
@@ -275,11 +280,45 @@
 	DATA(BATT_ID_INFO,    0x594,   3,      1,     -EINVAL),
 };
 
+enum fg_mem_backup_index {
+	FG_BACKUP_SOC = 0,
+	FG_BACKUP_CYCLE_COUNT,
+	FG_BACKUP_CC_SOC_COEFF,
+	FG_BACKUP_IGAIN,
+	FG_BACKUP_VCOR,
+	FG_BACKUP_TEMP_COUNTER,
+	FG_BACKUP_AGING_STORAGE,
+	FG_BACKUP_MAH_TO_SOC,
+	FG_BACKUP_MAX,
+};
+
+#define BACKUP(_idx, _address, _offset, _length,  _value)	\
+	[FG_BACKUP_##_idx] = {				\
+		.address = _address,			\
+		.offset = _offset,			\
+		.len = _length,			\
+		.value = _value,			\
+	}						\
+
+static struct fg_mem_data fg_backup_regs[FG_BACKUP_MAX] = {
+	/*       ID           Address, Offset, Length, Value*/
+	BACKUP(SOC,		0x564,   0,      24,     -EINVAL),
+	BACKUP(CYCLE_COUNT,	0x5E8,   0,      16,     -EINVAL),
+	BACKUP(CC_SOC_COEFF,	0x5BC,   0,      8,     -EINVAL),
+	BACKUP(IGAIN,		0x424,   0,      4,     -EINVAL),
+	BACKUP(VCOR,		0x484,   0,      4,     -EINVAL),
+	BACKUP(TEMP_COUNTER,	0x580,   0,      4,     -EINVAL),
+	BACKUP(AGING_STORAGE,	0x5E4,   0,      4,     -EINVAL),
+	BACKUP(MAH_TO_SOC,	0x4A0,   0,      4,     -EINVAL),
+};
+
 static int fg_debug_mask;
 module_param_named(
 	debug_mask, fg_debug_mask, int, 00600
 );
 
+static int fg_reset_on_lockup;
+
 static int fg_sense_type = -EINVAL;
 static int fg_restart;
 
@@ -298,9 +337,18 @@
 	sram_update_period_ms, fg_sram_update_period_ms, int, 00600
 );
 
+static bool fg_batt_valid_ocv;
+module_param_named(batt_valid_ocv, fg_batt_valid_ocv, bool, 0600
+);
+
+static int fg_batt_range_pct;
+module_param_named(batt_range_pct, fg_batt_range_pct, int, 0600
+);
+
 struct fg_irq {
 	int			irq;
-	unsigned long		disabled;
+	bool			disabled;
+	bool			wakeup;
 };
 
 enum fg_soc_irq {
@@ -348,6 +396,16 @@
 	MAX_ADDRESS,
 };
 
+enum batt_info_params {
+	BATT_INFO_NOTIFY = 0,
+	BATT_INFO_SOC,
+	BATT_INFO_RES_ID,
+	BATT_INFO_VOLTAGE,
+	BATT_INFO_TEMP,
+	BATT_INFO_FCC,
+	BATT_INFO_MAX,
+};
+
 struct register_offset {
 	u16 address[MAX_ADDRESS];
 };
@@ -395,6 +453,22 @@
 	}
 }
 
+enum slope_limit_status {
+	LOW_TEMP_CHARGE,
+	HIGH_TEMP_CHARGE,
+	LOW_TEMP_DISCHARGE,
+	HIGH_TEMP_DISCHARGE,
+	SLOPE_LIMIT_MAX,
+};
+
+#define VOLT_GAIN_MAX		3
+struct dischg_gain_soc {
+	bool			enable;
+	u32			soc[VOLT_GAIN_MAX];
+	u32			medc_gain[VOLT_GAIN_MAX];
+	u32			highc_gain[VOLT_GAIN_MAX];
+};
+
 #define THERMAL_COEFF_N_BYTES		6
 struct fg_chip {
 	struct device		*dev;
@@ -420,6 +494,7 @@
 	struct completion	first_soc_done;
 	struct power_supply	*bms_psy;
 	struct power_supply_desc	bms_psy_d;
+	spinlock_t		sec_access_lock;
 	struct mutex		rw_lock;
 	struct mutex		sysfs_restart_lock;
 	struct delayed_work	batt_profile_init;
@@ -449,6 +524,7 @@
 	struct fg_wakeup_source	update_sram_wakeup_source;
 	bool			fg_restarting;
 	bool			profile_loaded;
+	bool			soc_reporting_ready;
 	bool			use_otp_profile;
 	bool			battery_missing;
 	bool			power_supply_registered;
@@ -459,6 +535,7 @@
 	bool			charge_done;
 	bool			resume_soc_lowered;
 	bool			vbat_low_irq_enabled;
+	bool			full_soc_irq_enabled;
 	bool			charge_full;
 	bool			hold_soc_while_full;
 	bool			input_present;
@@ -467,6 +544,10 @@
 	bool			bad_batt_detection_en;
 	bool			bcl_lpm_disabled;
 	bool			charging_disabled;
+	bool			use_vbat_low_empty_soc;
+	bool			fg_shutdown;
+	bool			use_soft_jeita_irq;
+	bool			allow_false_negative_isense;
 	struct delayed_work	update_jeita_setting;
 	struct delayed_work	update_sram_data;
 	struct delayed_work	update_temp_work;
@@ -491,6 +572,7 @@
 	int			prev_status;
 	int			health;
 	enum fg_batt_aging_mode	batt_aging_mode;
+	struct alarm		hard_jeita_alarm;
 	/* capacity learning */
 	struct fg_learning_data	learning_data;
 	struct alarm		fg_cap_learning_alarm;
@@ -498,6 +580,7 @@
 	struct fg_cc_soc_data	sw_cc_soc_data;
 	/* rslow compensation */
 	struct fg_rslow_data	rslow_comp;
+	int			rconn_mohm;
 	/* cycle counter */
 	struct fg_cyc_ctr_data	cyc_ctr;
 	/* iadc compensation */
@@ -510,6 +593,8 @@
 	bool			jeita_hysteresis_support;
 	bool			batt_hot;
 	bool			batt_cold;
+	bool			batt_warm;
+	bool			batt_cool;
 	int			cold_hysteresis;
 	int			hot_hysteresis;
 	/* ESR pulse tuning */
@@ -518,6 +603,47 @@
 	bool			esr_extract_disabled;
 	bool			imptr_pulse_slow_en;
 	bool			esr_pulse_tune_en;
+	/* Slope limiter */
+	struct work_struct	slope_limiter_work;
+	struct fg_wakeup_source	slope_limit_wakeup_source;
+	bool			soc_slope_limiter_en;
+	enum slope_limit_status	slope_limit_sts;
+	u32			slope_limit_temp;
+	u32			slope_limit_coeffs[SLOPE_LIMIT_MAX];
+	/* Discharge soc gain */
+	struct work_struct	dischg_gain_work;
+	struct fg_wakeup_source	dischg_gain_wakeup_source;
+	struct dischg_gain_soc	dischg_gain;
+	/* IMA error recovery */
+	struct completion	fg_reset_done;
+	struct work_struct	ima_error_recovery_work;
+	struct fg_wakeup_source	fg_reset_wakeup_source;
+	struct mutex		ima_recovery_lock;
+	bool			ima_error_handling;
+	bool			block_sram_access;
+	bool			irqs_enabled;
+	bool			use_last_soc;
+	int			last_soc;
+	/* Validating temperature */
+	int			last_good_temp;
+	int			batt_temp_low_limit;
+	int			batt_temp_high_limit;
+	/* Validating CC_SOC */
+	struct work_struct	cc_soc_store_work;
+	struct fg_wakeup_source	cc_soc_wakeup_source;
+	int			cc_soc_limit_pct;
+	bool			use_last_cc_soc;
+	int64_t			last_cc_soc;
+	/* Sanity check */
+	struct delayed_work	check_sanity_work;
+	struct fg_wakeup_source	sanity_wakeup_source;
+	u8			last_beat_count;
+	/* Batt_info restore */
+	int			batt_info[BATT_INFO_MAX];
+	int			batt_info_id;
+	bool			batt_info_restore;
+	bool			*batt_range_ocv;
+	int			*batt_range_pct;
 };
 
 /* FG_MEMIF DEBUGFS structures */
@@ -661,17 +787,56 @@
 	return rc;
 }
 
-static int fg_masked_write(struct fg_chip *chip, u16 addr,
+static int fg_masked_write_raw(struct fg_chip *chip, u16 addr,
 		u8 mask, u8 val, int len)
 {
 	int rc;
 
 	rc = regmap_update_bits(chip->regmap, addr, mask, val);
-	if (rc) {
+	if (rc)
 		pr_err("spmi write failed: addr=%03X, rc=%d\n", addr, rc);
-		return rc;
+
+	return rc;
+}
+
+static int fg_masked_write(struct fg_chip *chip, u16 addr,
+		u8 mask, u8 val, int len)
+{
+	int rc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&chip->sec_access_lock, flags);
+	rc = fg_masked_write_raw(chip, addr, mask, val, len);
+	spin_unlock_irqrestore(&chip->sec_access_lock, flags);
+
+	return rc;
+}
+
+#define SEC_ACCESS_OFFSET	0xD0
+#define SEC_ACCESS_VALUE	0xA5
+#define PERIPHERAL_MASK		0xFF
+static int fg_sec_masked_write(struct fg_chip *chip, u16 addr, u8 mask, u8 val,
+		int len)
+{
+	int rc;
+	unsigned long flags;
+	u8 temp;
+	u16 base = addr & (~PERIPHERAL_MASK);
+
+	spin_lock_irqsave(&chip->sec_access_lock, flags);
+	temp = SEC_ACCESS_VALUE;
+	rc = fg_write(chip, &temp, base + SEC_ACCESS_OFFSET, 1);
+	if (rc) {
+		pr_err("Unable to unlock sec_access: %d\n", rc);
+		goto out;
 	}
 
+	rc = fg_masked_write_raw(chip, addr, mask, val, len);
+	if (rc)
+		pr_err("Unable to write securely to address 0x%x: %d\n", addr,
+			rc);
+out:
+	spin_unlock_irqrestore(&chip->sec_access_lock, flags);
 	return rc;
 }
 
@@ -952,6 +1117,7 @@
 	int rc = 0, user_cnt = 0, sublen;
 	bool access_configured = false;
 	u8 *wr_data = val, word[4];
+	u16 orig_address = address;
 	char str[DEBUG_PRINT_BUFFER_SIZE];
 
 	if (address < RAM_OFFSET)
@@ -960,8 +1126,8 @@
 	if (offset > 3)
 		return -EINVAL;
 
-	address = ((address + offset) / 4) * 4;
-	offset = (address + offset) % 4;
+	address = ((orig_address + offset) / 4) * 4;
+	offset = (orig_address + offset) % 4;
 
 	user_cnt = atomic_add_return(1, &chip->memif_user_cnt);
 	if (fg_debug_mask & FG_MEM_DEBUG_WRITES)
@@ -1061,50 +1227,253 @@
 #define MEM_INTF_IMA_EXP_STS		0x55
 #define MEM_INTF_IMA_HW_STS		0x56
 #define MEM_INTF_IMA_BYTE_EN		0x60
-#define IMA_ADDR_STBL_ERR		BIT(7)
-#define IMA_WR_ACS_ERR			BIT(6)
-#define IMA_RD_ACS_ERR			BIT(5)
 #define IMA_IACS_CLR			BIT(2)
 #define IMA_IACS_RDY			BIT(1)
-static int fg_check_ima_exception(struct fg_chip *chip)
+static int fg_run_iacs_clear_sequence(struct fg_chip *chip)
+{
+	int rc = 0;
+	u8 temp;
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("Running IACS clear sequence\n");
+
+	/* clear the error */
+	rc = fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
+				IMA_IACS_CLR, IMA_IACS_CLR, 1);
+	if (rc) {
+		pr_err("Error writing to IMA_CFG, rc=%d\n", rc);
+		return rc;
+	}
+
+	temp = 0x4;
+	rc = fg_write(chip, &temp, MEM_INTF_ADDR_LSB(chip) + 1, 1);
+	if (rc) {
+		pr_err("Error writing to MEM_INTF_ADDR_MSB, rc=%d\n", rc);
+		return rc;
+	}
+
+	temp = 0x0;
+	rc = fg_write(chip, &temp, MEM_INTF_WR_DATA0(chip) + 3, 1);
+	if (rc) {
+		pr_err("Error writing to WR_DATA3, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = fg_read(chip, &temp, MEM_INTF_RD_DATA0(chip) + 3, 1);
+	if (rc) {
+		pr_err("Error reading from RD_DATA3, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
+				IMA_IACS_CLR, 0, 1);
+	if (rc) {
+		pr_err("Error writing to IMA_CFG, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("IACS clear sequence complete!\n");
+	return rc;
+}
+
+#define IACS_ERR_BIT		BIT(0)
+#define XCT_ERR_BIT		BIT(1)
+#define DATA_RD_ERR_BIT		BIT(3)
+#define DATA_WR_ERR_BIT		BIT(4)
+#define ADDR_BURST_WRAP_BIT	BIT(5)
+#define ADDR_RNG_ERR_BIT	BIT(6)
+#define ADDR_SRC_ERR_BIT	BIT(7)
+static int fg_check_ima_exception(struct fg_chip *chip, bool check_hw_sts)
 {
 	int rc = 0, ret = 0;
-	u8 err_sts, exp_sts = 0, hw_sts = 0;
+	u8 err_sts = 0, exp_sts = 0, hw_sts = 0;
+	bool run_err_clr_seq = false;
 
 	rc = fg_read(chip, &err_sts,
 			chip->mem_base + MEM_INTF_IMA_ERR_STS, 1);
 	if (rc) {
-		pr_err("failed to read beat count rc=%d\n", rc);
+		pr_err("failed to read IMA_ERR_STS, rc=%d\n", rc);
 		return rc;
 	}
 
-	if (err_sts & (IMA_ADDR_STBL_ERR | IMA_WR_ACS_ERR | IMA_RD_ACS_ERR)) {
-		u8 temp;
-
-		fg_read(chip, &exp_sts,
+	rc = fg_read(chip, &exp_sts,
 			chip->mem_base + MEM_INTF_IMA_EXP_STS, 1);
-		fg_read(chip, &hw_sts,
-			chip->mem_base + MEM_INTF_IMA_HW_STS, 1);
-		pr_err("IMA access failed ima_err_sts=%x ima_exp_sts=%x ima_hw_sts=%x\n",
-				err_sts, exp_sts, hw_sts);
-		rc = err_sts;
-
-		/* clear the error */
-		ret |= fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
-					IMA_IACS_CLR, IMA_IACS_CLR, 1);
-		temp = 0x4;
-		ret |= fg_write(chip, &temp, MEM_INTF_ADDR_LSB(chip) + 1, 1);
-		temp = 0x0;
-		ret |= fg_write(chip, &temp, MEM_INTF_WR_DATA0(chip) + 3, 1);
-		ret |= fg_read(chip, &temp, MEM_INTF_RD_DATA0(chip) + 3, 1);
-		ret |= fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
-					IMA_IACS_CLR, 0, 1);
-		if (!ret)
-			return -EAGAIN;
-
-		pr_err("Error clearing IMA exception ret=%d\n", ret);
+	if (rc) {
+		pr_err("Error in reading IMA_EXP_STS, rc=%d\n", rc);
+		return rc;
 	}
 
+	rc = fg_read(chip, &hw_sts,
+			chip->mem_base + MEM_INTF_IMA_HW_STS, 1);
+	if (rc) {
+		pr_err("Error in reading IMA_HW_STS, rc=%d\n", rc);
+		return rc;
+	}
+
+	pr_info_once("Initial ima_err_sts=%x ima_exp_sts=%x ima_hw_sts=%x\n",
+		err_sts, exp_sts, hw_sts);
+
+	if (fg_debug_mask & (FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES))
+		pr_info("ima_err_sts=%x ima_exp_sts=%x ima_hw_sts=%x\n",
+			err_sts, exp_sts, hw_sts);
+
+	if (check_hw_sts) {
+		/*
+		 * Lower nibble should be equal to upper nibble before SRAM
+		 * transactions begin from the SW side. If they are unequal, then
+		 * the error clear sequence should be run irrespective of IMA
+		 * exception errors.
+		 */
+		if ((hw_sts & 0x0F) != hw_sts >> 4) {
+			pr_err("IMA HW not in correct state, hw_sts=%x\n",
+				hw_sts);
+			run_err_clr_seq = true;
+		}
+	}
+
+	if (exp_sts & (IACS_ERR_BIT | XCT_ERR_BIT | DATA_RD_ERR_BIT |
+		DATA_WR_ERR_BIT | ADDR_BURST_WRAP_BIT | ADDR_RNG_ERR_BIT |
+		ADDR_SRC_ERR_BIT)) {
+		pr_err("IMA exception bit set, exp_sts=%x\n", exp_sts);
+		run_err_clr_seq = true;
+	}
+
+	if (run_err_clr_seq) {
+		ret = fg_run_iacs_clear_sequence(chip);
+		if (!ret)
+			return -EAGAIN;
+		else
+			pr_err("Error clearing IMA exception ret=%d\n", ret);
+	}
+
+	return rc;
+}
+
+static void fg_enable_irqs(struct fg_chip *chip, bool enable)
+{
+	if (!(enable ^ chip->irqs_enabled))
+		return;
+
+	if (enable) {
+		enable_irq(chip->soc_irq[DELTA_SOC].irq);
+		enable_irq_wake(chip->soc_irq[DELTA_SOC].irq);
+		if (!chip->full_soc_irq_enabled) {
+			enable_irq(chip->soc_irq[FULL_SOC].irq);
+			enable_irq_wake(chip->soc_irq[FULL_SOC].irq);
+			chip->full_soc_irq_enabled = true;
+		}
+		enable_irq(chip->batt_irq[BATT_MISSING].irq);
+		if (!chip->vbat_low_irq_enabled) {
+			enable_irq(chip->batt_irq[VBATT_LOW].irq);
+			enable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+			chip->vbat_low_irq_enabled = true;
+		}
+		if (!chip->use_vbat_low_empty_soc) {
+			enable_irq(chip->soc_irq[EMPTY_SOC].irq);
+			enable_irq_wake(chip->soc_irq[EMPTY_SOC].irq);
+		}
+		chip->irqs_enabled = true;
+	} else {
+		disable_irq_wake(chip->soc_irq[DELTA_SOC].irq);
+		disable_irq_nosync(chip->soc_irq[DELTA_SOC].irq);
+		if (chip->full_soc_irq_enabled) {
+			disable_irq_wake(chip->soc_irq[FULL_SOC].irq);
+			disable_irq_nosync(chip->soc_irq[FULL_SOC].irq);
+			chip->full_soc_irq_enabled = false;
+		}
+		disable_irq(chip->batt_irq[BATT_MISSING].irq);
+		if (chip->vbat_low_irq_enabled) {
+			disable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+			disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
+			chip->vbat_low_irq_enabled = false;
+		}
+		if (!chip->use_vbat_low_empty_soc) {
+			disable_irq_wake(chip->soc_irq[EMPTY_SOC].irq);
+			disable_irq_nosync(chip->soc_irq[EMPTY_SOC].irq);
+		}
+		chip->irqs_enabled = false;
+	}
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("FG interrupts are %sabled\n", enable ? "en" : "dis");
+}
+
+static void fg_check_ima_error_handling(struct fg_chip *chip)
+{
+	if (chip->ima_error_handling) {
+		if (fg_debug_mask & FG_STATUS)
+			pr_info("IMA error is handled already!\n");
+		return;
+	}
+	mutex_lock(&chip->ima_recovery_lock);
+	fg_enable_irqs(chip, false);
+	chip->use_last_cc_soc = true;
+	chip->ima_error_handling = true;
+	if (!work_pending(&chip->ima_error_recovery_work))
+		schedule_work(&chip->ima_error_recovery_work);
+	mutex_unlock(&chip->ima_recovery_lock);
+}
+
+#define SOC_ALG_ST		0xCF
+#define FGXCT_PRD		BIT(7)
+#define ALG_ST_CHECK_COUNT	20
+static int fg_check_alg_status(struct fg_chip *chip)
+{
+	int rc = 0, timeout = ALG_ST_CHECK_COUNT, count = 0;
+	u8 ima_opr_sts = 0, alg_sts = 0, temp = 0;
+
+	if (!fg_reset_on_lockup) {
+		pr_info("FG lockup detection cannot be run\n");
+		return 0;
+	}
+
+	rc = fg_read(chip, &alg_sts, chip->soc_base + SOC_ALG_ST, 1);
+	if (rc) {
+		pr_err("Error in reading SOC_ALG_ST, rc=%d\n", rc);
+		return rc;
+	}
+
+	while (1) {
+		rc = fg_read(chip, &ima_opr_sts,
+			chip->mem_base + MEM_INTF_IMA_OPR_STS, 1);
+		if (!rc && !(ima_opr_sts & FGXCT_PRD))
+			break;
+
+		if (rc) {
+			pr_err("Error in reading IMA_OPR_STS, rc=%d\n",
+				rc);
+			break;
+		}
+
+		rc = fg_read(chip, &temp, chip->soc_base + SOC_ALG_ST,
+			1);
+		if (rc) {
+			pr_err("Error in reading SOC_ALG_ST, rc=%d\n",
+				rc);
+			break;
+		}
+
+		if ((ima_opr_sts & FGXCT_PRD) && (temp == alg_sts))
+			count++;
+
+		/* Wait for ~10ms while polling ALG_ST & IMA_OPR_STS */
+		usleep_range(9000, 11000);
+
+		if (!(--timeout))
+			break;
+	}
+
+	if (fg_debug_mask & (FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES))
+		pr_info("ima_opr_sts: %x  alg_sts: %x count=%d\n", ima_opr_sts,
+			alg_sts, count);
+
+	if (count == ALG_ST_CHECK_COUNT) {
+		/* If we are here, that means FG ALG is stuck */
+		pr_err("ALG is stuck\n");
+		fg_check_ima_error_handling(chip);
+		rc = -EBUSY;
+	}
 	return rc;
 }
 
@@ -1122,19 +1491,25 @@
 	while (1) {
 		rc = fg_read(chip, &ima_opr_sts,
 			chip->mem_base + MEM_INTF_IMA_OPR_STS, 1);
-		if (!rc && (ima_opr_sts & IMA_IACS_RDY))
+		if (!rc && (ima_opr_sts & IMA_IACS_RDY)) {
 			break;
+		} else {
+			if (!(--timeout) || rc)
+				break;
 
-		if (!(--timeout) || rc)
-			break;
-		/* delay for iacs_ready to be asserted */
-		usleep_range(5000, 7000);
+			/* delay for iacs_ready to be asserted */
+			usleep_range(5000, 7000);
+		}
 	}
 
 	if (!timeout || rc) {
-		pr_err("IACS_RDY not set\n");
+		pr_err("IACS_RDY not set, ima_opr_sts: %x\n", ima_opr_sts);
+		rc = fg_check_alg_status(chip);
+		if (rc && rc != -EBUSY)
+			pr_err("Couldn't check FG ALG status, rc=%d\n",
+				rc);
 		/* perform IACS_CLR sequence */
-		fg_check_ima_exception(chip);
+		fg_check_ima_exception(chip, false);
 		return -EBUSY;
 	}
 
@@ -1154,15 +1529,16 @@
 
 	while (len > 0) {
 		num_bytes = (offset + len) > BUF_LEN ?
-			(BUF_LEN - offset) : len;
+				(BUF_LEN - offset) : len;
 		/* write to byte_enable */
 		for (i = offset; i < (offset + num_bytes); i++)
 			byte_enable |= BIT(i);
 
 		rc = fg_write(chip, &byte_enable,
-			chip->mem_base + MEM_INTF_IMA_BYTE_EN, 1);
+				chip->mem_base + MEM_INTF_IMA_BYTE_EN, 1);
 		if (rc) {
-			pr_err("Unable to write to byte_en_reg rc=%d\n", rc);
+			pr_err("Unable to write to byte_en_reg rc=%d\n",
+							rc);
 			return rc;
 		}
 			/* write data */
@@ -1193,12 +1569,13 @@
 
 		rc = fg_check_iacs_ready(chip);
 		if (rc) {
-			pr_debug("IACS_RDY failed rc=%d\n", rc);
+			pr_err("IACS_RDY failed post write to address %x offset %d rc=%d\n",
+				address, offset, rc);
 			return rc;
 		}
 
 		/* check for error condition */
-		rc = fg_check_ima_exception(chip);
+		rc = fg_check_ima_exception(chip, false);
 		if (rc) {
 			pr_err("IMA transaction failed rc=%d", rc);
 			return rc;
@@ -1239,12 +1616,13 @@
 
 		rc = fg_check_iacs_ready(chip);
 		if (rc) {
-			pr_debug("IACS_RDY failed rc=%d\n", rc);
+			pr_err("IACS_RDY failed post read for address %x offset %d rc=%d\n",
+				address, offset, rc);
 			return rc;
 		}
 
 		/* check for error condition */
-		rc = fg_check_ima_exception(chip);
+		rc = fg_check_ima_exception(chip, false);
 		if (rc) {
 			pr_err("IMA transaction failed rc=%d", rc);
 			return rc;
@@ -1296,7 +1674,7 @@
 		 * clear, then return an error instead of waiting for it again.
 		 */
 		if  (time_count > 4) {
-			pr_err("Waited for 1.5 seconds polling RIF_MEM_ACCESS_REQ\n");
+			pr_err("Waited for ~16ms polling RIF_MEM_ACCESS_REQ\n");
 			return -ETIMEDOUT;
 		}
 
@@ -1322,7 +1700,8 @@
 
 	rc = fg_check_iacs_ready(chip);
 	if (rc) {
-		pr_debug("IACS_RDY failed rc=%d\n", rc);
+		pr_err("IACS_RDY failed before setting address: %x offset: %d rc=%d\n",
+			address, offset, rc);
 		return rc;
 	}
 
@@ -1335,7 +1714,8 @@
 
 	rc = fg_check_iacs_ready(chip);
 	if (rc)
-		pr_debug("IACS_RDY failed rc=%d\n", rc);
+		pr_err("IACS_RDY failed after setting address: %x offset: %d rc=%d\n",
+			address, offset, rc);
 
 	return rc;
 }
@@ -1346,10 +1726,13 @@
 static int fg_interleaved_mem_read(struct fg_chip *chip, u8 *val, u16 address,
 						int len, int offset)
 {
-	int rc = 0, orig_address = address;
+	int rc = 0, ret, orig_address = address;
 	u8 start_beat_count, end_beat_count, count = 0;
 	bool retry = false;
 
+	if (chip->fg_shutdown)
+		return -EINVAL;
+
 	if (offset > 3) {
 		pr_err("offset too large %d\n", offset);
 		return -EINVAL;
@@ -1372,11 +1755,22 @@
 	}
 
 	mutex_lock(&chip->rw_lock);
+	if (fg_debug_mask & FG_MEM_DEBUG_READS)
+		pr_info("Read for %d bytes is attempted @ 0x%x[%d]\n",
+			len, address, offset);
 
 retry:
+	if (count >= RETRY_COUNT) {
+		pr_err("Retried reading 3 times\n");
+		retry = false;
+		goto out;
+	}
+
 	rc = fg_interleaved_mem_config(chip, val, address, offset, len, 0);
 	if (rc) {
 		pr_err("failed to configure SRAM for IMA rc = %d\n", rc);
+		retry = true;
+		count++;
 		goto out;
 	}
 
@@ -1385,18 +1779,21 @@
 			chip->mem_base + MEM_INTF_FG_BEAT_COUNT, 1);
 	if (rc) {
 		pr_err("failed to read beat count rc=%d\n", rc);
+		retry = true;
+		count++;
 		goto out;
 	}
 
 	/* read data */
 	rc = __fg_interleaved_mem_read(chip, val, address, offset, len);
 	if (rc) {
+		count++;
 		if ((rc == -EAGAIN) && (count < RETRY_COUNT)) {
-			count++;
 			pr_err("IMA access failed retry_count = %d\n", count);
 			goto retry;
 		} else {
 			pr_err("failed to read SRAM address rc = %d\n", rc);
+			retry = true;
 			goto out;
 		}
 	}
@@ -1406,6 +1803,8 @@
 			chip->mem_base + MEM_INTF_FG_BEAT_COUNT, 1);
 	if (rc) {
 		pr_err("failed to read beat count rc=%d\n", rc);
+		retry = true;
+		count++;
 		goto out;
 	}
 
@@ -1418,12 +1817,13 @@
 		if (fg_debug_mask & FG_MEM_DEBUG_READS)
 			pr_info("Beat count do not match - retry transaction\n");
 		retry = true;
+		count++;
 	}
 out:
 	/* Release IMA access */
-	rc = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1);
-	if (rc)
-		pr_err("failed to reset IMA access bit rc = %d\n", rc);
+	ret = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1);
+	if (ret)
+		pr_err("failed to reset IMA access bit ret = %d\n", ret);
 
 	if (retry) {
 		retry = false;
@@ -1439,8 +1839,12 @@
 static int fg_interleaved_mem_write(struct fg_chip *chip, u8 *val, u16 address,
 							int len, int offset)
 {
-	int rc = 0, orig_address = address;
+	int rc = 0, ret, orig_address = address;
 	u8 count = 0;
+	bool retry = false;
+
+	if (chip->fg_shutdown)
+		return -EINVAL;
 
 	if (address < RAM_OFFSET)
 		return -EINVAL;
@@ -1455,32 +1859,49 @@
 	offset = (orig_address + offset) % 4;
 
 	mutex_lock(&chip->rw_lock);
+	if (fg_debug_mask & FG_MEM_DEBUG_WRITES)
+		pr_info("Write for %d bytes is attempted @ 0x%x[%d]\n",
+			len, address, offset);
 
 retry:
+	if (count >= RETRY_COUNT) {
+		pr_err("Retried writing 3 times\n");
+		retry = false;
+		goto out;
+	}
+
 	rc = fg_interleaved_mem_config(chip, val, address, offset, len, 1);
 	if (rc) {
-		pr_err("failed to xonfigure SRAM for IMA rc = %d\n", rc);
+		pr_err("failed to configure SRAM for IMA rc = %d\n", rc);
+		retry = true;
+		count++;
 		goto out;
 	}
 
 	/* write data */
 	rc = __fg_interleaved_mem_write(chip, val, address, offset, len);
 	if (rc) {
+		count++;
 		if ((rc == -EAGAIN) && (count < RETRY_COUNT)) {
-			count++;
 			pr_err("IMA access failed retry_count = %d\n", count);
 			goto retry;
 		} else {
 			pr_err("failed to write SRAM address rc = %d\n", rc);
+			retry = true;
 			goto out;
 		}
 	}
 
 out:
 	/* Release IMA access */
-	rc = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1);
-	if (rc)
-		pr_err("failed to reset IMA access bit rc = %d\n", rc);
+	ret = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1);
+	if (ret)
+		pr_err("failed to reset IMA access bit ret = %d\n", ret);
+
+	if (retry) {
+		retry = false;
+		goto retry;
+	}
 
 	mutex_unlock(&chip->rw_lock);
 	fg_relax(&chip->memif_wakeup_source);
@@ -1490,6 +1911,9 @@
 static int fg_mem_read(struct fg_chip *chip, u8 *val, u16 address,
 			int len, int offset, bool keep_access)
 {
+	if (chip->block_sram_access)
+		return -EBUSY;
+
 	if (chip->ima_supported)
 		return fg_interleaved_mem_read(chip, val, address,
 						len, offset);
@@ -1501,6 +1925,9 @@
 static int fg_mem_write(struct fg_chip *chip, u8 *val, u16 address,
 		int len, int offset, bool keep_access)
 {
+	if (chip->block_sram_access)
+		return -EBUSY;
+
 	if (chip->ima_supported)
 		return fg_interleaved_mem_write(chip, val, address,
 						len, offset);
@@ -1538,6 +1965,62 @@
 	return rc;
 }
 
+static u8 sram_backup_buffer[100];
+static int fg_backup_sram_registers(struct fg_chip *chip, bool save)
+{
+	int rc, i, len, offset;
+	u16 address;
+	u8 *ptr;
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("%sing SRAM registers\n", save ? "Back" : "Restor");
+
+	ptr = sram_backup_buffer;
+	for (i = 0; i < FG_BACKUP_MAX; i++) {
+		address = fg_backup_regs[i].address;
+		offset = fg_backup_regs[i].offset;
+		len = fg_backup_regs[i].len;
+		if (save)
+			rc = fg_interleaved_mem_read(chip, ptr, address,
+					len, offset);
+		else
+			rc = fg_interleaved_mem_write(chip, ptr, address,
+					len, offset);
+		if (rc) {
+			pr_err("Error in accessing %d bytes at %x[%d], rc=%d\n",
+				len, address, offset, rc);
+			break;
+		}
+		ptr += len;
+	}
+
+	return rc;
+}
+
+#define SOC_FG_RESET	0xF3
+#define RESET_MASK	(BIT(7) | BIT(5))
+static int fg_reset(struct fg_chip *chip, bool reset)
+{
+	int rc;
+
+	rc = fg_sec_masked_write(chip, chip->soc_base + SOC_FG_RESET,
+		0xFF, reset ? RESET_MASK : 0, 1);
+	if (rc)
+		pr_err("Error in writing to 0x%x, rc=%d\n", SOC_FG_RESET, rc);
+
+	return rc;
+}
+
+static void fg_handle_battery_insertion(struct fg_chip *chip)
+{
+	reinit_completion(&chip->batt_id_avail);
+	reinit_completion(&chip->fg_reset_done);
+	schedule_delayed_work(&chip->batt_profile_init, 0);
+	cancel_delayed_work(&chip->update_sram_data);
+	schedule_delayed_work(&chip->update_sram_data, msecs_to_jiffies(0));
+}
+
+
 static int soc_to_setpoint(int soc)
 {
 	return DIV_ROUND_CLOSEST(soc * 255, 100);
@@ -1550,6 +2033,7 @@
 	val = DIV_ROUND_CLOSEST(vbatt_mv * 32768, 5000);
 	data[0] = val & 0xFF;
 	data[1] = val >> 8;
+	return;
 }
 
 static u8 batt_to_setpoint_8b(int vbatt_mv)
@@ -1678,14 +2162,37 @@
 	return rc;
 }
 
+#define VBATT_LOW_STS_BIT BIT(2)
+static int fg_get_vbatt_status(struct fg_chip *chip, bool *vbatt_low_sts)
+{
+	int rc = 0;
+	u8 fg_batt_sts;
+
+	rc = fg_read(chip, &fg_batt_sts, INT_RT_STS(chip->batt_base), 1);
+	if (rc)
+		pr_err("spmi read failed: addr=%03X, rc=%d\n",
+				INT_RT_STS(chip->batt_base), rc);
+	else
+		*vbatt_low_sts = !!(fg_batt_sts & VBATT_LOW_STS_BIT);
+
+	return rc;
+}
+
 #define SOC_EMPTY	BIT(3)
 static bool fg_is_batt_empty(struct fg_chip *chip)
 {
 	u8 fg_soc_sts;
 	int rc;
+	bool vbatt_low_sts;
 
-	rc = fg_read(chip, &fg_soc_sts,
-				 INT_RT_STS(chip->soc_base), 1);
+	if (chip->use_vbat_low_empty_soc) {
+		if (fg_get_vbatt_status(chip, &vbatt_low_sts))
+			return false;
+
+		return vbatt_low_sts;
+	}
+
+	rc = fg_read(chip, &fg_soc_sts, INT_RT_STS(chip->soc_base), 1);
 	if (rc) {
 		pr_err("spmi read failed: addr=%03X, rc=%d\n",
 				INT_RT_STS(chip->soc_base), rc);
@@ -1732,7 +2239,16 @@
 #define FULL_SOC_RAW		0xFF
 static int get_prop_capacity(struct fg_chip *chip)
 {
-	int msoc;
+	int msoc, rc;
+	bool vbatt_low_sts;
+
+	if (chip->use_last_soc && chip->last_soc) {
+		if (chip->last_soc == FULL_SOC_RAW)
+			return FULL_CAPACITY;
+		return DIV_ROUND_CLOSEST((chip->last_soc - 1) *
+				(FULL_CAPACITY - 2),
+				FULL_SOC_RAW - 2) + 1;
+	}
 
 	if (chip->battery_missing)
 		return MISSING_CAPACITY;
@@ -1747,10 +2263,28 @@
 		return EMPTY_CAPACITY;
 	}
 	msoc = get_monotonic_soc_raw(chip);
-	if (msoc == 0)
-		return EMPTY_CAPACITY;
-	else if (msoc == FULL_SOC_RAW)
+	if (msoc == 0) {
+		if (fg_reset_on_lockup && chip->use_vbat_low_empty_soc) {
+			rc = fg_get_vbatt_status(chip, &vbatt_low_sts);
+			if (rc) {
+				pr_err("Error in reading vbatt_status, rc=%d\n",
+					rc);
+				return EMPTY_CAPACITY;
+			}
+
+			if (!vbatt_low_sts)
+				return DIV_ROUND_CLOSEST((chip->last_soc - 1) *
+						(FULL_CAPACITY - 2),
+						FULL_SOC_RAW - 2) + 1;
+			else
+				return EMPTY_CAPACITY;
+		} else {
+			return EMPTY_CAPACITY;
+		}
+	} else if (msoc == FULL_SOC_RAW) {
 		return FULL_CAPACITY;
+	}
+
 	return DIV_ROUND_CLOSEST((msoc - 1) * (FULL_CAPACITY - 2),
 			FULL_SOC_RAW - 2) + 1;
 }
@@ -1843,6 +2377,25 @@
 	return 0;
 }
 
+#define IGNORE_FALSE_NEGATIVE_ISENSE_BIT	BIT(3)
+static int set_prop_ignore_false_negative_isense(struct fg_chip *chip,
+							bool ignore)
+{
+	int rc;
+
+	rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
+			IGNORE_FALSE_NEGATIVE_ISENSE_BIT,
+			ignore ? IGNORE_FALSE_NEGATIVE_ISENSE_BIT : 0,
+			EXTERNAL_SENSE_OFFSET);
+	if (rc) {
+		pr_err("failed to %s isense false negative ignore rc=%d\n",
+				ignore ? "enable" : "disable", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
 #define EXPONENT_MASK		0xF800
 #define MANTISSA_MASK		0x3FF
 #define SIGN			BIT(10)
@@ -1953,8 +2506,7 @@
 		return rc;
 	}
 
-	if (fg_debug_mask & FG_IRQS)
-		pr_info("fg batt sts 0x%x\n", fg_batt_sts);
+	pr_debug("fg batt sts 0x%x\n", fg_batt_sts);
 
 	return (fg_batt_sts & BATT_IDED) ? 1 : 0;
 }
@@ -1984,7 +2536,7 @@
 #define DECIKELVIN	2730
 #define SRAM_PERIOD_NO_ID_UPDATE_MS	100
 #define FULL_PERCENT_28BIT		0xFFFFFFF
-static void update_sram_data(struct fg_chip *chip, int *resched_ms)
+static int update_sram_data(struct fg_chip *chip, int *resched_ms)
 {
 	int i, j, rc = 0;
 	u8 reg[4];
@@ -2060,6 +2612,31 @@
 	}
 	fg_mem_release(chip);
 
+	/* Back up the registers whenever the update completes without error */
+	if (fg_reset_on_lockup && !chip->ima_error_handling) {
+		if (!rc) {
+			if (fg_debug_mask & FG_STATUS)
+				pr_info("backing up SRAM registers\n");
+			rc = fg_backup_sram_registers(chip, true);
+			if (rc) {
+				pr_err("Couldn't save sram registers\n");
+				goto out;
+			}
+			if (!chip->use_last_soc) {
+				chip->last_soc = get_monotonic_soc_raw(chip);
+				chip->last_cc_soc = div64_s64(
+					(int64_t)chip->last_soc *
+					FULL_PERCENT_28BIT, FULL_SOC_RAW);
+			}
+			if (fg_debug_mask & FG_STATUS)
+				pr_info("last_soc: %d last_cc_soc: %lld\n",
+					chip->last_soc, chip->last_cc_soc);
+		} else {
+			pr_err("update_sram failed\n");
+			goto out;
+		}
+	}
+
 	if (!rc)
 		get_current_time(&chip->last_sram_update_time);
 
@@ -2070,7 +2647,55 @@
 	} else {
 		*resched_ms = SRAM_PERIOD_NO_ID_UPDATE_MS;
 	}
+out:
 	fg_relax(&chip->update_sram_wakeup_source);
+	return rc;
+}
+
+#define SANITY_CHECK_PERIOD_MS	5000
+static void check_sanity_work(struct work_struct *work)
+{
+	struct fg_chip *chip = container_of(work,
+				struct fg_chip,
+				check_sanity_work.work);
+	int rc = 0;
+	u8 beat_count;
+	bool tried_once = false;
+
+	fg_stay_awake(&chip->sanity_wakeup_source);
+
+try_again:
+	rc = fg_read(chip, &beat_count,
+			chip->mem_base + MEM_INTF_FG_BEAT_COUNT, 1);
+	if (rc) {
+		pr_err("failed to read beat count rc=%d\n", rc);
+		goto resched;
+	}
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("current: %d, prev: %d\n", beat_count,
+			chip->last_beat_count);
+
+	if (chip->last_beat_count == beat_count) {
+		if (!tried_once) {
+			/* Wait for 1 FG cycle and read it once again */
+			msleep(1500);
+			tried_once = true;
+			goto try_again;
+		} else {
+			pr_err("Beat count not updating\n");
+			fg_check_ima_error_handling(chip);
+			goto out;
+		}
+	} else {
+		chip->last_beat_count = beat_count;
+	}
+resched:
+	schedule_delayed_work(
+		&chip->check_sanity_work,
+		msecs_to_jiffies(SANITY_CHECK_PERIOD_MS));
+out:
+	fg_relax(&chip->sanity_wakeup_source);
 }
 
 #define SRAM_TIMEOUT_MS			3000
@@ -2079,8 +2704,9 @@
 	struct fg_chip *chip = container_of(work,
 				struct fg_chip,
 				update_sram_data.work);
-	int resched_ms = SRAM_PERIOD_NO_ID_UPDATE_MS, ret;
+	int resched_ms, ret;
 	bool tried_again = false;
+	int rc = 0;
 
 wait:
 	/* Wait for MEMIF access revoked */
@@ -2094,14 +2720,19 @@
 		goto wait;
 	} else if (ret <= 0) {
 		pr_err("transaction timed out ret=%d\n", ret);
+		if (fg_is_batt_id_valid(chip))
+			resched_ms = fg_sram_update_period_ms;
+		else
+			resched_ms = SRAM_PERIOD_NO_ID_UPDATE_MS;
 		goto out;
 	}
-	update_sram_data(chip, &resched_ms);
+	rc = update_sram_data(chip, &resched_ms);
 
 out:
-	schedule_delayed_work(
-		&chip->update_sram_data,
-		msecs_to_jiffies(resched_ms));
+	if (!rc)
+		schedule_delayed_work(
+			&chip->update_sram_data,
+			msecs_to_jiffies(resched_ms));
 }
 
 #define BATT_TEMP_OFFSET	3
@@ -2115,6 +2746,8 @@
 				TEMP_SENSE_CHARGE_BIT)
 #define TEMP_PERIOD_UPDATE_MS		10000
 #define TEMP_PERIOD_TIMEOUT_MS		3000
+#define BATT_TEMP_LOW_LIMIT		-600
+#define BATT_TEMP_HIGH_LIMIT		1500
 static void update_temp_data(struct work_struct *work)
 {
 	s16 temp;
@@ -2166,14 +2799,44 @@
 	}
 
 	temp = reg[0] | (reg[1] << 8);
-	fg_data[0].value = (temp * TEMP_LSB_16B / 1000)
-		- DECIKELVIN;
+	temp = (temp * TEMP_LSB_16B / 1000) - DECIKELVIN;
+
+	/*
+	 * If the temperature is within the specified range (e.g. -60C to
+	 * 150C), report it to userspace. Otherwise, use the last known
+	 * good temperature.
+	 */
+	if (temp > chip->batt_temp_low_limit &&
+			temp < chip->batt_temp_high_limit) {
+		chip->last_good_temp = temp;
+		fg_data[0].value = temp;
+	} else {
+		fg_data[0].value = chip->last_good_temp;
+
+		/*
+		 * If a valid temperature was read earlier, then a bad reading
+		 * now could be because of an FG lockup. Trigger the FG reset
+		 * sequence in such cases.
+		 */
+		if (chip->last_temp_update_time && fg_reset_on_lockup &&
+			(chip->last_good_temp > chip->batt_temp_low_limit &&
+			chip->last_good_temp < chip->batt_temp_high_limit)) {
+			pr_err("Batt_temp is %d!, triggering FG reset\n",
+				temp);
+			fg_check_ima_error_handling(chip);
+		}
+	}
 
 	if (fg_debug_mask & FG_MEM_DEBUG_READS)
 		pr_info("BATT_TEMP %d %d\n", temp, fg_data[0].value);
 
 	get_current_time(&chip->last_temp_update_time);
 
+	if (chip->soc_slope_limiter_en) {
+		fg_stay_awake(&chip->slope_limit_wakeup_source);
+		schedule_work(&chip->slope_limiter_work);
+	}
+
 out:
 	if (chip->sw_rbias_ctrl) {
 		rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
@@ -2226,18 +2889,6 @@
 	return rc;
 }
 
-#define VBATT_LOW_STS_BIT BIT(2)
-static int fg_get_vbatt_status(struct fg_chip *chip, bool *vbatt_low_sts)
-{
-	int rc = 0;
-	u8 fg_batt_sts;
-
-	rc = fg_read(chip, &fg_batt_sts, INT_RT_STS(chip->batt_base), 1);
-	if (!rc)
-		*vbatt_low_sts = !!(fg_batt_sts & VBATT_LOW_STS_BIT);
-	return rc;
-}
-
 #define BATT_CYCLE_NUMBER_REG		0x5E8
 #define BATT_CYCLE_OFFSET		0
 static void restore_cycle_counter(struct fg_chip *chip)
@@ -2301,6 +2952,9 @@
 			bucket, rc);
 	else
 		chip->cyc_ctr.count[bucket] = cyc_count;
+
+	if (fg_debug_mask & FG_POWER_SUPPLY)
+		pr_info("Stored bucket %d cyc_count: %d\n", bucket, cyc_count);
 	return rc;
 }
 
@@ -2416,6 +3070,62 @@
 	return ((int)val) * 1000;
 }
 
+#define SLOPE_LIMITER_COEFF_REG		0x430
+#define SLOPE_LIMITER_COEFF_OFFSET	3
+#define SLOPE_LIMIT_TEMP_THRESHOLD	100
+#define SLOPE_LIMIT_LOW_TEMP_CHG	45
+#define SLOPE_LIMIT_HIGH_TEMP_CHG	2
+#define SLOPE_LIMIT_LOW_TEMP_DISCHG	45
+#define SLOPE_LIMIT_HIGH_TEMP_DISCHG	2
+static void slope_limiter_work(struct work_struct *work)
+{
+	struct fg_chip *chip = container_of(work, struct fg_chip,
+				slope_limiter_work);
+	enum slope_limit_status status;
+	int batt_temp, rc;
+	u8 buf[2];
+	int64_t val;
+
+	batt_temp = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
+
+	if (chip->status == POWER_SUPPLY_STATUS_CHARGING ||
+			chip->status == POWER_SUPPLY_STATUS_FULL) {
+		if (batt_temp < chip->slope_limit_temp)
+			status = LOW_TEMP_CHARGE;
+		else
+			status = HIGH_TEMP_CHARGE;
+	} else if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) {
+		if (batt_temp < chip->slope_limit_temp)
+			status = LOW_TEMP_DISCHARGE;
+		else
+			status = HIGH_TEMP_DISCHARGE;
+	} else {
+		goto out;
+	}
+
+	if (status == chip->slope_limit_sts)
+		goto out;
+
+	val = chip->slope_limit_coeffs[status];
+	val *= MICRO_UNIT;
+	half_float_to_buffer(val, buf);
+	rc = fg_mem_write(chip, buf,
+			SLOPE_LIMITER_COEFF_REG, 2,
+			SLOPE_LIMITER_COEFF_OFFSET, 0);
+	if (rc) {
+		pr_err("Couldn't write to slope_limiter_coeff_reg, rc=%d\n",
+			rc);
+		goto out;
+	}
+
+	chip->slope_limit_sts = status;
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("Slope limit sts: %d val: %lld buf[%x %x] written\n",
+			status, val, buf[0], buf[1]);
+out:
+	fg_relax(&chip->slope_limit_wakeup_source);
+}
+
 static int lookup_ocv_for_soc(struct fg_chip *chip, int soc)
 {
 	int64_t *coeffs;
@@ -2481,6 +3191,7 @@
 #define ESR_ACTUAL_REG		0x554
 #define BATTERY_ESR_REG		0x4F4
 #define TEMP_RS_TO_RSLOW_REG	0x514
+#define ESR_OFFSET		2
 static int estimate_battery_age(struct fg_chip *chip, int *actual_capacity)
 {
 	int64_t ocv_cutoff_new, ocv_cutoff_aged, temp_rs_to_rslow;
@@ -2519,7 +3230,7 @@
 
 	rc = fg_mem_read(chip, buffer, ESR_ACTUAL_REG, 2, 2, 0);
 	esr_actual = half_float(buffer);
-	rc |= fg_mem_read(chip, buffer, BATTERY_ESR_REG, 2, 2, 0);
+	rc |= fg_mem_read(chip, buffer, BATTERY_ESR_REG, 2, ESR_OFFSET, 0);
 	battery_esr = half_float(buffer);
 
 	if (rc) {
@@ -2594,124 +3305,6 @@
 	estimate_battery_age(chip, &chip->actual_cap_uah);
 }
 
-static enum power_supply_property fg_power_props[] = {
-	POWER_SUPPLY_PROP_CAPACITY,
-	POWER_SUPPLY_PROP_CAPACITY_RAW,
-	POWER_SUPPLY_PROP_CURRENT_NOW,
-	POWER_SUPPLY_PROP_VOLTAGE_NOW,
-	POWER_SUPPLY_PROP_VOLTAGE_OCV,
-	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
-	POWER_SUPPLY_PROP_CHARGE_NOW,
-	POWER_SUPPLY_PROP_CHARGE_NOW_RAW,
-	POWER_SUPPLY_PROP_CHARGE_NOW_ERROR,
-	POWER_SUPPLY_PROP_CHARGE_FULL,
-	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
-	POWER_SUPPLY_PROP_TEMP,
-	POWER_SUPPLY_PROP_COOL_TEMP,
-	POWER_SUPPLY_PROP_WARM_TEMP,
-	POWER_SUPPLY_PROP_RESISTANCE,
-	POWER_SUPPLY_PROP_RESISTANCE_ID,
-	POWER_SUPPLY_PROP_BATTERY_TYPE,
-	POWER_SUPPLY_PROP_UPDATE_NOW,
-	POWER_SUPPLY_PROP_ESR_COUNT,
-	POWER_SUPPLY_PROP_VOLTAGE_MIN,
-	POWER_SUPPLY_PROP_CYCLE_COUNT,
-	POWER_SUPPLY_PROP_CYCLE_COUNT_ID,
-	POWER_SUPPLY_PROP_HI_POWER,
-};
-
-static int fg_power_get_property(struct power_supply *psy,
-				       enum power_supply_property psp,
-				       union power_supply_propval *val)
-{
-	struct fg_chip *chip = power_supply_get_drvdata(psy);
-	bool vbatt_low_sts;
-
-	switch (psp) {
-	case POWER_SUPPLY_PROP_BATTERY_TYPE:
-		if (chip->battery_missing)
-			val->strval = missing_batt_type;
-		else if (chip->fg_restarting)
-			val->strval = loading_batt_type;
-		else
-			val->strval = chip->batt_type;
-		break;
-	case POWER_SUPPLY_PROP_CAPACITY:
-		val->intval = get_prop_capacity(chip);
-		break;
-	case POWER_SUPPLY_PROP_CAPACITY_RAW:
-		val->intval = get_sram_prop_now(chip, FG_DATA_BATT_SOC);
-		break;
-	case POWER_SUPPLY_PROP_CHARGE_NOW_ERROR:
-		val->intval = get_sram_prop_now(chip, FG_DATA_VINT_ERR);
-		break;
-	case POWER_SUPPLY_PROP_CURRENT_NOW:
-		val->intval = get_sram_prop_now(chip, FG_DATA_CURRENT);
-		break;
-	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
-		val->intval = get_sram_prop_now(chip, FG_DATA_VOLTAGE);
-		break;
-	case POWER_SUPPLY_PROP_VOLTAGE_OCV:
-		val->intval = get_sram_prop_now(chip, FG_DATA_OCV);
-		break;
-	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
-		val->intval = chip->batt_max_voltage_uv;
-		break;
-	case POWER_SUPPLY_PROP_TEMP:
-		val->intval = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
-		break;
-	case POWER_SUPPLY_PROP_COOL_TEMP:
-		val->intval = get_prop_jeita_temp(chip, FG_MEM_SOFT_COLD);
-		break;
-	case POWER_SUPPLY_PROP_WARM_TEMP:
-		val->intval = get_prop_jeita_temp(chip, FG_MEM_SOFT_HOT);
-		break;
-	case POWER_SUPPLY_PROP_RESISTANCE:
-		val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR);
-		break;
-	case POWER_SUPPLY_PROP_ESR_COUNT:
-		val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR_COUNT);
-		break;
-	case POWER_SUPPLY_PROP_CYCLE_COUNT:
-		val->intval = fg_get_cycle_count(chip);
-		break;
-	case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
-		val->intval = chip->cyc_ctr.id;
-		break;
-	case POWER_SUPPLY_PROP_RESISTANCE_ID:
-		val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ID);
-		break;
-	case POWER_SUPPLY_PROP_UPDATE_NOW:
-		val->intval = 0;
-		break;
-	case POWER_SUPPLY_PROP_VOLTAGE_MIN:
-		if (!fg_get_vbatt_status(chip, &vbatt_low_sts))
-			val->intval = (int)vbatt_low_sts;
-		else
-			val->intval = 1;
-		break;
-	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
-		val->intval = chip->nom_cap_uah;
-		break;
-	case POWER_SUPPLY_PROP_CHARGE_FULL:
-		val->intval = chip->learning_data.learned_cc_uah;
-		break;
-	case POWER_SUPPLY_PROP_CHARGE_NOW:
-		val->intval = chip->learning_data.cc_uah;
-		break;
-	case POWER_SUPPLY_PROP_CHARGE_NOW_RAW:
-		val->intval = get_sram_prop_now(chip, FG_DATA_CC_CHARGE);
-		break;
-	case POWER_SUPPLY_PROP_HI_POWER:
-		val->intval = !!chip->bcl_lpm_disabled;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
 static int correction_times[] = {
 	1470,
 	2940,
@@ -2853,11 +3446,8 @@
 		goto fail;
 	}
 
-	if (chip->wa_flag & USE_CC_SOC_REG) {
-		mutex_unlock(&chip->learning_data.learning_lock);
-		fg_relax(&chip->capacity_learning_wakeup_source);
-		return;
-	}
+	if (chip->wa_flag & USE_CC_SOC_REG)
+		goto fail;
 
 	fg_mem_lock(chip);
 
@@ -2888,6 +3478,8 @@
 		pr_info("total_cc_uah = %lld\n", chip->learning_data.cc_uah);
 
 fail:
+	if (chip->wa_flag & USE_CC_SOC_REG)
+		fg_relax(&chip->capacity_learning_wakeup_source);
 	mutex_unlock(&chip->learning_data.learning_lock);
 	return;
 
@@ -2901,7 +3493,7 @@
 {
 	int rc;
 	u8 reg[4];
-	unsigned int temp, magnitude;
+	int temp;
 
 	rc = fg_mem_read(chip, reg, CC_SOC_BASE_REG, 4, CC_SOC_OFFSET, 0);
 	if (rc) {
@@ -2910,20 +3502,61 @@
 	}
 
 	temp = reg[3] << 24 | reg[2] << 16 | reg[1] << 8 | reg[0];
-	magnitude = temp & CC_SOC_MAGNITUDE_MASK;
-	if (temp & CC_SOC_NEGATIVE_BIT)
-		*cc_soc = -1 * (~magnitude + 1);
-	else
-		*cc_soc = magnitude;
-
+	*cc_soc = sign_extend32(temp, 29);
 	return 0;
 }
 
+static int fg_get_current_cc(struct fg_chip *chip)
+{
+	int cc_soc, rc;
+	int64_t current_capacity;
+
+	if (!(chip->wa_flag & USE_CC_SOC_REG))
+		return chip->learning_data.cc_uah;
+
+	if (!chip->learning_data.learned_cc_uah)
+		return -EINVAL;
+
+	rc = fg_get_cc_soc(chip, &cc_soc);
+	if (rc < 0) {
+		pr_err("Failed to get cc_soc, rc=%d\n", rc);
+		return rc;
+	}
+
+	current_capacity = cc_soc * chip->learning_data.learned_cc_uah;
+	current_capacity = div64_u64(current_capacity, FULL_PERCENT_28BIT);
+	return current_capacity;
+}
+
+#define BATT_MISSING_STS BIT(6)
+static bool is_battery_missing(struct fg_chip *chip)
+{
+	int rc;
+	u8 fg_batt_sts;
+
+	rc = fg_read(chip, &fg_batt_sts,
+				 INT_RT_STS(chip->batt_base), 1);
+	if (rc) {
+		pr_err("spmi read failed: addr=%03X, rc=%d\n",
+				INT_RT_STS(chip->batt_base), rc);
+		return false;
+	}
+
+	return (fg_batt_sts & BATT_MISSING_STS) ? true : false;
+}
+
 static int fg_cap_learning_process_full_data(struct fg_chip *chip)
 {
 	int cc_pc_val, rc = -EINVAL;
 	unsigned int cc_soc_delta_pc;
 	int64_t delta_cc_uah;
+	uint64_t temp;
+	bool batt_missing = is_battery_missing(chip);
+
+	if (batt_missing) {
+		pr_err("Battery is missing!\n");
+		goto fail;
+	}
 
 	if (!chip->learning_data.active)
 		goto fail;
@@ -2940,9 +3573,8 @@
 		goto fail;
 	}
 
-	cc_soc_delta_pc = DIV_ROUND_CLOSEST(
-			abs(cc_pc_val - chip->learning_data.init_cc_pc_val)
-			* 100, FULL_PERCENT_28BIT);
+	temp = abs(cc_pc_val - chip->learning_data.init_cc_pc_val);
+	cc_soc_delta_pc = DIV_ROUND_CLOSEST_ULL(temp * 100, FULL_PERCENT_28BIT);
 
 	delta_cc_uah = div64_s64(
 			chip->learning_data.learned_cc_uah * cc_soc_delta_pc,
@@ -2950,8 +3582,11 @@
 	chip->learning_data.cc_uah = delta_cc_uah + chip->learning_data.cc_uah;
 
 	if (fg_debug_mask & FG_AGING)
-		pr_info("current cc_soc=%d cc_soc_pc=%d total_cc_uah = %lld\n",
+		pr_info("current cc_soc=%d cc_soc_pc=%d init_cc_pc_val=%d delta_cc_uah=%lld learned_cc_uah=%lld total_cc_uah = %lld\n",
 				cc_pc_val, cc_soc_delta_pc,
+				chip->learning_data.init_cc_pc_val,
+				delta_cc_uah,
+				chip->learning_data.learned_cc_uah,
 				chip->learning_data.cc_uah);
 
 	return 0;
@@ -3044,6 +3679,12 @@
 {
 	int16_t cc_mah;
 	int rc;
+	bool batt_missing = is_battery_missing(chip);
+
+	if (batt_missing) {
+		pr_err("Battery is missing!\n");
+		return;
+	}
 
 	cc_mah = div64_s64(chip->learning_data.learned_cc_uah, 1000);
 
@@ -3065,6 +3706,12 @@
 static void fg_cap_learning_post_process(struct fg_chip *chip)
 {
 	int64_t max_inc_val, min_dec_val, old_cap;
+	bool batt_missing = is_battery_missing(chip);
+
+	if (batt_missing) {
+		pr_err("Battery is missing!\n");
+		return;
+	}
 
 	max_inc_val = chip->learning_data.learned_cc_uah
 			* (1000 + chip->learning_data.max_increment);
@@ -3083,6 +3730,32 @@
 		chip->learning_data.learned_cc_uah =
 			chip->learning_data.cc_uah;
 
+	if (chip->learning_data.max_cap_limit) {
+		max_inc_val = (int64_t)chip->nom_cap_uah * (1000 +
+				chip->learning_data.max_cap_limit);
+		max_inc_val = div64_u64(max_inc_val, 1000);
+		if (chip->learning_data.cc_uah > max_inc_val) {
+			if (fg_debug_mask & FG_AGING)
+				pr_info("learning capacity %lld goes above max limit %lld\n",
+					chip->learning_data.cc_uah,
+					max_inc_val);
+			chip->learning_data.learned_cc_uah = max_inc_val;
+		}
+	}
+
+	if (chip->learning_data.min_cap_limit) {
+		min_dec_val = (int64_t)chip->nom_cap_uah * (1000 -
+				chip->learning_data.min_cap_limit);
+		min_dec_val = div64_u64(min_dec_val, 1000);
+		if (chip->learning_data.cc_uah < min_dec_val) {
+			if (fg_debug_mask & FG_AGING)
+				pr_info("learning capacity %lld goes below min limit %lld\n",
+					chip->learning_data.cc_uah,
+					min_dec_val);
+			chip->learning_data.learned_cc_uah = min_dec_val;
+		}
+	}
+
 	fg_cap_learning_save_data(chip);
 	if (fg_debug_mask & FG_AGING)
 		pr_info("final cc_uah = %lld, learned capacity %lld -> %lld uah\n",
@@ -3142,7 +3815,7 @@
 		if (battery_soc * 100 / FULL_PERCENT_3B
 				> chip->learning_data.max_start_soc) {
 			if (fg_debug_mask & FG_AGING)
-				pr_info("battery soc too low (%d < %d), aborting\n",
+				pr_info("battery soc too high (%d > %d), aborting\n",
 					battery_soc * 100 / FULL_PERCENT_3B,
 					chip->learning_data.max_start_soc);
 			fg_mem_release(chip);
@@ -3226,6 +3899,17 @@
 		}
 
 		fg_cap_learning_stop(chip);
+	} else if (chip->status == POWER_SUPPLY_STATUS_FULL) {
+		if (chip->wa_flag & USE_CC_SOC_REG) {
+			/* reset SW_CC_SOC register to 100% upon charge_full */
+			rc = fg_mem_write(chip, (u8 *)&cc_pc_100,
+				CC_SOC_BASE_REG, 4, CC_SOC_OFFSET, 0);
+			if (rc)
+				pr_err("Failed to reset CC_SOC_REG rc=%d\n",
+								rc);
+			else if (fg_debug_mask & FG_STATUS)
+				pr_info("Reset SW_CC_SOC to full value\n");
+		}
 	}
 
 fail:
@@ -3323,7 +4007,14 @@
 				struct fg_chip,
 				status_change_work);
 	unsigned long current_time = 0;
-	int cc_soc, rc, capacity = get_prop_capacity(chip);
+	int cc_soc, batt_soc, rc, capacity = get_prop_capacity(chip);
+	bool batt_missing = is_battery_missing(chip);
+
+	if (batt_missing) {
+		if (fg_debug_mask & FG_STATUS)
+			pr_info("Battery is missing\n");
+		return;
+	}
 
 	if (chip->esr_pulse_tune_en) {
 		fg_stay_awake(&chip->esr_extract_wakeup_source);
@@ -3343,19 +4034,34 @@
 	}
 	if (chip->status == POWER_SUPPLY_STATUS_FULL ||
 			chip->status == POWER_SUPPLY_STATUS_CHARGING) {
-		if (!chip->vbat_low_irq_enabled) {
+		if (!chip->vbat_low_irq_enabled &&
+				!chip->use_vbat_low_empty_soc) {
 			enable_irq(chip->batt_irq[VBATT_LOW].irq);
 			enable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
 			chip->vbat_low_irq_enabled = true;
 		}
+
+		if (!chip->full_soc_irq_enabled) {
+			enable_irq(chip->soc_irq[FULL_SOC].irq);
+			enable_irq_wake(chip->soc_irq[FULL_SOC].irq);
+			chip->full_soc_irq_enabled = true;
+		}
+
 		if (!!(chip->wa_flag & PULSE_REQUEST_WA) && capacity == 100)
 			fg_configure_soc(chip);
 	} else if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) {
-		if (chip->vbat_low_irq_enabled) {
+		if (chip->vbat_low_irq_enabled &&
+				!chip->use_vbat_low_empty_soc) {
 			disable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
 			disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
 			chip->vbat_low_irq_enabled = false;
 		}
+
+		if (chip->full_soc_irq_enabled) {
+			disable_irq_wake(chip->soc_irq[FULL_SOC].irq);
+			disable_irq_nosync(chip->soc_irq[FULL_SOC].irq);
+			chip->full_soc_irq_enabled = false;
+		}
 	}
 	fg_cap_learning_check(chip);
 	schedule_work(&chip->update_esr_work);
@@ -3368,6 +4074,42 @@
 	}
 
 	if (chip->prev_status != chip->status && chip->last_sram_update_time) {
+		/*
+		 * Reset SW_CC_SOC to a value based on the battery SOC when
+		 * the device is discharging.
+		 */
+		if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) {
+			batt_soc = get_battery_soc_raw(chip);
+			if (!batt_soc)
+				return;
+
+			batt_soc = div64_s64((int64_t)batt_soc *
+					FULL_PERCENT_28BIT, FULL_PERCENT_3B);
+			rc = fg_mem_write(chip, (u8 *)&batt_soc,
+				CC_SOC_BASE_REG, 4, CC_SOC_OFFSET, 0);
+			if (rc)
+				pr_err("Failed to reset CC_SOC_REG rc=%d\n",
+									rc);
+			else if (fg_debug_mask & FG_STATUS)
+				pr_info("Reset SW_CC_SOC to %x\n", batt_soc);
+		}
+
+		/*
+		 * Schedule the update_temp_work whenever there is a status
+		 * change. This is essential for applying the slope limiter
+		 * coefficients when that feature is enabled.
+		 */
+		if (chip->last_temp_update_time && chip->soc_slope_limiter_en) {
+			cancel_delayed_work_sync(&chip->update_temp_work);
+			schedule_delayed_work(&chip->update_temp_work,
+				msecs_to_jiffies(0));
+		}
+
+		if (chip->dischg_gain.enable) {
+			fg_stay_awake(&chip->dischg_gain_wakeup_source);
+			schedule_work(&chip->dischg_gain_work);
+		}
+
 		get_current_time(&current_time);
 		/*
 		 * When charging status changes, update SRAM parameters if it
@@ -3393,10 +4135,10 @@
 	}
 	if ((chip->wa_flag & USE_CC_SOC_REG) && chip->bad_batt_detection_en
 			&& chip->safety_timer_expired) {
-		chip->sw_cc_soc_data.delta_soc =
-			DIV_ROUND_CLOSEST(abs(cc_soc -
-					chip->sw_cc_soc_data.init_cc_soc)
-					* 100, FULL_PERCENT_28BIT);
+		uint64_t delta_cc_soc = abs(cc_soc -
+					chip->sw_cc_soc_data.init_cc_soc);
+		chip->sw_cc_soc_data.delta_soc = DIV_ROUND_CLOSEST_ULL(
+				delta_cc_soc * 100, FULL_PERCENT_28BIT);
 		chip->sw_cc_soc_data.full_capacity =
 			chip->sw_cc_soc_data.delta_soc +
 			chip->sw_cc_soc_data.init_sys_soc;
@@ -3539,6 +4281,395 @@
 	return rc;
 }
 
+static int fg_restore_cc_soc(struct fg_chip *chip)
+{
+	int rc;
+
+	if (!chip->use_last_cc_soc || !chip->last_cc_soc)
+		return 0;
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("Restoring cc_soc: %lld\n", chip->last_cc_soc);
+
+	rc = fg_mem_write(chip, (u8 *)&chip->last_cc_soc,
+			fg_data[FG_DATA_CC_CHARGE].address, 4,
+			fg_data[FG_DATA_CC_CHARGE].offset, 0);
+	if (rc)
+		pr_err("failed to update CC_SOC rc=%d\n", rc);
+	else
+		chip->use_last_cc_soc = false;
+
+	return rc;
+}
+
+#define SRAM_MONOTONIC_SOC_REG		0x574
+#define SRAM_MONOTONIC_SOC_OFFSET	2
+static int fg_restore_soc(struct fg_chip *chip)
+{
+	int rc;
+	u16 msoc;
+
+	if (chip->use_last_soc && chip->last_soc)
+		msoc = DIV_ROUND_CLOSEST(chip->last_soc * 0xFFFF,
+				FULL_SOC_RAW);
+	else
+		return 0;
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("Restored soc: %d\n", msoc);
+
+	rc = fg_mem_write(chip, (u8 *)&msoc, SRAM_MONOTONIC_SOC_REG, 2,
+			SRAM_MONOTONIC_SOC_OFFSET, 0);
+	if (rc)
+		pr_err("failed to write M_SOC_REG rc=%d\n", rc);
+
+	return rc;
+}
+
+#define NOM_CAP_REG			0x4F4
+#define CAPACITY_DELTA_DECIPCT		500
+static int load_battery_aging_data(struct fg_chip *chip)
+{
+	int rc = 0;
+	u8 buffer[2];
+	int16_t cc_mah;
+	int64_t delta_cc_uah, pct_nom_cap_uah;
+
+	rc = fg_mem_read(chip, buffer, NOM_CAP_REG, 2, 0, 0);
+	if (rc) {
+		pr_err("Failed to read nominal capacitance: %d\n", rc);
+		goto out;
+	}
+
+	chip->nom_cap_uah = bcap_uah_2b(buffer);
+	chip->actual_cap_uah = chip->nom_cap_uah;
+
+	if (chip->learning_data.learned_cc_uah == 0) {
+		chip->learning_data.learned_cc_uah = chip->nom_cap_uah;
+		fg_cap_learning_save_data(chip);
+	} else if (chip->learning_data.feedback_on) {
+		delta_cc_uah = abs(chip->learning_data.learned_cc_uah -
+					chip->nom_cap_uah);
+		pct_nom_cap_uah = div64_s64((int64_t)chip->nom_cap_uah *
+				CAPACITY_DELTA_DECIPCT, 1000);
+		/*
+		 * If the learned capacity differs from the nominal capacity
+		 * by more than 50%, overwrite the learned capacity with the
+		 * nominal capacity.
+		 */
+		if (chip->nom_cap_uah && delta_cc_uah > pct_nom_cap_uah) {
+			if (fg_debug_mask & FG_AGING) {
+				pr_info("learned_cc_uah: %lld is out of expected range\n",
+					chip->learning_data.learned_cc_uah);
+				pr_info("Capping it to nominal:%d\n",
+					chip->nom_cap_uah);
+			}
+			chip->learning_data.learned_cc_uah = chip->nom_cap_uah;
+			fg_cap_learning_save_data(chip);
+		} else {
+			cc_mah = div64_s64(chip->learning_data.learned_cc_uah,
+					1000);
+			rc = fg_calc_and_store_cc_soc_coeff(chip, cc_mah);
+			if (rc)
+				pr_err("Error in restoring cc_soc_coeff, rc:%d\n",
+					rc);
+		}
+	}
+out:
+	return rc;
+}
+
+static void fg_restore_battery_info(struct fg_chip *chip)
+{
+	int rc;
+	char buf[4] = {0, 0, 0, 0};
+
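+	/*
+	 * Convert the stored SOC percentage back to the raw monotonic scale,
+	 * push it (and the derived CC_SOC) into SRAM, clear the accumulated
+	 * voltage integration error, and reapply the learned capacity.
+	 */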
+	chip->last_soc = DIV_ROUND_CLOSEST(chip->batt_info[BATT_INFO_SOC] *
+				FULL_SOC_RAW, FULL_CAPACITY);
+	chip->last_cc_soc = div64_s64((int64_t)chip->last_soc *
+				FULL_PERCENT_28BIT, FULL_SOC_RAW);
+	chip->use_last_soc = true;
+	chip->use_last_cc_soc = true;
+	rc = fg_restore_soc(chip);
+	if (rc) {
+		pr_err("Error in restoring soc, rc=%d\n", rc);
+		goto out;
+	}
+
+	rc = fg_restore_cc_soc(chip);
+	if (rc) {
+		pr_err("Error in restoring cc_soc, rc=%d\n", rc);
+		goto out;
+	}
+
+	rc = fg_mem_write(chip, buf,
+			fg_data[FG_DATA_VINT_ERR].address,
+			fg_data[FG_DATA_VINT_ERR].len,
+			fg_data[FG_DATA_VINT_ERR].offset, 0);
+	if (rc) {
+		pr_err("Failed to write to VINT_ERR, rc=%d\n", rc);
+		goto out;
+	}
+
+	chip->learning_data.learned_cc_uah = chip->batt_info[BATT_INFO_FCC];
+	rc = load_battery_aging_data(chip);
+	if (rc) {
+		pr_err("Failed to load battery aging data, rc:%d\n", rc);
+		goto out;
+	}
+
+	if (chip->power_supply_registered)
+		power_supply_changed(chip->bms_psy);
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("Restored battery info!\n");
+
+out:
+	return;
+}
+
+#define DELTA_BATT_TEMP		30
+static bool fg_validate_battery_info(struct fg_chip *chip)
+{
+	int i, delta_pct, batt_id_kohm, batt_temp, batt_volt_mv, batt_soc;
+
+	for (i = 1; i < BATT_INFO_MAX; i++) {
+		if (fg_debug_mask & FG_STATUS)
+			pr_info("batt_info[%d]: %d\n", i, chip->batt_info[i]);
+
+		if ((chip->batt_info[i] == 0 && i != BATT_INFO_TEMP) ||
+			chip->batt_info[i] == INT_MAX) {
+			if (fg_debug_mask & FG_STATUS)
+				pr_info("batt_info[%d]:%d is invalid\n", i,
+					chip->batt_info[i]);
+			return false;
+		}
+	}
+
+	batt_id_kohm = get_sram_prop_now(chip, FG_DATA_BATT_ID) / 1000;
+	if (batt_id_kohm != chip->batt_info[BATT_INFO_RES_ID]) {
+		if (fg_debug_mask & FG_STATUS)
+			pr_info("batt_id(%dK) does not match the stored batt_id(%dK)\n",
+				batt_id_kohm,
+				chip->batt_info[BATT_INFO_RES_ID]);
+		return false;
+	}
+
+	batt_temp = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
+	if (abs(chip->batt_info[BATT_INFO_TEMP] - batt_temp) >
+			DELTA_BATT_TEMP) {
+		if (fg_debug_mask & FG_STATUS)
+			pr_info("batt_temp(%d) is higher/lower than stored batt_temp(%d)\n",
+				batt_temp, chip->batt_info[BATT_INFO_TEMP]);
+		return false;
+	}
+
+	if (chip->batt_info[BATT_INFO_FCC] < 0) {
+		if (fg_debug_mask & FG_STATUS)
+			pr_info("batt_fcc cannot be %d\n",
+				chip->batt_info[BATT_INFO_FCC]);
+		return false;
+	}
+
+	batt_volt_mv = get_sram_prop_now(chip, FG_DATA_VOLTAGE) / 1000;
+	batt_soc = get_monotonic_soc_raw(chip);
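+	/*
+	 * Map the raw monotonic SOC to a percentage while preserving the
+	 * empty and full endpoints (assumes FULL_SOC_RAW is the raw
+	 * full-scale value and FULL_CAPACITY is 100).
+	 */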
+	if (batt_soc != 0 && batt_soc != FULL_SOC_RAW)
+		batt_soc = DIV_ROUND_CLOSEST((batt_soc - 1) *
+				(FULL_CAPACITY - 2), FULL_SOC_RAW - 2) + 1;
+
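+	/*
+	 * Validate either by voltage delta (relative to the maximum battery
+	 * voltage) or by SOC delta, depending on whether OCV based
+	 * validation was requested (presumably via the fg_batt_valid_ocv
+	 * parameter that batt_range_ocv points to).
+	 */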
+	if (*chip->batt_range_ocv && chip->batt_max_voltage_uv > 1000)
+		delta_pct =  DIV_ROUND_CLOSEST(abs(batt_volt_mv -
+				chip->batt_info[BATT_INFO_VOLTAGE]) * 100,
+				chip->batt_max_voltage_uv / 1000);
+	else
+		delta_pct = abs(batt_soc - chip->batt_info[BATT_INFO_SOC]);
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("Validating by %s batt_voltage:%d capacity:%d delta_pct:%d\n",
+			*chip->batt_range_ocv ? "OCV" : "SOC", batt_volt_mv,
+			batt_soc, delta_pct);
+
+	if (*chip->batt_range_pct && delta_pct > *chip->batt_range_pct) {
+		if (fg_debug_mask & FG_STATUS)
+			pr_info("delta_pct(%d) is higher than batt_range_pct(%d)\n",
+				delta_pct, *chip->batt_range_pct);
+		return false;
+	}
+
+	return true;
+}
+
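+/*
+ * Userspace restores battery information by selecting an index through
+ * POWER_SUPPLY_PROP_BATTERY_INFO_ID, writing each value through
+ * POWER_SUPPLY_PROP_BATTERY_INFO, and finally writing INT_MAX - 1 to the
+ * BATT_INFO_NOTIFY index; the stored data is then validated and, if it
+ * looks sane, written back to the FG SRAM.
+ */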
+static int fg_set_battery_info(struct fg_chip *chip, int val)
+{
+	if (chip->batt_info_id < 0 ||
+			chip->batt_info_id >= BATT_INFO_MAX) {
+		pr_err("Invalid batt_info_id %d\n", chip->batt_info_id);
+		chip->batt_info_id = 0;
+		return -EINVAL;
+	}
+
+	if (chip->batt_info_id == BATT_INFO_NOTIFY && val == INT_MAX - 1) {
+		if (fg_debug_mask & FG_STATUS)
+			pr_info("Notified from userspace\n");
+		if (chip->batt_info_restore && !chip->ima_error_handling) {
+			if (!fg_validate_battery_info(chip)) {
+				if (fg_debug_mask & FG_STATUS)
+					pr_info("Validating battery info failed\n");
+			} else {
+				fg_restore_battery_info(chip);
+			}
+		}
+	}
+
+	chip->batt_info[chip->batt_info_id] = val;
+	return 0;
+}
+
+static enum power_supply_property fg_power_props[] = {
+	POWER_SUPPLY_PROP_CAPACITY,
+	POWER_SUPPLY_PROP_CAPACITY_RAW,
+	POWER_SUPPLY_PROP_CURRENT_NOW,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_VOLTAGE_OCV,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+	POWER_SUPPLY_PROP_CHARGE_COUNTER,
+	POWER_SUPPLY_PROP_CHARGE_NOW,
+	POWER_SUPPLY_PROP_CHARGE_NOW_RAW,
+	POWER_SUPPLY_PROP_CHARGE_NOW_ERROR,
+	POWER_SUPPLY_PROP_CHARGE_FULL,
+	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+	POWER_SUPPLY_PROP_TEMP,
+	POWER_SUPPLY_PROP_COOL_TEMP,
+	POWER_SUPPLY_PROP_WARM_TEMP,
+	POWER_SUPPLY_PROP_RESISTANCE,
+	POWER_SUPPLY_PROP_RESISTANCE_ID,
+	POWER_SUPPLY_PROP_BATTERY_TYPE,
+	POWER_SUPPLY_PROP_UPDATE_NOW,
+	POWER_SUPPLY_PROP_ESR_COUNT,
+	POWER_SUPPLY_PROP_VOLTAGE_MIN,
+	POWER_SUPPLY_PROP_CYCLE_COUNT,
+	POWER_SUPPLY_PROP_CYCLE_COUNT_ID,
+	POWER_SUPPLY_PROP_HI_POWER,
+	POWER_SUPPLY_PROP_SOC_REPORTING_READY,
+	POWER_SUPPLY_PROP_IGNORE_FALSE_NEGATIVE_ISENSE,
+	POWER_SUPPLY_PROP_ENABLE_JEITA_DETECTION,
+	POWER_SUPPLY_PROP_BATTERY_INFO,
+	POWER_SUPPLY_PROP_BATTERY_INFO_ID,
+};
+
+static int fg_power_get_property(struct power_supply *psy,
+				       enum power_supply_property psp,
+				       union power_supply_propval *val)
+{
+	struct fg_chip *chip =  power_supply_get_drvdata(psy);
+	bool vbatt_low_sts;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_BATTERY_TYPE:
+		if (chip->battery_missing)
+			val->strval = missing_batt_type;
+		else if (chip->fg_restarting)
+			val->strval = loading_batt_type;
+		else
+			val->strval = chip->batt_type;
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		val->intval = get_prop_capacity(chip);
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY_RAW:
+		val->intval = get_sram_prop_now(chip, FG_DATA_BATT_SOC);
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_NOW_ERROR:
+		val->intval = get_sram_prop_now(chip, FG_DATA_VINT_ERR);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_NOW:
+		val->intval = get_sram_prop_now(chip, FG_DATA_CURRENT);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+		val->intval = get_sram_prop_now(chip, FG_DATA_VOLTAGE);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_OCV:
+		val->intval = get_sram_prop_now(chip, FG_DATA_OCV);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+		val->intval = chip->batt_max_voltage_uv;
+		break;
+	case POWER_SUPPLY_PROP_TEMP:
+		val->intval = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
+		break;
+	case POWER_SUPPLY_PROP_COOL_TEMP:
+		val->intval = get_prop_jeita_temp(chip, FG_MEM_SOFT_COLD);
+		break;
+	case POWER_SUPPLY_PROP_WARM_TEMP:
+		val->intval = get_prop_jeita_temp(chip, FG_MEM_SOFT_HOT);
+		break;
+	case POWER_SUPPLY_PROP_RESISTANCE:
+		val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR);
+		break;
+	case POWER_SUPPLY_PROP_ESR_COUNT:
+		val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR_COUNT);
+		break;
+	case POWER_SUPPLY_PROP_CYCLE_COUNT:
+		val->intval = fg_get_cycle_count(chip);
+		break;
+	case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+		val->intval = chip->cyc_ctr.id;
+		break;
+	case POWER_SUPPLY_PROP_RESISTANCE_ID:
+		val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ID);
+		break;
+	case POWER_SUPPLY_PROP_UPDATE_NOW:
+		val->intval = 0;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+		if (!fg_get_vbatt_status(chip, &vbatt_low_sts))
+			val->intval = (int)vbatt_low_sts;
+		else
+			val->intval = 1;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+		val->intval = chip->nom_cap_uah;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_FULL:
+		val->intval = chip->learning_data.learned_cc_uah;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_NOW:
+		val->intval = chip->learning_data.cc_uah;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_NOW_RAW:
+		val->intval = get_sram_prop_now(chip, FG_DATA_CC_CHARGE);
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+		val->intval = fg_get_current_cc(chip);
+		break;
+	case POWER_SUPPLY_PROP_HI_POWER:
+		val->intval = !!chip->bcl_lpm_disabled;
+		break;
+	case POWER_SUPPLY_PROP_SOC_REPORTING_READY:
+		val->intval = !!chip->soc_reporting_ready;
+		break;
+	case POWER_SUPPLY_PROP_IGNORE_FALSE_NEGATIVE_ISENSE:
+		val->intval = !chip->allow_false_negative_isense;
+		break;
+	case POWER_SUPPLY_PROP_ENABLE_JEITA_DETECTION:
+		val->intval = chip->use_soft_jeita_irq;
+		break;
+	case POWER_SUPPLY_PROP_BATTERY_INFO:
+		if (chip->batt_info_id < 0 ||
+				chip->batt_info_id >= BATT_INFO_MAX)
+			return -EINVAL;
+		val->intval = chip->batt_info[chip->batt_info_id];
+		break;
+	case POWER_SUPPLY_PROP_BATTERY_INFO_ID:
+		val->intval = chip->batt_info_id;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int fg_power_set_property(struct power_supply *psy,
 				  enum power_supply_property psp,
 				  const union power_supply_propval *val)
@@ -3557,6 +4688,67 @@
 		if (val->intval)
 			update_sram_data(chip, &unused);
 		break;
+	case POWER_SUPPLY_PROP_IGNORE_FALSE_NEGATIVE_ISENSE:
+		rc = set_prop_ignore_false_negative_isense(chip, !!val->intval);
+		if (rc)
+			pr_err("set_prop_ignore_false_negative_isense failed, rc=%d\n",
+							rc);
+		else
+			chip->allow_false_negative_isense = !val->intval;
+		break;
+	case POWER_SUPPLY_PROP_ENABLE_JEITA_DETECTION:
+		if (chip->use_soft_jeita_irq == !!val->intval) {
+			pr_debug("JEITA irq %s, ignore!\n",
+				chip->use_soft_jeita_irq ?
+				"enabled" : "disabled");
+			break;
+		}
+		chip->use_soft_jeita_irq = !!val->intval;
+		if (chip->use_soft_jeita_irq) {
+			if (chip->batt_irq[JEITA_SOFT_COLD].disabled) {
+				enable_irq(
+					chip->batt_irq[JEITA_SOFT_COLD].irq);
+				chip->batt_irq[JEITA_SOFT_COLD].disabled =
+								false;
+			}
+			if (!chip->batt_irq[JEITA_SOFT_COLD].wakeup) {
+				enable_irq_wake(
+					chip->batt_irq[JEITA_SOFT_COLD].irq);
+				chip->batt_irq[JEITA_SOFT_COLD].wakeup = true;
+			}
+			if (chip->batt_irq[JEITA_SOFT_HOT].disabled) {
+				enable_irq(
+					chip->batt_irq[JEITA_SOFT_HOT].irq);
+				chip->batt_irq[JEITA_SOFT_HOT].disabled = false;
+			}
+			if (!chip->batt_irq[JEITA_SOFT_HOT].wakeup) {
+				enable_irq_wake(
+					chip->batt_irq[JEITA_SOFT_HOT].irq);
+				chip->batt_irq[JEITA_SOFT_HOT].wakeup = true;
+			}
+		} else {
+			if (chip->batt_irq[JEITA_SOFT_COLD].wakeup) {
+				disable_irq_wake(
+					chip->batt_irq[JEITA_SOFT_COLD].irq);
+				chip->batt_irq[JEITA_SOFT_COLD].wakeup = false;
+			}
+			if (!chip->batt_irq[JEITA_SOFT_COLD].disabled) {
+				disable_irq_nosync(
+					chip->batt_irq[JEITA_SOFT_COLD].irq);
+				chip->batt_irq[JEITA_SOFT_COLD].disabled = true;
+			}
+			if (chip->batt_irq[JEITA_SOFT_HOT].wakeup) {
+				disable_irq_wake(
+					chip->batt_irq[JEITA_SOFT_HOT].irq);
+				chip->batt_irq[JEITA_SOFT_HOT].wakeup = false;
+			}
+			if (!chip->batt_irq[JEITA_SOFT_HOT].disabled) {
+				disable_irq_nosync(
+					chip->batt_irq[JEITA_SOFT_HOT].irq);
+				chip->batt_irq[JEITA_SOFT_HOT].disabled = true;
+			}
+		}
+		break;
 	case POWER_SUPPLY_PROP_STATUS:
 		chip->prev_status = chip->status;
 		chip->status = val->intval;
@@ -3599,6 +4791,12 @@
 			schedule_work(&chip->bcl_hi_power_work);
 		}
 		break;
+	case POWER_SUPPLY_PROP_BATTERY_INFO:
+		rc = fg_set_battery_info(chip, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_BATTERY_INFO_ID:
+		chip->batt_info_id = val->intval;
+		break;
 	default:
 		return -EINVAL;
 	};
@@ -3613,6 +4811,8 @@
 	case POWER_SUPPLY_PROP_COOL_TEMP:
 	case POWER_SUPPLY_PROP_WARM_TEMP:
 	case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+	case POWER_SUPPLY_PROP_BATTERY_INFO:
+	case POWER_SUPPLY_PROP_BATTERY_INFO_ID:
 		return 1;
 	default:
 		break;
@@ -3807,21 +5007,197 @@
 	fg_relax(&chip->gain_comp_wakeup_source);
 }
 
-#define BATT_MISSING_STS BIT(6)
-static bool is_battery_missing(struct fg_chip *chip)
+static void cc_soc_store_work(struct work_struct *work)
+{
+	struct fg_chip *chip = container_of(work, struct fg_chip,
+					cc_soc_store_work);
+	int cc_soc_pct;
+
+	if (!chip->nom_cap_uah) {
+		pr_err("nom_cap_uah zero!\n");
+		fg_relax(&chip->cc_soc_wakeup_source);
+		return;
+	}
+
+	cc_soc_pct = get_sram_prop_now(chip, FG_DATA_CC_CHARGE);
+	cc_soc_pct = div64_s64(cc_soc_pct * 100,
+				chip->nom_cap_uah);
+	chip->last_cc_soc = div64_s64((int64_t)chip->last_soc *
+				FULL_PERCENT_28BIT, FULL_SOC_RAW);
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("cc_soc_pct: %d last_cc_soc: %lld\n", cc_soc_pct,
+			chip->last_cc_soc);
+
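+	/*
+	 * If the coulomb counter based SOC has drifted beyond the configured
+	 * fg-cc-soc-limit-pct threshold, treat it as an FG lockup and kick
+	 * off IMA error recovery.
+	 */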
+	if (fg_reset_on_lockup && (chip->cc_soc_limit_pct > 0 &&
+			cc_soc_pct >= chip->cc_soc_limit_pct)) {
+		pr_err("CC_SOC out of range\n");
+		fg_check_ima_error_handling(chip);
+	}
+
+	fg_relax(&chip->cc_soc_wakeup_source);
+}
+
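+/* Re-check the hard JEITA status every 10 seconds while a soft limit holds */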
+#define HARD_JEITA_ALARM_CHECK_NS	10000000000
+static enum alarmtimer_restart fg_hard_jeita_alarm_cb(struct alarm *alarm,
+						ktime_t now)
+{
+	struct fg_chip *chip = container_of(alarm,
+			struct fg_chip, hard_jeita_alarm);
+	int rc, health = POWER_SUPPLY_HEALTH_UNKNOWN;
+	u8 regval;
+	bool batt_hot, batt_cold;
+	union power_supply_propval val = {0, };
+
+	if (!is_usb_present(chip)) {
+		pr_debug("USB plugged out, stop the timer!\n");
+		return ALARMTIMER_NORESTART;
+	}
+
+	rc = fg_read(chip, &regval, BATT_INFO_STS(chip->batt_base), 1);
+	if (rc) {
+		pr_err("read batt_sts failed, rc=%d\n", rc);
+		goto recheck;
+	}
+
+	batt_hot = !!(regval & JEITA_HARD_HOT_RT_STS);
+	batt_cold = !!(regval & JEITA_HARD_COLD_RT_STS);
+	if (batt_hot && batt_cold) {
+		pr_debug("Hot && cold can't co-exist\n");
+		goto recheck;
+	}
+
+	if ((batt_hot == chip->batt_hot) && (batt_cold == chip->batt_cold)) {
+		pr_debug("battery JEITA state not changed, ignore\n");
+		goto recheck;
+	}
+
+	if (batt_cold != chip->batt_cold) {
+		/* cool --> cold */
+		if (chip->batt_cool) {
+			chip->batt_cool = false;
+			chip->batt_cold = true;
+			health = POWER_SUPPLY_HEALTH_COLD;
+		} else if (chip->batt_cold) { /* cold --> cool */
+			chip->batt_cool = true;
+			chip->batt_cold = false;
+			health = POWER_SUPPLY_HEALTH_COOL;
+		}
+	}
+
+	if (batt_hot != chip->batt_hot) {
+		/* warm --> hot */
+		if (chip->batt_warm) {
+			chip->batt_warm = false;
+			chip->batt_hot = true;
+			health = POWER_SUPPLY_HEALTH_OVERHEAT;
+		} else if (chip->batt_hot) { /* hot --> warm */
+			chip->batt_hot = false;
+			chip->batt_warm = true;
+			health = POWER_SUPPLY_HEALTH_WARM;
+		}
+	}
+
+	if (health != POWER_SUPPLY_HEALTH_UNKNOWN) {
+		pr_debug("FG report battery health: %d\n", health);
+		val.intval = health;
+		rc = power_supply_set_property(chip->batt_psy,
+				POWER_SUPPLY_PROP_HEALTH, &val);
+		if (rc)
+			pr_err("Set batt_psy health: %d failed\n", health);
+	}
+
+recheck:
+	alarm_forward_now(alarm, ns_to_ktime(HARD_JEITA_ALARM_CHECK_NS));
+	return ALARMTIMER_RESTART;
+}
+
+#define BATT_SOFT_COLD_STS	BIT(0)
+#define BATT_SOFT_HOT_STS	BIT(1)
+static irqreturn_t fg_jeita_soft_hot_irq_handler(int irq, void *_chip)
 {
 	int rc;
-	u8 fg_batt_sts;
+	struct fg_chip *chip = _chip;
+	u8 regval;
+	bool batt_warm;
+	union power_supply_propval val = {0, };
 
-	rc = fg_read(chip, &fg_batt_sts,
-				 INT_RT_STS(chip->batt_base), 1);
+	if (!is_charger_available(chip))
+		return IRQ_HANDLED;
+
+	rc = fg_read(chip, &regval, INT_RT_STS(chip->batt_base), 1);
 	if (rc) {
 		pr_err("spmi read failed: addr=%03X, rc=%d\n",
 				INT_RT_STS(chip->batt_base), rc);
-		return false;
+		return IRQ_HANDLED;
 	}
 
-	return (fg_batt_sts & BATT_MISSING_STS) ? true : false;
+	batt_warm = !!(regval & BATT_SOFT_HOT_STS);
+	if (chip->batt_warm == batt_warm) {
+		pr_debug("warm state not changed, ignore!\n");
+		return IRQ_HANDLED;
+	}
+
+	chip->batt_warm = batt_warm;
+	if (batt_warm) {
+		val.intval = POWER_SUPPLY_HEALTH_WARM;
+		power_supply_set_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_HEALTH, &val);
+		/* kick the alarm timer for hard hot polling */
+		alarm_start_relative(&chip->hard_jeita_alarm,
+				ns_to_ktime(HARD_JEITA_ALARM_CHECK_NS));
+	} else {
+		val.intval = POWER_SUPPLY_HEALTH_GOOD;
+		power_supply_set_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_HEALTH, &val);
+		/* cancel the alarm timer */
+		alarm_try_to_cancel(&chip->hard_jeita_alarm);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_jeita_soft_cold_irq_handler(int irq, void *_chip)
+{
+	int rc;
+	struct fg_chip *chip = _chip;
+	u8 regval;
+	bool batt_cool;
+	union power_supply_propval val = {0, };
+
+	if (!is_charger_available(chip))
+		return IRQ_HANDLED;
+
+	rc = fg_read(chip, &regval, INT_RT_STS(chip->batt_base), 1);
+	if (rc) {
+		pr_err("spmi read failed: addr=%03X, rc=%d\n",
+				INT_RT_STS(chip->batt_base), rc);
+		return IRQ_HANDLED;
+	}
+
+	batt_cool = !!(regval & BATT_SOFT_COLD_STS);
+	if (chip->batt_cool == batt_cool) {
+		pr_debug("cool state not changed, ignore\n");
+		return IRQ_HANDLED;
+	}
+
+	chip->batt_cool = batt_cool;
+	if (batt_cool) {
+		val.intval = POWER_SUPPLY_HEALTH_COOL;
+		power_supply_set_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_HEALTH, &val);
+		/* kick the alarm timer for hard cold polling */
+		alarm_start_relative(&chip->hard_jeita_alarm,
+				ns_to_ktime(HARD_JEITA_ALARM_CHECK_NS));
+	} else {
+		val.intval = POWER_SUPPLY_HEALTH_GOOD;
+		power_supply_set_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_HEALTH, &val);
+		/* cancel the alarm timer */
+		alarm_try_to_cancel(&chip->hard_jeita_alarm);
+	}
+
+	return IRQ_HANDLED;
 }
 
 #define SOC_FIRST_EST_DONE	BIT(5)
@@ -3841,21 +5217,40 @@
 	return (fg_soc_sts & SOC_FIRST_EST_DONE) ? true : false;
 }
 
+#define FG_EMPTY_DEBOUNCE_MS	1500
 static irqreturn_t fg_vbatt_low_handler(int irq, void *_chip)
 {
 	struct fg_chip *chip = _chip;
-	int rc;
 	bool vbatt_low_sts;
 
 	if (fg_debug_mask & FG_IRQS)
 		pr_info("vbatt-low triggered\n");
 
-	if (chip->status == POWER_SUPPLY_STATUS_CHARGING) {
-		rc = fg_get_vbatt_status(chip, &vbatt_low_sts);
-		if (rc) {
-			pr_err("error in reading vbatt_status, rc:%d\n", rc);
+	/* handle empty soc based on vbatt-low interrupt */
+	if (chip->use_vbat_low_empty_soc) {
+		if (fg_get_vbatt_status(chip, &vbatt_low_sts))
 			goto out;
+
+		if (vbatt_low_sts) {
+			if (fg_debug_mask & FG_IRQS)
+				pr_info("Vbatt is low\n");
+			disable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+			disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
+			chip->vbat_low_irq_enabled = false;
+			fg_stay_awake(&chip->empty_check_wakeup_source);
+			schedule_delayed_work(&chip->check_empty_work,
+				msecs_to_jiffies(FG_EMPTY_DEBOUNCE_MS));
+		} else {
+			if (fg_debug_mask & FG_IRQS)
+				pr_info("Vbatt is high\n");
+			chip->soc_empty = false;
 		}
+		goto out;
+	}
+
+	if (chip->status == POWER_SUPPLY_STATUS_CHARGING) {
+		if (fg_get_vbatt_status(chip, &vbatt_low_sts))
+			goto out;
 		if (!vbatt_low_sts && chip->vbat_low_irq_enabled) {
 			if (fg_debug_mask & FG_IRQS)
 				pr_info("disabling vbatt_low irq\n");
@@ -3876,8 +5271,10 @@
 	bool batt_missing = is_battery_missing(chip);
 
 	if (batt_missing) {
+		fg_cap_learning_stop(chip);
 		chip->battery_missing = true;
 		chip->profile_loaded = false;
+		chip->soc_reporting_ready = false;
 		chip->batt_type = default_batt_type;
 		mutex_lock(&chip->cyc_ctr.lock);
 		if (fg_debug_mask & FG_IRQS)
@@ -3885,17 +5282,10 @@
 		clear_cycle_counter(chip);
 		mutex_unlock(&chip->cyc_ctr.lock);
 	} else {
-		if (!chip->use_otp_profile) {
-			reinit_completion(&chip->batt_id_avail);
-			reinit_completion(&chip->first_soc_done);
-			schedule_delayed_work(&chip->batt_profile_init, 0);
-			cancel_delayed_work(&chip->update_sram_data);
-			schedule_delayed_work(
-				&chip->update_sram_data,
-				msecs_to_jiffies(0));
-		} else {
+		if (!chip->use_otp_profile)
+			fg_handle_battery_insertion(chip);
+		else
 			chip->battery_missing = false;
-		}
 	}
 
 	if (fg_debug_mask & FG_IRQS)
@@ -3943,7 +5333,7 @@
 {
 	struct fg_chip *chip = _chip;
 	u8 soc_rt_sts;
-	int rc;
+	int rc, msoc;
 
 	rc = fg_read(chip, &soc_rt_sts, INT_RT_STS(chip->soc_base), 1);
 	if (rc) {
@@ -3954,6 +5344,37 @@
 	if (fg_debug_mask & FG_IRQS)
 		pr_info("triggered 0x%x\n", soc_rt_sts);
 
+	if (chip->dischg_gain.enable) {
+		fg_stay_awake(&chip->dischg_gain_wakeup_source);
+		schedule_work(&chip->dischg_gain_work);
+	}
+
+	if (chip->soc_slope_limiter_en) {
+		fg_stay_awake(&chip->slope_limit_wakeup_source);
+		schedule_work(&chip->slope_limiter_work);
+	}
+
+	/* Backup last soc every delta soc interrupt */
+	chip->use_last_soc = false;
+	if (fg_reset_on_lockup) {
+		if (!chip->ima_error_handling)
+			chip->last_soc = get_monotonic_soc_raw(chip);
+		if (fg_debug_mask & FG_STATUS)
+			pr_info("last_soc: %d\n", chip->last_soc);
+
+		fg_stay_awake(&chip->cc_soc_wakeup_source);
+		schedule_work(&chip->cc_soc_store_work);
+	}
+
+	if (chip->use_vbat_low_empty_soc) {
+		msoc = get_monotonic_soc_raw(chip);
+		if (msoc == 0 || chip->soc_empty) {
+			fg_stay_awake(&chip->empty_check_wakeup_source);
+			schedule_delayed_work(&chip->check_empty_work,
+				msecs_to_jiffies(FG_EMPTY_DEBOUNCE_MS));
+		}
+	}
+
 	schedule_work(&chip->battery_age_work);
 
 	if (chip->power_supply_registered)
@@ -3988,7 +5409,6 @@
 	return IRQ_HANDLED;
 }
 
-#define FG_EMPTY_DEBOUNCE_MS	1500
 static irqreturn_t fg_empty_soc_irq_handler(int irq, void *_chip)
 {
 	struct fg_chip *chip = _chip;
@@ -4100,16 +5520,15 @@
 	fg_relax(&chip->resume_soc_wakeup_source);
 }
 
-
 #define OCV_COEFFS_START_REG		0x4C0
 #define OCV_JUNCTION_REG		0x4D8
-#define NOM_CAP_REG			0x4F4
 #define CUTOFF_VOLTAGE_REG		0x40C
 #define RSLOW_CFG_REG			0x538
 #define RSLOW_CFG_OFFSET		2
 #define RSLOW_THRESH_REG		0x52C
 #define RSLOW_THRESH_OFFSET		0
-#define TEMP_RS_TO_RSLOW_OFFSET		2
+#define RS_TO_RSLOW_CHG_OFFSET		2
+#define RS_TO_RSLOW_DISCHG_OFFSET	0
 #define RSLOW_COMP_REG			0x528
 #define RSLOW_COMP_C1_OFFSET		0
 #define RSLOW_COMP_C2_OFFSET		2
@@ -4117,7 +5536,6 @@
 {
 	u8 buffer[24];
 	int rc, i;
-	int16_t cc_mah;
 
 	fg_mem_lock(chip);
 	rc = fg_mem_read(chip, buffer, OCV_COEFFS_START_REG, 24, 0, 0);
@@ -4138,30 +5556,21 @@
 				chip->ocv_coeffs[8], chip->ocv_coeffs[9],
 				chip->ocv_coeffs[10], chip->ocv_coeffs[11]);
 	}
-	rc = fg_mem_read(chip, buffer, OCV_JUNCTION_REG, 1, 0, 0);
-	chip->ocv_junction_p1p2 = buffer[0] * 100 / 255;
-	rc |= fg_mem_read(chip, buffer, OCV_JUNCTION_REG, 1, 1, 0);
-	chip->ocv_junction_p2p3 = buffer[0] * 100 / 255;
+	rc = fg_mem_read(chip, buffer, OCV_JUNCTION_REG, 2, 0, 0);
 	if (rc) {
 		pr_err("Failed to read ocv junctions: %d\n", rc);
 		goto done;
 	}
-	rc = fg_mem_read(chip, buffer, NOM_CAP_REG, 2, 0, 0);
+
+	chip->ocv_junction_p1p2 = buffer[0] * 100 / 255;
+	chip->ocv_junction_p2p3 = buffer[1] * 100 / 255;
+
+	rc = load_battery_aging_data(chip);
 	if (rc) {
-		pr_err("Failed to read nominal capacitance: %d\n", rc);
+		pr_err("Failed to load battery aging data, rc:%d\n", rc);
 		goto done;
 	}
-	chip->nom_cap_uah = bcap_uah_2b(buffer);
-	chip->actual_cap_uah = chip->nom_cap_uah;
-	if (chip->learning_data.learned_cc_uah == 0) {
-		chip->learning_data.learned_cc_uah = chip->nom_cap_uah;
-		fg_cap_learning_save_data(chip);
-	} else if (chip->learning_data.feedback_on) {
-		cc_mah = div64_s64(chip->learning_data.learned_cc_uah, 1000);
-		rc = fg_calc_and_store_cc_soc_coeff(chip, cc_mah);
-		if (rc)
-			pr_err("Error in restoring cc_soc_coeff, rc:%d\n", rc);
-	}
+
 	rc = fg_mem_read(chip, buffer, CUTOFF_VOLTAGE_REG, 2, 0, 0);
 	if (rc) {
 		pr_err("Failed to read cutoff voltage: %d\n", rc);
@@ -4188,9 +5597,9 @@
 	}
 	chip->rslow_comp.rslow_thr = buffer[0];
 	rc = fg_mem_read(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2,
-			RSLOW_THRESH_OFFSET, 0);
+			RS_TO_RSLOW_CHG_OFFSET, 0);
 	if (rc) {
-		pr_err("unable to read rs to rslow: %d\n", rc);
+		pr_err("unable to read rs to rslow_chg: %d\n", rc);
 		goto done;
 	}
 	memcpy(chip->rslow_comp.rs_to_rslow, buffer, 2);
@@ -4207,6 +5616,68 @@
 	return rc;
 }
 
+static int fg_update_batt_rslow_settings(struct fg_chip *chip)
+{
+	int64_t rs_to_rslow_chg, rs_to_rslow_dischg, batt_esr, rconn_uohm;
+	u8 buffer[2];
+	int rc;
+
+	rc = fg_mem_read(chip, buffer, BATTERY_ESR_REG, 2, ESR_OFFSET, 0);
+	if (rc) {
+		pr_err("unable to read battery_esr: %d\n", rc);
+		goto done;
+	}
+	batt_esr = half_float(buffer);
+
+	rc = fg_mem_read(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2,
+			RS_TO_RSLOW_DISCHG_OFFSET, 0);
+	if (rc) {
+		pr_err("unable to read rs to rslow dischg: %d\n", rc);
+		goto done;
+	}
+	rs_to_rslow_dischg = half_float(buffer);
+
+	rc = fg_mem_read(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2,
+			RS_TO_RSLOW_CHG_OFFSET, 0);
+	if (rc) {
+		pr_err("unable to read rs to rslow chg: %d\n", rc);
+		goto done;
+	}
+	rs_to_rslow_chg = half_float(buffer);
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("rs_rslow_chg: %lld, rs_rslow_dischg: %lld, esr: %lld\n",
+			rs_to_rslow_chg, rs_to_rslow_dischg, batt_esr);
+
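+	/*
+	 * Fold the board connection resistance (rconn) into both rs_to_rslow
+	 * coefficients by scaling each one by esr / (esr + rconn). As a rough
+	 * example with assumed values, esr = 150000 uOhm and rconn = 10 mOhm
+	 * shrink the coefficients by about 6%.
+	 */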
+	rconn_uohm = chip->rconn_mohm * 1000;
+	rs_to_rslow_dischg = div64_s64(rs_to_rslow_dischg * batt_esr,
+					batt_esr + rconn_uohm);
+	rs_to_rslow_chg = div64_s64(rs_to_rslow_chg * batt_esr,
+					batt_esr + rconn_uohm);
+
+	half_float_to_buffer(rs_to_rslow_chg, buffer);
+	rc = fg_mem_write(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2,
+			RS_TO_RSLOW_CHG_OFFSET, 0);
+	if (rc) {
+		pr_err("unable to write rs_to_rslow_chg: %d\n", rc);
+		goto done;
+	}
+
+	half_float_to_buffer(rs_to_rslow_dischg, buffer);
+	rc = fg_mem_write(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2,
+			RS_TO_RSLOW_DISCHG_OFFSET, 0);
+	if (rc) {
+		pr_err("unable to write rs_to_rslow_dischg: %d\n", rc);
+		goto done;
+	}
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("Modified rs_rslow_chg: %lld, rs_rslow_dischg: %lld\n",
+			rs_to_rslow_chg, rs_to_rslow_dischg);
+done:
+	return rc;
+}
+
 #define RSLOW_CFG_MASK		(BIT(2) | BIT(3) | BIT(4) | BIT(5))
 #define RSLOW_CFG_ON_VAL	(BIT(2) | BIT(3))
 #define RSLOW_THRESH_FULL_VAL	0xFF
@@ -4233,7 +5704,7 @@
 
 	half_float_to_buffer(chip->rslow_comp.chg_rs_to_rslow, buffer);
 	rc = fg_mem_write(chip, buffer,
-			TEMP_RS_TO_RSLOW_REG, 2, TEMP_RS_TO_RSLOW_OFFSET, 0);
+			TEMP_RS_TO_RSLOW_REG, 2, RS_TO_RSLOW_CHG_OFFSET, 0);
 	if (rc) {
 		pr_err("unable to write rs to rslow: %d\n", rc);
 		goto done;
@@ -4286,7 +5757,7 @@
 	}
 
 	rc = fg_mem_write(chip, chip->rslow_comp.rs_to_rslow,
-			TEMP_RS_TO_RSLOW_REG, 2, TEMP_RS_TO_RSLOW_OFFSET, 0);
+			TEMP_RS_TO_RSLOW_REG, 2, RS_TO_RSLOW_CHG_OFFSET, 0);
 	if (rc) {
 		pr_err("unable to write rs to rslow: %d\n", rc);
 		goto done;
@@ -4510,6 +5981,58 @@
 	fg_relax(&chip->esr_extract_wakeup_source);
 }
 
+#define KI_COEFF_MEDC_REG		0x400
+#define KI_COEFF_MEDC_OFFSET		0
+#define KI_COEFF_HIGHC_REG		0x404
+#define KI_COEFF_HIGHC_OFFSET		0
+#define DEFAULT_MEDC_VOLTAGE_GAIN	3
+#define DEFAULT_HIGHC_VOLTAGE_GAIN	2
+static void discharge_gain_work(struct work_struct *work)
+{
+	struct fg_chip *chip = container_of(work, struct fg_chip,
+						dischg_gain_work);
+	u8 buf[2];
+	int capacity, rc, i;
+	int64_t medc_val = DEFAULT_MEDC_VOLTAGE_GAIN;
+	int64_t highc_val = DEFAULT_HIGHC_VOLTAGE_GAIN;
+
+	capacity = get_prop_capacity(chip);
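+	/*
+	 * While discharging, walk the SOC thresholds from highest to lowest
+	 * and keep the gain pair of the lowest threshold that is still at or
+	 * above the present capacity; otherwise the default gains apply.
+	 */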
+	if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) {
+		for (i = VOLT_GAIN_MAX - 1; i >= 0; i--) {
+			if (capacity <= chip->dischg_gain.soc[i]) {
+				medc_val = chip->dischg_gain.medc_gain[i];
+				highc_val = chip->dischg_gain.highc_gain[i];
+			}
+		}
+	}
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("Capacity: %d, medc_gain: %lld highc_gain: %lld\n",
+			capacity, medc_val, highc_val);
+
+	medc_val *= MICRO_UNIT;
+	half_float_to_buffer(medc_val, buf);
+	rc = fg_mem_write(chip, buf, KI_COEFF_MEDC_REG, 2,
+				KI_COEFF_MEDC_OFFSET, 0);
+	if (rc)
+		pr_err("Couldn't write to ki_coeff_medc_reg, rc=%d\n", rc);
+	else if (fg_debug_mask & FG_STATUS)
+		pr_info("Value [%x %x] written to ki_coeff_medc\n", buf[0],
+			buf[1]);
+
+	highc_val *= MICRO_UNIT;
+	half_float_to_buffer(highc_val, buf);
+	rc = fg_mem_write(chip, buf, KI_COEFF_HIGHC_REG, 2,
+				KI_COEFF_HIGHC_OFFSET, 0);
+	if (rc)
+		pr_err("Couldn't write to ki_coeff_highc_reg, rc=%d\n", rc);
+	else if (fg_debug_mask & FG_STATUS)
+		pr_info("Value [%x %x] written to ki_coeff_highc\n", buf[0],
+			buf[1]);
+
+	fg_relax(&chip->dischg_gain_wakeup_source);
+}
+
 #define LOW_LATENCY			BIT(6)
 #define BATT_PROFILE_OFFSET		0x4C0
 #define PROFILE_INTEGRITY_REG		0x53C
@@ -4529,7 +6052,7 @@
 		pr_info("restarting fuel gauge...\n");
 
 try_again:
-	if (write_profile) {
+	if (write_profile && !chip->ima_error_handling) {
 		if (!chip->charging_disabled) {
 			pr_err("Charging not yet disabled!\n");
 			return -EINVAL;
@@ -4770,7 +6293,8 @@
 #define BATTERY_PSY_WAIT_MS		2000
 static int fg_batt_profile_init(struct fg_chip *chip)
 {
-	int rc = 0, ret, len, batt_id;
+	int rc = 0, ret;
+	int len, batt_id;
 	struct device_node *node = chip->pdev->dev.of_node;
 	struct device_node *batt_node, *profile_node;
 	const char *data, *batt_type_str;
@@ -4792,6 +6316,19 @@
 		goto no_profile;
 	}
 
+	/* Check whether the charger is ready */
+	if (!is_charger_available(chip))
+		goto reschedule;
+
+	/* Disable charging for a FG cycle before calculating vbat_in_range */
+	if (!chip->charging_disabled) {
+		rc = set_prop_enable_charging(chip, false);
+		if (rc)
+			pr_err("Failed to disable charging, rc=%d\n", rc);
+
+		goto update;
+	}
+
 	batt_node = of_find_node_by_name(node, "qcom,battery-data");
 	if (!batt_node) {
 		pr_warn("No available batterydata, using OTP defaults\n");
@@ -4808,8 +6345,12 @@
 							fg_batt_type);
 	if (IS_ERR_OR_NULL(profile_node)) {
 		rc = PTR_ERR(profile_node);
-		pr_err("couldn't find profile handle %d\n", rc);
-		goto no_profile;
+		if (rc == -EPROBE_DEFER) {
+			goto reschedule;
+		} else {
+			pr_err("couldn't find profile handle rc=%d\n", rc);
+			goto no_profile;
+		}
 	}
 
 	/* read rslow compensation values if they're available */
@@ -4903,18 +6444,6 @@
 		goto no_profile;
 	}
 
-	/* Check whether the charger is ready */
-	if (!is_charger_available(chip))
-		goto reschedule;
-
-	/* Disable charging for a FG cycle before calculating vbat_in_range */
-	if (!chip->charging_disabled) {
-		rc = set_prop_enable_charging(chip, false);
-		if (rc)
-			pr_err("Failed to disable charging, rc=%d\n", rc);
-
-		goto reschedule;
-	}
 
 	vbat_in_range = get_vbat_est_diff(chip)
 			< settings[FG_MEM_VBAT_EST_DIFF].value * 1000;
@@ -4956,11 +6485,7 @@
 				chip->batt_profile, len, false);
 	}
 
-	if (chip->power_supply_registered)
-		power_supply_changed(chip->bms_psy);
-
 	memcpy(chip->batt_profile, data, len);
-
 	chip->batt_profile_len = len;
 
 	if (fg_debug_mask & FG_STATUS)
@@ -4995,6 +6520,11 @@
 		}
 	}
 
+	if (chip->rconn_mohm > 0) {
+		rc = fg_update_batt_rslow_settings(chip);
+		if (rc)
+			pr_err("Error in updating ESR, rc=%d\n", rc);
+	}
 done:
 	if (chip->charging_disabled) {
 		rc = set_prop_enable_charging(chip, true);
@@ -5008,8 +6538,22 @@
 		chip->batt_type = fg_batt_type;
 	else
 		chip->batt_type = batt_type_str;
+
+	if (chip->first_profile_loaded && fg_reset_on_lockup) {
+		if (fg_debug_mask & FG_STATUS)
+			pr_info("restoring SRAM registers\n");
+		rc = fg_backup_sram_registers(chip, false);
+		if (rc)
+			pr_err("Couldn't restore sram registers\n");
+
+		/* Read the cycle counter back from FG SRAM */
+		if (chip->cyc_ctr.en)
+			restore_cycle_counter(chip);
+	}
+
 	chip->first_profile_loaded = true;
 	chip->profile_loaded = true;
+	chip->soc_reporting_ready = true;
 	chip->battery_missing = is_battery_missing(chip);
 	update_chg_iterm(chip);
 	update_cc_cv_setpoint(chip);
@@ -5025,8 +6569,10 @@
 	fg_relax(&chip->profile_wakeup_source);
 	pr_info("Battery SOC: %d, V: %duV\n", get_prop_capacity(chip),
 		fg_data[FG_DATA_VOLTAGE].value);
+	complete_all(&chip->fg_reset_done);
 	return rc;
 no_profile:
+	chip->soc_reporting_ready = true;
 	if (chip->charging_disabled) {
 		rc = set_prop_enable_charging(chip, true);
 		if (rc)
@@ -5039,14 +6585,15 @@
 		power_supply_changed(chip->bms_psy);
 	fg_relax(&chip->profile_wakeup_source);
 	return rc;
-reschedule:
-	schedule_delayed_work(
-		&chip->batt_profile_init,
-		msecs_to_jiffies(BATTERY_PSY_WAIT_MS));
+update:
 	cancel_delayed_work(&chip->update_sram_data);
 	schedule_delayed_work(
 		&chip->update_sram_data,
 		msecs_to_jiffies(0));
+reschedule:
+	schedule_delayed_work(
+		&chip->batt_profile_init,
+		msecs_to_jiffies(BATTERY_PSY_WAIT_MS));
 	fg_relax(&chip->profile_wakeup_source);
 	return 0;
 }
@@ -5056,14 +6603,41 @@
 	struct fg_chip *chip = container_of(work,
 				struct fg_chip,
 				check_empty_work.work);
+	bool vbatt_low_sts;
+	int msoc;
 
-	if (fg_is_batt_empty(chip)) {
+	/* handle empty soc based on vbatt-low interrupt */
+	if (chip->use_vbat_low_empty_soc) {
+		if (fg_get_vbatt_status(chip, &vbatt_low_sts))
+			goto out;
+
+		msoc = get_monotonic_soc_raw(chip);
+
+		if (fg_debug_mask & FG_STATUS)
+			pr_info("Vbatt_low: %d, msoc: %d\n", vbatt_low_sts,
+				msoc);
+		if (vbatt_low_sts || (msoc == 0))
+			chip->soc_empty = true;
+		else
+			chip->soc_empty = false;
+
+		if (chip->power_supply_registered)
+			power_supply_changed(chip->bms_psy);
+
+		if (!chip->vbat_low_irq_enabled) {
+			enable_irq(chip->batt_irq[VBATT_LOW].irq);
+			enable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+			chip->vbat_low_irq_enabled = true;
+		}
+	} else if (fg_is_batt_empty(chip)) {
 		if (fg_debug_mask & FG_STATUS)
 			pr_info("EMPTY SOC high\n");
 		chip->soc_empty = true;
 		if (chip->power_supply_registered)
 			power_supply_changed(chip->bms_psy);
 	}
+
+out:
 	fg_relax(&chip->empty_check_wakeup_source);
 }
 
@@ -5103,7 +6677,7 @@
 	int rc;
 	u8 buffer[3];
 	int bsoc;
-	int resume_soc_raw = FULL_SOC_RAW - settings[FG_MEM_RESUME_SOC].value;
+	int resume_soc_raw = settings[FG_MEM_RESUME_SOC].value;
 	bool disable = false;
 	u8 reg;
 
@@ -5318,6 +6892,98 @@
 	}								\
 } while (0)
 
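+/*
+ * Parse the discharge voltage-gain tables from DT. A hypothetical node
+ * (illustrative values only, not taken from any shipping devicetree) could
+ * look like:
+ *
+ *	qcom,fg-dischg-voltage-gain-ctrl;
+ *	qcom,fg-dischg-voltage-gain-soc = <7 15 20>;
+ *	qcom,fg-dischg-med-voltage-gain = <6 9 12>;
+ *	qcom,fg-dischg-high-voltage-gain = <9 12 15>;
+ *
+ * Each array must have exactly VOLT_GAIN_MAX entries; on any parse error
+ * discharge gain control is left disabled.
+ */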
+static int fg_dischg_gain_dt_init(struct fg_chip *chip)
+{
+	struct device_node *node = chip->pdev->dev.of_node;
+	struct property *prop;
+	int i, rc = 0;
+	size_t size;
+
+	prop = of_find_property(node, "qcom,fg-dischg-voltage-gain-soc",
+			NULL);
+	if (!prop) {
+		pr_err("qcom,fg-dischg-voltage-gain-soc not specified\n");
+		goto out;
+	}
+
+	size = prop->length / sizeof(u32);
+	if (size != VOLT_GAIN_MAX) {
+		pr_err("Voltage gain SOC specified is of incorrect size\n");
+		goto out;
+	}
+
+	rc = of_property_read_u32_array(node,
+		"qcom,fg-dischg-voltage-gain-soc", chip->dischg_gain.soc, size);
+	if (rc < 0) {
+		pr_err("Reading qcom,fg-dischg-voltage-gain-soc failed, rc=%d\n",
+			rc);
+		goto out;
+	}
+
+	for (i = 0; i < VOLT_GAIN_MAX; i++) {
+		if (chip->dischg_gain.soc[i] > 100) {
+			pr_err("Incorrect dischg-voltage-gain-soc\n");
+			goto out;
+		}
+	}
+
+	prop = of_find_property(node, "qcom,fg-dischg-med-voltage-gain",
+			NULL);
+	if (!prop) {
+		pr_err("qcom,fg-dischg-med-voltage-gain not specified\n");
+		goto out;
+	}
+
+	size = prop->length / sizeof(u32);
+	if (size != VOLT_GAIN_MAX) {
+		pr_err("med-voltage-gain specified is of incorrect size\n");
+		goto out;
+	}
+
+	rc = of_property_read_u32_array(node,
+		"qcom,fg-dischg-med-voltage-gain", chip->dischg_gain.medc_gain,
+		size);
+	if (rc < 0) {
+		pr_err("Reading qcom,fg-dischg-med-voltage-gain failed, rc=%d\n",
+			rc);
+		goto out;
+	}
+
+	prop = of_find_property(node, "qcom,fg-dischg-high-voltage-gain",
+			NULL);
+	if (!prop) {
+		pr_err("qcom,fg-dischg-high-voltage-gain not specified\n");
+		goto out;
+	}
+
+	size = prop->length / sizeof(u32);
+	if (size != VOLT_GAIN_MAX) {
+		pr_err("high-voltage-gain specified is of incorrect size\n");
+		goto out;
+	}
+
+	rc = of_property_read_u32_array(node,
+		"qcom,fg-dischg-high-voltage-gain",
+		chip->dischg_gain.highc_gain, size);
+	if (rc < 0) {
+		pr_err("Reading qcom,fg-dischg-high-voltage-gain failed, rc=%d\n",
+			rc);
+		goto out;
+	}
+
+	if (fg_debug_mask & FG_STATUS) {
+		for (i = 0; i < VOLT_GAIN_MAX; i++)
+			pr_info("SOC:%d MedC_Gain:%d HighC_Gain: %d\n",
+				chip->dischg_gain.soc[i],
+				chip->dischg_gain.medc_gain[i],
+				chip->dischg_gain.highc_gain[i]);
+	}
+	return 0;
+out:
+	chip->dischg_gain.enable = false;
+	return rc;
+}
+
 #define DEFAULT_EVALUATION_CURRENT_MA	1000
 static int fg_of_init(struct fg_chip *chip)
 {
@@ -5395,6 +7061,10 @@
 			"cl-max-start-capacity", rc, 15);
 	OF_READ_PROPERTY(chip->learning_data.vbat_est_thr_uv,
 			"cl-vbat-est-thr-uv", rc, 40000);
+	OF_READ_PROPERTY(chip->learning_data.max_cap_limit,
+			"cl-max-limit-deciperc", rc, 0);
+	OF_READ_PROPERTY(chip->learning_data.min_cap_limit,
+			"cl-min-limit-deciperc", rc, 0);
 	OF_READ_PROPERTY(chip->evaluation_current,
 			"aging-eval-current-ma", rc,
 			DEFAULT_EVALUATION_CURRENT_MA);
@@ -5455,6 +7125,77 @@
 	chip->esr_pulse_tune_en = of_property_read_bool(node,
 					"qcom,esr-pulse-tuning-en");
 
+	chip->soc_slope_limiter_en = of_property_read_bool(node,
+					"qcom,fg-control-slope-limiter");
+	if (chip->soc_slope_limiter_en) {
+		OF_READ_PROPERTY(chip->slope_limit_temp,
+			"fg-slope-limit-temp-threshold", rc,
+			SLOPE_LIMIT_TEMP_THRESHOLD);
+
+		OF_READ_PROPERTY(chip->slope_limit_coeffs[LOW_TEMP_CHARGE],
+			"fg-slope-limit-low-temp-chg", rc,
+			SLOPE_LIMIT_LOW_TEMP_CHG);
+
+		OF_READ_PROPERTY(chip->slope_limit_coeffs[HIGH_TEMP_CHARGE],
+			"fg-slope-limit-high-temp-chg", rc,
+			SLOPE_LIMIT_HIGH_TEMP_CHG);
+
+		OF_READ_PROPERTY(chip->slope_limit_coeffs[LOW_TEMP_DISCHARGE],
+			"fg-slope-limit-low-temp-dischg", rc,
+			SLOPE_LIMIT_LOW_TEMP_DISCHG);
+
+		OF_READ_PROPERTY(chip->slope_limit_coeffs[HIGH_TEMP_DISCHARGE],
+			"fg-slope-limit-high-temp-dischg", rc,
+			SLOPE_LIMIT_HIGH_TEMP_DISCHG);
+
+		if (fg_debug_mask & FG_STATUS)
+			pr_info("slope-limiter, temp: %d coeffs: [%d %d %d %d]\n",
+				chip->slope_limit_temp,
+				chip->slope_limit_coeffs[LOW_TEMP_CHARGE],
+				chip->slope_limit_coeffs[HIGH_TEMP_CHARGE],
+				chip->slope_limit_coeffs[LOW_TEMP_DISCHARGE],
+				chip->slope_limit_coeffs[HIGH_TEMP_DISCHARGE]);
+	}
+
+	OF_READ_PROPERTY(chip->rconn_mohm, "fg-rconn-mohm", rc, 0);
+
+	chip->dischg_gain.enable = of_property_read_bool(node,
+					"qcom,fg-dischg-voltage-gain-ctrl");
+	if (chip->dischg_gain.enable) {
+		rc = fg_dischg_gain_dt_init(chip);
+		if (rc) {
+			pr_err("Error in reading dischg_gain parameters, rc=%d\n",
+				rc);
+			rc = 0;
+		}
+	}
+
+	chip->use_vbat_low_empty_soc = of_property_read_bool(node,
+					"qcom,fg-use-vbat-low-empty-soc");
+
+	OF_READ_PROPERTY(chip->batt_temp_low_limit,
+			"fg-batt-temp-low-limit", rc, BATT_TEMP_LOW_LIMIT);
+
+	OF_READ_PROPERTY(chip->batt_temp_high_limit,
+			"fg-batt-temp-high-limit", rc, BATT_TEMP_HIGH_LIMIT);
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("batt-temp-low_limit: %d batt-temp-high_limit: %d\n",
+			chip->batt_temp_low_limit, chip->batt_temp_high_limit);
+
+	OF_READ_PROPERTY(chip->cc_soc_limit_pct, "fg-cc-soc-limit-pct", rc, 0);
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("cc-soc-limit-pct: %d\n", chip->cc_soc_limit_pct);
+
+	chip->batt_info_restore = of_property_read_bool(node,
+					"qcom,fg-restore-batt-info");
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("restore: %d validate_by_ocv: %d range_pct: %d\n",
+			chip->batt_info_restore, fg_batt_valid_ocv,
+			fg_batt_range_pct);
+
 	return rc;
 }
 
@@ -5528,15 +7269,22 @@
 					chip->soc_irq[FULL_SOC].irq, rc);
 				return rc;
 			}
-			rc = devm_request_irq(chip->dev,
-				chip->soc_irq[EMPTY_SOC].irq,
-				fg_empty_soc_irq_handler,
-				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-				"empty-soc", chip);
-			if (rc < 0) {
-				pr_err("Can't request %d empty-soc: %d\n",
-					chip->soc_irq[EMPTY_SOC].irq, rc);
-				return rc;
+			enable_irq_wake(chip->soc_irq[FULL_SOC].irq);
+			chip->full_soc_irq_enabled = true;
+
+			if (!chip->use_vbat_low_empty_soc) {
+				rc = devm_request_irq(chip->dev,
+					chip->soc_irq[EMPTY_SOC].irq,
+					fg_empty_soc_irq_handler,
+					IRQF_TRIGGER_RISING |
+					IRQF_TRIGGER_FALLING,
+					"empty-soc", chip);
+				if (rc < 0) {
+					pr_err("Can't request %d empty-soc: %d\n",
+						chip->soc_irq[EMPTY_SOC].irq,
+						rc);
+					return rc;
+				}
 			}
 			rc = devm_request_irq(chip->dev,
 				chip->soc_irq[DELTA_SOC].irq,
@@ -5558,8 +7306,8 @@
 			}
 
 			enable_irq_wake(chip->soc_irq[DELTA_SOC].irq);
-			enable_irq_wake(chip->soc_irq[FULL_SOC].irq);
-			enable_irq_wake(chip->soc_irq[EMPTY_SOC].irq);
+			if (!chip->use_vbat_low_empty_soc)
+				enable_irq_wake(chip->soc_irq[EMPTY_SOC].irq);
 			break;
 		case FG_MEMIF:
 			chip->mem_irq[FG_MEM_AVAIL].irq
@@ -5581,8 +7329,53 @@
 			}
 			break;
 		case FG_BATT:
-			chip->batt_irq[BATT_MISSING].irq
-				= of_irq_get_byname(child, "batt-missing");
+			chip->batt_irq[JEITA_SOFT_COLD].irq =
+				of_irq_get_byname(child, "soft-cold");
+			if (chip->batt_irq[JEITA_SOFT_COLD].irq < 0) {
+				pr_err("Unable to get soft-cold irq\n");
+				rc = -EINVAL;
+				return rc;
+			}
+			rc = devm_request_threaded_irq(chip->dev,
+					chip->batt_irq[JEITA_SOFT_COLD].irq,
+					NULL,
+					fg_jeita_soft_cold_irq_handler,
+					IRQF_TRIGGER_RISING |
+					IRQF_TRIGGER_FALLING |
+					IRQF_ONESHOT,
+					"soft-cold", chip);
+			if (rc < 0) {
+				pr_err("Can't request %d soft-cold: %d\n",
+					chip->batt_irq[JEITA_SOFT_COLD].irq,
+								rc);
+				return rc;
+			}
+			disable_irq(chip->batt_irq[JEITA_SOFT_COLD].irq);
+			chip->batt_irq[JEITA_SOFT_COLD].disabled = true;
+			chip->batt_irq[JEITA_SOFT_HOT].irq =
+				of_irq_get_byname(child, "soft-hot");
+			if (chip->batt_irq[JEITA_SOFT_HOT].irq < 0) {
+				pr_err("Unable to get soft-hot irq\n");
+				rc = -EINVAL;
+				return rc;
+			}
+			rc = devm_request_threaded_irq(chip->dev,
+					chip->batt_irq[JEITA_SOFT_HOT].irq,
+					NULL,
+					fg_jeita_soft_hot_irq_handler,
+					IRQF_TRIGGER_RISING |
+					IRQF_TRIGGER_FALLING |
+					IRQF_ONESHOT,
+					"soft-hot", chip);
+			if (rc < 0) {
+				pr_err("Can't request %d soft-hot: %d\n",
+					chip->batt_irq[JEITA_SOFT_HOT].irq, rc);
+				return rc;
+			}
+			disable_irq(chip->batt_irq[JEITA_SOFT_HOT].irq);
+			chip->batt_irq[JEITA_SOFT_HOT].disabled = true;
+			chip->batt_irq[BATT_MISSING].irq =
+				of_irq_get_byname(child, "batt-missing");
 			if (chip->batt_irq[BATT_MISSING].irq < 0) {
 				pr_err("Unable to get batt-missing irq\n");
 				rc = -EINVAL;
@@ -5619,8 +7412,14 @@
 					chip->batt_irq[VBATT_LOW].irq, rc);
 				return rc;
 			}
-			disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
-			chip->vbat_low_irq_enabled = false;
+			if (chip->use_vbat_low_empty_soc) {
+				enable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+				chip->vbat_low_irq_enabled = true;
+			} else {
+				disable_irq_nosync(
+					chip->batt_irq[VBATT_LOW].irq);
+				chip->vbat_low_irq_enabled = false;
+			}
 			break;
 		case FG_ADC:
 			break;
@@ -5630,17 +7429,22 @@
 		}
 	}
 
+	chip->irqs_enabled = true;
 	return rc;
 }
 
-static void fg_cleanup(struct fg_chip *chip)
+static void fg_cancel_all_works(struct fg_chip *chip)
 {
+	cancel_delayed_work_sync(&chip->check_sanity_work);
 	cancel_delayed_work_sync(&chip->update_sram_data);
 	cancel_delayed_work_sync(&chip->update_temp_work);
 	cancel_delayed_work_sync(&chip->update_jeita_setting);
 	cancel_delayed_work_sync(&chip->check_empty_work);
 	cancel_delayed_work_sync(&chip->batt_profile_init);
 	alarm_try_to_cancel(&chip->fg_cap_learning_alarm);
+	alarm_try_to_cancel(&chip->hard_jeita_alarm);
+	if (!chip->ima_error_handling)
+		cancel_work_sync(&chip->ima_error_recovery_work);
 	cancel_work_sync(&chip->rslow_comp_work);
 	cancel_work_sync(&chip->set_resume_soc_work);
 	cancel_work_sync(&chip->fg_cap_learning_work);
@@ -5652,12 +7456,23 @@
 	cancel_work_sync(&chip->gain_comp_work);
 	cancel_work_sync(&chip->init_work);
 	cancel_work_sync(&chip->charge_full_work);
+	cancel_work_sync(&chip->bcl_hi_power_work);
 	cancel_work_sync(&chip->esr_extract_config_work);
+	cancel_work_sync(&chip->slope_limiter_work);
+	cancel_work_sync(&chip->dischg_gain_work);
+	cancel_work_sync(&chip->cc_soc_store_work);
+}
+
+static void fg_cleanup(struct fg_chip *chip)
+{
+	fg_cancel_all_works(chip);
+	power_supply_unregister(chip->bms_psy);
 	mutex_destroy(&chip->rslow_comp.lock);
 	mutex_destroy(&chip->rw_lock);
 	mutex_destroy(&chip->cyc_ctr.lock);
 	mutex_destroy(&chip->learning_data.learning_lock);
 	mutex_destroy(&chip->sysfs_restart_lock);
+	mutex_destroy(&chip->ima_recovery_lock);
 	wakeup_source_trash(&chip->resume_soc_wakeup_source.source);
 	wakeup_source_trash(&chip->empty_check_wakeup_source.source);
 	wakeup_source_trash(&chip->memif_wakeup_source.source);
@@ -5667,6 +7482,11 @@
 	wakeup_source_trash(&chip->gain_comp_wakeup_source.source);
 	wakeup_source_trash(&chip->capacity_learning_wakeup_source.source);
 	wakeup_source_trash(&chip->esr_extract_wakeup_source.source);
+	wakeup_source_trash(&chip->slope_limit_wakeup_source.source);
+	wakeup_source_trash(&chip->dischg_gain_wakeup_source.source);
+	wakeup_source_trash(&chip->fg_reset_wakeup_source.source);
+	wakeup_source_trash(&chip->cc_soc_wakeup_source.source);
+	wakeup_source_trash(&chip->sanity_wakeup_source.source);
 }
 
 static int fg_remove(struct platform_device *pdev)
@@ -6155,12 +7975,13 @@
 	return 0;
 }
 
-#define FG_ALG_SYSCTL_1	0x4B0
-#define SOC_CNFG	0x450
-#define SOC_DELTA_OFFSET	3
-#define DELTA_SOC_PERCENT	1
-#define I_TERM_QUAL_BIT		BIT(1)
-#define PATCH_NEG_CURRENT_BIT	BIT(3)
+#define FG_ALG_SYSCTL_1			0x4B0
+#define SOC_CNFG			0x450
+#define SOC_DELTA_OFFSET		3
+#define DELTA_SOC_PERCENT		1
+#define ALERT_CFG_OFFSET		3
+#define I_TERM_QUAL_BIT			BIT(1)
+#define PATCH_NEG_CURRENT_BIT		BIT(3)
 #define KI_COEFF_PRED_FULL_ADDR		0x408
 #define KI_COEFF_PRED_FULL_4_0_MSB	0x88
 #define KI_COEFF_PRED_FULL_4_0_LSB	0x00
@@ -6168,6 +7989,12 @@
 #define FG_ADC_CONFIG_REG		0x4B8
 #define FG_BCL_CONFIG_OFFSET		0x3
 #define BCL_FORCED_HPM_IN_CHARGE	BIT(2)
+#define IRQ_USE_VOLTAGE_HYST_BIT	BIT(0)
+#define EMPTY_FROM_VOLTAGE_BIT		BIT(1)
+#define EMPTY_FROM_SOC_BIT		BIT(2)
+#define EMPTY_SOC_IRQ_MASK		(IRQ_USE_VOLTAGE_HYST_BIT | \
+					EMPTY_FROM_SOC_BIT | \
+					EMPTY_FROM_VOLTAGE_BIT)
 static int fg_common_hw_init(struct fg_chip *chip)
 {
 	int rc;
@@ -6176,8 +8003,9 @@
 
 	update_iterm(chip);
 	update_cutoff_voltage(chip);
-	update_irq_volt_empty(chip);
 	update_bcl_thresholds(chip);
+	if (!chip->use_vbat_low_empty_soc)
+		update_irq_volt_empty(chip);
 
 	resume_soc_raw = settings[FG_MEM_RESUME_SOC].value;
 	if (resume_soc_raw > 0) {
@@ -6207,6 +8035,11 @@
 		return rc;
 	}
 
+	/* Override the voltage threshold for vbatt_low with empty_volt */
+	if (chip->use_vbat_low_empty_soc)
+		settings[FG_MEM_BATT_LOW].value =
+			settings[FG_MEM_IRQ_VOLT_EMPTY].value;
+
 	rc = fg_mem_masked_write(chip, settings[FG_MEM_BATT_LOW].address, 0xFF,
 			batt_to_setpoint_8b(settings[FG_MEM_BATT_LOW].value),
 			settings[FG_MEM_BATT_LOW].offset);
@@ -6274,20 +8107,41 @@
 		if (fg_debug_mask & FG_STATUS)
 			pr_info("imptr_pulse_slow is %sabled\n",
 				chip->imptr_pulse_slow_en ? "en" : "dis");
+	}
 
-		rc = fg_mem_read(chip, &val, RSLOW_CFG_REG, 1, RSLOW_CFG_OFFSET,
-				0);
-		if (rc) {
-			pr_err("unable to read rslow cfg: %d\n", rc);
-			return rc;
-		}
+	rc = fg_mem_read(chip, &val, RSLOW_CFG_REG, 1, RSLOW_CFG_OFFSET,
+			0);
+	if (rc) {
+		pr_err("unable to read rslow cfg: %d\n", rc);
+		return rc;
+	}
 
-		if (val & RSLOW_CFG_ON_VAL)
-			chip->rslow_comp.active = true;
+	if (val & RSLOW_CFG_ON_VAL)
+		chip->rslow_comp.active = true;
 
-		if (fg_debug_mask & FG_STATUS)
-			pr_info("rslow_comp active is %sabled\n",
-				chip->rslow_comp.active ? "en" : "dis");
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("rslow_comp active is %sabled\n",
+			chip->rslow_comp.active ? "en" : "dis");
+
+	/*
+	 * Clear bits 0-2 in 0x4B3 and set them again to make empty_soc irq
+	 * trigger again.
+	 */
+	rc = fg_mem_masked_write(chip, FG_ALG_SYSCTL_1, EMPTY_SOC_IRQ_MASK,
+			0, ALERT_CFG_OFFSET);
+	if (rc) {
+		pr_err("failed to write to 0x4B3 rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Wait for a FG cycle before enabling empty soc irq configuration */
+	msleep(FG_CYCLE_MS);
+
+	rc = fg_mem_masked_write(chip, FG_ALG_SYSCTL_1, EMPTY_SOC_IRQ_MASK,
+			EMPTY_SOC_IRQ_MASK, ALERT_CFG_OFFSET);
+	if (rc) {
+		pr_err("failed to write to 0x4B3 rc=%d\n", rc);
+		return rc;
 	}
 
 	return 0;
@@ -6414,12 +8268,13 @@
 		/* Setup workaround flag based on PMIC type */
 		if (fg_sense_type == INTERNAL_CURRENT_SENSE)
 			chip->wa_flag |= IADC_GAIN_COMP_WA;
-		if (chip->pmic_revision[REVID_DIG_MAJOR] > 1)
+		if (chip->pmic_revision[REVID_DIG_MAJOR] >= 1)
 			chip->wa_flag |= USE_CC_SOC_REG;
 
 		break;
 	case PMI8950:
 	case PMI8937:
+	case PMI8940:
 		rc = fg_8950_hw_init(chip);
 		/* Setup workaround flag based on PMIC type */
 		chip->wa_flag |= BCL_HI_POWER_FOR_CHGLED_WA;
@@ -6438,12 +8293,223 @@
 	return rc;
 }
 
+static int fg_init_iadc_config(struct fg_chip *chip)
+{
+	u8 reg[2];
+	int rc;
+
+	/* read default gain config */
+	rc = fg_mem_read(chip, reg, K_VCOR_REG, 2, DEF_GAIN_OFFSET, 0);
+	if (rc) {
+		pr_err("Failed to read default gain rc=%d\n", rc);
+		return rc;
+	}
+
+	if (reg[1] || reg[0]) {
+		/*
+		 * Default gain register has valid value:
+		 * - write to gain register.
+		 */
+		rc = fg_mem_write(chip, reg, GAIN_REG, 2,
+						GAIN_OFFSET, 0);
+		if (rc) {
+			pr_err("Failed to write gain rc=%d\n", rc);
+			return rc;
+		}
+	} else {
+		/*
+		 * Default gain register is invalid:
+		 * - read gain register for default gain value
+		 * - write to default gain register.
+		 */
+		rc = fg_mem_read(chip, reg, GAIN_REG, 2,
+						GAIN_OFFSET, 0);
+		if (rc) {
+			pr_err("Failed to read gain rc=%d\n", rc);
+			return rc;
+		}
+		rc = fg_mem_write(chip, reg, K_VCOR_REG, 2,
+						DEF_GAIN_OFFSET, 0);
+		if (rc) {
+			pr_err("Failed to write default gain rc=%d\n",
+								rc);
+			return rc;
+		}
+	}
+
+	chip->iadc_comp_data.dfl_gain_reg[0] = reg[0];
+	chip->iadc_comp_data.dfl_gain_reg[1] = reg[1];
+	chip->iadc_comp_data.dfl_gain = half_float(reg);
+
+	pr_debug("IADC gain initial config reg_val 0x%x%x gain %lld\n",
+		       reg[1], reg[0], chip->iadc_comp_data.dfl_gain);
+	return 0;
+}
+
+#define EN_WR_FGXCT_PRD		BIT(6)
+#define EN_RD_FGXCT_PRD		BIT(5)
+#define FG_RESTART_TIMEOUT_MS	12000
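+/*
+ * Recovery path for an IMA error: freeze SOC reporting on the last known
+ * value, cancel outstanding work, force and then release IMA access, toggle
+ * the FG reset, redo the hardware init and profile load, and finally restore
+ * the saved SOC/CC_SOC once the restart completes (or times out).
+ */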
+static void ima_error_recovery_work(struct work_struct *work)
+{
+	struct fg_chip *chip = container_of(work,
+				struct fg_chip,
+				ima_error_recovery_work);
+	bool tried_again = false;
+	int rc;
+	u8 buf[4] = {0, 0, 0, 0};
+
+	fg_stay_awake(&chip->fg_reset_wakeup_source);
+	mutex_lock(&chip->ima_recovery_lock);
+	if (!chip->ima_error_handling) {
+		pr_err("Scheduled by mistake?\n");
+		mutex_unlock(&chip->ima_recovery_lock);
+		fg_relax(&chip->fg_reset_wakeup_source);
+		return;
+	}
+
+	/*
+	 * The last known SOC should be reported until error recovery completes.
+	 * Without this, there could be a fluctuation in SOC values notified
+	 * to the userspace.
+	 */
+	chip->use_last_soc = true;
+
+	/* Block SRAM access till FG reset is complete */
+	chip->block_sram_access = true;
+
+	/* Release the mutex to avoid deadlock while cancelling the works */
+	mutex_unlock(&chip->ima_recovery_lock);
+
+	/* Cancel all the works */
+	fg_cancel_all_works(chip);
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("last_soc: %d\n", chip->last_soc);
+
+	mutex_lock(&chip->ima_recovery_lock);
+	/* Acquire IMA access forcibly from FG ALG */
+	rc = fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
+			EN_WR_FGXCT_PRD | EN_RD_FGXCT_PRD,
+			EN_WR_FGXCT_PRD | EN_RD_FGXCT_PRD, 1);
+	if (rc) {
+		pr_err("Error in writing to IMA_CFG, rc=%d\n", rc);
+		goto out;
+	}
+
+	/* Release the IMA access now so that FG reset can go through */
+	rc = fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
+			EN_WR_FGXCT_PRD | EN_RD_FGXCT_PRD, 0, 1);
+	if (rc) {
+		pr_err("Error in writing to IMA_CFG, rc=%d\n", rc);
+		goto out;
+	}
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("resetting FG\n");
+
+	/* Assert FG reset */
+	rc = fg_reset(chip, true);
+	if (rc) {
+		pr_err("Couldn't reset FG\n");
+		goto out;
+	}
+
+	/* Wait for a small time before deasserting FG reset */
+	msleep(100);
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("clearing FG from reset\n");
+
+	/* Deassert FG reset */
+	rc = fg_reset(chip, false);
+	if (rc) {
+		pr_err("Couldn't clear FG reset\n");
+		goto out;
+	}
+
+	/* Wait for at least a FG cycle before doing SRAM access */
+	msleep(2000);
+
+	chip->block_sram_access = false;
+
+	if (!chip->init_done) {
+		schedule_work(&chip->init_work);
+		goto wait;
+	}
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("Calling hw_init\n");
+
+	/*
+	 * Once FG is reset, everything in SRAM will be wiped out. Redo
+	 * hw_init, update jeita settings etc., again to make sure all
+	 * the settings got restored again.
+	 */
+	rc = fg_hw_init(chip);
+	if (rc) {
+		pr_err("Error in hw_init, rc=%d\n", rc);
+		goto out;
+	}
+
+	update_jeita_setting(&chip->update_jeita_setting.work);
+
+	if (chip->wa_flag & IADC_GAIN_COMP_WA) {
+		rc = fg_init_iadc_config(chip);
+		if (rc)
+			goto out;
+	}
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("loading battery profile\n");
+	if (!chip->use_otp_profile) {
+		chip->battery_missing = true;
+		chip->profile_loaded = false;
+		chip->soc_reporting_ready = false;
+		chip->batt_type = default_batt_type;
+		fg_handle_battery_insertion(chip);
+	}
+
+wait:
+	rc = wait_for_completion_interruptible_timeout(&chip->fg_reset_done,
+			msecs_to_jiffies(FG_RESTART_TIMEOUT_MS));
+
+	/* If we were interrupted, wait one more time. */
+	if (rc == -ERESTARTSYS && !tried_again) {
+		tried_again = true;
+		pr_debug("interrupted, waiting again\n");
+		goto wait;
+	} else if (rc <= 0) {
+		pr_err("fg_restart taking long time rc=%d\n", rc);
+		goto out;
+	}
+
+	rc = fg_mem_write(chip, buf, fg_data[FG_DATA_VINT_ERR].address,
+			fg_data[FG_DATA_VINT_ERR].len,
+			fg_data[FG_DATA_VINT_ERR].offset, 0);
+	if (rc < 0)
+		pr_err("Error in clearing VACT_INT_ERR, rc=%d\n", rc);
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("IMA error recovery done...\n");
+out:
+	fg_restore_soc(chip);
+	fg_restore_cc_soc(chip);
+	fg_enable_irqs(chip, true);
+	update_sram_data_work(&chip->update_sram_data.work);
+	update_temp_data(&chip->update_temp_work.work);
+	schedule_delayed_work(&chip->check_sanity_work,
+		msecs_to_jiffies(1000));
+	chip->ima_error_handling = false;
+	mutex_unlock(&chip->ima_recovery_lock);
+	fg_relax(&chip->fg_reset_wakeup_source);
+}
+
 #define DIG_MINOR		0x0
 #define DIG_MAJOR		0x1
 #define ANA_MINOR		0x2
 #define ANA_MAJOR		0x3
 #define IACS_INTR_SRC_SLCT	BIT(3)
-static int fg_setup_memif_offset(struct fg_chip *chip)
+static int fg_memif_init(struct fg_chip *chip)
 {
 	int rc;
 
@@ -6464,7 +8530,7 @@
 		break;
 	default:
 		pr_err("Digital Major rev=%d not supported\n",
-				chip->revision[DIG_MAJOR]);
+					chip->revision[DIG_MAJOR]);
 		return -EINVAL;
 	}
 
@@ -6481,6 +8547,13 @@
 			pr_err("failed to configure interrupt source %d\n", rc);
 			return rc;
 		}
+
+		/* check for error condition */
+		rc = fg_check_ima_exception(chip, true);
+		if (rc) {
+			pr_err("Error in clearing IMA exception rc=%d\n", rc);
+			return rc;
+		}
 	}
 
 	return 0;
@@ -6515,6 +8588,7 @@
 	case PMI8950:
 	case PMI8937:
 	case PMI8996:
+	case PMI8940:
 		chip->pmic_subtype = pmic_rev_id->pmic_subtype;
 		chip->pmic_revision[REVID_RESERVED]	= pmic_rev_id->rev1;
 		chip->pmic_revision[REVID_VARIANT]	= pmic_rev_id->rev2;
@@ -6531,10 +8605,8 @@
 }
 
 #define INIT_JEITA_DELAY_MS 1000
-
 static void delayed_init_work(struct work_struct *work)
 {
-	u8 reg[2];
 	int rc;
 	struct fg_chip *chip = container_of(work,
 				struct fg_chip,
@@ -6546,6 +8618,14 @@
 	rc = fg_hw_init(chip);
 	if (rc) {
 		pr_err("failed to hw init rc = %d\n", rc);
+		if (!chip->init_done && chip->ima_supported) {
+			rc = fg_check_alg_status(chip);
+			if (rc && rc != -EBUSY)
+				pr_err("Couldn't check FG ALG status, rc=%d\n",
+					rc);
+			fg_mem_release(chip);
+			return;
+		}
 		fg_mem_release(chip);
 		fg_cleanup(chip);
 		return;
@@ -6566,57 +8646,19 @@
 	if (!chip->use_otp_profile)
 		schedule_delayed_work(&chip->batt_profile_init, 0);
 
+	if (chip->ima_supported && fg_reset_on_lockup)
+		schedule_delayed_work(&chip->check_sanity_work,
+			msecs_to_jiffies(1000));
+
 	if (chip->wa_flag & IADC_GAIN_COMP_WA) {
-		/* read default gain config */
-		rc = fg_mem_read(chip, reg, K_VCOR_REG, 2, DEF_GAIN_OFFSET, 0);
-		if (rc) {
-			pr_err("Failed to read default gain rc=%d\n", rc);
+		rc = fg_init_iadc_config(chip);
+		if (rc)
 			goto done;
-		}
-
-		if (reg[1] || reg[0]) {
-			/*
-			 * Default gain register has valid value:
-			 * - write to gain register.
-			 */
-			rc = fg_mem_write(chip, reg, GAIN_REG, 2,
-							GAIN_OFFSET, 0);
-			if (rc) {
-				pr_err("Failed to write gain rc=%d\n", rc);
-				goto done;
-			}
-		} else {
-			/*
-			 * Default gain register is invalid:
-			 * - read gain register for default gain value
-			 * - write to default gain register.
-			 */
-			rc = fg_mem_read(chip, reg, GAIN_REG, 2,
-							GAIN_OFFSET, 0);
-			if (rc) {
-				pr_err("Failed to read gain rc=%d\n", rc);
-				goto done;
-			}
-			rc = fg_mem_write(chip, reg, K_VCOR_REG, 2,
-							DEF_GAIN_OFFSET, 0);
-			if (rc) {
-				pr_err("Failed to write default gain rc=%d\n",
-									rc);
-				goto done;
-			}
-		}
-
-		chip->iadc_comp_data.dfl_gain_reg[0] = reg[0];
-		chip->iadc_comp_data.dfl_gain_reg[1] = reg[1];
-		chip->iadc_comp_data.dfl_gain = half_float(reg);
-		chip->input_present = is_input_present(chip);
-		chip->otg_present = is_otg_present(chip);
-		chip->init_done = true;
-
-		pr_debug("IADC gain initial config reg_val 0x%x%x gain %lld\n",
-			       reg[1], reg[0], chip->iadc_comp_data.dfl_gain);
 	}
 
+	chip->input_present = is_input_present(chip);
+	chip->otg_present = is_otg_present(chip);
+	chip->init_done = true;
 	pr_debug("FG: HW_init success\n");
 
 	return;
@@ -6675,16 +8717,30 @@
 			"qpnp_fg_cap_learning");
 	wakeup_source_init(&chip->esr_extract_wakeup_source.source,
 			"qpnp_fg_esr_extract");
+	wakeup_source_init(&chip->slope_limit_wakeup_source.source,
+			"qpnp_fg_slope_limit");
+	wakeup_source_init(&chip->dischg_gain_wakeup_source.source,
+			"qpnp_fg_dischg_gain");
+	wakeup_source_init(&chip->fg_reset_wakeup_source.source,
+			"qpnp_fg_reset");
+	wakeup_source_init(&chip->cc_soc_wakeup_source.source,
+			"qpnp_fg_cc_soc");
+	wakeup_source_init(&chip->sanity_wakeup_source.source,
+			"qpnp_fg_sanity_check");
+	spin_lock_init(&chip->sec_access_lock);
 	mutex_init(&chip->rw_lock);
 	mutex_init(&chip->cyc_ctr.lock);
 	mutex_init(&chip->learning_data.learning_lock);
 	mutex_init(&chip->rslow_comp.lock);
 	mutex_init(&chip->sysfs_restart_lock);
+	mutex_init(&chip->ima_recovery_lock);
 	INIT_DELAYED_WORK(&chip->update_jeita_setting, update_jeita_setting);
 	INIT_DELAYED_WORK(&chip->update_sram_data, update_sram_data_work);
 	INIT_DELAYED_WORK(&chip->update_temp_work, update_temp_data);
 	INIT_DELAYED_WORK(&chip->check_empty_work, check_empty_work);
 	INIT_DELAYED_WORK(&chip->batt_profile_init, batt_profile_init);
+	INIT_DELAYED_WORK(&chip->check_sanity_work, check_sanity_work);
+	INIT_WORK(&chip->ima_error_recovery_work, ima_error_recovery_work);
 	INIT_WORK(&chip->rslow_comp_work, rslow_comp_work);
 	INIT_WORK(&chip->fg_cap_learning_work, fg_cap_learning_work);
 	INIT_WORK(&chip->dump_sram, dump_sram);
@@ -6699,13 +8755,19 @@
 	INIT_WORK(&chip->gain_comp_work, iadc_gain_comp_work);
 	INIT_WORK(&chip->bcl_hi_power_work, bcl_hi_power_work);
 	INIT_WORK(&chip->esr_extract_config_work, esr_extract_config_work);
+	INIT_WORK(&chip->slope_limiter_work, slope_limiter_work);
+	INIT_WORK(&chip->dischg_gain_work, discharge_gain_work);
+	INIT_WORK(&chip->cc_soc_store_work, cc_soc_store_work);
 	alarm_init(&chip->fg_cap_learning_alarm, ALARM_BOOTTIME,
 			fg_cap_learning_alarm_cb);
+	alarm_init(&chip->hard_jeita_alarm, ALARM_BOOTTIME,
+			fg_hard_jeita_alarm_cb);
 	init_completion(&chip->sram_access_granted);
 	init_completion(&chip->sram_access_revoked);
 	complete_all(&chip->sram_access_revoked);
 	init_completion(&chip->batt_id_avail);
 	init_completion(&chip->first_soc_done);
+	init_completion(&chip->fg_reset_done);
 	dev_set_drvdata(&pdev->dev, chip);
 
 	if (of_get_available_child_count(pdev->dev.of_node) == 0) {
@@ -6763,7 +8825,7 @@
 		return rc;
 	}
 
-	rc = fg_setup_memif_offset(chip);
+	rc = fg_memif_init(chip);
 	if (rc) {
 		pr_err("Unable to setup mem_if offsets rc=%d\n", rc);
 		goto of_init_fail;
@@ -6834,10 +8896,18 @@
 		rc = fg_dfs_create(chip);
 		if (rc < 0) {
 			pr_err("failed to create debugfs rc = %d\n", rc);
-			goto cancel_work;
+			goto power_supply_unregister;
 		}
 	}
 
+	/* Fake temperature till the actual temperature is read */
+	chip->last_good_temp = 250;
+
+	/* Initialize batt_info variables */
+	chip->batt_range_ocv = &fg_batt_valid_ocv;
+	chip->batt_range_pct = &fg_batt_range_pct;
+	memset(chip->batt_info, INT_MAX, sizeof(chip->batt_info));
+
 	schedule_work(&chip->init_work);
 
 	pr_info("FG Probe success - FG Revision DIG:%d.%d ANA:%d.%d PMIC subtype=%d\n",
@@ -6847,32 +8917,17 @@
 
 	return rc;
 
+power_supply_unregister:
+	power_supply_unregister(chip->bms_psy);
 cancel_work:
-	cancel_delayed_work_sync(&chip->update_jeita_setting);
-	cancel_delayed_work_sync(&chip->update_sram_data);
-	cancel_delayed_work_sync(&chip->update_temp_work);
-	cancel_delayed_work_sync(&chip->check_empty_work);
-	cancel_delayed_work_sync(&chip->batt_profile_init);
-	alarm_try_to_cancel(&chip->fg_cap_learning_alarm);
-	cancel_work_sync(&chip->set_resume_soc_work);
-	cancel_work_sync(&chip->fg_cap_learning_work);
-	cancel_work_sync(&chip->dump_sram);
-	cancel_work_sync(&chip->status_change_work);
-	cancel_work_sync(&chip->cycle_count_work);
-	cancel_work_sync(&chip->update_esr_work);
-	cancel_work_sync(&chip->rslow_comp_work);
-	cancel_work_sync(&chip->sysfs_restart_work);
-	cancel_work_sync(&chip->gain_comp_work);
-	cancel_work_sync(&chip->init_work);
-	cancel_work_sync(&chip->charge_full_work);
-	cancel_work_sync(&chip->bcl_hi_power_work);
-	cancel_work_sync(&chip->esr_extract_config_work);
+	fg_cancel_all_works(chip);
 of_init_fail:
 	mutex_destroy(&chip->rslow_comp.lock);
 	mutex_destroy(&chip->rw_lock);
 	mutex_destroy(&chip->cyc_ctr.lock);
 	mutex_destroy(&chip->learning_data.learning_lock);
 	mutex_destroy(&chip->sysfs_restart_lock);
+	mutex_destroy(&chip->ima_recovery_lock);
 	wakeup_source_trash(&chip->resume_soc_wakeup_source.source);
 	wakeup_source_trash(&chip->empty_check_wakeup_source.source);
 	wakeup_source_trash(&chip->memif_wakeup_source.source);
@@ -6882,6 +8937,11 @@
 	wakeup_source_trash(&chip->gain_comp_wakeup_source.source);
 	wakeup_source_trash(&chip->capacity_learning_wakeup_source.source);
 	wakeup_source_trash(&chip->esr_extract_wakeup_source.source);
+	wakeup_source_trash(&chip->slope_limit_wakeup_source.source);
+	wakeup_source_trash(&chip->dischg_gain_wakeup_source.source);
+	wakeup_source_trash(&chip->fg_reset_wakeup_source.source);
+	wakeup_source_trash(&chip->cc_soc_wakeup_source.source);
+	wakeup_source_trash(&chip->sanity_wakeup_source.source);
 	return rc;
 }
 
@@ -6938,11 +8998,103 @@
 	return 0;
 }
 
+static void fg_check_ima_idle(struct fg_chip *chip)
+{
+	bool rif_mem_sts = true;
+	int rc, time_count = 0;
+
+	mutex_lock(&chip->rw_lock);
+	/* Make sure IMA is idle */
+	while (1) {
+		rc = fg_check_rif_mem_access(chip, &rif_mem_sts);
+		if (rc)
+			break;
+
+		if (!rif_mem_sts)
+			break;
+
+		if (time_count > 4) {
+			pr_err("Waited for ~16ms polling RIF_MEM_ACCESS_REQ\n");
+			fg_run_iacs_clear_sequence(chip);
+			break;
+		}
+
+		/* Wait for 4ms before reading RIF_MEM_ACCESS_REQ again */
+		usleep_range(4000, 4100);
+		time_count++;
+	}
+	mutex_unlock(&chip->rw_lock);
+}
+
+static void fg_shutdown(struct platform_device *pdev)
+{
+	struct fg_chip *chip = dev_get_drvdata(&pdev->dev);
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_emerg("FG shutdown started\n");
+	fg_cancel_all_works(chip);
+	fg_check_ima_idle(chip);
+	chip->fg_shutdown = true;
+	if (fg_debug_mask & FG_STATUS)
+		pr_emerg("FG shutdown complete\n");
+}
+
 static const struct dev_pm_ops qpnp_fg_pm_ops = {
 	.suspend	= fg_suspend,
 	.resume		= fg_resume,
 };
 
+static int fg_reset_lockup_set(const char *val, const struct kernel_param *kp)
+{
+	int rc;
+	struct power_supply *bms_psy;
+	struct fg_chip *chip;
+	int old_val = fg_reset_on_lockup;
+
+	rc = param_set_int(val, kp);
+	if (rc) {
+		pr_err("Unable to set fg_reset_on_lockup: %d\n", rc);
+		return rc;
+	}
+
+	if (fg_reset_on_lockup != 0 && fg_reset_on_lockup != 1) {
+		pr_err("Bad value %d\n", fg_reset_on_lockup);
+		fg_reset_on_lockup = old_val;
+		return -EINVAL;
+	}
+
+	bms_psy = power_supply_get_by_name("bms");
+	if (!bms_psy) {
+		pr_err("bms psy not found\n");
+		return 0;
+	}
+
+	chip = power_supply_get_drvdata(bms_psy);
+	if (!chip->ima_supported) {
+		pr_err("Cannot set this for non-IMA supported FG\n");
+		fg_reset_on_lockup = old_val;
+		return -EINVAL;
+	}
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("fg_reset_on_lockup set to %d\n", fg_reset_on_lockup);
+
+	if (fg_reset_on_lockup)
+		schedule_delayed_work(&chip->check_sanity_work,
+			msecs_to_jiffies(1000));
+	else
+		cancel_delayed_work_sync(&chip->check_sanity_work);
+
+	return rc;
+}
+
+static struct kernel_param_ops fg_reset_ops = {
+	.set = fg_reset_lockup_set,
+	.get = param_get_int,
+};
+
+module_param_cb(reset_on_lockup, &fg_reset_ops, &fg_reset_on_lockup, 0644);
+
 static int fg_sense_type_set(const char *val, const struct kernel_param *kp)
 {
 	int rc;
@@ -7025,6 +9177,7 @@
 	},
 	.probe		= fg_probe,
 	.remove		= fg_remove,
+	.shutdown	= fg_shutdown,
 };
 
 static int __init fg_init(void)
diff --git a/drivers/power/supply/qcom/qpnp-qg.c b/drivers/power/supply/qcom/qpnp-qg.c
index 09705ad..2efe746 100644
--- a/drivers/power/supply/qcom/qpnp-qg.c
+++ b/drivers/power/supply/qcom/qpnp-qg.c
@@ -25,10 +25,12 @@
 #include <linux/platform_device.h>
 #include <linux/power_supply.h>
 #include <linux/regmap.h>
+#include <linux/slab.h>
 #include <linux/uaccess.h>
 #include <linux/pmic-voter.h>
 #include <linux/qpnp/qpnp-adc.h>
 #include <uapi/linux/qg.h>
+#include <uapi/linux/qg-profile.h>
 #include "fg-alg.h"
 #include "qg-sdam.h"
 #include "qg-core.h"
@@ -43,6 +45,16 @@
 	debug_mask, qg_debug_mask, int, 0600
 );
 
+static int qg_esr_mod_count = 10;
+module_param_named(
+	esr_mod_count, qg_esr_mod_count, int, 0600
+);
+
+static int qg_esr_count = 5;
+module_param_named(
+	esr_count, qg_esr_count, int, 0600
+);
+
 static bool is_battery_present(struct qpnp_qg *chip)
 {
 	u8 reg = 0;
@@ -211,6 +223,14 @@
 	}
 
 	pr_debug("Notified charger on float voltage and FCC\n");
+
+	rc = power_supply_get_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT, &prop);
+	if (rc < 0) {
+		pr_err("Failed to get charge term current, rc=%d\n", rc);
+		return;
+	}
+	chip->chg_iterm_ma = prop.intval;
 }
 
 static bool is_batt_available(struct qpnp_qg *chip)
@@ -228,7 +248,7 @@
 	return true;
 }
 
-static int qg_update_sdam_params(struct qpnp_qg *chip)
+static int qg_store_soc_params(struct qpnp_qg *chip)
 {
 	int rc, batt_temp = 0, i;
 	unsigned long rtc_sec = 0;
@@ -245,13 +265,11 @@
 	else
 		chip->sdam_data[SDAM_TEMP] = (u32)batt_temp;
 
-	rc = qg_sdam_write_all(chip->sdam_data);
-	if (rc < 0)
-		pr_err("Failed to write to SDAM rc=%d\n", rc);
-
-	for (i = 0; i < SDAM_MAX; i++)
+	for (i = 0; i <= SDAM_TIME_SEC; i++) {
+		rc |= qg_sdam_write(i, chip->sdam_data[i]);
 		qg_dbg(chip, QG_DEBUG_STATUS, "SDAM write param %d value=%d\n",
 					i, chip->sdam_data[i]);
+	}
 
 	return rc;
 }
@@ -261,8 +279,13 @@
 	int rc = 0, i, j = 0, temp;
 	u8 v_fifo[MAX_FIFO_LENGTH * 2], i_fifo[MAX_FIFO_LENGTH * 2];
 	u32 sample_interval = 0, sample_count = 0, fifo_v = 0, fifo_i = 0;
+	unsigned long rtc_sec = 0;
 
-	chip->kdata.fifo_time = (u32)ktime_get_seconds();
+	rc = get_rtc_time(&rtc_sec);
+	if (rc < 0)
+		pr_err("Failed to get RTC time, rc=%d\n", rc);
+
+	chip->kdata.fifo_time = (u32)rtc_sec;
 
 	if (!fifo_length) {
 		pr_debug("No FIFO data\n");
@@ -428,6 +451,90 @@
 	return rc;
 }
 
+#define MIN_FIFO_FULL_TIME_MS			12000
+static int process_rt_fifo_data(struct qpnp_qg *chip,
+			bool update_vbat_low, bool update_smb)
+{
+	int rc = 0;
+	ktime_t now = ktime_get();
+	s64 time_delta;
+	u8 fifo_length;
+
+	/*
+	 * Reject the FIFO read event if there are back-to-back requests.
+	 * This is done to guarantee that there is always a minimum amount
+	 * of FIFO data to be processed; ignore this if vbat_low is set.
+	 */
+	time_delta = ktime_ms_delta(now, chip->last_user_update_time);
+
+	qg_dbg(chip, QG_DEBUG_FIFO, "time_delta=%lld ms update_vbat_low=%d update_smb=%d\n",
+				time_delta, update_vbat_low, update_smb);
+
+	if (time_delta > MIN_FIFO_FULL_TIME_MS || update_vbat_low
+						|| update_smb) {
+		rc = qg_master_hold(chip, true);
+		if (rc < 0) {
+			pr_err("Failed to hold master, rc=%d\n", rc);
+			goto done;
+		}
+
+		rc = qg_process_rt_fifo(chip);
+		if (rc < 0) {
+			pr_err("Failed to process FIFO real-time, rc=%d\n", rc);
+			goto done;
+		}
+
+		if (update_vbat_low) {
+			/* change FIFO length */
+			fifo_length = chip->vbat_low ?
+					chip->dt.s2_vbat_low_fifo_length :
+					chip->dt.s2_fifo_length;
+			rc = qg_update_fifo_length(chip, fifo_length);
+			if (rc < 0)
+				goto done;
+
+			qg_dbg(chip, QG_DEBUG_STATUS,
+				"FIFO length updated to %d vbat_low=%d\n",
+					fifo_length, chip->vbat_low);
+		}
+
+		if (update_smb) {
+			rc = qg_masked_write(chip, chip->qg_base +
+				QG_MODE_CTL1_REG, PARALLEL_IBAT_SENSE_EN_BIT,
+				chip->parallel_enabled ?
+					PARALLEL_IBAT_SENSE_EN_BIT : 0);
+			if (rc < 0) {
+				pr_err("Failed to update SMB_EN, rc=%d\n", rc);
+				goto done;
+			}
+			qg_dbg(chip, QG_DEBUG_STATUS, "Parallel SENSE %d\n",
+						chip->parallel_enabled);
+		}
+
+		rc = qg_master_hold(chip, false);
+		if (rc < 0) {
+			pr_err("Failed to release master, rc=%d\n", rc);
+			goto done;
+		}
+		/* FIFOs restarted */
+		chip->last_fifo_update_time = ktime_get();
+
+		/* signal the read thread */
+		chip->data_ready = true;
+		wake_up_interruptible(&chip->qg_wait_q);
+		chip->last_user_update_time = now;
+
+		/* vote to stay awake until userspace reads data */
+		vote(chip->awake_votable, FIFO_RT_DONE_VOTER, true, 0);
+	} else {
+		qg_dbg(chip, QG_DEBUG_FIFO, "FIFO processing too early time_delta=%lld\n",
+							time_delta);
+	}
+done:
+	qg_master_hold(chip, false);
+	return rc;
+}
+
 #define VBAT_LOW_HYST_UV		50000 /* 50mV */
 static int qg_vbat_low_wa(struct qpnp_qg *chip)
 {
@@ -556,82 +663,356 @@
 	return rc;
 }
 
-#define MIN_FIFO_FULL_TIME_MS			12000
-static int process_rt_fifo_data(struct qpnp_qg *chip,
-				bool vbat_low, bool update_smb)
+static void qg_retrieve_esr_params(struct qpnp_qg *chip)
 {
-	int rc = 0;
-	ktime_t now = ktime_get();
-	s64 time_delta;
+	u32 data = 0;
+	int rc;
+
+	rc = qg_sdam_read(SDAM_ESR_CHARGE_DELTA, &data);
+	if (!rc && data) {
+		chip->kdata.param[QG_ESR_CHARGE_DELTA].data = data;
+		chip->kdata.param[QG_ESR_CHARGE_DELTA].valid = true;
+		qg_dbg(chip, QG_DEBUG_ESR,
+				"ESR_CHARGE_DELTA SDAM=%d\n", data);
+	} else if (rc < 0) {
+		pr_err("Failed to read ESR_CHARGE_DELTA rc=%d\n", rc);
+	}
+
+	rc = qg_sdam_read(SDAM_ESR_DISCHARGE_DELTA, &data);
+	if (!rc && data) {
+		chip->kdata.param[QG_ESR_DISCHARGE_DELTA].data = data;
+		chip->kdata.param[QG_ESR_DISCHARGE_DELTA].valid = true;
+		qg_dbg(chip, QG_DEBUG_ESR,
+				"ESR_DISCHARGE_DELTA SDAM=%d\n", data);
+	} else if (rc < 0) {
+		pr_err("Failed to read ESR_DISCHARGE_DELTA rc=%d\n", rc);
+	}
+
+	rc = qg_sdam_read(SDAM_ESR_CHARGE_SF, &data);
+	if (!rc && data) {
+		data = CAP(QG_ESR_SF_MIN, QG_ESR_SF_MAX, data);
+		chip->kdata.param[QG_ESR_CHARGE_SF].data = data;
+		chip->kdata.param[QG_ESR_CHARGE_SF].valid = true;
+		qg_dbg(chip, QG_DEBUG_ESR,
+				"ESR_CHARGE_SF SDAM=%d\n", data);
+	} else if (rc < 0) {
+		pr_err("Failed to read ESR_CHARGE_SF rc=%d\n", rc);
+	}
+
+	rc = qg_sdam_read(SDAM_ESR_DISCHARGE_SF, &data);
+	if (!rc && data) {
+		data = CAP(QG_ESR_SF_MIN, QG_ESR_SF_MAX, data);
+		chip->kdata.param[QG_ESR_DISCHARGE_SF].data = data;
+		chip->kdata.param[QG_ESR_DISCHARGE_SF].valid = true;
+		qg_dbg(chip, QG_DEBUG_ESR,
+				"ESR_DISCHARGE_SF SDAM=%d\n", data);
+	} else if (rc < 0) {
+		pr_err("Failed to read ESR_DISCHARGE_SF rc=%d\n", rc);
+	}
+}
+
+static void qg_store_esr_params(struct qpnp_qg *chip)
+{
+	unsigned int esr;
+
+	if (chip->udata.param[QG_ESR_CHARGE_DELTA].valid) {
+		esr = chip->udata.param[QG_ESR_CHARGE_DELTA].data;
+		qg_sdam_write(SDAM_ESR_CHARGE_DELTA, esr);
+		qg_dbg(chip, QG_DEBUG_ESR,
+			"SDAM store ESR_CHARGE_DELTA=%d\n", esr);
+	}
+
+	if (chip->udata.param[QG_ESR_DISCHARGE_DELTA].valid) {
+		esr = chip->udata.param[QG_ESR_DISCHARGE_DELTA].data;
+		qg_sdam_write(SDAM_ESR_DISCHARGE_DELTA, esr);
+		qg_dbg(chip, QG_DEBUG_ESR,
+			"SDAM store ESR_DISCHARGE_DELTA=%d\n", esr);
+	}
+
+	if (chip->udata.param[QG_ESR_CHARGE_SF].valid) {
+		esr = chip->udata.param[QG_ESR_CHARGE_SF].data;
+		qg_sdam_write(SDAM_ESR_CHARGE_SF, esr);
+		qg_dbg(chip, QG_DEBUG_ESR,
+			"SDAM store ESR_CHARGE_SF=%d\n", esr);
+	}
+
+	if (chip->udata.param[QG_ESR_DISCHARGE_SF].valid) {
+		esr = chip->udata.param[QG_ESR_DISCHARGE_SF].data;
+		qg_sdam_write(SDAM_ESR_DISCHARGE_SF, esr);
+		qg_dbg(chip, QG_DEBUG_ESR,
+			"SDAM store ESR_DISCHARGE_SF=%d\n", esr);
+	}
+}
+
+#define MAX_ESR_RETRY_COUNT		10
+#define ESR_SD_PERCENT			10
+static int qg_process_esr_data(struct qpnp_qg *chip)
+{
+	int i;
+	int pre_i, post_i, pre_v, post_v, first_pre_i = 0;
+	int diff_v, diff_i, esr_avg = 0, count = 0;
+
+	for (i = 0; i < qg_esr_count; i++) {
+		if (!chip->esr_data[i].valid)
+			continue;
+
+		pre_i = chip->esr_data[i].pre_esr_i;
+		pre_v = chip->esr_data[i].pre_esr_v;
+		post_i = chip->esr_data[i].post_esr_i;
+		post_v = chip->esr_data[i].post_esr_v;
+
+		/*
+		 * Check if any of the pre/post readings have changed
+		 * signs by comparing it with the first valid
+		 * pre_i value.
+		 */
+		if (!first_pre_i)
+			first_pre_i = pre_i;
+
+		if ((first_pre_i < 0 && pre_i > 0) ||
+			(first_pre_i > 0 && post_i < 0) ||
+			(first_pre_i < 0 && post_i > 0)) {
+			qg_dbg(chip, QG_DEBUG_ESR,
+				"ESR-sign mismatch %d reject all data\n", i);
+			esr_avg = count = 0;
+			break;
+		}
+
+		/* calculate ESR */
+		diff_v = abs(post_v - pre_v);
+		diff_i = abs(post_i - pre_i);
+
+		if (!diff_v || !diff_i ||
+			(diff_i < chip->dt.esr_qual_i_ua) ||
+			(diff_v < chip->dt.esr_qual_v_uv)) {
+			qg_dbg(chip, QG_DEBUG_ESR,
+				"ESR (%d) V/I %duA %duV fails qualification\n",
+				i, diff_i, diff_v);
+			chip->esr_data[i].valid = false;
+			continue;
+		}
+
+		chip->esr_data[i].esr =
+			DIV_ROUND_CLOSEST(diff_v * 1000, diff_i);
+		qg_dbg(chip, QG_DEBUG_ESR,
+			"ESR qualified: i=%d pre_i=%d pre_v=%d post_i=%d post_v=%d esr_diff_v=%d esr_diff_i=%d esr=%d\n",
+			i, pre_i, pre_v, post_i, post_v,
+			diff_v, diff_i, chip->esr_data[i].esr);
+
+		esr_avg += chip->esr_data[i].esr;
+		count++;
+	}
+
+	if (!count) {
+		qg_dbg(chip, QG_DEBUG_ESR,
+			"No ESR samples qualified, ESR not found\n");
+		chip->esr_avg = 0;
+		return 0;
+	}
+
+	esr_avg /= count;
+	qg_dbg(chip, QG_DEBUG_ESR,
+		"ESR all sample average=%d count=%d apply_SD=%d\n",
+		esr_avg, count, (esr_avg * ESR_SD_PERCENT) / 100);
 
 	/*
-	 * Reject the FIFO read event if there are back-to-back requests
-	 * This is done to gaurantee that there is always a minimum FIFO
-	 * data to be processed, ignore this if vbat_low is set.
+	 * Reject ESR samples which deviate from the average
+	 * by more than ESR_SD_PERCENT (10%)
 	 */
-	time_delta = ktime_ms_delta(now, chip->last_user_update_time);
+	count = 0;
+	for (i = 0; i < qg_esr_count; i++) {
+		if (!chip->esr_data[i].valid)
+			continue;
 
-	qg_dbg(chip, QG_DEBUG_FIFO, "time_delta=%lld ms vbat_low=%d\n",
-				time_delta, vbat_low);
+		if ((abs(chip->esr_data[i].esr - esr_avg) <=
+			(esr_avg * ESR_SD_PERCENT) / 100)) {
+			/* valid ESR */
+			chip->esr_avg += chip->esr_data[i].esr;
+			count++;
+			qg_dbg(chip, QG_DEBUG_ESR,
+				"Valid ESR after SD (%d) %d mOhm\n",
+				i, chip->esr_data[i].esr);
+		} else {
+			qg_dbg(chip, QG_DEBUG_ESR,
+				"ESR (%d) %d falls-out of SD(%d)\n",
+				i, chip->esr_data[i].esr, ESR_SD_PERCENT);
+		}
+	}
 
-	if (time_delta > MIN_FIFO_FULL_TIME_MS || vbat_low || update_smb) {
-		rc = qg_master_hold(chip, true);
+	if (count >= QG_MIN_ESR_COUNT) {
+		chip->esr_avg /= count;
+		qg_dbg(chip, QG_DEBUG_ESR, "Average estimated ESR %d mOhm\n",
+					chip->esr_avg);
+	} else {
+		qg_dbg(chip, QG_DEBUG_ESR,
+			"Not enough ESR samples, ESR not found\n");
+		chip->esr_avg = 0;
+	}
+
+	return 0;
+}
+
+static int qg_esr_estimate(struct qpnp_qg *chip)
+{
+	int rc, i, ibat;
+	u8 esr_done_count, reg0 = 0, reg1 = 0;
+	bool is_charging = false;
+
+	if (chip->dt.esr_disable)
+		return 0;
+
+	/*
+	 * Charge - enable ESR estimation only during fast-charging.
+	 * Discharge - enable ESR estimation only if enabled via DT.
+	 */
+	if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING &&
+			chip->charge_type != POWER_SUPPLY_CHARGE_TYPE_FAST) {
+		qg_dbg(chip, QG_DEBUG_ESR,
+			"Skip ESR, Not in fast-charge (CC)\n");
+		return 0;
+	}
+
+	if (chip->charge_status != POWER_SUPPLY_STATUS_CHARGING &&
+			!chip->dt.esr_discharge_enable)
+		return 0;
+
+	if (chip->batt_soc != INT_MIN && (chip->batt_soc <
+					chip->dt.esr_disable_soc)) {
+		qg_dbg(chip, QG_DEBUG_ESR,
+			"Skip ESR, batt-soc below %d\n",
+				chip->dt.esr_disable_soc);
+		return 0;
+	}
+
+	qg_dbg(chip, QG_DEBUG_ESR, "FIFO done count=%d ESR mod count=%d\n",
+			chip->fifo_done_count, qg_esr_mod_count);
+
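+	/* estimate ESR only once every qg_esr_mod_count FIFO completions */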
+	if ((chip->fifo_done_count % qg_esr_mod_count) != 0)
+		return 0;
+
+	if (qg_esr_count > QG_MAX_ESR_COUNT)
+		qg_esr_count = QG_MAX_ESR_COUNT;
+
+	if (qg_esr_count < QG_MIN_ESR_COUNT)
+		qg_esr_count = QG_MIN_ESR_COUNT;
+
+	/* clear all data */
+	chip->esr_avg = 0;
+	memset(&chip->esr_data, 0, sizeof(chip->esr_data));
+
+	rc = qg_master_hold(chip, true);
+	if (rc < 0) {
+		pr_err("Failed to hold master, rc=%d\n", rc);
+		goto done;
+	}
+
+	for (i = 0; i < qg_esr_count; i++) {
+		/* Fire ESR measurement */
+		rc = qg_masked_write(chip,
+			chip->qg_base + QG_ESR_MEAS_TRIG_REG,
+			HW_ESR_MEAS_START_BIT, HW_ESR_MEAS_START_BIT);
 		if (rc < 0) {
-			pr_err("Failed to hold master, rc=%d\n", rc);
-			goto done;
+			pr_err("Failed to start ESR rc=%d\n", rc);
+			continue;
 		}
 
-		rc = qg_process_rt_fifo(chip);
-		if (rc < 0) {
-			pr_err("Failed to process FIFO real-time, rc=%d\n", rc);
-			goto done;
-		}
+		esr_done_count = reg0 = reg1 = 0;
+		do {
+			/* delay for ESR processing to complete */
+			msleep(50);
 
-		if (vbat_low) {
-			/* change FIFO length */
-			rc = qg_update_fifo_length(chip,
-					chip->dt.s2_vbat_low_fifo_length);
+			esr_done_count++;
+
+			rc = qg_read(chip,
+				chip->qg_base + QG_STATUS1_REG, &reg0, 1);
+			if (rc < 0)
+				continue;
+
+			rc = qg_read(chip,
+				chip->qg_base + QG_STATUS4_REG, &reg1, 1);
+			if (rc < 0)
+				continue;
+
+			/* check ESR-done status */
+			if (!(reg1 & ESR_MEAS_IN_PROGRESS_BIT) &&
+					(reg0 & ESR_MEAS_DONE_BIT)) {
+				qg_dbg(chip, QG_DEBUG_ESR,
+					"ESR measurement done %d count %d\n",
+						i, esr_done_count);
+				break;
+			}
+		} while (esr_done_count < MAX_ESR_RETRY_COUNT);
+
+		if (esr_done_count == MAX_ESR_RETRY_COUNT) {
+			pr_err("Failed to get ESR done for %d iteration\n", i);
+			continue;
+		} else {
+			/* found a valid ESR, read pre-post data */
+			rc = qg_read_raw_data(chip, QG_PRE_ESR_V_DATA0_REG,
+					&chip->esr_data[i].pre_esr_v);
 			if (rc < 0)
 				goto done;
 
-			qg_dbg(chip, QG_DEBUG_STATUS,
-				"FIFO length updated to %d vbat_low=%d\n",
-					chip->dt.s2_vbat_low_fifo_length,
-					vbat_low);
-		}
-
-		if (update_smb) {
-			rc = qg_masked_write(chip, chip->qg_base +
-				QG_MODE_CTL1_REG, PARALLEL_IBAT_SENSE_EN_BIT,
-				chip->parallel_enabled ?
-					PARALLEL_IBAT_SENSE_EN_BIT : 0);
-			if (rc < 0) {
-				pr_err("Failed to update SMB_EN, rc=%d\n", rc);
+			rc = qg_read_raw_data(chip, QG_PRE_ESR_I_DATA0_REG,
+					&chip->esr_data[i].pre_esr_i);
+			if (rc < 0)
 				goto done;
-			}
-			qg_dbg(chip, QG_DEBUG_STATUS, "Parallel SENSE %d\n",
-						chip->parallel_enabled);
+
+			rc = qg_read_raw_data(chip, QG_POST_ESR_V_DATA0_REG,
+					&chip->esr_data[i].post_esr_v);
+			if (rc < 0)
+				goto done;
+
+			rc = qg_read_raw_data(chip, QG_POST_ESR_I_DATA0_REG,
+					&chip->esr_data[i].post_esr_i);
+			if (rc < 0)
+				goto done;
+
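+			/* raw ADC codes to uV/uA; current is signed 16-bit */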
+			chip->esr_data[i].pre_esr_v =
+				V_RAW_TO_UV(chip->esr_data[i].pre_esr_v);
+			ibat = sign_extend32(chip->esr_data[i].pre_esr_i, 15);
+			chip->esr_data[i].pre_esr_i = I_RAW_TO_UA(ibat);
+			chip->esr_data[i].post_esr_v =
+				V_RAW_TO_UV(chip->esr_data[i].post_esr_v);
+			ibat = sign_extend32(chip->esr_data[i].post_esr_i, 15);
+			chip->esr_data[i].post_esr_i = I_RAW_TO_UA(ibat);
+
+			chip->esr_data[i].valid = true;
+
+			if ((int)chip->esr_data[i].pre_esr_i < 0)
+				is_charging = true;
+
+			qg_dbg(chip, QG_DEBUG_ESR,
+				"ESR values for %d iteration pre_v=%d pre_i=%d post_v=%d post_i=%d\n",
+				i, chip->esr_data[i].pre_esr_v,
+				(int)chip->esr_data[i].pre_esr_i,
+				chip->esr_data[i].post_esr_v,
+				(int)chip->esr_data[i].post_esr_i);
 		}
-
-		rc = qg_master_hold(chip, false);
-		if (rc < 0) {
-			pr_err("Failed to release master, rc=%d\n", rc);
-			goto done;
-		}
-		/* FIFOs restarted */
-		chip->last_fifo_update_time = ktime_get();
-
-		/* signal the read thread */
-		chip->data_ready = true;
-		wake_up_interruptible(&chip->qg_wait_q);
-		chip->last_user_update_time = now;
-
-		/* vote to stay awake until userspace reads data */
-		vote(chip->awake_votable, FIFO_RT_DONE_VOTER, true, 0);
-	} else {
-		qg_dbg(chip, QG_DEBUG_FIFO, "FIFO processing too early time_delta=%lld\n",
-							time_delta);
+		/* delay before the next ESR measurement */
+		msleep(200);
 	}
+
+	rc = qg_process_esr_data(chip);
+	if (rc < 0)
+		pr_err("Failed to process ESR data rc=%d\n", rc);
+
+	rc = qg_master_hold(chip, false);
+	if (rc < 0) {
+		pr_err("Failed to release master, rc=%d\n", rc);
+		goto done;
+	}
+
+	if (chip->esr_avg) {
+		chip->kdata.param[QG_ESR].data = chip->esr_avg;
+		chip->kdata.param[QG_ESR].valid = true;
+		qg_dbg(chip, QG_DEBUG_ESR, "ESR_SW=%d during %s\n",
+			chip->esr_avg, is_charging ? "CHARGE" : "DISCHARGE");
+		qg_retrieve_esr_params(chip);
+		chip->esr_actual = chip->esr_avg;
+	}
+
+	return 0;
 done:
 	qg_master_hold(chip, false);
 	return rc;
@@ -649,6 +1030,9 @@
 	if (chip->udata.param[QG_BATT_SOC].valid)
 		chip->batt_soc = chip->udata.param[QG_BATT_SOC].data;
 
+	if (chip->udata.param[QG_FULL_SOC].valid)
+		chip->full_soc = chip->udata.param[QG_FULL_SOC].data;
+
 	if (chip->udata.param[QG_SOC].valid) {
 		qg_dbg(chip, QG_DEBUG_SOC, "udata SOC=%d last SOC=%d\n",
 			chip->udata.param[QG_SOC].data, chip->catch_up_soc);
@@ -664,7 +1048,7 @@
 				chip->udata.param[QG_RBAT_MOHM].data;
 		chip->sdam_data[SDAM_VALID] = 1;
 
-		rc = qg_update_sdam_params(chip);
+		rc = qg_store_soc_params(chip);
 		if (rc < 0)
 			pr_err("Failed to update SDAM params, rc=%d\n", rc);
 	}
@@ -673,18 +1057,25 @@
 		chip->charge_counter_uah =
 			chip->udata.param[QG_CHARGE_COUNTER].data;
 
+	if (chip->udata.param[QG_ESR].valid)
+		chip->esr_last = chip->udata.param[QG_ESR].data;
+
+	if (chip->esr_actual != -EINVAL && chip->udata.param[QG_ESR].valid) {
+		chip->esr_nominal = chip->udata.param[QG_ESR].data;
+		if (chip->qg_psy)
+			power_supply_changed(chip->qg_psy);
+	}
+
+	if (!chip->dt.esr_disable)
+		qg_store_esr_params(chip);
+
+	qg_dbg(chip, QG_DEBUG_STATUS, "udata update: batt_soc=%d cc_soc=%d full_soc=%d qg_esr=%d\n",
+		(chip->batt_soc != INT_MIN) ? chip->batt_soc : -EINVAL,
+		(chip->cc_soc != INT_MIN) ? chip->cc_soc : -EINVAL,
+		chip->full_soc, chip->esr_last);
 	vote(chip->awake_votable, UDATA_READY_VOTER, false, 0);
 }
 
-static irqreturn_t qg_default_irq_handler(int irq, void *data)
-{
-	struct qpnp_qg *chip = data;
-
-	qg_dbg(chip, QG_DEBUG_IRQ, "IRQ triggered\n");
-
-	return IRQ_HANDLED;
-}
-
 #define MAX_FIFO_DELTA_PERCENT		10
 static irqreturn_t qg_fifo_update_done_handler(int irq, void *data)
 {
@@ -712,6 +1103,9 @@
 		goto done;
 	}
 
+	if (++chip->fifo_done_count == U32_MAX)
+		chip->fifo_done_count = 0;
+
 	rc = qg_vbat_thresholds_config(chip);
 	if (rc < 0)
 		pr_err("Failed to apply VBAT EMPTY config rc=%d\n", rc);
@@ -722,6 +1116,12 @@
 		goto done;
 	}
 
+	rc = qg_esr_estimate(chip);
+	if (rc < 0) {
+		pr_err("Failed to estimate ESR, rc=%d\n", rc);
+		goto done;
+	}
+
 	rc = get_fifo_done_time(chip, false, &hw_delta_ms);
 	if (rc < 0)
 		hw_delta_ms = 0;
@@ -761,9 +1161,14 @@
 		pr_err("Failed to read RT status, rc=%d\n", rc);
 		goto done;
 	}
+	/* ignore VBAT low if battery is missing */
+	if ((status & BATTERY_MISSING_INT_RT_STS_BIT) ||
+			chip->battery_missing)
+		goto done;
+
 	chip->vbat_low = !!(status & VBAT_LOW_INT_RT_STS_BIT);
 
-	rc = process_rt_fifo_data(chip, chip->vbat_low, false);
+	rc = process_rt_fifo_data(chip, true, false);
 	if (rc < 0)
 		pr_err("Failed to process RT FIFO data, rc=%d\n", rc);
 
@@ -777,8 +1182,20 @@
 {
 	struct qpnp_qg *chip = data;
 	u32 ocv_uv = 0;
+	int rc;
+	u8 status = 0;
 
 	qg_dbg(chip, QG_DEBUG_IRQ, "IRQ triggered\n");
+
+	rc = qg_read(chip, chip->qg_base + QG_INT_RT_STS_REG, &status, 1);
+	if (rc < 0)
+		pr_err("Failed to read RT status rc=%d\n", rc);
+
+	/* ignore VBAT empty if battery is missing */
+	if ((status & BATTERY_MISSING_INT_RT_STS_BIT) ||
+			chip->battery_missing)
+		return IRQ_HANDLED;
+
 	pr_warn("VBATT EMPTY SOC = 0\n");
 
 	chip->catch_up_soc = 0;
@@ -789,7 +1206,7 @@
 	chip->sdam_data[SDAM_OCV_UV] = ocv_uv;
 	chip->sdam_data[SDAM_VALID] = 1;
 
-	qg_update_sdam_params(chip);
+	qg_store_soc_params(chip);
 
 	if (chip->qg_psy)
 		power_supply_changed(chip->qg_psy);
@@ -839,7 +1256,6 @@
 static struct qg_irq_info qg_irqs[] = {
 	[QG_BATT_MISSING_IRQ] = {
 		.name		= "qg-batt-missing",
-		.handler	= qg_default_irq_handler,
 	},
 	[QG_VBATT_LOW_IRQ] = {
 		.name		= "qg-vbat-low",
@@ -863,11 +1279,9 @@
 	},
 	[QG_FSM_STAT_CHG_IRQ] = {
 		.name		= "qg-fsm-state-chg",
-		.handler	= qg_default_irq_handler,
 	},
 	[QG_EVENT_IRQ] = {
 		.name		= "qg-event",
-		.handler	= qg_default_irq_handler,
 	},
 };
 
@@ -966,7 +1380,7 @@
 		return -ENODEV;
 
 	if (chip->battery_missing || !chip->profile_loaded)
-		return -EPERM;
+		return -ENODEV;
 
 	rc = qg_sdam_multibyte_read(QG_SDAM_LEARNED_CAPACITY_OFFSET,
 					(u8 *)&cc_mah, 2);
@@ -991,7 +1405,7 @@
 		return -ENODEV;
 
 	if (chip->battery_missing || !learned_cap_uah)
-		return -EPERM;
+		return -ENODEV;
 
 	cc_mah = div64_s64(learned_cap_uah, 1000);
 	rc = qg_sdam_multibyte_write(QG_SDAM_LEARNED_CAPACITY_OFFSET,
@@ -1031,7 +1445,7 @@
 		return -ENODEV;
 
 	if (chip->battery_missing || !chip->profile_loaded)
-		return -EPERM;
+		return -ENODEV;
 
 	if (!buf || length > BUCKET_COUNT)
 		return -EINVAL;
@@ -1059,7 +1473,7 @@
 		return -ENODEV;
 
 	if (chip->battery_missing || !chip->profile_loaded)
-		return -EPERM;
+		return -ENODEV;
 
 	if (!buf || length > BUCKET_COUNT * 2 || id < 0 ||
 		id > BUCKET_COUNT - 1 ||
@@ -1097,17 +1511,36 @@
 static int qg_get_battery_current(struct qpnp_qg *chip, int *ibat_ua)
 {
 	int rc = 0, last_ibat = 0;
+	u32 fifo_length = 0;
 
 	if (chip->battery_missing) {
 		*ibat_ua = 0;
 		return 0;
 	}
 
-	rc = qg_read(chip, chip->qg_base + QG_LAST_ADC_I_DATA0_REG,
-				(u8 *)&last_ibat, 2);
-	if (rc < 0) {
-		pr_err("Failed to read LAST_ADV_I reg, rc=%d\n", rc);
-		return rc;
+	if (chip->parallel_enabled) {
+		/* read the last real-time FIFO */
+		rc = get_fifo_length(chip, &fifo_length, true);
+		if (rc < 0) {
+			pr_err("Failed to read RT FIFO length, rc=%d\n", rc);
+			return rc;
+		}
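+		/* each sample is 2 bytes; get byte offset of last sample */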
+		fifo_length = (fifo_length == 0) ? 0 : fifo_length - 1;
+		fifo_length *= 2;
+		rc = qg_read(chip, chip->qg_base + QG_I_FIFO0_DATA0_REG +
+					fifo_length, (u8 *)&last_ibat, 2);
+		if (rc < 0) {
+			pr_err("Failed to read FIFO_I_%d reg, rc=%d\n",
+					fifo_length / 2, rc);
+			return rc;
+		}
+	} else {
+		rc = qg_read(chip, chip->qg_base + QG_LAST_ADC_I_DATA0_REG,
+					(u8 *)&last_ibat, 2);
+		if (rc < 0) {
+			pr_err("Failed to read LAST_ADC_I reg, rc=%d\n", rc);
+			return rc;
+		}
 	}
 
 	last_ibat = sign_extend32(last_ibat, 15);
@@ -1171,30 +1604,85 @@
 	return 0;
 }
 
-static const char *qg_get_cycle_counts(struct qpnp_qg *chip)
+static int qg_get_ttf_param(void *data, enum ttf_param param, int *val)
 {
-	int i, rc, len = 0;
-	char *buf;
+	union power_supply_propval prop = {0, };
+	struct qpnp_qg *chip = data;
+	int rc = 0;
+	int64_t temp = 0;
 
-	buf = chip->counter_buf;
-	for (i = 1; i <= BUCKET_COUNT; i++) {
-		chip->counter->id = i;
-		rc = get_cycle_count(chip->counter);
-		if (rc < 0) {
-			pr_err("Couldn't get cycle count rc=%d\n", rc);
-			return NULL;
+	if (!chip)
+		return -ENODEV;
+
+	switch (param) {
+	case TTF_VALID:
+		*val = (!chip->battery_missing && chip->profile_loaded);
+		break;
+	case TTF_MSOC:
+		rc = qg_get_battery_capacity(chip, val);
+		break;
+	case TTF_VBAT:
+		rc = qg_get_battery_voltage(chip, val);
+		break;
+	case TTF_IBAT:
+		rc = qg_get_battery_current(chip, val);
+		break;
+	case TTF_FCC:
+		if (chip->qg_psy) {
+			rc = power_supply_get_property(chip->qg_psy,
+				POWER_SUPPLY_PROP_CHARGE_FULL, &prop);
+			if (rc >= 0) {
+				temp = div64_u64(prop.intval, 1000);
+				*val  = div64_u64(chip->full_soc * temp,
+						QG_SOC_FULL);
+			}
 		}
-
-		if (sizeof(chip->counter_buf) - len < 8) {
-			pr_err("Invalid length %d\n", len);
-			return NULL;
-		}
-
-		len += snprintf(buf+len, 8, "%d ", rc);
+		break;
+	case TTF_MODE:
+		*val = TTF_MODE_NORMAL;
+		break;
+	case TTF_ITERM:
+		if (chip->chg_iterm_ma == INT_MIN)
+			*val = 0;
+		else
+			*val = chip->chg_iterm_ma;
+		break;
+	case TTF_RBATT:
+		rc = qg_sdam_read(SDAM_RBAT_MOHM, val);
+		if (!rc)
+			*val *= 1000;
+		break;
+	case TTF_VFLOAT:
+		*val = chip->bp.float_volt_uv;
+		break;
+	case TTF_CHG_TYPE:
+		*val = chip->charge_type;
+		break;
+	case TTF_CHG_STATUS:
+		*val = chip->charge_status;
+		break;
+	default:
+		pr_err("Unsupported property %d\n", param);
+		rc = -EINVAL;
+		break;
 	}
 
-	buf[len] = '\0';
-	return buf;
+	return rc;
+}
+
+static int qg_ttf_awake_voter(void *data, bool val)
+{
+	struct qpnp_qg *chip = data;
+
+	if (!chip)
+		return -ENODEV;
+
+	if (chip->battery_missing || !chip->profile_loaded)
+		return -ENODEV;
+
+	vote(chip->awake_votable, TTF_AWAKE_VOTER, val, 0);
+
+	return 0;
 }
 
 static int qg_psy_set_property(struct power_supply *psy,
@@ -1224,6 +1712,17 @@
 			chip->cl->learned_cap_uah = pval->intval;
 		mutex_unlock(&chip->cl->lock);
 		break;
+	case POWER_SUPPLY_PROP_SOH:
+		chip->soh = pval->intval;
+		qg_dbg(chip, QG_DEBUG_STATUS, "SOH update: SOH=%d esr_actual=%d esr_nominal=%d\n",
+				chip->soh, chip->esr_actual, chip->esr_nominal);
+		break;
+	case POWER_SUPPLY_PROP_ESR_ACTUAL:
+		chip->esr_actual = pval->intval;
+		break;
+	case POWER_SUPPLY_PROP_ESR_NOMINAL:
+		chip->esr_nominal = pval->intval;
+		break;
 	default:
 		break;
 	}
@@ -1267,6 +1766,12 @@
 		if (!rc)
 			pval->intval *= 1000;
 		break;
+	case POWER_SUPPLY_PROP_RESISTANCE_NOW:
+		pval->intval = chip->esr_last;
+		break;
+	case POWER_SUPPLY_PROP_SOC_REPORTING_READY:
+		pval->intval = chip->soc_reporting_ready;
+		break;
 	case POWER_SUPPLY_PROP_RESISTANCE_CAPACITIVE:
 		pval->intval = chip->dt.rbat_conn_mohm;
 		break;
@@ -1302,7 +1807,29 @@
 			pval->intval = (int)temp;
 		break;
 	case POWER_SUPPLY_PROP_CYCLE_COUNTS:
-		pval->strval = qg_get_cycle_counts(chip);
+		rc = get_cycle_counts(chip->counter, &pval->strval);
+		if (rc < 0)
+			pval->strval = NULL;
+		break;
+	case POWER_SUPPLY_PROP_CYCLE_COUNT:
+		rc = get_cycle_count(chip->counter, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
+		rc = ttf_get_time_to_full(chip->ttf, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
+		rc = ttf_get_time_to_empty(chip->ttf, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_ESR_ACTUAL:
+		pval->intval = (chip->esr_actual == -EINVAL) ?  -EINVAL :
+					(chip->esr_actual * 1000);
+		break;
+	case POWER_SUPPLY_PROP_ESR_NOMINAL:
+		pval->intval = (chip->esr_nominal == -EINVAL) ?  -EINVAL :
+					(chip->esr_nominal * 1000);
+		break;
+	case POWER_SUPPLY_PROP_SOH:
+		pval->intval = chip->soh;
 		break;
 	default:
 		pr_debug("Unsupported property %d\n", psp);
@@ -1317,6 +1844,9 @@
 {
 	switch (psp) {
 	case POWER_SUPPLY_PROP_CHARGE_FULL:
+	case POWER_SUPPLY_PROP_ESR_ACTUAL:
+	case POWER_SUPPLY_PROP_ESR_NOMINAL:
+	case POWER_SUPPLY_PROP_SOH:
 		return 1;
 	default:
 		break;
@@ -1333,6 +1863,8 @@
 	POWER_SUPPLY_PROP_CHARGE_COUNTER,
 	POWER_SUPPLY_PROP_RESISTANCE,
 	POWER_SUPPLY_PROP_RESISTANCE_ID,
+	POWER_SUPPLY_PROP_RESISTANCE_NOW,
+	POWER_SUPPLY_PROP_SOC_REPORTING_READY,
 	POWER_SUPPLY_PROP_RESISTANCE_CAPACITIVE,
 	POWER_SUPPLY_PROP_DEBUG_BATTERY,
 	POWER_SUPPLY_PROP_BATTERY_TYPE,
@@ -1340,9 +1872,15 @@
 	POWER_SUPPLY_PROP_VOLTAGE_MAX,
 	POWER_SUPPLY_PROP_BATT_FULL_CURRENT,
 	POWER_SUPPLY_PROP_BATT_PROFILE_VERSION,
+	POWER_SUPPLY_PROP_CYCLE_COUNT,
 	POWER_SUPPLY_PROP_CYCLE_COUNTS,
 	POWER_SUPPLY_PROP_CHARGE_FULL,
 	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+	POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
+	POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
+	POWER_SUPPLY_PROP_ESR_ACTUAL,
+	POWER_SUPPLY_PROP_ESR_NOMINAL,
+	POWER_SUPPLY_PROP_SOH,
 };
 
 static const struct power_supply_desc qg_psy_desc = {
@@ -1429,6 +1967,7 @@
 {
 	int rc;
 	bool parallel_enabled = is_parallel_enabled(chip);
+	bool update_smb = false;
 
 	if (parallel_enabled == chip->parallel_enabled)
 		return 0;
@@ -1439,7 +1978,14 @@
 
 	mutex_lock(&chip->data_lock);
 
-	rc = process_rt_fifo_data(chip, false, true);
+	/*
+	 * Parallel charger uses the same external sense, hence do not
+	 * enable SMB sensing if PMI632 is configured for external sense.
+	 */
+	if (!chip->dt.qg_ext_sense)
+		update_smb = true;
+
+	rc = process_rt_fifo_data(chip, false, update_smb);
 	if (rc < 0)
 		pr_err("Failed to process RT FIFO data, rc=%d\n", rc);
 
@@ -1464,6 +2010,110 @@
 	return 0;
 }
 
+static int qg_handle_battery_removal(struct qpnp_qg *chip)
+{
+	int rc, length = QG_SDAM_MAX_OFFSET - QG_SDAM_VALID_OFFSET;
+	u8 *data;
+
+	/* clear SDAM */
+	data = kcalloc(length, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	rc = qg_sdam_multibyte_write(QG_SDAM_VALID_OFFSET, data, length);
+	if (rc < 0)
+		pr_err("Failed to clear SDAM rc=%d\n", rc);
+
+	return rc;
+}
+
+#define MAX_QG_OK_RETRIES	20
+static int qg_handle_battery_insertion(struct qpnp_qg *chip)
+{
+	int rc, count = 0;
+	u32 ocv_uv = 0, ocv_raw = 0;
+	u8 reg = 0;
+
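+	/* poll STATUS1 until QG_OK is set, up to MAX_QG_OK_RETRIES */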
+	do {
+		rc = qg_read(chip, chip->qg_base + QG_STATUS1_REG, &reg, 1);
+		if (rc < 0) {
+			pr_err("Failed to read STATUS1_REG rc=%d\n", rc);
+			return rc;
+		}
+
+		if (reg & QG_OK_BIT)
+			break;
+
+		msleep(200);
+		count++;
+	} while (count < MAX_QG_OK_RETRIES);
+
+	if (count == MAX_QG_OK_RETRIES) {
+		qg_dbg(chip, QG_DEBUG_STATUS, "QG_OK not set!\n");
+		return 0;
+	}
+
+	/* read S7 PON OCV */
+	rc = qg_read_ocv(chip, &ocv_uv, &ocv_raw, S7_PON_OCV);
+	if (rc < 0) {
+		pr_err("Failed to read PON OCV rc=%d\n", rc);
+		return rc;
+	}
+
+	qg_dbg(chip, QG_DEBUG_STATUS,
+		"S7_OCV on battery insertion = %duV\n", ocv_uv);
+
+	chip->kdata.param[QG_GOOD_OCV_UV].data = ocv_uv;
+	chip->kdata.param[QG_GOOD_OCV_UV].valid = true;
+	/* clear all the userspace data */
+	chip->kdata.param[QG_CLEAR_LEARNT_DATA].data = 1;
+	chip->kdata.param[QG_CLEAR_LEARNT_DATA].valid = true;
+
+	vote(chip->awake_votable, GOOD_OCV_VOTER, true, 0);
+	/* signal the read thread */
+	chip->data_ready = true;
+	wake_up_interruptible(&chip->qg_wait_q);
+
+	return 0;
+}
+
+static int qg_battery_status_update(struct qpnp_qg *chip)
+{
+	int rc;
+	union power_supply_propval prop = {0, };
+
+	if (!is_batt_available(chip))
+		return 0;
+
+	mutex_lock(&chip->data_lock);
+
+	rc = power_supply_get_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_PRESENT, &prop);
+	if (rc < 0) {
+		pr_err("Failed to get battery-present, rc=%d\n", rc);
+		goto done;
+	}
+
+	if (chip->battery_missing && prop.intval) {
+		pr_warn("Battery inserted!\n");
+		rc = qg_handle_battery_insertion(chip);
+		if (rc < 0)
+			pr_err("Failed in battery-insertion rc=%d\n", rc);
+	} else if (!chip->battery_missing && !prop.intval) {
+		pr_warn("Battery removed!\n");
+		rc = qg_handle_battery_removal(chip);
+		if (rc < 0)
+			pr_err("Failed in battery-removal rc=%d\n", rc);
+	}
+
+	chip->battery_missing = !prop.intval;
+
+done:
+	mutex_unlock(&chip->data_lock);
+	return rc;
+}
+
 static void qg_status_change_work(struct work_struct *work)
 {
 	struct qpnp_qg *chip = container_of(work,
@@ -1476,6 +2126,17 @@
 		goto out;
 	}
 
+	rc = qg_battery_status_update(chip);
+	if (rc < 0)
+		pr_err("Failed to process battery status update rc=%d\n", rc);
+
+	rc = power_supply_get_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_CHARGE_TYPE, &prop);
+	if (rc < 0)
+		pr_err("Failed to get charge-type, rc=%d\n", rc);
+	else
+		chip->charge_type = prop.intval;
+
 	rc = power_supply_get_property(chip->batt_psy,
 			POWER_SUPPLY_PROP_STATUS, &prop);
 	if (rc < 0)
@@ -1522,6 +2183,8 @@
 	rc = qg_charge_full_update(chip);
 	if (rc < 0)
 		pr_err("Failed in charge_full_update, rc=%d\n", rc);
+
+	ttf_update(chip->ttf, chip->usb_present);
 out:
 	pm_relax(chip->dev);
 }
@@ -1863,6 +2526,7 @@
 		qg_dbg(chip, QG_DEBUG_PROFILE, "Battery Missing!\n");
 		chip->battery_missing = true;
 		chip->profile_loaded = false;
+		chip->soc_reporting_ready = true;
 	} else {
 		/* battery present */
 		rc = get_batt_id_ohm(chip, &chip->batt_id_ohm);
@@ -1871,11 +2535,14 @@
 			chip->profile_loaded = false;
 		} else {
 			rc = qg_load_battery_profile(chip);
-			if (rc < 0)
+			if (rc < 0) {
 				pr_err("Failed to load battery-profile rc=%d\n",
 								rc);
-			else
+				chip->profile_loaded = false;
+				chip->soc_reporting_ready = true;
+			} else {
 				chip->profile_loaded = true;
+			}
 		}
 	}
 
@@ -1886,12 +2553,21 @@
 	return 0;
 }
 
+static struct ocv_all ocv[] = {
+	[S7_PON_OCV] = { 0, 0, "S7_PON_OCV"},
+	[S3_GOOD_OCV] = { 0, 0, "S3_GOOD_OCV"},
+	[S3_LAST_OCV] = { 0, 0, "S3_LAST_OCV"},
+	[SDAM_PON_OCV] = { 0, 0, "SDAM_PON_OCV"},
+};
+
+#define S7_ERROR_MARGIN_UV		20000
 static int qg_determine_pon_soc(struct qpnp_qg *chip)
 {
-	int rc = 0, batt_temp = 0;
+	int rc = 0, batt_temp = 0, i;
 	bool use_pon_ocv = true;
 	unsigned long rtc_sec = 0;
-	u32 ocv_uv = 0, ocv_raw = 0, soc = 0, shutdown[SDAM_MAX] = {0};
+	u32 ocv_uv = 0, soc = 0, shutdown[SDAM_MAX] = {0};
 	char ocv_type[20] = "NONE";
 
 	if (!chip->profile_loaded) {
@@ -1940,33 +2616,47 @@
 			goto done;
 		}
 
-		/*
-		 * Read S3_LAST_OCV, if S3_LAST_OCV is invalid,
-		 * read the SDAM_PON_OCV
-		 * if SDAM is not-set, use S7_PON_OCV.
-		 */
-		strlcpy(ocv_type, "S3_LAST_SOC", 20);
-		rc = qg_read_ocv(chip, &ocv_uv, &ocv_raw, S3_LAST_OCV);
-		if (rc < 0)
-			goto done;
-
-		if (ocv_raw == FIFO_V_RESET_VAL) {
-			/* S3_LAST_OCV is invalid */
-			strlcpy(ocv_type, "SDAM_PON_SOC", 20);
-			rc = qg_read_ocv(chip, &ocv_uv, &ocv_raw, SDAM_PON_OCV);
+		/* read all OCVs */
+		for (i = S7_PON_OCV; i < PON_OCV_MAX; i++) {
+			rc = qg_read_ocv(chip, &ocv[i].ocv_uv,
+						&ocv[i].ocv_raw, i);
 			if (rc < 0)
-				goto done;
+				pr_err("Failed to read %s OCV rc=%d\n",
+						ocv[i].ocv_type, rc);
+			else
+				qg_dbg(chip, QG_DEBUG_PON, "%s OCV=%d\n",
+					ocv[i].ocv_type, ocv[i].ocv_uv);
+		}
 
-			if (!ocv_uv) {
-				/* SDAM_PON_OCV is not set */
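+		/*
+		 * S3_LAST_OCV invalid: use SDAM PON OCV unless it is unset,
+		 * not above S7, or (with no valid shutdown data) more than
+		 * S7_ERROR_MARGIN_UV above S7; in those cases use S7.
+		 * S3_LAST_OCV valid: use the higher of S3_LAST_OCV and S7.
+		 */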
+		if (ocv[S3_LAST_OCV].ocv_raw == FIFO_V_RESET_VAL) {
+			if (!ocv[SDAM_PON_OCV].ocv_uv) {
 				strlcpy(ocv_type, "S7_PON_SOC", 20);
-				rc = qg_read_ocv(chip, &ocv_uv, &ocv_raw,
-							S7_PON_OCV);
-				if (rc < 0)
-					goto done;
+				ocv_uv = ocv[S7_PON_OCV].ocv_uv;
+			} else if (ocv[SDAM_PON_OCV].ocv_uv <=
+					ocv[S7_PON_OCV].ocv_uv) {
+				strlcpy(ocv_type, "S7_PON_SOC", 20);
+				ocv_uv = ocv[S7_PON_OCV].ocv_uv;
+			} else if (!shutdown[SDAM_VALID] &&
+				((ocv[SDAM_PON_OCV].ocv_uv -
+					ocv[S7_PON_OCV].ocv_uv) >
+					S7_ERROR_MARGIN_UV)) {
+				strlcpy(ocv_type, "S7_PON_SOC", 20);
+				ocv_uv = ocv[S7_PON_OCV].ocv_uv;
+			} else {
+				strlcpy(ocv_type, "SDAM_PON_SOC", 20);
+				ocv_uv = ocv[SDAM_PON_OCV].ocv_uv;
+			}
+		} else {
+			if (ocv[S3_LAST_OCV].ocv_uv >= ocv[S7_PON_OCV].ocv_uv) {
+				strlcpy(ocv_type, "S3_LAST_SOC", 20);
+				ocv_uv = ocv[S3_LAST_OCV].ocv_uv;
+			} else {
+				strlcpy(ocv_type, "S7_PON_SOC", 20);
+				ocv_uv = ocv[S7_PON_OCV].ocv_uv;
 			}
 		}
 
+		ocv_uv = CAP(QG_MIN_OCV_UV, QG_MAX_OCV_UV, ocv_uv);
 		rc = lookup_soc_ocv(&soc, ocv_uv, batt_temp, false);
 		if (rc < 0) {
 			pr_err("Failed to lookup SOC@PON rc=%d\n", rc);
@@ -1992,13 +2682,16 @@
 	if (rc < 0)
 		pr_err("Failed to update MSOC register rc=%d\n", rc);
 
-	rc = qg_update_sdam_params(chip);
+	rc = qg_store_soc_params(chip);
 	if (rc < 0)
 		pr_err("Failed to update sdam params rc=%d\n", rc);
 
 	pr_info("using %s @ PON ocv_uv=%duV soc=%d\n",
 			ocv_type, ocv_uv, chip->msoc);
 
+	/* SOC reporting is now ready */
+	chip->soc_reporting_ready = 1;
+
 	return 0;
 }
 
@@ -2021,6 +2714,7 @@
 	return 0;
 }
 
+#define ADC_CONV_DLY_512MS		0xA
 static int qg_hw_init(struct qpnp_qg *chip)
 {
 	int rc, temp;
@@ -2201,6 +2895,22 @@
 		return rc;
 	}
 
+	/* disable S5 */
+	rc = qg_masked_write(chip, chip->qg_base +
+				QG_S5_OCV_VALIDATE_MEAS_CTL1_REG,
+				ALLOW_S5_BIT, 0);
+	if (rc < 0)
+		pr_err("Failed to disable S5 rc=%d\n", rc);
+
+	/* change PON OCV time to 512ms */
+	rc = qg_masked_write(chip, chip->qg_base +
+				QG_S7_PON_OCV_MEAS_CTL1_REG,
+				ADC_CONV_DLY_MASK,
+				ADC_CONV_DLY_512MS);
+	if (rc < 0)
+		pr_err("Failed to reconfigure S7-delay rc=%d\n", rc);
+
+
 	return 0;
 }
 
@@ -2220,6 +2930,10 @@
 				QG_INIT_STATE_IRQ_DISABLE, true, 0);
 	}
 
+	/* restore ESR data */
+	if (!chip->dt.esr_disable)
+		qg_retrieve_esr_params(chip);
+
 	return 0;
 }
 
@@ -2294,10 +3008,12 @@
 	return 0;
 }
 
+#define QG_TTF_ITERM_DELTA_MA		1
 static int qg_alg_init(struct qpnp_qg *chip)
 {
 	struct cycle_counter *counter;
 	struct cap_learning *cl;
+	struct ttf *ttf;
 	struct device_node *node = chip->dev->of_node;
 	int rc;
 
@@ -2320,6 +3036,28 @@
 
 	chip->counter = counter;
 
+	ttf = devm_kzalloc(chip->dev, sizeof(*ttf), GFP_KERNEL);
+	if (!ttf)
+		return -ENOMEM;
+
+	ttf->get_ttf_param = qg_get_ttf_param;
+	ttf->awake_voter = qg_ttf_awake_voter;
+	ttf->iterm_delta = QG_TTF_ITERM_DELTA_MA;
+	ttf->data = chip;
+
+	rc = ttf_tte_init(ttf);
+	if (rc < 0) {
+		dev_err(chip->dev, "Error in initializing ttf, rc:%d\n",
+			rc);
+		ttf->data = NULL;
+		counter->data = NULL;
+		devm_kfree(chip->dev, ttf);
+		devm_kfree(chip->dev, counter);
+		return rc;
+	}
+
+	chip->ttf = ttf;
+
 	chip->dt.cl_disable = of_property_read_bool(node,
 					"qcom,cl-disable");
 
@@ -2344,6 +3082,7 @@
 		counter->data = NULL;
 		cl->data = NULL;
 		devm_kfree(chip->dev, counter);
+		devm_kfree(chip->dev, ttf);
 		devm_kfree(chip->dev, cl);
 		return rc;
 	}
@@ -2369,10 +3108,13 @@
 #define DEFAULT_CL_MAX_START_SOC	15
 #define DEFAULT_CL_MIN_TEMP_DECIDEGC	150
 #define DEFAULT_CL_MAX_TEMP_DECIDEGC	500
-#define DEFAULT_CL_MAX_INC_DECIPERC	5
-#define DEFAULT_CL_MAX_DEC_DECIPERC	100
-#define DEFAULT_CL_MIN_LIM_DECIPERC	0
-#define DEFAULT_CL_MAX_LIM_DECIPERC	0
+#define DEFAULT_CL_MAX_INC_DECIPERC	10
+#define DEFAULT_CL_MAX_DEC_DECIPERC	20
+#define DEFAULT_CL_MIN_LIM_DECIPERC	500
+#define DEFAULT_CL_MAX_LIM_DECIPERC	100
+#define DEFAULT_ESR_QUAL_CURRENT_UA	130000
+#define DEFAULT_ESR_QUAL_VBAT_UV	7000
+#define DEFAULT_ESR_DISABLE_SOC		1000
 static int qg_parse_dt(struct qpnp_qg *chip)
 {
 	int rc = 0;
@@ -2566,6 +3308,33 @@
 	else
 		chip->dt.rbat_conn_mohm = temp;
 
+	/* esr */
+	chip->dt.esr_disable = of_property_read_bool(node,
+					"qcom,esr-disable");
+
+	chip->dt.esr_discharge_enable = of_property_read_bool(node,
+					"qcom,esr-discharge-enable");
+
+	rc = of_property_read_u32(node, "qcom,esr-qual-current-ua", &temp);
+	if (rc < 0)
+		chip->dt.esr_qual_i_ua = DEFAULT_ESR_QUAL_CURRENT_UA;
+	else
+		chip->dt.esr_qual_i_ua = temp;
+
+	rc = of_property_read_u32(node, "qcom,esr-qual-vbatt-uv", &temp);
+	if (rc < 0)
+		chip->dt.esr_qual_v_uv = DEFAULT_ESR_QUAL_VBAT_UV;
+	else
+		chip->dt.esr_qual_v_uv = temp;
+
+	rc = of_property_read_u32(node, "qcom,esr-disable-soc", &temp);
+	if (rc < 0)
+		chip->dt.esr_disable_soc = DEFAULT_ESR_DISABLE_SOC;
+	else
+		chip->dt.esr_disable_soc = temp * 100;
+
+	chip->dt.qg_ext_sense = of_property_read_bool(node, "qcom,qg-ext-sns");
+
 	/* Capacity learning params*/
 	if (!chip->dt.cl_disable) {
 		chip->dt.cl_feedback_on = of_property_read_bool(node,
@@ -2625,9 +3394,9 @@
 			chip->cl->dt.min_start_soc, chip->cl->dt.max_start_soc,
 			chip->cl->dt.min_temp, chip->cl->dt.max_temp);
 	}
-	qg_dbg(chip, QG_DEBUG_PON, "DT: vbatt_empty_mv=%dmV vbatt_low_mv=%dmV delta_soc=%d\n",
+	qg_dbg(chip, QG_DEBUG_PON, "DT: vbatt_empty_mv=%dmV vbatt_low_mv=%dmV delta_soc=%d ext-sns=%d\n",
 			chip->dt.vbatt_empty_mv, chip->dt.vbatt_low_mv,
-			chip->dt.delta_soc);
+			chip->dt.delta_soc, chip->dt.qg_ext_sense);
 
 	return 0;
 }
@@ -2642,6 +3411,7 @@
 	if (!chip->profile_loaded)
 		return 0;
 
+	cancel_delayed_work_sync(&chip->ttf->ttf_work);
 	/* disable GOOD_OCV IRQ in sleep */
 	vote(chip->good_ocv_irq_disable_votable,
 			QG_INIT_STATE_IRQ_DISABLE, true, 0);
@@ -2742,7 +3512,9 @@
 		chip->kdata.param[QG_GOOD_OCV_UV].data = ocv_uv;
 		chip->kdata.param[QG_GOOD_OCV_UV].valid = true;
 		 /* Clear suspend data as there has been a GOOD OCV */
+		memset(&chip->kdata, 0, sizeof(chip->kdata));
 		chip->suspend_data = false;
+
 		qg_dbg(chip, QG_DEBUG_PM, "GOOD OCV @ resume good_ocv=%d uV\n",
 				ocv_uv);
 	}
@@ -2772,6 +3544,8 @@
 		chip->suspend_data = false;
 	}
 
+	schedule_delayed_work(&chip->ttf->ttf_work, 0);
+
 	return rc;
 }
 
@@ -2849,6 +3623,11 @@
 	chip->maint_soc = -EINVAL;
 	chip->batt_soc = INT_MIN;
 	chip->cc_soc = INT_MIN;
+	chip->full_soc = QG_SOC_FULL;
+	chip->chg_iterm_ma = INT_MIN;
+	chip->soh = -EINVAL;
+	chip->esr_actual = -EINVAL;
+	chip->esr_nominal = -EINVAL;
 
 	rc = qg_alg_init(chip);
 	if (rc < 0) {
@@ -2914,6 +3693,7 @@
 			pr_err("Error in restoring cycle_count, rc=%d\n", rc);
 			return rc;
 		}
+		schedule_delayed_work(&chip->ttf->ttf_work, 10000);
 	}
 
 	rc = qg_determine_pon_soc(chip);
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index 5df241f..56a09c6 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -331,6 +331,9 @@
 	if (rc < 0)
 		chg->otg_delay_ms = OTG_DEFAULT_DEGLITCH_TIME_MS;
 
+	chg->disable_stat_sw_override = of_property_read_bool(node,
+					"qcom,disable-stat-sw-override");
+
 	return 0;
 }
 
@@ -342,6 +345,7 @@
 	POWER_SUPPLY_PROP_PRESENT,
 	POWER_SUPPLY_PROP_ONLINE,
 	POWER_SUPPLY_PROP_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
 	POWER_SUPPLY_PROP_VOLTAGE_NOW,
 	POWER_SUPPLY_PROP_PD_CURRENT_MAX,
 	POWER_SUPPLY_PROP_CURRENT_MAX,
@@ -398,6 +402,9 @@
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
 		rc = smblib_get_prop_usb_voltage_max(chg, val);
 		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+		rc = smblib_get_prop_usb_voltage_max_design(chg, val);
+		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
 		rc = smblib_get_prop_usb_voltage_now(chg, val);
 		break;
@@ -991,6 +998,8 @@
 	POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX,
 	POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
 	POWER_SUPPLY_PROP_CHARGE_COUNTER,
+	POWER_SUPPLY_PROP_CHARGE_FULL,
+	POWER_SUPPLY_PROP_CYCLE_COUNT,
 };
 
 static int smb2_batt_get_prop(struct power_supply *psy,
@@ -1048,9 +1057,6 @@
 	case POWER_SUPPLY_PROP_SW_JEITA_ENABLED:
 		val->intval = chg->sw_jeita_enabled;
 		break;
-	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
-		rc = smblib_get_prop_batt_voltage_now(chg, val);
-		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
 		val->intval = get_client_vote(chg->fv_votable,
 				BATT_PROFILE_VOTER);
@@ -1062,9 +1068,6 @@
 		val->intval = get_client_vote_locked(chg->fv_votable,
 				QNOVO_VOTER);
 		break;
-	case POWER_SUPPLY_PROP_CURRENT_NOW:
-		rc = smblib_get_prop_batt_current_now(chg, val);
-		break;
 	case POWER_SUPPLY_PROP_CURRENT_QNOVO:
 		val->intval = get_client_vote_locked(chg->fcc_votable,
 				QNOVO_VOTER);
@@ -1073,9 +1076,6 @@
 		val->intval = get_client_vote(chg->fcc_votable,
 					      BATT_PROFILE_VOTER);
 		break;
-	case POWER_SUPPLY_PROP_TEMP:
-		rc = smblib_get_prop_batt_temp(chg, val);
-		break;
 	case POWER_SUPPLY_PROP_TECHNOLOGY:
 		val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
 		break;
@@ -1103,7 +1103,12 @@
 		val->intval = 0;
 		break;
 	case POWER_SUPPLY_PROP_CHARGE_COUNTER:
-		rc = smblib_get_prop_batt_charge_counter(chg, val);
+	case POWER_SUPPLY_PROP_CHARGE_FULL:
+	case POWER_SUPPLY_PROP_CYCLE_COUNT:
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+	case POWER_SUPPLY_PROP_CURRENT_NOW:
+	case POWER_SUPPLY_PROP_TEMP:
+		rc = smblib_get_prop_from_bms(chg, psp, val);
 		break;
 	default:
 		pr_err("batt power supply prop %d not supported\n", psp);
@@ -1837,6 +1842,16 @@
 		}
 	}
 
+	if (chg->disable_stat_sw_override) {
+		rc = smblib_masked_write(chg, STAT_CFG_REG,
+				STAT_SW_OVERRIDE_CFG_BIT, 0);
+		if (rc < 0) {
+			dev_err(chg->dev, "Couldn't disable STAT SW override rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
 	return rc;
 }
 
diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c
index d3aaf26..2a997dd 100644
--- a/drivers/power/supply/qcom/qpnp-smb5.c
+++ b/drivers/power/supply/qcom/qpnp-smb5.c
@@ -52,9 +52,16 @@
 		.max_u  = 3000000,
 		.step_u = 50000,
 	},
+	.icl_max_stat		= {
+		.name   = "dcdc icl max status",
+		.reg    = ICL_MAX_STATUS_REG,
+		.min_u  = 0,
+		.max_u  = 3000000,
+		.step_u = 50000,
+	},
 	.icl_stat		= {
 		.name   = "input current limit status",
-		.reg    = AICL_ICL_STATUS_REG,
+		.reg    = ICL_STATUS_REG,
 		.min_u  = 0,
 		.max_u  = 3000000,
 		.step_u = 50000,
@@ -88,9 +95,25 @@
 		.step_u	= 400,
 		.set_proc = smblib_set_chg_freq,
 	},
+	.aicl_5v_threshold		= {
+		.name   = "AICL 5V threshold",
+		.reg    = USBIN_5V_AICL_THRESHOLD_REG,
+		.min_u  = 4000,
+		.max_u  = 4700,
+		.step_u = 100,
+	},
+	.aicl_cont_threshold		= {
+		.name   = "AICL CONT threshold",
+		.reg    = USBIN_CONT_AICL_THRESHOLD_REG,
+		.min_u  = 4000,
+		.max_u  = 8800,
+		.step_u = 100,
+		.get_proc = smblib_get_aicl_cont_threshold,
+		.set_proc = smblib_set_aicl_cont_threshold,
+	},
 };
 
-static struct smb_params smb5_pmi855_params = {
+static struct smb_params smb5_pm855b_params = {
 	.fcc			= {
 		.name   = "fast charge current",
 		.reg    = CHGR_FAST_CHARGE_CURRENT_CFG_REG,
@@ -112,8 +135,15 @@
 		.max_u  = 5000000,
 		.step_u = 50000,
 	},
+	.icl_max_stat		= {
+		.name   = "dcdc icl max status",
+		.reg    = ICL_MAX_STATUS_REG,
+		.min_u  = 0,
+		.max_u  = 5000000,
+		.step_u = 50000,
+	},
 	.icl_stat		= {
-		.name   = "input current limit status",
+		.name   = "aicl icl status",
 		.reg    = AICL_ICL_STATUS_REG,
 		.min_u  = 0,
 		.max_u  = 5000000,
@@ -150,6 +180,22 @@
 		.step_u	= 400,
 		.set_proc = NULL,
 	},
+	.aicl_5v_threshold		= {
+		.name   = "AICL 5V threshold",
+		.reg    = USBIN_5V_AICL_THRESHOLD_REG,
+		.min_u  = 4000,
+		.max_u  = 4700,
+		.step_u = 100,
+	},
+	.aicl_cont_threshold		= {
+		.name   = "AICL CONT threshold",
+		.reg    = USBIN_CONT_AICL_THRESHOLD_REG,
+		.min_u  = 4000,
+		.max_u  = 11800,
+		.step_u = 100,
+		.get_proc = smblib_get_aicl_cont_threshold,
+		.set_proc = smblib_set_aicl_cont_threshold,
+	},
 };
 
 struct smb_dt_props {
@@ -177,6 +223,11 @@
 	debug_mask, __debug_mask, int, 0600
 );
 
+static int __pd_disabled;
+module_param_named(
+	pd_disabled, __pd_disabled, int, 0600
+);
+
 static int __weak_chg_icl_ua = 500000;
 module_param_named(
 	weak_chg_icl_ua, __weak_chg_icl_ua, int, 0600
@@ -187,6 +238,13 @@
 	USBIN_VOLTAGE,
 };
 
+enum {
+	BAT_THERM = 0,
+	MISC_THERM,
+	CONN_THERM,
+	SMB_THERM,
+};
+
 #define PMI632_MAX_ICL_UA	3000000
 static int smb5_chg_config_init(struct smb5 *chip)
 {
@@ -216,14 +274,17 @@
 	switch (pmic_rev_id->pmic_subtype) {
 	case PM855B_SUBTYPE:
 		chip->chg.smb_version = PM855B_SUBTYPE;
-		chg->param = smb5_pmi855_params;
+		chg->param = smb5_pm855b_params;
 		chg->name = "pm855b_charger";
 		break;
 	case PMI632_SUBTYPE:
 		chip->chg.smb_version = PMI632_SUBTYPE;
+		chg->wa_flags |= WEAK_ADAPTER_WA;
 		chg->param = smb5_pmi632_params;
 		chg->use_extcon = true;
 		chg->name = "pmi632_charger";
+		/* PMI632 does not support PD */
+		chg->pd_not_supported = true;
 		chg->hw_max_icl_ua =
 			(chip->dt.usb_icl_ua > 0) ? chip->dt.usb_icl_ua
 						: PMI632_MAX_ICL_UA;
@@ -245,7 +306,55 @@
 	return rc;
 }
 
+#define PULL_NO_PULL	0
+#define PULL_30K	30
+#define PULL_100K	100
+#define PULL_400K	400
+static int get_valid_pullup(int pull_up)
+{
+	int pull;
+
+	/* pull up can only be 0/30K/100K/400K */
+	switch (pull_up) {
+	case PULL_NO_PULL:
+		pull = INTERNAL_PULL_NO_PULL;
+		break;
+	case PULL_30K:
+		pull = INTERNAL_PULL_30K_PULL;
+		break;
+	case PULL_100K:
+		pull = INTERNAL_PULL_100K_PULL;
+		break;
+	case PULL_400K:
+		pull = INTERNAL_PULL_400K_PULL;
+		break;
+	default:
+		pull = INTERNAL_PULL_100K_PULL;
+	}
+
+	return pull;
+}
+
+#define INTERNAL_PULL_UP_MASK	0x3
+static int smb5_configure_internal_pull(struct smb_charger *chg, int type,
+					int pull)
+{
+	int rc;
+	int shift = type * 2;
+	u8 mask = INTERNAL_PULL_UP_MASK << shift;
+	u8 val = pull << shift;
+
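+	/* each ADC channel owns a 2-bit pull-up field in this register */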
+	rc = smblib_masked_write(chg, BATIF_ADC_INTERNAL_PULL_UP_REG,
+				mask, val);
+	if (rc < 0)
+		dev_err(chg->dev,
+			"Couldn't configure ADC pull-up reg rc=%d\n", rc);
+
+	return rc;
+}
+
 #define MICRO_1P5A		1500000
+#define MICRO_1PA		1000000
 #define MICRO_P1A		100000
 #define OTG_DEFAULT_DEGLITCH_TIME_MS	50
 #define MIN_WD_BARK_TIME		16
@@ -296,7 +405,8 @@
 	rc = of_property_read_u32(node,
 				"qcom,otg-cl-ua", &chg->otg_cl_ua);
 	if (rc < 0)
-		chg->otg_cl_ua = MICRO_1P5A;
+		chg->otg_cl_ua = (chip->chg.smb_version == PMI632_SUBTYPE) ?
+							MICRO_1PA : MICRO_1P5A;
 
 	if (of_find_property(node, "qcom,thermal-mitigation", &byte_len)) {
 		chg->thermal_mitigation = devm_kzalloc(chg->dev, byte_len,
@@ -364,14 +474,25 @@
 	if (rc < 0)
 		chg->otg_delay_ms = OTG_DEFAULT_DEGLITCH_TIME_MS;
 
+	chg->hw_die_temp_mitigation = of_property_read_bool(node,
+					"qcom,hw-die-temp-mitigation");
+
+	chg->hw_connector_mitigation = of_property_read_bool(node,
+					"qcom,hw-connector-mitigation");
+
+	chg->connector_pull_up = -EINVAL;
+	of_property_read_u32(node, "qcom,connector-internal-pull-kohm",
+					&chg->connector_pull_up);
+
 	return 0;
 }
 
 static int smb5_get_adc_data(struct smb_charger *chg, int channel,
 				union power_supply_propval *val)
 {
-	int rc;
+	int rc, ret = 0;
 	struct qpnp_vadc_result result;
+	u8 reg;
 
 	if (!chg->vadc_dev) {
 		if (of_find_property(chg->dev->of_node, "qcom,chg-vadc",
@@ -397,13 +518,43 @@
 
 	switch (channel) {
 	case USBIN_VOLTAGE:
+		/* Store ADC channel config */
+		rc = smblib_read(chg, BATIF_ADC_CHANNEL_EN_REG, &reg);
+		if (rc < 0) {
+			dev_err(chg->dev,
+				"Couldn't read ADC config rc=%d\n", rc);
+			return rc;
+		}
+
+		/* Disable all ADC channels except IBAT channel */
+		rc = smblib_write(chg, BATIF_ADC_CHANNEL_EN_REG,
+				IBATT_CHANNEL_EN_BIT);
+		if (rc < 0) {
+			dev_err(chg->dev,
+				"Couldn't write ADC config rc=%d\n", rc);
+			return rc;
+		}
+
 		rc = qpnp_vadc_read(chg->vadc_dev, VADC_USB_IN_V_DIV_16_PM5,
 				&result);
 		if (rc < 0) {
 			pr_err("Failed to read USBIN_V over vadc, rc=%d\n", rc);
-			return rc;
+			ret = rc;
+			goto restore;
 		}
 		val->intval = result.physical;
+
+restore:
+		/* Restore ADC channel config */
+		rc = smblib_write(chg, BATIF_ADC_CHANNEL_EN_REG, reg);
+		if (rc < 0) {
+			dev_err(chg->dev,
+				"Couldn't write ADC config rc=%d\n", rc);
+			return rc;
+		}
+		/* If ADC read failed return ADC error */
+		if (ret < 0)
+			rc = ret;
 		break;
 	case USBIN_CURRENT:
 		rc = qpnp_vadc_read(chg->vadc_dev, VADC_USB_IN_I_PM5, &result);
@@ -434,7 +585,6 @@
 	POWER_SUPPLY_PROP_TYPEC_MODE,
 	POWER_SUPPLY_PROP_TYPEC_POWER_ROLE,
 	POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION,
-	POWER_SUPPLY_PROP_PD_ALLOWED,
 	POWER_SUPPLY_PROP_PD_ACTIVE,
 	POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
 	POWER_SUPPLY_PROP_INPUT_CURRENT_NOW,
@@ -449,9 +599,11 @@
 	POWER_SUPPLY_PROP_SDP_CURRENT_MAX,
 	POWER_SUPPLY_PROP_CONNECTOR_TYPE,
 	POWER_SUPPLY_PROP_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
 	POWER_SUPPLY_PROP_SCOPE,
 	POWER_SUPPLY_PROP_VOLTAGE_NOW,
 	POWER_SUPPLY_PROP_HVDCP_OPTI_ALLOWED,
+	POWER_SUPPLY_PROP_QC_OPTI_DISABLE,
 };
 
 static int smb5_usb_get_prop(struct power_supply *psy,
@@ -485,6 +637,9 @@
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
 		rc = smblib_get_prop_usb_voltage_max(chg, val);
 		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+		rc = smblib_get_prop_usb_voltage_max_design(chg, val);
+		break;
 	case POWER_SUPPLY_PROP_PD_CURRENT_MAX:
 		val->intval = get_client_vote(chg->usb_icl_votable, PD_VOTER);
 		break;
@@ -515,9 +670,6 @@
 		else
 			rc = smblib_get_prop_typec_cc_orientation(chg, val);
 		break;
-	case POWER_SUPPLY_PROP_PD_ALLOWED:
-		rc = smblib_get_prop_pd_allowed(chg, val);
-		break;
 	case POWER_SUPPLY_PROP_PD_ACTIVE:
 		val->intval = chg->pd_active;
 		break;
@@ -583,6 +735,13 @@
 	case POWER_SUPPLY_PROP_HVDCP_OPTI_ALLOWED:
 		val->intval = !chg->flash_active;
 		break;
+	case POWER_SUPPLY_PROP_QC_OPTI_DISABLE:
+		if (chg->hw_die_temp_mitigation)
+			val->intval = POWER_SUPPLY_QC_THERMAL_BALANCE_DISABLE
+					| POWER_SUPPLY_QC_INOV_THERMAL_DISABLE;
+		if (chg->hw_connector_mitigation)
+			val->intval |= POWER_SUPPLY_QC_CTM_DISABLE;
+		break;
 	default:
 		pr_err("get prop %d is not supported in usb\n", psp);
 		rc = -EINVAL;
@@ -605,12 +764,6 @@
 	struct smb_charger *chg = &chip->chg;
 	int rc = 0;
 
-	mutex_lock(&chg->lock);
-	if (!chg->typec_present) {
-		rc = -EINVAL;
-		goto unlock;
-	}
-
 	switch (psp) {
 	case POWER_SUPPLY_PROP_PD_CURRENT_MAX:
 		rc = smblib_set_prop_pd_current_max(chg, val);
@@ -652,8 +805,6 @@
 		break;
 	}
 
-unlock:
-	mutex_unlock(&chg->lock);
 	return rc;
 }
 
@@ -898,6 +1049,10 @@
 					pr_err("Failed to force 5V\n");
 				else
 					chg->pulse_cnt = 0;
+			} else {
+				/* USB absent & flash not-active - vote 100mA */
+				vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER,
+							true, SDP_100_MA);
 			}
 
 			pr_debug("flash active VBUS 5V restriction %s\n",
@@ -1077,6 +1232,7 @@
 	POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX,
 	POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
 	POWER_SUPPLY_PROP_CHARGE_COUNTER,
+	POWER_SUPPLY_PROP_CYCLE_COUNT,
 	POWER_SUPPLY_PROP_RECHARGE_SOC,
 };
 
@@ -1171,6 +1327,9 @@
 	case POWER_SUPPLY_PROP_CHARGE_COUNTER:
 		rc = smblib_get_prop_batt_charge_counter(chg, val);
 		break;
+	case POWER_SUPPLY_PROP_CYCLE_COUNT:
+		rc = smblib_get_prop_batt_cycle_count(chg, val);
+		break;
 	case POWER_SUPPLY_PROP_RECHARGE_SOC:
 		val->intval = chg->auto_recharge_soc;
 		break;
@@ -1247,7 +1406,7 @@
 		rc = smblib_set_prop_ship_mode(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_RERUN_AICL:
-		rc = smblib_rerun_aicl(chg);
+		rc = smblib_run_aicl(chg, RERUN_AICL);
 		break;
 	case POWER_SUPPLY_PROP_DP_DM:
 		if (!chg->flash_active)
@@ -1424,6 +1583,42 @@
 static int smb5_configure_typec(struct smb_charger *chg)
 {
 	int rc;
+	u8 val = 0;
+
+	rc = smblib_read(chg, LEGACY_CABLE_STATUS_REG, &val);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't read Legacy status rc=%d\n", rc);
+		return rc;
+	}
+	/*
+	 * If Legacy cable is detected re-trigger Legacy detection
+	 * by disabling/enabling typeC mode.
+	 */
+	if (val & TYPEC_LEGACY_CABLE_STATUS_BIT) {
+		rc = smblib_masked_write(chg, TYPE_C_MODE_CFG_REG,
+				TYPEC_DISABLE_CMD_BIT, TYPEC_DISABLE_CMD_BIT);
+		if (rc < 0) {
+			dev_err(chg->dev, "Couldn't disable TYPEC rc=%d\n", rc);
+			return rc;
+		}
+
+		/* delay before enabling typeC */
+		msleep(500);
+
+		rc = smblib_masked_write(chg, TYPE_C_MODE_CFG_REG,
+				TYPEC_DISABLE_CMD_BIT, 0);
+		if (rc < 0) {
+			dev_err(chg->dev, "Couldn't enable TYPEC rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	/* disable apsd */
+	rc = smblib_configure_hvdcp_apsd(chg, false);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't disable APSD rc=%d\n", rc);
+		return rc;
+	}
 
 	rc = smblib_write(chg, TYPE_C_INTERRUPT_EN_CFG_1_REG,
 				TYPEC_CCOUT_DETACH_INT_EN_BIT |
@@ -1442,16 +1637,28 @@
 		return rc;
 	}
 
-	/* configure VCONN for software control */
-	rc = smblib_masked_write(chg, TYPE_C_VCONN_CONTROL_REG,
-				 VCONN_EN_SRC_BIT | VCONN_EN_VALUE_BIT,
-				 VCONN_EN_SRC_BIT);
+	rc = smblib_masked_write(chg, TYPE_C_MODE_CFG_REG,
+				EN_TRY_SNK_BIT, EN_TRY_SNK_BIT);
 	if (rc < 0) {
 		dev_err(chg->dev,
-			"Couldn't configure VCONN for SW control rc=%d\n", rc);
+			"Couldn't enable try.snk rc=%d\n", rc);
 		return rc;
 	}
 
+	/* Keep VCONN in h/w controlled mode for PMI632 */
+	if (chg->smb_version != PMI632_SUBTYPE) {
+		/* configure VCONN for software control */
+		rc = smblib_masked_write(chg, TYPE_C_VCONN_CONTROL_REG,
+				 VCONN_EN_SRC_BIT | VCONN_EN_VALUE_BIT,
+				 VCONN_EN_SRC_BIT);
+		if (rc < 0) {
+			dev_err(chg->dev,
+				"Couldn't configure VCONN for SW control rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
 	return rc;
 }
 
@@ -1459,14 +1666,6 @@
 {
 	int rc;
 
-	/* configure micro USB mode */
-	rc = smblib_masked_write(chg, TYPEC_U_USB_CFG_REG,
-			EN_MICRO_USB_MODE_BIT, EN_MICRO_USB_MODE_BIT);
-	if (rc < 0) {
-		dev_err(chg->dev, "Couldn't enable micro USB mode rc=%d\n", rc);
-		return rc;
-	}
-
 	rc = smblib_masked_write(chg, TYPE_C_INTERRUPT_EN_CFG_2_REG,
 					MICRO_USB_STATE_CHANGE_INT_EN_BIT,
 					MICRO_USB_STATE_CHANGE_INT_EN_BIT);
@@ -1479,6 +1678,42 @@
 	return rc;
 }
 
+static int smb5_configure_mitigation(struct smb_charger *chg)
+{
+	int rc;
+	u8 chan = 0;
+
+	if (!chg->hw_die_temp_mitigation && !chg->hw_connector_mitigation)
+		return 0;
+
+	if (chg->hw_die_temp_mitigation) {
+		rc = smblib_write(chg, MISC_THERMREG_SRC_CFG_REG,
+				THERMREG_CONNECTOR_ADC_SRC_EN_BIT
+				| THERMREG_DIE_ADC_SRC_EN_BIT
+				| THERMREG_DIE_CMP_SRC_EN_BIT);
+		if (rc < 0) {
+			dev_err(chg->dev,
+				"Couldn't configure THERM_SRC reg rc=%d\n", rc);
+			return rc;
+		}
+
+		chan = DIE_TEMP_CHANNEL_EN_BIT;
+	}
+
+	if (chg->hw_connector_mitigation)
+		chan |= CONN_THM_CHANNEL_EN_BIT;
+
+	rc = smblib_masked_write(chg, BATIF_ADC_CHANNEL_EN_REG,
+			CONN_THM_CHANNEL_EN_BIT | DIE_TEMP_CHANNEL_EN_BIT,
+			chan);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't enable ADC channel rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
 static int smb5_init_hw(struct smb5 *chip)
 {
 	struct smb_charger *chg = &chip->chg;
@@ -1498,9 +1733,14 @@
 
 	smblib_get_charge_param(chg, &chg->param.usb_icl,
 				&chg->default_icl_ua);
+	smblib_get_charge_param(chg, &chg->param.aicl_5v_threshold,
+				&chg->default_aicl_5v_threshold_mv);
+	chg->aicl_5v_threshold_mv = chg->default_aicl_5v_threshold_mv;
+	smblib_get_charge_param(chg, &chg->param.aicl_cont_threshold,
+				&chg->default_aicl_cont_threshold_mv);
+	chg->aicl_cont_threshold_mv = chg->default_aicl_cont_threshold_mv;
 
 	/* Use SW based VBUS control, disable HW autonomous mode */
-	/* TODO: auth can be enabled through vote based on APSD flow */
 	rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
 		HVDCP_AUTH_ALG_EN_CFG_BIT | HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT,
 		HVDCP_AUTH_ALG_EN_CFG_BIT);
@@ -1537,18 +1777,61 @@
 		type = !!(val & EN_MICRO_USB_MODE_BIT);
 	}
 
-	chg->connector_type = type ? POWER_SUPPLY_CONNECTOR_MICRO_USB
-					: POWER_SUPPLY_CONNECTOR_TYPEC;
 	pr_debug("Connector type=%s\n", type ? "Micro USB" : "TypeC");
 
+	if (type) {
+		chg->connector_type = POWER_SUPPLY_CONNECTOR_MICRO_USB;
+		rc = smb5_configure_micro_usb(chg);
+	} else {
+		chg->connector_type = POWER_SUPPLY_CONNECTOR_TYPEC;
+		rc = smb5_configure_typec(chg);
+	}
+	if (rc < 0) {
+		dev_err(chg->dev,
+			"Couldn't configure TypeC/micro-USB mode rc=%d\n", rc);
+		return rc;
+	}
+
 	/*
 	 * PMI632 based hw init:
+	 * - Enable STAT pin function on SMB_EN
+	 * - Rerun APSD to ensure proper charger detection if device
+	 *   boots with charger connected.
 	 * - Initialize flash module for PMI632
 	 */
-	if (chg->smb_version == PMI632_SUBTYPE)
-		schgm_flash_init(chg);
+	if (chg->smb_version == PMI632_SUBTYPE) {
+		rc = smblib_masked_write(chg, MISC_SMB_EN_CMD_REG,
+					EN_STAT_CMD_BIT, EN_STAT_CMD_BIT);
+		if (rc < 0) {
+			dev_err(chg->dev, "Couldn't configure SMB_EN rc=%d\n",
+					rc);
+			return rc;
+		}
 
-	smblib_rerun_apsd_if_required(chg);
+		schgm_flash_init(chg);
+		smblib_rerun_apsd_if_required(chg);
+	}
+
+	/* clear the ICL override if it is set */
+	rc = smblib_icl_override(chg, false);
+	if (rc < 0) {
+		pr_err("Couldn't disable ICL override rc=%d\n", rc);
+		return rc;
+	}
+
+	/* set OTG current limit */
+	rc = smblib_set_charge_param(chg, &chg->param.otg_cl, chg->otg_cl_ua);
+	if (rc < 0) {
+		pr_err("Couldn't set otg current limit rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure temperature mitigation */
+	rc = smb5_configure_mitigation(chg);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't configure mitigation rc=%d\n", rc);
+		return rc;
+	}
 
 	/* vote 0mA on usb_icl for non battery platforms */
 	vote(chg->usb_icl_votable,
@@ -1565,12 +1848,6 @@
 	vote(chg->fv_votable,
 		BATT_PROFILE_VOTER, chg->batt_profile_fv_uv > 0,
 		chg->batt_profile_fv_uv);
-	vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER,
-			true, 0);
-	vote(chg->pd_disallowed_votable_indirect, APSD_VOTER,
-			true, 0);
-	vote(chg->pd_disallowed_votable_indirect, MICRO_USB_VOTER,
-		chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB, 0);
 
 	/* Some h/w limit maximum supported ICL */
 	vote(chg->usb_icl_votable, HW_LIMIT_VOTER,
@@ -1578,13 +1855,15 @@
 
 	/*
 	 * AICL configuration:
-	 * start from min and AICL ADC disable
+	 * AICL ADC disable
 	 */
-	rc = smblib_masked_write(chg, USBIN_AICL_OPTIONS_CFG_REG,
+	if (chg->smb_version != PMI632_SUBTYPE) {
+		rc = smblib_masked_write(chg, USBIN_AICL_OPTIONS_CFG_REG,
 				USBIN_AICL_ADC_EN_BIT, 0);
-	if (rc < 0) {
-		dev_err(chg->dev, "Couldn't configure AICL rc=%d\n", rc);
-		return rc;
+		if (rc < 0) {
+			dev_err(chg->dev, "Couldn't config AICL rc=%d\n", rc);
+			return rc;
+		}
 	}
 
 	/* enable the charging path */
@@ -1594,16 +1873,6 @@
 		return rc;
 	}
 
-	if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
-		rc = smb5_configure_micro_usb(chg);
-	else
-		rc = smb5_configure_typec(chg);
-	if (rc < 0) {
-		dev_err(chg->dev,
-			"Couldn't configure TypeC/micro-USB mode rc=%d\n", rc);
-		return rc;
-	}
-
 	/* configure VBUS for software control */
 	rc = smblib_masked_write(chg, DCDC_OTG_CFG_REG, OTG_EN_SRC_CFG_BIT, 0);
 	if (rc < 0) {
@@ -1774,6 +2043,17 @@
 		return rc;
 	}
 
+	if (chg->connector_pull_up != -EINVAL) {
+		rc = smb5_configure_internal_pull(chg, CONN_THERM,
+				get_valid_pullup(chg->connector_pull_up));
+		if (rc < 0) {
+			dev_err(chg->dev,
+				"Couldn't configure CONN_THERM pull-up rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
 	return rc;
 }
 
@@ -1812,11 +2092,21 @@
 {
 	struct smb_irq_data irq_data = {chip, "determine-initial-status"};
 	struct smb_charger *chg = &chip->chg;
+	union power_supply_propval val;
+	int rc;
+
+	rc = smblib_get_prop_usb_present(chg, &val);
+	if (rc < 0) {
+		pr_err("Couldn't get usb present rc=%d\n", rc);
+		return rc;
+	}
+	chg->early_usb_attach = val.intval;
 
 	if (chg->bms_psy)
 		smblib_suspend_on_debug_battery(chg);
 
 	usb_plugin_irq_handler(0, &irq_data);
+	typec_attach_detach_irq_handler(0, &irq_data);
 	typec_state_change_irq_handler(0, &irq_data);
 	usb_source_change_irq_handler(0, &irq_data);
 	chg_state_change_irq_handler(0, &irq_data);
@@ -1934,6 +2224,8 @@
 	[USBIN_UV_IRQ] = {
 		.name		= "usbin-uv",
 		.handler	= usbin_uv_irq_handler,
+		.wake		= true,
+		.storm_data	= {true, 3000, 5},
 	},
 	[USBIN_OV_IRQ] = {
 		.name		= "usbin-ov",
@@ -2008,6 +2300,7 @@
 	},
 	[TYPEC_ATTACH_DETACH_IRQ] = {
 		.name		= "typec-attach-detach",
+		.handler	= typec_attach_detach_irq_handler,
 	},
 	[TYPEC_LEGACY_CABLE_DETECT_IRQ] = {
 		.name		= "typec-legacy-cable-detect",
@@ -2303,6 +2596,7 @@
 	chg = &chip->chg;
 	chg->dev = &pdev->dev;
 	chg->debug_mask = &__debug_mask;
+	chg->pd_disabled = &__pd_disabled;
 	chg->weak_chg_icl_ua = &__weak_chg_icl_ua;
 	chg->mode = PARALLEL_MASTER;
 	chg->irq_info = smb5_irqs;
@@ -2315,12 +2609,6 @@
 		return -EINVAL;
 	}
 
-	rc = smb5_parse_dt(chip);
-	if (rc < 0) {
-		pr_err("Couldn't parse device tree rc=%d\n", rc);
-		return rc;
-	}
-
 	rc = smb5_chg_config_init(chip);
 	if (rc < 0) {
 		if (rc != -EPROBE_DEFER)
@@ -2328,6 +2616,12 @@
 		return rc;
 	}
 
+	rc = smb5_parse_dt(chip);
+	if (rc < 0) {
+		pr_err("Couldn't parse device tree rc=%d\n", rc);
+		return rc;
+	}
+
 	rc = smblib_init(chg);
 	if (rc < 0) {
 		pr_err("Smblib_init failed rc=%d\n", rc);
@@ -2452,6 +2746,10 @@
 	struct smb5 *chip = platform_get_drvdata(pdev);
 	struct smb_charger *chg = &chip->chg;
 
+	/* force enable APSD */
+	smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
+				BC1P2_SRC_DETECT_BIT, BC1P2_SRC_DETECT_BIT);
+
 	smb5_free_interrupts(chg);
 	smblib_deinit(chg);
 	platform_set_drvdata(pdev, NULL);
diff --git a/drivers/power/supply/qcom/qpnp-smbcharger.c b/drivers/power/supply/qcom/qpnp-smbcharger.c
index 9b5c25f..71b8b8b 100644
--- a/drivers/power/supply/qcom/qpnp-smbcharger.c
+++ b/drivers/power/supply/qcom/qpnp-smbcharger.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2016, 2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -632,6 +632,18 @@
 	mutex_unlock(&chip->pm_lock);
 };
 
+static bool is_bms_psy_present(struct smbchg_chip *chip)
+{
+	if (chip->bms_psy)
+		return true;
+
+	if (chip->bms_psy_name)
+		chip->bms_psy = power_supply_get_by_name(
+					(char *)chip->bms_psy_name);
+
+	return chip->bms_psy ? true : false;
+}
+
 enum pwr_path_type {
 	UNKNOWN = 0,
 	PWR_PATH_BATTERY = 1,
@@ -1520,6 +1532,47 @@
 	return chip->parallel.psy;
 }
 
+static int smbchg_request_dpdm(struct smbchg_chip *chip, bool enable)
+{
+	int rc = 0;
+
+	/* fetch the DPDM regulator */
+	if (!chip->dpdm_reg && of_get_property(chip->dev->of_node,
+				"dpdm-supply", NULL)) {
+		chip->dpdm_reg = devm_regulator_get(chip->dev, "dpdm");
+		if (IS_ERR(chip->dpdm_reg)) {
+			rc = PTR_ERR(chip->dpdm_reg);
+			dev_err(chip->dev, "Couldn't get dpdm regulator rc=%d\n",
+					rc);
+			chip->dpdm_reg = NULL;
+			return rc;
+		}
+	}
+
+	if (!chip->dpdm_reg)
+		return -ENODEV;
+
+	if (enable) {
+		if (!regulator_is_enabled(chip->dpdm_reg)) {
+			pr_smb(PR_STATUS, "enabling DPDM regulator\n");
+			rc = regulator_enable(chip->dpdm_reg);
+			if (rc < 0)
+				dev_err(chip->dev, "Couldn't enable dpdm regulator rc=%d\n",
+					rc);
+		}
+	} else {
+		if (regulator_is_enabled(chip->dpdm_reg)) {
+			pr_smb(PR_STATUS, "disabling DPDM regulator\n");
+			rc = regulator_disable(chip->dpdm_reg);
+			if (rc < 0)
+				dev_err(chip->dev, "Couldn't disable dpdm regulator rc=%d\n",
+					rc);
+		}
+	}
+
+	return rc;
+}
+
 static void smbchg_usb_update_online_work(struct work_struct *work)
 {
 	struct smbchg_chip *chip = container_of(work,
@@ -3707,17 +3760,11 @@
 static void smbchg_external_power_changed(struct power_supply *psy)
 {
 	struct smbchg_chip *chip = power_supply_get_drvdata(psy);
-	union power_supply_propval prop = {0,};
-	int rc, current_limit = 0, soc;
-	enum power_supply_type usb_supply_type;
-	char *usb_type_name = "null";
-
-	if (chip->bms_psy_name)
-		chip->bms_psy =
-			power_supply_get_by_name((char *)chip->bms_psy_name);
+	int rc, soc;
 
 	smbchg_aicl_deglitch_wa_check(chip);
-	if (chip->bms_psy) {
+
+	if (is_bms_psy_present(chip)) {
 		check_battery_type(chip);
 		soc = get_prop_batt_capacity(chip);
 		if (chip->previous_soc != soc) {
@@ -3732,37 +3779,8 @@
 									rc);
 	}
 
-	rc = power_supply_get_property(chip->usb_psy,
-				POWER_SUPPLY_PROP_CHARGING_ENABLED, &prop);
-	if (rc == 0)
-		vote(chip->usb_suspend_votable, POWER_SUPPLY_EN_VOTER,
-				!prop.intval, 0);
-
-	current_limit = chip->usb_current_max / 1000;
-
-	/* Override if type-c charger used */
-	if (chip->typec_current_ma > 500 &&
-			current_limit < chip->typec_current_ma)
-		current_limit = chip->typec_current_ma;
-
-	read_usb_type(chip, &usb_type_name, &usb_supply_type);
-
-	if (usb_supply_type != POWER_SUPPLY_TYPE_USB)
-		goto  skip_current_for_non_sdp;
-
-	pr_smb(PR_MISC, "usb type = %s current_limit = %d\n",
-			usb_type_name, current_limit);
-
-	rc = vote(chip->usb_icl_votable, PSY_ICL_VOTER, true,
-				current_limit);
-	if (rc < 0)
-		pr_err("Couldn't update USB PSY ICL vote rc=%d\n", rc);
-
-skip_current_for_non_sdp:
+	/* adjust vfloat */
 	smbchg_vfloat_adjust_check(chip);
-
-	if (chip->batt_psy)
-		power_supply_changed(chip->batt_psy);
 }
 
 static int smbchg_otg_regulator_enable(struct regulator_dev *rdev)
@@ -4590,8 +4608,11 @@
 	if (!rc && !(reg & USBIN_UV_BIT) && !(reg & USBIN_SRC_DET_BIT)) {
 		pr_smb(PR_MISC, "overwriting state = %d with %d\n",
 				state, POWER_SUPPLY_DP_DM_DPF_DMF);
-		if (chip->dpdm_reg && !regulator_is_enabled(chip->dpdm_reg))
-			return regulator_enable(chip->dpdm_reg);
+		rc = smbchg_request_dpdm(chip, true);
+		if (rc < 0) {
+			pr_err("Couldn't enable DP/DM for pulsing rc=%d\n", rc);
+			return rc;
+		}
 	}
 	pr_smb(PR_MISC, "setting usb psy dp dm = %d\n", state);
 	pval.intval = state;
@@ -4697,8 +4718,7 @@
 	smbchg_relax(chip, PM_DETECT_HVDCP);
 	smbchg_change_usb_supply_type(chip, POWER_SUPPLY_TYPE_UNKNOWN);
 	extcon_set_cable_state_(chip->extcon, EXTCON_USB, chip->usb_present);
-	if (chip->dpdm_reg)
-		regulator_disable(chip->dpdm_reg);
+	smbchg_request_dpdm(chip, false);
 	schedule_work(&chip->usb_set_online_work);
 
 	pr_smb(PR_MISC, "setting usb psy health UNKNOWN\n");
@@ -5248,8 +5268,7 @@
 {
 	int rc = 0;
 
-	if (chip->dpdm_reg && !regulator_is_enabled(chip->dpdm_reg))
-		rc = regulator_enable(chip->dpdm_reg);
+	rc = smbchg_request_dpdm(chip, true);
 	if (rc < 0) {
 		pr_err("Couldn't enable DP/DM for pulsing rc=%d\n", rc);
 		return rc;
@@ -5712,6 +5731,21 @@
 	}
 }
 
+static int smbchg_set_sdp_current(struct smbchg_chip *chip, int current_ma)
+{
+	if (chip->usb_supply_type == POWER_SUPPLY_TYPE_USB) {
+		/* Override if type-c charger used */
+		if (chip->typec_current_ma > 500 &&
+				current_ma < chip->typec_current_ma) {
+			current_ma = chip->typec_current_ma;
+		}
+		pr_smb(PR_MISC, "from USB current_ma = %d\n", current_ma);
+		vote(chip->usb_icl_votable, PSY_ICL_VOTER, true, current_ma);
+	}
+
+	return 0;
+}
+
 static int smbchg_usb_get_property(struct power_supply *psy,
 				  enum power_supply_property psp,
 				  union power_supply_propval *val)
@@ -5720,7 +5754,12 @@
 
 	switch (psp) {
 	case POWER_SUPPLY_PROP_CURRENT_MAX:
-		val->intval = chip->usb_current_max;
+	case POWER_SUPPLY_PROP_SDP_CURRENT_MAX:
+		if (chip->usb_icl_votable)
+			val->intval = get_client_vote(chip->usb_icl_votable,
+						PSY_ICL_VOTER) * 1000;
+		else
+			val->intval = 0;
 		break;
 	case POWER_SUPPLY_PROP_PRESENT:
 		val->intval = chip->usb_present;
@@ -5750,17 +5789,16 @@
 	struct smbchg_chip *chip = power_supply_get_drvdata(psy);
 
 	switch (psp) {
-	case POWER_SUPPLY_PROP_CURRENT_MAX:
-		chip->usb_current_max = val->intval;
-		break;
 	case POWER_SUPPLY_PROP_ONLINE:
 		chip->usb_online = val->intval;
 		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+	case POWER_SUPPLY_PROP_SDP_CURRENT_MAX:
+		smbchg_set_sdp_current(chip, val->intval / 1000);
+		break;
 	default:
 		return -EINVAL;
 	}
 
-	power_supply_changed(psy);
 	return 0;
 }
 
@@ -5770,6 +5808,7 @@
 {
 	switch (psp) {
 	case POWER_SUPPLY_PROP_CURRENT_MAX:
+	case POWER_SUPPLY_PROP_SDP_CURRENT_MAX:
 		return 1;
 	default:
 		break;
@@ -5791,6 +5830,7 @@
 	POWER_SUPPLY_PROP_TYPE,
 	POWER_SUPPLY_PROP_REAL_TYPE,
 	POWER_SUPPLY_PROP_HEALTH,
+	POWER_SUPPLY_PROP_SDP_CURRENT_MAX,
 };
 
 #define CHARGE_OUTPUT_VTG_RATIO		840
@@ -6481,8 +6521,7 @@
 	 */
 	if (!(reg & USBIN_UV_BIT) && !(reg & USBIN_SRC_DET_BIT)) {
 		pr_smb(PR_MISC, "setting usb dp=f dm=f\n");
-		if (chip->dpdm_reg && !regulator_is_enabled(chip->dpdm_reg))
-			rc = regulator_enable(chip->dpdm_reg);
+		rc = smbchg_request_dpdm(chip, true);
 		if (rc < 0) {
 			pr_err("Couldn't enable DP/DM for pulsing rc=%d\n", rc);
 			return rc;
@@ -6731,15 +6770,8 @@
 	chip->dc_present = is_dc_present(chip);
 
 	if (chip->usb_present) {
-		int rc = 0;
-
 		pr_smb(PR_MISC, "setting usb dp=f dm=f\n");
-		if (chip->dpdm_reg && !regulator_is_enabled(chip->dpdm_reg))
-			rc = regulator_enable(chip->dpdm_reg);
-		if (rc < 0) {
-			pr_err("Couldn't enable DP/DM for pulsing rc=%d\n", rc);
-			return rc;
-		}
+		smbchg_request_dpdm(chip, true);
 		handle_usb_insertion(chip);
 	} else {
 		handle_usb_removal(chip);
@@ -8372,14 +8404,6 @@
 		goto votables_cleanup;
 	}
 
-	if (of_find_property(chip->dev->of_node, "dpdm-supply", NULL)) {
-		chip->dpdm_reg = devm_regulator_get(chip->dev, "dpdm");
-		if (IS_ERR(chip->dpdm_reg)) {
-			rc = PTR_ERR(chip->dpdm_reg);
-			goto votables_cleanup;
-		}
-	}
-
 	rc = smbchg_hw_init(chip);
 	if (rc < 0) {
 		dev_err(&pdev->dev,
@@ -8488,6 +8512,8 @@
 out:
 	handle_usb_removal(chip);
 votables_cleanup:
+	if (chip->hvdcp_enable_votable)
+		destroy_votable(chip->hvdcp_enable_votable);
 	if (chip->aicl_deglitch_short_votable)
 		destroy_votable(chip->aicl_deglitch_short_votable);
 	if (chip->hw_aicl_rerun_enable_indirect_votable)
diff --git a/drivers/power/supply/qcom/schgm-flash.c b/drivers/power/supply/qcom/schgm-flash.c
index eed70d3..3bbbdbf 100644
--- a/drivers/power/supply/qcom/schgm-flash.c
+++ b/drivers/power/supply/qcom/schgm-flash.c
@@ -101,6 +101,11 @@
 	}
 }
 
+bool is_flash_active(struct smb_charger *chg)
+{
+	return chg->flash_active ? true : false;
+}
+
 int schgm_flash_get_vreg_ok(struct smb_charger *chg, int *val)
 {
 	int rc, vreg_state;
@@ -147,6 +152,29 @@
 	return 0;
 }
 
+void schgm_flash_torch_priority(struct smb_charger *chg, enum torch_mode mode)
+{
+	int rc;
+	u8 reg;
+
+	/*
+	 * If torch is configured in default BOOST mode, skip any update in the
+	 * mode configuration.
+	 */
+	if (chg->headroom_mode == FIXED_MODE)
+		return;
+
+	if ((mode != TORCH_BOOST_MODE) && (mode != TORCH_BUCK_MODE))
+		return;
+
+	reg = mode;
+	rc = smblib_masked_write(chg, SCHGM_TORCH_PRIORITY_CONTROL_REG,
+					TORCH_PRIORITY_CONTROL_BIT, reg);
+	if (rc < 0)
+		pr_err("Couldn't configure Torch priority control rc=%d\n",
+				rc);
+}
+
 int schgm_flash_init(struct smb_charger *chg)
 {
 	int rc;
@@ -190,7 +218,7 @@
 
 		reg = (chg->headroom_mode == FIXED_MODE)
 					? TORCH_PRIORITY_CONTROL_BIT : 0;
-		rc = smblib_write(chg, SCHGM_TORCH_PRIORITY_CONTROL, reg);
+		rc = smblib_write(chg, SCHGM_TORCH_PRIORITY_CONTROL_REG, reg);
 		if (rc < 0) {
 			pr_err("Couldn't force 5V boost in torch mode rc=%d\n",
 					rc);
diff --git a/drivers/power/supply/qcom/schgm-flash.h b/drivers/power/supply/qcom/schgm-flash.h
index b6fff6c..aaa5932 100644
--- a/drivers/power/supply/qcom/schgm-flash.h
+++ b/drivers/power/supply/qcom/schgm-flash.h
@@ -37,7 +37,7 @@
 #define SCHGM_FLASH_CONTROL_REG			(SCHGM_FLASH_BASE + 0x60)
 #define SOC_LOW_FOR_FLASH_EN_BIT		BIT(7)
 
-#define SCHGM_TORCH_PRIORITY_CONTROL		(SCHGM_FLASH_BASE + 0x63)
+#define SCHGM_TORCH_PRIORITY_CONTROL_REG	(SCHGM_FLASH_BASE + 0x63)
 #define TORCH_PRIORITY_CONTROL_BIT		BIT(0)
 
 #define SCHGM_SOC_BASED_FLASH_DERATE_TH_CFG_REG	(SCHGM_FLASH_BASE + 0x67)
@@ -45,8 +45,15 @@
 #define SCHGM_SOC_BASED_FLASH_DISABLE_TH_CFG_REG \
 						(SCHGM_FLASH_BASE + 0x68)
 
+enum torch_mode {
+	TORCH_BUCK_MODE = 0,
+	TORCH_BOOST_MODE,
+};
+
 int schgm_flash_get_vreg_ok(struct smb_charger *chg, int *val);
+void schgm_flash_torch_priority(struct smb_charger *chg, enum torch_mode mode);
 int schgm_flash_init(struct smb_charger *chg);
+bool is_flash_active(struct smb_charger *chg);
 
 irqreturn_t schgm_flash_default_irq_handler(int irq, void *data);
 irqreturn_t schgm_flash_ilim2_irq_handler(int irq, void *data);
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 39005f6..5b94ff2 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -1908,7 +1908,8 @@
 		   stat);
 
 	if (stat & CHARGER_ERROR_STATUS_BAT_OV_BIT) {
-		rc = smblib_get_prop_batt_voltage_now(chg, &pval);
+		rc = smblib_get_prop_from_bms(chg,
+				POWER_SUPPLY_PROP_VOLTAGE_NOW, &pval);
 		if (!rc) {
 			/*
 			 * If Vbatt is within 40mV above Vfloat, then don't
@@ -1973,45 +1974,6 @@
 	return 0;
 }
 
-int smblib_get_prop_batt_voltage_now(struct smb_charger *chg,
-				     union power_supply_propval *val)
-{
-	int rc;
-
-	if (!chg->bms_psy)
-		return -EINVAL;
-
-	rc = power_supply_get_property(chg->bms_psy,
-				       POWER_SUPPLY_PROP_VOLTAGE_NOW, val);
-	return rc;
-}
-
-int smblib_get_prop_batt_current_now(struct smb_charger *chg,
-				     union power_supply_propval *val)
-{
-	int rc;
-
-	if (!chg->bms_psy)
-		return -EINVAL;
-
-	rc = power_supply_get_property(chg->bms_psy,
-				       POWER_SUPPLY_PROP_CURRENT_NOW, val);
-	return rc;
-}
-
-int smblib_get_prop_batt_temp(struct smb_charger *chg,
-			      union power_supply_propval *val)
-{
-	int rc;
-
-	if (!chg->bms_psy)
-		return -EINVAL;
-
-	rc = power_supply_get_property(chg->bms_psy,
-				       POWER_SUPPLY_PROP_TEMP, val);
-	return rc;
-}
-
 int smblib_get_prop_batt_charge_done(struct smb_charger *chg,
 					union power_supply_propval *val)
 {
@@ -2047,16 +2009,17 @@
 	return 0;
 }
 
-int smblib_get_prop_batt_charge_counter(struct smb_charger *chg,
-				     union power_supply_propval *val)
+int smblib_get_prop_from_bms(struct smb_charger *chg,
+				enum power_supply_property psp,
+				union power_supply_propval *val)
 {
 	int rc;
 
 	if (!chg->bms_psy)
 		return -EINVAL;
 
-	rc = power_supply_get_property(chg->bms_psy,
-				       POWER_SUPPLY_PROP_CHARGE_COUNTER, val);
+	rc = power_supply_get_property(chg->bms_psy, psp, val);
+
 	return rc;
 }
 
@@ -2452,6 +2415,28 @@
 	switch (chg->real_charger_type) {
 	case POWER_SUPPLY_TYPE_USB_HVDCP:
 	case POWER_SUPPLY_TYPE_USB_HVDCP_3:
+		if (chg->smb_version == PM660_SUBTYPE)
+			val->intval = MICRO_9V;
+		else
+			val->intval = MICRO_12V;
+		break;
+	case POWER_SUPPLY_TYPE_USB_PD:
+		val->intval = chg->voltage_max_uv;
+		break;
+	default:
+		val->intval = MICRO_5V;
+		break;
+	}
+
+	return 0;
+}
+
+int smblib_get_prop_usb_voltage_max_design(struct smb_charger *chg,
+					union power_supply_propval *val)
+{
+	switch (chg->real_charger_type) {
+	case POWER_SUPPLY_TYPE_USB_HVDCP:
+	case POWER_SUPPLY_TYPE_USB_HVDCP_3:
 	case POWER_SUPPLY_TYPE_USB_PD:
 		if (chg->smb_version == PM660_SUBTYPE)
 			val->intval = MICRO_9V;
@@ -3445,6 +3430,9 @@
 	struct smb_irq_data *irq_data = data;
 	struct smb_charger *chg = irq_data->parent_data;
 	struct storm_watch *wdata;
+	const struct apsd_result *apsd = smblib_get_apsd_result(chg);
+	int rc;
+	u8 stat = 0, max_pulses = 0;
 
 	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
 	if (!chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data)
@@ -3452,6 +3440,46 @@
 
 	wdata = &chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data->storm_data;
 	reset_storm_count(wdata);
+
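+	/*
+	 * A non-compliant QC2 adapter may collapse USBIN when asked to switch
+	 * to 9V/12V. On the first such UV event, cap the HVDCP max pulse
+	 * count one step lower (12V->9V, 9V->5V) and rerun APSD so the next
+	 * negotiation stays within what the adapter can supply.
+	 */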
+	if (!chg->non_compliant_chg_detected &&
+			apsd->pst == POWER_SUPPLY_TYPE_USB_HVDCP) {
+		rc = smblib_read(chg, QC_CHANGE_STATUS_REG, &stat);
+		if (rc < 0)
+			smblib_err(chg,
+				"Couldn't read CHANGE_STATUS_REG rc=%d\n", rc);
+
+		if (stat & QC_5V_BIT)
+			return IRQ_HANDLED;
+
+		rc = smblib_read(chg, HVDCP_PULSE_COUNT_MAX_REG, &max_pulses);
+		if (rc < 0)
+			smblib_err(chg,
+				"Couldn't read QC2 max pulses rc=%d\n", rc);
+
+		chg->non_compliant_chg_detected = true;
+		chg->qc2_max_pulses = (max_pulses &
+				HVDCP_PULSE_COUNT_MAX_QC2_MASK);
+
+		if (stat & QC_12V_BIT) {
+			rc = smblib_masked_write(chg, HVDCP_PULSE_COUNT_MAX_REG,
+					HVDCP_PULSE_COUNT_MAX_QC2_MASK,
+					HVDCP_PULSE_COUNT_MAX_QC2_9V);
+			if (rc < 0)
+				smblib_err(chg, "Couldn't force max pulses to 9V rc=%d\n",
+						rc);
+
+		} else if (stat & QC_9V_BIT) {
+			rc = smblib_masked_write(chg, HVDCP_PULSE_COUNT_MAX_REG,
+					HVDCP_PULSE_COUNT_MAX_QC2_MASK,
+					HVDCP_PULSE_COUNT_MAX_QC2_5V);
+			if (rc < 0)
+				smblib_err(chg, "Couldn't force max pulses to 5V rc=%d\n",
+						rc);
+
+		}
+		smblib_rerun_apsd(chg);
+	}
+
 	return IRQ_HANDLED;
 }
 
@@ -4270,6 +4298,17 @@
 	if (rc < 0)
 		smblib_err(chg, "Couldn't set 120mS tCC debounce rc=%d\n", rc);
 
+	/* if non-compliant charger caused UV, restore original max pulses */
+	if (chg->non_compliant_chg_detected) {
+		rc = smblib_masked_write(chg, HVDCP_PULSE_COUNT_MAX_REG,
+				HVDCP_PULSE_COUNT_MAX_QC2_MASK,
+				chg->qc2_max_pulses);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't restore max pulses rc=%d\n",
+					rc);
+		chg->non_compliant_chg_detected = false;
+	}
+
 	/* enable APSD CC trigger for next insertion */
 	rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
 				APSD_START_ON_CC_BIT, APSD_START_ON_CC_BIT);
@@ -4718,6 +4757,7 @@
 {
 	int rc;
 	u8 stat4, stat5;
+	bool lock = false;
 	struct smb_charger *chg = container_of(work, struct smb_charger,
 						rdstd_cc2_detach_work);
 
@@ -4780,9 +4820,28 @@
 	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
 						EXIT_SNK_BASED_ON_CC_BIT, 0);
 	smblib_reg_block_restore(chg, cc2_detach_settings);
-	mutex_lock(&chg->lock);
+
+	/*
+	 * A deadlock can occur if this work is cancelled during pd_hard_reset
+	 * from smblib_cc2_sink_removal_exit(), which runs in the same lock
+	 * context that this work routine tries to acquire.
+	 * If pd_hard_reset is in progress, use mutex_trylock() instead of
+	 * mutex_lock() to avoid blocking on a mutex that is already held.
+	 */
+	if (chg->pd_hard_reset) {
+		if (mutex_trylock(&chg->lock))
+			lock = true;
+	} else {
+		mutex_lock(&chg->lock);
+		lock = true;
+	}
+
 	smblib_usb_typec_change(chg);
-	mutex_unlock(&chg->lock);
+
+	if (lock)
+		mutex_unlock(&chg->lock);
 	return;
 
 rerun:
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index dc8cbc7..3b8bc1f 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -353,6 +353,7 @@
 	bool			use_extcon;
 	bool			otg_present;
 	bool			is_audio_adapter;
+	bool			disable_stat_sw_override;
 
 	/* workaround flag */
 	u32			wa_flags;
@@ -361,6 +362,8 @@
 	bool			try_sink_active;
 	int			boost_current_ua;
 	int			temp_speed_reading_count;
+	int			qc2_max_pulses;
+	bool			non_compliant_chg_detected;
 	bool			fake_usb_insertion;
 
 	/* extcon for VBUS / ID notification to USB for uUSB */
@@ -444,14 +447,6 @@
 				union power_supply_propval *val);
 int smblib_get_prop_input_current_limited(struct smb_charger *chg,
 				union power_supply_propval *val);
-int smblib_get_prop_batt_voltage_now(struct smb_charger *chg,
-				union power_supply_propval *val);
-int smblib_get_prop_batt_current_now(struct smb_charger *chg,
-				union power_supply_propval *val);
-int smblib_get_prop_batt_temp(struct smb_charger *chg,
-				union power_supply_propval *val);
-int smblib_get_prop_batt_charge_counter(struct smb_charger *chg,
-				union power_supply_propval *val);
 int smblib_set_prop_input_suspend(struct smb_charger *chg,
 				const union power_supply_propval *val);
 int smblib_set_prop_batt_capacity(struct smb_charger *chg,
@@ -480,6 +475,8 @@
 				union power_supply_propval *val);
 int smblib_get_prop_usb_voltage_max(struct smb_charger *chg,
 				union power_supply_propval *val);
+int smblib_get_prop_usb_voltage_max_design(struct smb_charger *chg,
+				union power_supply_propval *val);
 int smblib_get_prop_usb_voltage_now(struct smb_charger *chg,
 				union power_supply_propval *val);
 int smblib_get_prop_usb_current_now(struct smb_charger *chg,
@@ -541,6 +538,9 @@
 int smblib_get_charge_current(struct smb_charger *chg, int *total_current_ua);
 int smblib_get_prop_pr_swap_in_progress(struct smb_charger *chg,
 				union power_supply_propval *val);
+int smblib_get_prop_from_bms(struct smb_charger *chg,
+				enum power_supply_property psp,
+				union power_supply_propval *val);
 int smblib_set_prop_pr_swap_in_progress(struct smb_charger *chg,
 				const union power_supply_propval *val);
 int smblib_stat_sw_override_cfg(struct smb_charger *chg, bool override);
diff --git a/drivers/power/supply/qcom/smb-reg.h b/drivers/power/supply/qcom/smb-reg.h
index d40d6fd..449d974 100644
--- a/drivers/power/supply/qcom/smb-reg.h
+++ b/drivers/power/supply/qcom/smb-reg.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -587,6 +587,15 @@
 #define EN_LEGACY_CABLE_DETECTION_BIT		BIT(1)
 #define ALLOW_PD_DRING_UFP_TCCDB_BIT		BIT(0)
 
+#define HVDCP_PULSE_COUNT_MAX_REG		(USBIN_BASE + 0x5B)
+#define HVDCP_PULSE_COUNT_MAX_QC2_MASK		GENMASK(7, 6)
+enum {
+	HVDCP_PULSE_COUNT_MAX_QC2_5V,
+	HVDCP_PULSE_COUNT_MAX_QC2_9V,
+	HVDCP_PULSE_COUNT_MAX_QC2_12V,
+	HVDCP_PULSE_COUNT_MAX_QC2_INVALID
+};
+
 #define USBIN_ADAPTER_ALLOW_CFG_REG		(USBIN_BASE + 0x60)
 #define USBIN_ADAPTER_ALLOW_MASK		GENMASK(3, 0)
 enum {
diff --git a/drivers/power/supply/qcom/smb1355-charger.c b/drivers/power/supply/qcom/smb1355-charger.c
index 327ae63..2671d6b 100644
--- a/drivers/power/supply/qcom/smb1355-charger.c
+++ b/drivers/power/supply/qcom/smb1355-charger.c
@@ -80,8 +80,12 @@
 #define BATIF_CFG_SMISC_BATID_REG		(BATIF_BASE + 0x73)
 #define CFG_SMISC_RBIAS_EXT_CTRL_BIT		BIT(2)
 
-#define SMB2CHGS_BATIF_ENG_SMISC_DIETEMP	(BATIF_BASE + 0xC0)
+#define SMB2CHG_BATIF_ENG_SMISC_DIETEMP	(BATIF_BASE + 0xC0)
 #define TDIE_COMPARATOR_THRESHOLD		GENMASK(5, 0)
+#define DIE_LOW_RANGE_BASE_DEGC			34
+#define DIE_LOW_RANGE_DELTA			16
+#define DIE_LOW_RANGE_MAX_DEGC			97
+#define DIE_LOW_RANGE_SHIFT			4
 
 #define BATIF_ENG_SCMISC_SPARE1_REG		(BATIF_BASE + 0xC2)
 #define EXT_BIAS_PIN_BIT			BIT(2)
@@ -91,13 +95,10 @@
 #define VALLEY_COMPARATOR_EN_BIT		BIT(0)
 
 #define TEMP_COMP_STATUS_REG			(MISC_BASE + 0x07)
-#define SKIN_TEMP_RST_HOT_BIT			BIT(6)
-#define SKIN_TEMP_UB_HOT_BIT			BIT(5)
-#define SKIN_TEMP_LB_HOT_BIT			BIT(4)
-#define DIE_TEMP_TSD_HOT_BIT			BIT(3)
-#define DIE_TEMP_RST_HOT_BIT			BIT(2)
-#define DIE_TEMP_UB_HOT_BIT			BIT(1)
-#define DIE_TEMP_LB_HOT_BIT			BIT(0)
+#define TEMP_RST_HOT_BIT			BIT(2)
+#define TEMP_UB_HOT_BIT				BIT(1)
+#define TEMP_LB_HOT_BIT				BIT(0)
+#define SKIN_TEMP_SHIFT				4
 
 #define MISC_RT_STS_REG				(MISC_BASE + 0x10)
 #define HARD_ILIMIT_RT_STS_BIT			BIT(5)
@@ -108,6 +109,9 @@
 #define BARK_BITE_WDOG_PET_REG			(MISC_BASE + 0x43)
 #define BARK_BITE_WDOG_PET_BIT			BIT(0)
 
+#define CLOCK_REQUEST_REG			(MISC_BASE + 0x44)
+#define CLOCK_REQUEST_CMD_BIT			BIT(0)
+
 #define WD_CFG_REG				(MISC_BASE + 0x51)
 #define WATCHDOG_TRIGGER_AFP_EN_BIT		BIT(7)
 #define BARK_WDOG_INT_EN_BIT			BIT(6)
@@ -220,6 +224,8 @@
 	bool	disable_ctm;
 	int	pl_mode;
 	int	pl_batfet_mode;
+	bool	hw_die_temp_mitigation;
+	u32	die_temp_threshold;
 };
 
 struct smb1355 {
@@ -247,8 +253,16 @@
 	struct votable		*irq_disable_votable;
 };
 
+enum {
+	CONNECTOR_TEMP = 0,
+	DIE_TEMP,
+};
+
 static bool is_secure(struct smb1355 *chip, int addr)
 {
+	if (addr == CLOCK_REQUEST_REG)
+		return true;
+
 	/* assume everything above 0xA0 is secure */
 	return (addr & 0xFF) >= 0xA0;
 }
@@ -265,6 +279,25 @@
 	return rc;
 }
 
+static int smb1355_masked_force_write(struct smb1355 *chip, u16 addr, u8 mask,
+					u8 val)
+{
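+	/*
+	 * Unlike a plain masked write, regmap_write_bits() below forces the
+	 * write even when the register already holds the requested value;
+	 * needed for command registers such as CLOCK_REQUEST_REG.
+	 */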
+	int rc;
+
+	mutex_lock(&chip->write_lock);
+	if (is_secure(chip, addr)) {
+		rc = regmap_write(chip->regmap, (addr & 0xFF00) | 0xD0, 0xA5);
+		if (rc < 0)
+			goto unlock;
+	}
+
+	rc = regmap_write_bits(chip->regmap, addr, mask, val);
+
+unlock:
+	mutex_unlock(&chip->write_lock);
+	return rc;
+}
+
 static int smb1355_masked_write(struct smb1355 *chip, u16 addr, u8 mask, u8 val)
 {
 	int rc;
@@ -354,8 +387,7 @@
 	u8 temp_stat;
 
 	for (i = 0; i < BIT(5); i++) {
-		rc = smb1355_masked_write(chip,
-				SMB2CHGS_BATIF_ENG_SMISC_DIETEMP,
+		rc = smb1355_masked_write(chip, SMB2CHG_BATIF_ENG_SMISC_DIETEMP,
 				TDIE_COMPARATOR_THRESHOLD, i);
 		if (rc < 0) {
 			pr_err("Couldn't set temp comp threshold rc=%d\n", rc);
@@ -374,7 +406,7 @@
 			continue;
 		}
 
-		if (!(temp_stat & DIE_TEMP_UB_HOT_BIT)) {
+		if (!(temp_stat & TEMP_UB_HOT_BIT)) {
 			/* found the temp */
 			break;
 		}
@@ -440,6 +472,7 @@
 	return 0;
 }
 
+#define DEFAULT_DIE_TEMP_LOW_THRESHOLD		90
 static int smb1355_parse_dt(struct smb1355 *chip)
 {
 	struct device_node *node = chip->dev->of_node;
@@ -470,6 +503,15 @@
 	if (of_property_read_bool(node, "qcom,stacked-batfet"))
 		chip->dt.pl_batfet_mode = POWER_SUPPLY_PL_STACKED_BATFET;
 
+	chip->dt.hw_die_temp_mitigation = of_property_read_bool(node,
+					"qcom,hw-die-temp-mitigation");
+
+	chip->dt.die_temp_threshold = DEFAULT_DIE_TEMP_LOW_THRESHOLD;
+	of_property_read_u32(node, "qcom,die-temp-threshold-degc",
+				&chip->dt.die_temp_threshold);
+	if (chip->dt.die_temp_threshold > DIE_LOW_RANGE_MAX_DEGC)
+		chip->dt.die_temp_threshold = DIE_LOW_RANGE_MAX_DEGC;
+
 	return 0;
 }
 
@@ -494,6 +536,8 @@
 	POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
 	POWER_SUPPLY_PROP_MIN_ICL,
 	POWER_SUPPLY_PROP_CURRENT_MAX,
+	POWER_SUPPLY_PROP_SET_SHIP_MODE,
+	POWER_SUPPLY_PROP_DIE_HEALTH,
 };
 
 static int smb1355_get_prop_batt_charge_type(struct smb1355 *chip,
@@ -520,10 +564,13 @@
 	return rc;
 }
 
-static int smb1355_get_prop_connector_health(struct smb1355 *chip)
+static int smb1355_get_prop_health(struct smb1355 *chip, int type)
 {
 	u8 temp;
-	int rc;
+	int rc, shift;
+
+	/* Connector-temp uses skin-temp configuration */
+	shift = (type == CONNECTOR_TEMP) ? SKIN_TEMP_SHIFT : 0;
 
 	rc = smb1355_read(chip, TEMP_COMP_STATUS_REG, &temp);
 	if (rc < 0) {
@@ -531,13 +578,13 @@
 		return POWER_SUPPLY_HEALTH_UNKNOWN;
 	}
 
-	if (temp & SKIN_TEMP_RST_HOT_BIT)
+	if (temp & (TEMP_RST_HOT_BIT << shift))
 		return POWER_SUPPLY_HEALTH_OVERHEAT;
 
-	if (temp & SKIN_TEMP_UB_HOT_BIT)
+	if (temp & (TEMP_UB_HOT_BIT << shift))
 		return POWER_SUPPLY_HEALTH_HOT;
 
-	if (temp & SKIN_TEMP_LB_HOT_BIT)
+	if (temp & (TEMP_LB_HOT_BIT << shift))
 		return POWER_SUPPLY_HEALTH_WARM;
 
 	return POWER_SUPPLY_HEALTH_COOL;
@@ -588,7 +635,17 @@
 		val->intval = chip->die_temp_deciDegC;
 		break;
 	case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
-		rc = smb1355_get_prop_charger_temp_max(chip, val);
+		/*
+		 * In case of h/w controlled die_temp mitigation,
+		 * die_temp/die_temp_max can not be reported as this
+		 * requires run time manipulation of DIE_TEMP low
+		 * threshold which will interfere with h/w mitigation
+		 * scheme.
+		 */
+		if (chip->dt.hw_die_temp_mitigation)
+			val->intval = -EINVAL;
+		else
+			rc = smb1355_get_prop_charger_temp_max(chip, val);
 		break;
 	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
 		val->intval = chip->disabled;
@@ -609,10 +666,14 @@
 		break;
 	case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
 		if (chip->c_health == -EINVAL)
-			val->intval = smb1355_get_prop_connector_health(chip);
+			val->intval = smb1355_get_prop_health(chip,
+						CONNECTOR_TEMP);
 		else
 			val->intval = chip->c_health;
 		break;
+	case POWER_SUPPLY_PROP_DIE_HEALTH:
+		val->intval = smb1355_get_prop_health(chip, DIE_TEMP);
+		break;
 	case POWER_SUPPLY_PROP_PARALLEL_BATFET_MODE:
 		val->intval = chip->dt.pl_batfet_mode;
 		break;
@@ -635,6 +696,10 @@
 	case POWER_SUPPLY_PROP_PARALLEL_FCC_MAX:
 		val->intval = chip->max_fcc;
 		break;
+	case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+		/* Not in ship mode as long as device is active */
+		val->intval = 0;
+		break;
 	default:
 		pr_err_ratelimited("parallel psy get prop %d not supported\n",
 			prop);
@@ -678,13 +743,16 @@
 	}
 
 	chip->die_temp_deciDegC = -EINVAL;
-	if (disable) {
-		chip->exit_die_temp = true;
-		cancel_delayed_work_sync(&chip->die_temp_work);
-	} else {
-		/* start the work to measure temperature */
-		chip->exit_die_temp = false;
-		schedule_delayed_work(&chip->die_temp_work, 0);
+	/* Only enable temperature measurement for s/w based mitigation */
+	if (!chip->dt.hw_die_temp_mitigation) {
+		if (disable) {
+			chip->exit_die_temp = true;
+			cancel_delayed_work_sync(&chip->die_temp_work);
+		} else {
+			/* start the work to measure temperature */
+			chip->exit_die_temp = false;
+			schedule_delayed_work(&chip->die_temp_work, 0);
+		}
 	}
 
 	if (chip->irq_disable_votable)
@@ -726,6 +794,20 @@
 	return rc;
 }
 
+static int smb1355_clk_request(struct smb1355 *chip, bool enable)
+{
+	int rc;
+
+	rc = smb1355_masked_force_write(chip, CLOCK_REQUEST_REG,
+				CLOCK_REQUEST_CMD_BIT,
+				enable ? CLOCK_REQUEST_CMD_BIT : 0);
+	if (rc < 0)
+		pr_err("Couldn't %s clock rc=%d\n",
+			       enable ? "enable" : "disable", rc);
+
+	return rc;
+}
+
 static int smb1355_parallel_set_prop(struct power_supply *psy,
 				     enum power_supply_property prop,
 				     const union power_supply_propval *val)
@@ -752,6 +834,11 @@
 		chip->c_health = val->intval;
 		power_supply_changed(chip->parallel_psy);
 		break;
+	case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+		if (!val->intval)
+			break;
+		rc = smb1355_clk_request(chip, false);
+		break;
 	default:
 		pr_debug("parallel power supply set prop %d not supported\n",
 			prop);
@@ -929,6 +1016,12 @@
 static int smb1355_init_hw(struct smb1355 *chip)
 {
 	int rc;
+	u8 val, range;
+
+	/* request clock always on */
+	rc = smb1355_clk_request(chip, true);
+	if (rc < 0)
+		return rc;
 
 	/* enable watchdog bark and bite interrupts, and disable the watchdog */
 	rc = smb1355_masked_write(chip, WD_CFG_REG, WDOG_TIMER_EN_BIT
@@ -991,13 +1084,35 @@
 		return rc;
 	}
 
+	/* Configure DIE temp Low threshold */
+	if (chip->dt.hw_die_temp_mitigation) {
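+		/*
+		 * The comparator threshold encodes a coarse range index
+		 * (DIE_LOW_RANGE_DELTA degC wide, starting at
+		 * DIE_LOW_RANGE_BASE_DEGC) in the upper bits and a 1 degC
+		 * offset within that range in the lower bits.
+		 */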
+		range = (chip->dt.die_temp_threshold - DIE_LOW_RANGE_BASE_DEGC)
+						/ (DIE_LOW_RANGE_DELTA);
+		val = (chip->dt.die_temp_threshold
+				- ((range * DIE_LOW_RANGE_DELTA)
+						+ DIE_LOW_RANGE_BASE_DEGC))
+				% DIE_LOW_RANGE_DELTA;
+
+		rc = smb1355_masked_write(chip, SMB2CHG_BATIF_ENG_SMISC_DIETEMP,
+				TDIE_COMPARATOR_THRESHOLD,
+				(range << DIE_LOW_RANGE_SHIFT) | val);
+		if (rc < 0) {
+			pr_err("Couldn't set temp comp threshold rc=%d\n", rc);
+			return rc;
+		}
+	}
+
 	/*
-	 * Enable thermal Die temperature comparator source and disable hw
-	 * mitigation for skin/die
+	 * Enable thermal Die temperature comparator source and
+	 * enable hardware controlled current adjustment for die temp
+	 * if charger is configured in h/w controlled die temp mitigation.
 	 */
+	val = THERMREG_DIE_CMP_SRC_EN_BIT;
+	if (!chip->dt.hw_die_temp_mitigation)
+		val |= BYP_THERM_CHG_CURR_ADJUST_BIT;
 	rc = smb1355_masked_write(chip, MISC_THERMREG_SRC_CFG_REG,
 		THERMREG_DIE_CMP_SRC_EN_BIT | BYP_THERM_CHG_CURR_ADJUST_BIT,
-		THERMREG_DIE_CMP_SRC_EN_BIT | BYP_THERM_CHG_CURR_ADJUST_BIT);
+		val);
 	if (rc < 0) {
 		pr_err("Couldn't set Skin temperature comparator src rc=%d\n",
 			rc);
@@ -1008,8 +1123,9 @@
 	 * Disable hysterisis for die temperature. This is so that sw can run
 	 * stepping scheme quickly
 	 */
+	val = chip->dt.hw_die_temp_mitigation ? DIE_TEMP_COMP_HYST_BIT : 0;
 	rc = smb1355_masked_write(chip, BATIF_ENG_SCMISC_SPARE1_REG,
-				DIE_TEMP_COMP_HYST_BIT, 0);
+				DIE_TEMP_COMP_HYST_BIT, val);
 	if (rc < 0) {
 		pr_err("Couldn't disable hyst. for die rc=%d\n", rc);
 		return rc;
@@ -1340,6 +1456,8 @@
 	rc = smb1355_set_parallel_charging(chip, true);
 	if (rc < 0)
 		pr_err("Couldn't disable parallel path rc=%d\n", rc);
+
+	smb1355_clk_request(chip, false);
 }
 
 static struct platform_driver smb1355_driver = {
diff --git a/drivers/power/supply/qcom/smb1360-charger-fg.c b/drivers/power/supply/qcom/smb1360-charger-fg.c
index ed9c610..4e98ec7 100644
--- a/drivers/power/supply/qcom/smb1360-charger-fg.c
+++ b/drivers/power/supply/qcom/smb1360-charger-fg.c
@@ -3424,8 +3424,8 @@
 	chip->otg_vreg.rdesc.owner = THIS_MODULE;
 	chip->otg_vreg.rdesc.type = REGULATOR_VOLTAGE;
 	chip->otg_vreg.rdesc.ops = &smb1360_otg_reg_ops;
-	chip->otg_vreg.rdesc.of_match = chip->dev->of_node->name;
-	chip->otg_vreg.rdesc.name = chip->dev->of_node->name;
+	chip->otg_vreg.rdesc.of_match = "qcom,smb1360-vbus";
+	chip->otg_vreg.rdesc.name = "qcom,smb1360-vbus";
 
 	cfg.dev = chip->dev;
 	cfg.driver_data = chip;
diff --git a/drivers/power/supply/qcom/smb138x-charger.c b/drivers/power/supply/qcom/smb138x-charger.c
index f1df8f0..86ecda5 100644
--- a/drivers/power/supply/qcom/smb138x-charger.c
+++ b/drivers/power/supply/qcom/smb138x-charger.c
@@ -1371,10 +1371,12 @@
 	[USBIN_PLUGIN_IRQ] = {
 		.name		= "usbin-plugin",
 		.handler	= smblib_handle_usb_plugin,
+		.wake		= true,
 	},
 	[USBIN_SRC_CHANGE_IRQ] = {
 		.name		= "usbin-src-change",
 		.handler	= smblib_handle_usb_source_change,
+		.wake		= true,
 	},
 	[USBIN_ICL_CHANGE_IRQ] = {
 		.name		= "usbin-icl-change",
@@ -1383,6 +1385,7 @@
 	[TYPE_C_CHANGE_IRQ] = {
 		.name		= "type-c-change",
 		.handler	= smblib_handle_usb_typec_change,
+		.wake		= true,
 	},
 /* DC INPUT IRQs */
 	[DCIN_COLLAPSE_IRQ] = {
@@ -1825,6 +1828,8 @@
 		goto cleanup;
 	}
 
+	device_init_wakeup(chip->chg.dev, true);
+
 	pr_info("SMB138X probed successfully mode=%d\n", chip->chg.mode);
 	return rc;
 
diff --git a/drivers/power/supply/qcom/smb1390-charger.c b/drivers/power/supply/qcom/smb1390-charger.c
index 55a1f45..a92c975 100644
--- a/drivers/power/supply/qcom/smb1390-charger.c
+++ b/drivers/power/supply/qcom/smb1390-charger.c
@@ -82,6 +82,7 @@
 #define FCC_VOTER	"FCC_VOTER"
 #define ICL_VOTER	"ICL_VOTER"
 #define USB_VOTER	"USB_VOTER"
+#define SWITCHER_TOGGLE_VOTER	"SWITCHER_TOGGLE_VOTER"
 
 enum {
 	SWITCHER_OFF_WINDOW_IRQ = 0,
@@ -100,6 +101,7 @@
 	struct regmap		*regmap;
 	struct notifier_block	nb;
 	struct class		cp_class;
+	struct wakeup_source	*cp_ws;
 
 	/* work structs */
 	struct work_struct	status_change_work;
@@ -114,6 +116,7 @@
 	struct votable		*pl_disable_votable;
 	struct votable		*fcc_votable;
 	struct votable		*hvdcp_hw_inov_dis_votable;
+	struct votable		*cp_awake_votable;
 
 	/* power supplies */
 	struct power_supply	*usb_psy;
@@ -124,6 +127,7 @@
 	bool			status_change_running;
 	bool			taper_work_running;
 	int			adc_channel;
+	int			irq_status;
 };
 
 struct smb_irq {
@@ -204,6 +208,18 @@
 	return true;
 }
 
+static void cp_toggle_switcher(struct smb1390 *chip)
+{
+	vote(chip->disable_votable, SWITCHER_TOGGLE_VOTER, true, 0);
+
+	/* Delay for toggling switcher */
+	usleep_range(20, 30);
+
+	vote(chip->disable_votable, SWITCHER_TOGGLE_VOTER, false, 0);
+}
+
 static irqreturn_t default_irq_handler(int irq, void *data)
 {
 	struct smb1390 *chip = data;
@@ -212,40 +228,13 @@
 	for (i = 0; i < NUM_IRQS; ++i) {
-		if (irq == chip->irqs[i])
+		if (irq == chip->irqs[i]) {
 			pr_debug("%s IRQ triggered\n", smb_irqs[i].name);
+			chip->irq_status |= 1 << i;
+		}
 	}
 
 	kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t irev_irq_handler(int irq, void *data)
-{
-	struct smb1390 *chip = data;
-	int rc;
-
-	pr_debug("IREV IRQ triggered\n");
-
-	rc = smb1390_masked_write(chip, CORE_CONTROL1_REG,
-			CMD_EN_SWITCHER_BIT, 0);
-	if (rc < 0) {
-		pr_err("Couldn't disable switcher by command mode, rc=%d\n",
-			rc);
-		goto out;
-	}
-
-	rc = smb1390_masked_write(chip, CORE_CONTROL1_REG,
-			CMD_EN_SWITCHER_BIT, CMD_EN_SWITCHER_BIT);
-	if (rc < 0) {
-		pr_err("Couldn't enable switcher by command mode, rc=%d\n",
-			rc);
-		goto out;
-	}
-
-out:
-	kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
-	return IRQ_HANDLED;
-}
-
 static const struct smb_irq smb_irqs[] = {
 	[SWITCHER_OFF_WINDOW_IRQ] = {
 		.name		= "switcher-off-window",
@@ -264,7 +253,7 @@
 	},
 	[IREV_IRQ] = {
 		.name		= "irev-fault",
-		.handler	= irev_irq_handler,
+		.handler	= default_irq_handler,
 		.wake		= true,
 	},
 	[VPH_OV_HARD_IRQ] = {
@@ -338,6 +327,38 @@
 	return count;
 }
 
+static ssize_t cp_irq_show(struct class *c, struct class_attribute *attr,
+			char *buf)
+{
+	struct smb1390 *chip = container_of(c, struct smb1390, cp_class);
+	int rc, val;
+
+	rc = smb1390_read(chip, CORE_INT_RT_STS_REG, &val);
+	if (rc < 0)
+		return -EINVAL;
+
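+	/*
+	 * Report the live RT status together with any IRQs latched in
+	 * software since the last read, then clear the software latch.
+	 */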
+	val |= chip->irq_status;
+	chip->irq_status = 0;
+
+	return snprintf(buf, PAGE_SIZE, "%x\n", val);
+}
+
+static ssize_t toggle_switcher_store(struct class *c,
+			struct class_attribute *attr, const char *buf,
+			size_t count)
+{
+	struct smb1390 *chip = container_of(c, struct smb1390, cp_class);
+	unsigned long val;
+
+	if (kstrtoul(buf, 0, &val))
+		return -EINVAL;
+
+	if (val)
+		cp_toggle_switcher(chip);
+
+	return count;
+}
+
 static ssize_t die_temp_show(struct class *c, struct class_attribute *attr,
 			     char *buf)
 {
@@ -358,6 +379,8 @@
 	__ATTR_RO(stat1),
 	__ATTR_RO(stat2),
 	__ATTR_RW(enable),
+	__ATTR_RO(cp_irq),
+	__ATTR_WO(toggle_switcher),
 	__ATTR_RO(die_temp),
 	__ATTR_NULL,
 };
@@ -378,11 +401,12 @@
 		if (rc < 0)
 			return rc;
 
-		vote(chip->hvdcp_hw_inov_dis_votable, CP_VOTER, false, 0);
 		vote(chip->pl_disable_votable, CP_VOTER, false, 0);
+		vote(chip->cp_awake_votable, CP_VOTER, false, 0);
 	} else {
 		vote(chip->hvdcp_hw_inov_dis_votable, CP_VOTER, true, 0);
 		vote(chip->pl_disable_votable, CP_VOTER, true, 0);
+		vote(chip->cp_awake_votable, CP_VOTER, true, 0);
 		rc = smb1390_masked_write(chip, CORE_CONTROL1_REG,
 				   CMD_EN_SWITCHER_BIT, CMD_EN_SWITCHER_BIT);
 		if (rc < 0)
@@ -428,6 +452,20 @@
 	return rc;
 }
 
+static int smb1390_awake_vote_cb(struct votable *votable, void *data,
+				int awake, const char *client)
+{
+	struct smb1390 *chip = data;
+
+	if (awake)
+		__pm_stay_awake(chip->cp_ws);
+	else
+		__pm_relax(chip->cp_ws);
+
+	pr_debug("client: %s awake: %d\n", client, awake);
+	return 0;
+}
+
 static int smb1390_notifier_cb(struct notifier_block *nb,
 			       unsigned long event, void *data)
 {
@@ -589,6 +627,11 @@
 	if (IS_ERR(chip->ilim_votable))
 		return PTR_ERR(chip->ilim_votable);
 
+	chip->cp_awake_votable = create_votable("CP_AWAKE", VOTE_SET_ANY,
+			smb1390_awake_vote_cb, chip);
+	if (IS_ERR(chip->cp_awake_votable))
+		return PTR_ERR(chip->cp_awake_votable);
+
 	return 0;
 }
 
@@ -722,16 +765,21 @@
 	rc = smb1390_parse_dt(chip);
 	if (rc < 0) {
 		pr_err("Couldn't parse device tree rc=%d\n", rc);
-		goto out_work;
+		return rc;
 	}
 
 	chip->vadc_dev = qpnp_get_vadc(chip->dev, "smb");
 	if (IS_ERR(chip->vadc_dev)) {
 		rc = PTR_ERR(chip->vadc_dev);
-		pr_err("Couldn't get vadc dev rc=%d\n", rc);
-		goto out_work;
+		if (rc != -EPROBE_DEFER)
+			pr_err("Couldn't get vadc dev rc=%d\n", rc);
+		return rc;
 	}
 
+	chip->cp_ws = wakeup_source_register("qcom-chargepump");
+	if (!chip->cp_ws)
+		return -ENOMEM;
+
 	rc = smb1390_create_votables(chip);
 	if (rc < 0) {
 		pr_err("Couldn't create votables rc=%d\n", rc);
@@ -778,6 +826,7 @@
 out_work:
 	cancel_work(&chip->taper_work);
 	cancel_work(&chip->status_change_work);
+	wakeup_source_unregister(chip->cp_ws);
 	return rc;
 }
 
@@ -790,8 +839,10 @@
 
 	/* explicitly disable charging */
 	vote(chip->disable_votable, USER_VOTER, true, 0);
+	vote(chip->hvdcp_hw_inov_dis_votable, CP_VOTER, false, 0);
 	cancel_work(&chip->taper_work);
 	cancel_work(&chip->status_change_work);
+	wakeup_source_unregister(chip->cp_ws);
 	smb1390_destroy_votables(chip);
 	return 0;
 }
diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c
index 8756186..23070e6 100644
--- a/drivers/power/supply/qcom/smb5-lib.c
+++ b/drivers/power/supply/qcom/smb5-lib.c
@@ -22,8 +22,10 @@
 #include "smb5-lib.h"
 #include "smb5-reg.h"
 #include "battery.h"
+#include "schgm-flash.h"
 #include "step-chg-jeita.h"
 #include "storm-watch.h"
 
 #define smblib_err(chg, fmt, ...)		\
 	pr_err("%s: %s: " fmt, chg->name,	\
@@ -39,6 +41,10 @@
 				__func__, ##__VA_ARGS__);	\
 	} while (0)
 
+#define typec_rp_med_high(chg, typec_mode)			\
+	((typec_mode == POWER_SUPPLY_TYPEC_SOURCE_MEDIUM	\
+	|| typec_mode == POWER_SUPPLY_TYPEC_SOURCE_HIGH)	\
+	&& !chg->typec_legacy)
 
 int smblib_read(struct smb_charger *chg, u16 addr, u8 *val)
 {
@@ -112,6 +118,19 @@
 	return 0;
 }
 
+int smblib_icl_override(struct smb_charger *chg, bool override)
+{
+	int rc;
+
+	rc = smblib_masked_write(chg, USBIN_LOAD_CFG_REG,
+				ICL_OVERRIDE_AFTER_APSD_BIT,
+				override ? ICL_OVERRIDE_AFTER_APSD_BIT : 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't override ICL rc=%d\n", rc);
+
+	return rc;
+}
+
 int smblib_stat_sw_override_cfg(struct smb_charger *chg, bool override)
 {
 	int rc = 0;
@@ -303,6 +322,25 @@
 	return result;
 }
 
+#define AICL_RANGE2_MIN_MV		5600
+#define AICL_RANGE2_STEP_DELTA_MV	200
+#define AICL_RANGE2_OFFSET		16
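+/*
+ * The continuous AICL threshold uses two ranges: raw codes below
+ * AICL_RANGE2_OFFSET follow the parameter's min_u/step_u, while codes at or
+ * above the offset start from AICL_RANGE2_MIN_MV in
+ * AICL_RANGE2_STEP_DELTA_MV steps.
+ */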
+int smblib_get_aicl_cont_threshold(struct smb_chg_param *param, u8 val_raw)
+{
+	int base = param->min_u;
+	u8 reg = val_raw;
+	int step = param->step_u;
+
+
+		reg = val_raw - AICL_RANGE2_OFFSET;
+		base = AICL_RANGE2_MIN_MV;
+		step = AICL_RANGE2_STEP_DELTA_MV;
+	}
+
+	return base + (reg * step);
+}
+
 /********************
  * REGISTER SETTERS *
  ********************/
@@ -455,6 +493,23 @@
 {
 	int rc = 0;
 
+	/* PMI632 supports a maximum of 9V */
+	if (chg->smb_version == PMI632_SUBTYPE) {
+		switch (allowed_voltage) {
+		case USBIN_ADAPTER_ALLOW_12V:
+		case USBIN_ADAPTER_ALLOW_9V_TO_12V:
+			allowed_voltage = USBIN_ADAPTER_ALLOW_9V;
+			break;
+		case USBIN_ADAPTER_ALLOW_5V_OR_12V:
+		case USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V:
+			allowed_voltage = USBIN_ADAPTER_ALLOW_5V_OR_9V;
+			break;
+		case USBIN_ADAPTER_ALLOW_5V_TO_12V:
+			allowed_voltage = USBIN_ADAPTER_ALLOW_5V_TO_9V;
+			break;
+		}
+	}
+
 	rc = smblib_write(chg, USBIN_ADAPTER_ALLOW_CFG_REG, allowed_voltage);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't write 0x%02x to USBIN_ADAPTER_ALLOW_CFG rc=%d\n",
@@ -505,9 +560,49 @@
 	return rc;
 }
 
+int smblib_set_aicl_cont_threshold(struct smb_chg_param *param,
+				int val_u, u8 *val_raw)
+{
+	int base = param->min_u;
+	int offset = 0;
+	int step = param->step_u;
+
+	if (val_u > param->max_u)
+		val_u = param->max_u;
+	if (val_u < param->min_u)
+		val_u = param->min_u;
+
+	if (val_u >= AICL_RANGE2_MIN_MV) {
+		base = AICL_RANGE2_MIN_MV;
+		step = AICL_RANGE2_STEP_DELTA_MV;
+		offset = AICL_RANGE2_OFFSET;
+	}
+
+	*val_raw = ((val_u - base) / step) + offset;
+
+	return 0;
+}
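
The two AICL threshold helpers above encode the limit in two ranges: raw values below AICL_RANGE2_OFFSET use the parameter's own base and step, while raw values of 16 and above count 200 mV steps from 5600 mV. A standalone sketch of the round trip; the range-1 base and step below are placeholders (the real ones come from the chip's param table) and the setter's min/max clamping is omitted:

#include <stdint.h>
#include <stdio.h>

#define AICL_RANGE2_MIN_MV		5600
#define AICL_RANGE2_STEP_DELTA_MV	200
#define AICL_RANGE2_OFFSET		16

/* assumed range-1 parameters, placeholders only */
#define RANGE1_MIN_MV			4000
#define RANGE1_STEP_MV			100

static uint8_t aicl_mv_to_raw(int val_u)
{
	int base = RANGE1_MIN_MV, step = RANGE1_STEP_MV, offset = 0;

	if (val_u >= AICL_RANGE2_MIN_MV) {
		base = AICL_RANGE2_MIN_MV;
		step = AICL_RANGE2_STEP_DELTA_MV;
		offset = AICL_RANGE2_OFFSET;
	}

	return (uint8_t)(((val_u - base) / step) + offset);
}

static int aicl_raw_to_mv(uint8_t raw)
{
	int base = RANGE1_MIN_MV, step = RANGE1_STEP_MV, reg = raw;

	if (raw >= AICL_RANGE2_OFFSET) {
		reg = raw - AICL_RANGE2_OFFSET;
		base = AICL_RANGE2_MIN_MV;
		step = AICL_RANGE2_STEP_DELTA_MV;
	}

	return base + reg * step;
}

int main(void)
{
	/* 6000 mV encodes to raw 18 and decodes back to 6000 mV */
	printf("%d %d\n", aicl_mv_to_raw(6000), aicl_raw_to_mv(18));
	return 0;
}
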
+
 /********************
  * HELPER FUNCTIONS *
  ********************/
+int smblib_configure_hvdcp_apsd(struct smb_charger *chg, bool enable)
+{
+	int rc;
+	u8 mask = HVDCP_EN_BIT | BC1P2_SRC_DETECT_BIT;
+
+	if (chg->pd_not_supported)
+		return 0;
+
+	rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG, mask,
+						enable ? mask : 0);
+	if (rc < 0)
+		smblib_err(chg, "failed to write USBIN_OPTIONS_1_CFG rc=%d\n",
+				rc);
+
+	return rc;
+}
+
 static int smblib_request_dpdm(struct smb_charger *chg, bool enable)
 {
 	int rc = 0;
@@ -654,7 +749,6 @@
 	return 0;
 }
 
-#define USBIN_100MA	100000
 static void smblib_uusb_removal(struct smb_charger *chg)
 {
 	int rc;
@@ -663,10 +757,6 @@
 
 	cancel_delayed_work_sync(&chg->pl_enable_work);
 
-	rc = smblib_request_dpdm(chg, false);
-	if (rc < 0)
-		smblib_err(chg, "Couldn't to disable DPDM rc=%d\n", rc);
-
 	if (chg->wa_flags & BOOST_BACK_WA) {
 		data = chg->irq_info[SWITCHER_POWER_OK_IRQ].irq_data;
 		if (data) {
@@ -683,6 +773,8 @@
 	/* reset both usbin current and voltage votes */
 	vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
 	vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
+	vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true,
+			is_flash_active(chg) ? SDP_CURRENT_UA : SDP_100_MA);
 	vote(chg->usb_icl_votable, SW_QC3_VOTER, false, 0);
 
 	/* reconfigure allowed voltage for HVDCP */
@@ -705,8 +797,6 @@
 		smblib_err(chg, "Couldn't write float charger options rc=%d\n",
 			rc);
 
-	/* leave the ICL configured to 100mA for next insertion */
-	vote(chg->usb_icl_votable, DEFAULT_100MA_VOTER, true, USBIN_100MA);
 	/* clear USB ICL vote for USB_PSY_VOTER */
 	rc = vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
 	if (rc < 0)
@@ -772,6 +862,7 @@
 }
 
 #define USBIN_25MA	25000
+#define USBIN_100MA	100000
 #define USBIN_150MA	150000
 #define USBIN_500MA	500000
 #define USBIN_900MA	900000
@@ -854,7 +945,7 @@
 int smblib_set_icl_current(struct smb_charger *chg, int icl_ua)
 {
 	int rc = 0;
-	bool hc_mode = false;
+	bool hc_mode = false, override = false;
 
 	/* suspend and return if 25mA or less is requested */
 	if (icl_ua <= USBIN_25MA)
@@ -864,9 +955,10 @@
 		goto set_mode;
 
 	/* configure current */
-	if (((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT)
-		|| (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB))
-		&& (chg->real_charger_type == POWER_SUPPLY_TYPE_USB)) {
+	if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB
+		&& (chg->typec_legacy
+		|| chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT
+		|| chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)) {
 		rc = set_sdp_current(chg, icl_ua);
 		if (rc < 0) {
 			smblib_err(chg, "Couldn't set SDP ICL rc=%d\n", rc);
@@ -880,11 +972,28 @@
 			goto out;
 		}
 		hc_mode = true;
+
+		/*
+		 * Micro-USB mode follows the ICL register independent of the
+		 * override bit; configure the override only for Type-C mode.
+		 */
+		if (chg->connector_type == POWER_SUPPLY_CONNECTOR_TYPEC)
+			override = true;
 	}
 
 set_mode:
 	rc = smblib_masked_write(chg, USBIN_ICL_OPTIONS_REG,
 		USBIN_MODE_CHG_BIT, hc_mode ? USBIN_MODE_CHG_BIT : 0);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set USBIN_ICL_OPTIONS rc=%d\n", rc);
+		goto out;
+	}
+
+	rc = smblib_icl_override(chg, override);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set ICL override rc=%d\n", rc);
+		goto out;
+	}
 
 	/* unsuspend after configuring current and override */
 	rc = smblib_set_usb_suspend(chg, false);
@@ -895,7 +1004,7 @@
 
 	/* Re-run AICL */
 	if (chg->real_charger_type != POWER_SUPPLY_TYPE_USB)
-		rc = smblib_rerun_aicl(chg);
+		rc = smblib_run_aicl(chg, RERUN_AICL);
 out:
 	return rc;
 }
@@ -925,7 +1034,8 @@
 			return INT_MAX;
 
 		/* override is set */
-		rc = smblib_get_charge_param(chg, &chg->param.usb_icl, icl_ua);
+		rc = smblib_get_charge_param(chg, &chg->param.icl_max_stat,
+					icl_ua);
 		if (rc < 0) {
 			smblib_err(chg, "Couldn't get HC ICL rc=%d\n", rc);
 			return rc;
@@ -954,18 +1064,6 @@
 	return smblib_set_dc_suspend(chg, (bool)suspend);
 }
 
-static int smblib_pd_disallowed_votable_indirect_callback(
-	struct votable *votable, void *data, int disallowed, const char *client)
-{
-	struct smb_charger *chg = data;
-	int rc;
-
-	rc = vote(chg->pd_allowed_votable, PD_DISALLOWED_INDIRECT_VOTER,
-		!disallowed, 0);
-
-	return rc;
-}
-
 static int smblib_awake_vote_callback(struct votable *votable, void *data,
 			int awake, const char *client)
 {
@@ -1459,6 +1557,19 @@
 	return rc;
 }
 
+int smblib_get_prop_batt_cycle_count(struct smb_charger *chg,
+				     union power_supply_propval *val)
+{
+	int rc;
+
+	if (!chg->bms_psy)
+		return -EINVAL;
+
+	rc = power_supply_get_property(chg->bms_psy,
+				       POWER_SUPPLY_PROP_CYCLE_COUNT, val);
+	return rc;
+}
+
 /***********************
  * BATTERY PSY SETTERS *
  ***********************/
@@ -1548,7 +1659,7 @@
 	return 0;
 }
 
-int smblib_rerun_aicl(struct smb_charger *chg)
+int smblib_run_aicl(struct smb_charger *chg, int type)
 {
 	int rc;
 	u8 stat;
@@ -1566,8 +1677,8 @@
 
 	smblib_dbg(chg, PR_MISC, "re-running AICL\n");
 
-	rc = smblib_masked_write(chg, AICL_CMD_REG, RERUN_AICL_BIT,
-				RERUN_AICL_BIT);
+	stat = (type == RERUN_AICL) ? RERUN_AICL_BIT : RESTART_AICL_BIT;
+	rc = smblib_masked_write(chg, AICL_CMD_REG, stat, stat);
 	if (rc < 0)
 		smblib_err(chg, "Couldn't write to AICL_CMD_REG rc=%d\n",
 				rc);
@@ -1827,6 +1938,28 @@
 	switch (chg->real_charger_type) {
 	case POWER_SUPPLY_TYPE_USB_HVDCP:
 	case POWER_SUPPLY_TYPE_USB_HVDCP_3:
+		if (chg->smb_version == PMI632_SUBTYPE)
+			val->intval = MICRO_9V;
+		else
+			val->intval = MICRO_12V;
+		break;
+	case POWER_SUPPLY_TYPE_USB_PD:
+		val->intval = chg->voltage_max_uv;
+		break;
+	default:
+		val->intval = MICRO_5V;
+		break;
+	}
+
+	return 0;
+}
+
+int smblib_get_prop_usb_voltage_max_design(struct smb_charger *chg,
+					union power_supply_propval *val)
+{
+	switch (chg->real_charger_type) {
+	case POWER_SUPPLY_TYPE_USB_HVDCP:
+	case POWER_SUPPLY_TYPE_USB_HVDCP_3:
 	case POWER_SUPPLY_TYPE_USB_PD:
 		if (chg->smb_version == PMI632_SUBTYPE)
 			val->intval = MICRO_9V;
@@ -1989,13 +2122,6 @@
 	return rc;
 }
 
-int smblib_get_prop_pd_allowed(struct smb_charger *chg,
-			       union power_supply_propval *val)
-{
-	val->intval = get_effective_result(chg->pd_allowed_votable);
-	return 0;
-}
-
 int smblib_get_prop_input_current_settled(struct smb_charger *chg,
 					  union power_supply_propval *val)
 {
@@ -2039,13 +2165,7 @@
 int smblib_get_pe_start(struct smb_charger *chg,
 			       union power_supply_propval *val)
 {
-	/*
-	 * hvdcp timeout voter is the last one to allow pd. Use its vote
-	 * to indicate start of pe engine
-	 */
-	val->intval
-		= !get_client_vote_locked(chg->pd_disallowed_votable_indirect,
-			APSD_VOTER);
+	val->intval = chg->ok_to_pd;
 	return 0;
 }
 
@@ -2055,7 +2175,7 @@
 	int rc;
 	u8 stat;
 
-	rc = smblib_read(chg, TEMP_RANGE_STATUS_REG, &stat);
+	rc = smblib_read(chg, MISC_TEMP_RANGE_STATUS_REG, &stat);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't read TEMP_RANGE_STATUS_REG rc=%d\n",
 									rc);
@@ -2083,13 +2203,6 @@
 	return 0;
 }
 
-#define SDP_CURRENT_UA			500000
-#define CDP_CURRENT_UA			1500000
-#define DCP_CURRENT_UA			1500000
-#define HVDCP_CURRENT_UA		3000000
-#define TYPEC_DEFAULT_CURRENT_UA	900000
-#define TYPEC_MEDIUM_CURRENT_UA		1500000
-#define TYPEC_HIGH_CURRENT_UA		3000000
 static int get_rp_based_dcp_current(struct smb_charger *chg, int typec_mode)
 {
 	int rp_ua;
@@ -2129,27 +2242,44 @@
 					int usb_current)
 {
 	int rc = 0, rp_ua, typec_mode;
+	union power_supply_propval val = {0, };
 
 	if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_FLOAT) {
 		if (usb_current == -ETIMEDOUT) {
-			/*
-			 * Valid FLOAT charger, report the current based
-			 * of Rp
-			 */
+			if ((chg->float_cfg & FLOAT_OPTIONS_MASK)
+						== FORCE_FLOAT_SDP_CFG_BIT) {
+				/*
+				 * Configure USB500 mode if the Float charger
+				 * is configured for SDP.
+				 */
+				rc = set_sdp_current(chg, USBIN_500MA);
+				if (rc < 0)
+					smblib_err(chg,
+						"Couldn't set SDP ICL rc=%d\n",
+						rc);
+
+				return rc;
+			}
+
 			if (chg->connector_type ==
 					POWER_SUPPLY_CONNECTOR_TYPEC) {
+				/*
+				 * Valid FLOAT charger, report the current
+				 * based on Rp.
+				 */
 				typec_mode = smblib_get_prop_typec_mode(chg);
 				rp_ua = get_rp_based_dcp_current(chg,
 								typec_mode);
 				rc = vote(chg->usb_icl_votable,
-						DYNAMIC_RP_VOTER, true, rp_ua);
-				if (rc < 0) {
-					pr_err("Couldn't vote ICL DYNAMIC_RP_VOTER rc=%d\n",
-							rc);
+					SW_ICL_MAX_VOTER, true, rp_ua);
+				if (rc < 0)
 					return rc;
-				}
+			} else {
+				rc = vote(chg->usb_icl_votable,
+					SW_ICL_MAX_VOTER, true, DCP_CURRENT_UA);
+				if (rc < 0)
+					return rc;
 			}
-			/* No specific handling required for micro-USB */
 		} else {
 			/*
 			 * FLOAT charger detected as SDP by USB driver,
@@ -2158,27 +2288,36 @@
 			 */
 			chg->real_charger_type = POWER_SUPPLY_TYPE_USB;
 			rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
-							true, usb_current);
-			if (rc < 0) {
-				pr_err("Couldn't vote ICL USB_PSY_VOTER rc=%d\n",
-						rc);
+						true, usb_current);
+			if (rc < 0)
 				return rc;
-			}
+			rc = vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER,
+							false, 0);
+			if (rc < 0)
+				return rc;
 		}
 	} else {
-		rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
-					true, usb_current);
+		rc = smblib_get_prop_usb_present(chg, &val);
+		if (!rc && !val.intval)
+			return 0;
+
+		/* if flash is active force 500mA */
+		if ((usb_current < SDP_CURRENT_UA) && is_flash_active(chg))
+			usb_current = SDP_CURRENT_UA;
+
+		rc = vote(chg->usb_icl_votable, USB_PSY_VOTER, true,
+							usb_current);
 		if (rc < 0) {
 			pr_err("Couldn't vote ICL USB_PSY_VOTER rc=%d\n", rc);
 			return rc;
 		}
 
-	}
+		rc = vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, false, 0);
+		if (rc < 0) {
+			pr_err("Couldn't remove SW_ICL_MAX vote rc=%d\n", rc);
+			return rc;
+		}
 
-	rc = vote(chg->usb_icl_votable, DEFAULT_100MA_VOTER, false, 0);
-	if (rc < 0) {
-		pr_err("Couldn't unvote ICL DEFAULT_100MA_VOTER rc=%d\n", rc);
-		return rc;
 	}
 
 	return 0;
@@ -2298,14 +2437,14 @@
 	return rc;
 }
 
-static int __smblib_set_prop_pd_active(struct smb_charger *chg, bool pd_active)
+int smblib_set_prop_pd_active(struct smb_charger *chg,
+				const union power_supply_propval *val)
 {
 	int rc = 0;
 
-	chg->pd_active = pd_active;
+	chg->pd_active = val->intval;
 
 	if (chg->pd_active) {
-		vote(chg->pd_allowed_votable, PD_VOTER, true, 0);
 		vote(chg->usb_irq_enable_votable, PD_VOTER, true, 0);
 
 		/*
@@ -2313,24 +2452,24 @@
 		 * It is guaranteed that pd_active is set prior to
 		 * pd_current_max
 		 */
-		rc = vote(chg->usb_icl_votable, PD_VOTER, true, USBIN_500MA);
-		if (rc < 0)
-			smblib_err(chg, "Couldn't vote for USB ICL rc=%d\n",
-									rc);
-
-		/* clear USB ICL vote for DCP_VOTER */
-		rc = vote(chg->usb_icl_votable, DCP_VOTER, false, 0);
-		if (rc < 0)
-			smblib_err(chg, "Couldn't un-vote DCP from USB ICL rc=%d\n",
-									rc);
-
-		/* remove USB_PSY_VOTER */
-		rc = vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
-		if (rc < 0)
-			smblib_err(chg, "Couldn't unvote USB_PSY rc=%d\n", rc);
+		vote(chg->usb_icl_votable, PD_VOTER, true, USBIN_500MA);
+		vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, false, 0);
 	} else {
-		vote(chg->pd_allowed_votable, PD_VOTER, true, 0);
+		vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true, SDP_100_MA);
+		vote(chg->usb_icl_votable, PD_VOTER, false, 0);
 		vote(chg->usb_irq_enable_votable, PD_VOTER, false, 0);
+
+		/* PD hard resets failed, rerun apsd */
+		if (chg->ok_to_pd) {
+			chg->ok_to_pd = false;
+			rc = smblib_configure_hvdcp_apsd(chg, true);
+			if (rc < 0) {
+				dev_err(chg->dev,
+					"Couldn't enable APSD rc=%d\n", rc);
+				return rc;
+			}
+			smblib_rerun_apsd_if_required(chg);
+		}
 	}
 
 	smblib_update_usb_type(chg);
@@ -2338,15 +2477,6 @@
 	return rc;
 }
 
-int smblib_set_prop_pd_active(struct smb_charger *chg,
-			      const union power_supply_propval *val)
-{
-	if (!get_effective_result(chg->pd_allowed_votable))
-		return -EINVAL;
-
-	return __smblib_set_prop_pd_active(chg, val->intval);
-}
-
 int smblib_set_prop_ship_mode(struct smb_charger *chg,
 				const union power_supply_propval *val)
 {
@@ -2604,13 +2734,79 @@
 	return IRQ_HANDLED;
 }
 
+#define AICL_STEP_MV		200
+#define MAX_AICL_THRESHOLD_MV	4800
 irqreturn_t usbin_uv_irq_handler(int irq, void *data)
 {
 	struct smb_irq_data *irq_data = data;
 	struct smb_charger *chg = irq_data->parent_data;
 	struct storm_watch *wdata;
+	int rc;
 
 	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
+	if ((chg->wa_flags & WEAK_ADAPTER_WA)
+			&& is_storming(&irq_data->storm_data)) {
+
+		if (chg->aicl_max_reached) {
+			smblib_dbg(chg, PR_MISC,
+					"USBIN_UV storm at max AICL threshold\n");
+			return IRQ_HANDLED;
+		}
+
+		smblib_dbg(chg, PR_MISC, "USBIN_UV storm at threshold %d\n",
+				chg->aicl_5v_threshold_mv);
+
+		/* suspend USBIN before updating AICL threshold */
+		vote(chg->usb_icl_votable, AICL_THRESHOLD_VOTER, true, 0);
+
+		/* delay for VASHDN deglitch */
+		msleep(20);
+
+		if (chg->aicl_5v_threshold_mv > MAX_AICL_THRESHOLD_MV) {
+			/* reached max AICL threshold */
+			chg->aicl_max_reached = true;
+			goto unsuspend_input;
+		}
+
+		/* Increase AICL threshold by 200mV */
+		rc = smblib_set_charge_param(chg, &chg->param.aicl_5v_threshold,
+				chg->aicl_5v_threshold_mv + AICL_STEP_MV);
+		if (rc < 0)
+			dev_err(chg->dev,
+				"Error in setting AICL threshold rc=%d\n", rc);
+		else
+			chg->aicl_5v_threshold_mv += AICL_STEP_MV;
+
+		rc = smblib_set_charge_param(chg,
+				&chg->param.aicl_cont_threshold,
+				chg->aicl_cont_threshold_mv + AICL_STEP_MV);
+		if (rc < 0)
+			dev_err(chg->dev,
+				"Error in setting AICL threshold rc=%d\n", rc);
+		else
+			chg->aicl_cont_threshold_mv += AICL_STEP_MV;
+
+unsuspend_input:
+		if (chg->smb_version == PMI632_SUBTYPE)
+			schgm_flash_torch_priority(chg, TORCH_BOOST_MODE);
+
+		if (chg->aicl_max_reached) {
+			smblib_dbg(chg, PR_MISC,
+				"Reached max AICL threshold, restricting ICL to 100mA\n");
+			vote(chg->usb_icl_votable, AICL_THRESHOLD_VOTER,
+					true, USBIN_100MA);
+			smblib_run_aicl(chg, RESTART_AICL);
+		} else {
+			smblib_run_aicl(chg, RESTART_AICL);
+			vote(chg->usb_icl_votable, AICL_THRESHOLD_VOTER,
+					false, 0);
+		}
+
+		wdata = &chg->irq_info[USBIN_UV_IRQ].irq_data->storm_data;
+		reset_storm_count(wdata);
+	}
+
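+
To make the weak-adapter workaround above concrete: every USBIN_UV storm raises both AICL thresholds by AICL_STEP_MV until the 5V threshold already exceeds MAX_AICL_THRESHOLD_MV, at which point aicl_max_reached is set and the ICL is pinned to 100mA. A small sketch of that progression, assuming an illustrative 4400 mV default threshold:

#include <stdbool.h>
#include <stdio.h>

#define AICL_STEP_MV		200
#define MAX_AICL_THRESHOLD_MV	4800

int main(void)
{
	int threshold_mv = 4400;	/* assumed default from devicetree */
	bool max_reached = false;
	int storms = 0;

	while (!max_reached) {
		storms++;
		if (threshold_mv > MAX_AICL_THRESHOLD_MV) {
			max_reached = true;	/* ICL now pinned to 100mA */
			break;
		}
		threshold_mv += AICL_STEP_MV;
	}

	/* prints: 4 storms, final threshold 5000 mV */
	printf("%d storms, final threshold %d mV\n", storms, threshold_mv);
	return 0;
}
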
 	if (!chg->irq_info[SWITCHER_POWER_OK_IRQ].irq_data)
 		return IRQ_HANDLED;
 
@@ -2629,6 +2825,24 @@
 	struct smb_charger *chg = irq_data->parent_data;
 
 	if (chg->mode == PARALLEL_MASTER) {
+		/*
+		 * Ignore if change in ICL is due to DIE temp mitigation.
+		 * Ignore the ICL change if it is due to DIE temp mitigation;
+		 * this prevents any further ICL split.
+		if (chg->hw_die_temp_mitigation) {
+			rc = smblib_read(chg, MISC_DIE_TEMP_STATUS_REG, &stat);
+			if (rc < 0) {
+				smblib_err(chg,
+					"Couldn't read DIE_TEMP rc=%d\n", rc);
+				return IRQ_HANDLED;
+			}
+			if (stat & (DIE_TEMP_UB_BIT | DIE_TEMP_LB_BIT)) {
+				smblib_dbg(chg, PR_PARALLEL,
+					"skip ICL change DIE_TEMP %x\n", stat);
+				return IRQ_HANDLED;
+			}
+		}
+
 		rc = smblib_read(chg, AICL_STATUS_REG, &stat);
 		if (rc < 0) {
 			smblib_err(chg, "Couldn't read AICL_STATUS rc=%d\n",
@@ -2657,11 +2871,7 @@
 
 static void smblib_micro_usb_plugin(struct smb_charger *chg, bool vbus_rising)
 {
-	if (vbus_rising) {
-		/* use the typec flag even though its not typec */
-		chg->typec_present = 1;
-	} else {
-		chg->typec_present = 0;
+	if (!vbus_rising) {
 		smblib_update_usb_type(chg);
 		smblib_notify_device_mode(chg, false);
 		smblib_uusb_removal(chg);
@@ -2746,9 +2956,38 @@
 			}
 		}
 
+		if (chg->wa_flags & WEAK_ADAPTER_WA) {
+			chg->aicl_5v_threshold_mv =
+					chg->default_aicl_5v_threshold_mv;
+			chg->aicl_cont_threshold_mv =
+					chg->default_aicl_cont_threshold_mv;
+
+			smblib_set_charge_param(chg,
+					&chg->param.aicl_5v_threshold,
+					chg->aicl_5v_threshold_mv);
+			smblib_set_charge_param(chg,
+					&chg->param.aicl_cont_threshold,
+					chg->aicl_cont_threshold_mv);
+			chg->aicl_max_reached = false;
+
+			if (chg->smb_version == PMI632_SUBTYPE)
+				schgm_flash_torch_priority(chg,
+						TORCH_BUCK_MODE);
+
+			data = chg->irq_info[USBIN_UV_IRQ].irq_data;
+			if (data) {
+				wdata = &data->storm_data;
+				reset_storm_count(wdata);
+			}
+			vote(chg->usb_icl_votable, AICL_THRESHOLD_VOTER,
+					false, 0);
+		}
+
 		rc = smblib_request_dpdm(chg, false);
 		if (rc < 0)
 			smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc);
+
+		smblib_update_usb_type(chg);
 	}
 
 	if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
@@ -2764,12 +3003,10 @@
 	struct smb_irq_data *irq_data = data;
 	struct smb_charger *chg = irq_data->parent_data;
 
-	mutex_lock(&chg->lock);
 	if (chg->pd_hard_reset)
 		smblib_usb_plugin_hard_reset_locked(chg);
 	else
 		smblib_usb_plugin_locked(chg);
-	mutex_unlock(&chg->lock);
 
 	return IRQ_HANDLED;
 }
@@ -2858,8 +3095,6 @@
 	if (!rising)
 		return;
 
-	vote(chg->pd_disallowed_votable_indirect, APSD_VOTER, false, 0);
-
 	if (chg->mode == PARALLEL_MASTER)
 		vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, true, 0);
 
@@ -2872,33 +3107,18 @@
 static void smblib_handle_hvdcp_check_timeout(struct smb_charger *chg,
 					      bool rising, bool qc_charger)
 {
-	const struct apsd_result *apsd_result = smblib_get_apsd_result(chg);
-
-	/* Hold off PD only until hvdcp 2.0 detection timeout */
 	if (rising) {
 
-		/* enable HDC and ICL irq for QC2/3 charger */
-		if (qc_charger)
+		if (qc_charger) {
+			/* enable HDC and ICL irq for QC2/3 charger */
 			vote(chg->usb_irq_enable_votable, QC_VOTER, true, 0);
-		else
-			vote(chg->pd_disallowed_votable_indirect, APSD_VOTER,
-							false, 0);
-
-		/*
-		 * HVDCP detection timeout done
-		 * If adapter is not QC2.0/QC3.0 - it is a plain old DCP.
-		 */
-		if (!qc_charger && (apsd_result->bit & DCP_CHARGER_BIT))
-			/* enforce DCP ICL if specified */
+			vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true,
+				HVDCP_CURRENT_UA);
+		} else {
+			/* A plain DCP, enforce DCP ICL if specified */
 			vote(chg->usb_icl_votable, DCP_VOTER,
 				chg->dcp_icl_ua != -EINVAL, chg->dcp_icl_ua);
-
-		/*
-		 * if pd is not allowed, then set pd_active = false right here,
-		 * so that it starts the hvdcp engine
-		 */
-		if (!get_effective_result(chg->pd_allowed_votable))
-			__smblib_set_prop_pd_active(chg, 0);
+		}
 	}
 
 	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s %s\n", __func__,
@@ -2913,6 +3133,72 @@
 		   rising ? "rising" : "falling");
 }
 
+static void update_sw_icl_max(struct smb_charger *chg, int pst)
+{
+	int typec_mode;
+	int rp_ua;
+
+	/* while PD is active it should have complete ICL control */
+	if (chg->pd_active)
+		return;
+
+	/*
+	 * HVDCP 2/3 is handled separately.
+	 * For UNKNOWN (input not present), return without updating the ICL.
+	 */
+	if (pst == POWER_SUPPLY_TYPE_USB_HVDCP
+			|| pst == POWER_SUPPLY_TYPE_USB_HVDCP_3
+			|| pst == POWER_SUPPLY_TYPE_UNKNOWN)
+		return;
+
+	/* TypeC rp med or high, use rp value */
+	typec_mode = smblib_get_prop_typec_mode(chg);
+	if (typec_rp_med_high(chg, typec_mode)) {
+		rp_ua = get_rp_based_dcp_current(chg, typec_mode);
+		vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true, rp_ua);
+		return;
+	}
+
+	/* rp-std or legacy, USB BC 1.2 */
+	switch (pst) {
+	case POWER_SUPPLY_TYPE_USB:
+		/*
+		 * USB_PSY will vote to increase the current to 500/900mA once
+		 * enumeration is done.
+		 */
+		if (!is_client_vote_enabled(chg->usb_icl_votable,
+						USB_PSY_VOTER)) {
+			/* if flash is active force 500mA */
+			vote(chg->usb_icl_votable, USB_PSY_VOTER, true,
+					is_flash_active(chg) ?
+					SDP_CURRENT_UA : SDP_100_MA);
+		}
+		vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, false, 0);
+		break;
+	case POWER_SUPPLY_TYPE_USB_CDP:
+		vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true,
+					CDP_CURRENT_UA);
+		break;
+	case POWER_SUPPLY_TYPE_USB_DCP:
+		vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true,
+					DCP_CURRENT_UA);
+		break;
+	case POWER_SUPPLY_TYPE_USB_FLOAT:
+		/*
+		 * Limit the ICL to 100mA; the USB driver will enumerate to
+		 * check if this is an SDP and set the current appropriately.
+		 */
+		vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true,
+					SDP_100_MA);
+		break;
+	default:
+		smblib_err(chg, "Unknown APSD %d; forcing 500mA\n", pst);
+		vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true,
+					SDP_CURRENT_UA);
+		break;
+	}
+}
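
update_sw_icl_max() relies on the driver's votable framework, where the USB ICL resolves to the lowest enabled request, so SW_ICL_MAX_VOTER acts as a software ceiling until a more specific client (USB_PSY_VOTER, PD_VOTER, ...) takes over. The illustration below shows only that resolution rule with made-up votes; it is not the pmic-voter API:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct icl_vote {
	const char *client;
	bool enabled;
	int value_ua;
};

static int effective_icl_ua(const struct icl_vote *votes, int n)
{
	int i, min = INT_MAX;

	for (i = 0; i < n; i++)
		if (votes[i].enabled && votes[i].value_ua < min)
			min = votes[i].value_ua;

	return (min == INT_MAX) ? -1 : min;	/* -1: no active votes */
}

int main(void)
{
	struct icl_vote votes[] = {
		{ "SW_ICL_MAX_VOTER",	true,	1500000 },	/* DCP default */
		{ "USB_PSY_VOTER",	true,	500000 },	/* SDP enumerated */
		{ "PD_VOTER",		false,	0 },
	};

	/* prints 500000: the lowest enabled request wins */
	printf("%d\n", effective_icl_ua(votes, 3));
	return 0;
}
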
+
 static void smblib_handle_apsd_done(struct smb_charger *chg, bool rising)
 {
 	const struct apsd_result *apsd_result;
@@ -2922,25 +3208,18 @@
 
 	apsd_result = smblib_update_usb_type(chg);
 
+	update_sw_icl_max(chg, apsd_result->pst);
+
 	switch (apsd_result->bit) {
 	case SDP_CHARGER_BIT:
 	case CDP_CHARGER_BIT:
 	case FLOAT_CHARGER_BIT:
-		/* if not DCP then no hvdcp timeout happens. Enable pd here */
-		vote(chg->pd_disallowed_votable_indirect, APSD_VOTER,
-				false, 0);
 		if ((chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
 				|| chg->use_extcon)
 			smblib_notify_device_mode(chg, true);
 		break;
 	case OCP_CHARGER_BIT:
-		vote(chg->usb_icl_votable, DEFAULT_100MA_VOTER, false, 0);
-		/* if not DCP then no hvdcp timeout happens, Enable pd here. */
-		vote(chg->pd_disallowed_votable_indirect, APSD_VOTER,
-				false, 0);
-		break;
 	case DCP_CHARGER_BIT:
-		vote(chg->usb_icl_votable, DEFAULT_100MA_VOTER, false, 0);
 		break;
 	default:
 		break;
@@ -2957,6 +3236,14 @@
 	int rc = 0;
 	u8 stat;
 
+	/*
+	 * Prepared to run PD, or PD is already active. APSD is disabled at
+	 * this point, but an apsd_done IRQ from a previously unfinished APSD
+	 * run can still arrive; skip it.
+	 */
+	if (chg->ok_to_pd)
+		return IRQ_HANDLED;
+
 	rc = smblib_read(chg, APSD_STATUS_REG, &stat);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't read APSD_STATUS rc=%d\n", rc);
@@ -2972,7 +3259,7 @@
 		 * charger-mis-detection.
 		 */
 		chg->uusb_apsd_rerun_done = true;
-		smblib_rerun_apsd(chg);
+		smblib_rerun_apsd_if_required(chg);
 		return IRQ_HANDLED;
 	}
 
@@ -3011,32 +3298,70 @@
 
 static void typec_sink_insertion(struct smb_charger *chg)
 {
-	/* when a sink is inserted we should not wait on hvdcp timeout to
-	 * enable pd
-	 */
-	vote(chg->pd_disallowed_votable_indirect, APSD_VOTER, false, 0);
+	vote(chg->usb_icl_votable, OTG_VOTER, true, 0);
+
 	if (chg->use_extcon) {
 		smblib_notify_usb_host(chg, true);
 		chg->otg_present = true;
 	}
+
+	if (!chg->pr_swap_in_progress)
+		chg->ok_to_pd = (!(*chg->pd_disabled) || chg->early_usb_attach)
+					&& !chg->pd_not_supported;
+}
+
+static void typec_src_insertion(struct smb_charger *chg)
+{
+	int rc = 0;
+	u8 stat;
+
+	if (chg->pr_swap_in_progress)
+		return;
+
+	rc = smblib_read(chg, LEGACY_CABLE_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read LEGACY_CABLE_STATUS_REG rc=%d\n",
+			rc);
+		return;
+	}
+
+	chg->typec_legacy = stat & TYPEC_LEGACY_CABLE_STATUS_BIT;
+	chg->ok_to_pd = (!(chg->typec_legacy || *chg->pd_disabled)
+			|| chg->early_usb_attach) && !chg->pd_not_supported;
+	if (!chg->ok_to_pd) {
+		rc = smblib_configure_hvdcp_apsd(chg, true);
+		if (rc < 0) {
+			dev_err(chg->dev,
+				"Couldn't enable APSD rc=%d\n", rc);
+			return;
+		}
+		smblib_rerun_apsd_if_required(chg);
+	}
 }
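
The ok_to_pd decision above gates whether PD is attempted for a newly attached source: a legacy cable or a pd-disabled configuration blocks PD unless USB was already attached at boot, and pd_not_supported always blocks it. A small truth-table sketch mirroring that expression (inputs are illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool ok_to_pd(bool typec_legacy, bool pd_disabled,
		     bool early_usb_attach, bool pd_not_supported)
{
	return (!(typec_legacy || pd_disabled) || early_usb_attach)
			&& !pd_not_supported;
}

int main(void)
{
	/* legacy cable, PD otherwise allowed: 0 */
	printf("%d\n", ok_to_pd(true, false, false, false));
	/* non-legacy cable, PD enabled and supported: 1 */
	printf("%d\n", ok_to_pd(false, false, false, false));
	/* legacy cable but USB attached early: 1 */
	printf("%d\n", ok_to_pd(true, false, true, false));
	return 0;
}
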
 
 static void typec_sink_removal(struct smb_charger *chg)
 {
-	smblib_set_charge_param(chg, &chg->param.freq_switcher,
-			chg->chg_freq.freq_above_otg_threshold);
-	chg->boost_current_ua = 0;
+	vote(chg->usb_icl_votable, OTG_VOTER, false, 0);
+
+	if (chg->use_extcon) {
+		if (chg->otg_present)
+			smblib_notify_usb_host(chg, false);
+		chg->otg_present = false;
+	}
 }
 
-static void smblib_handle_typec_removal(struct smb_charger *chg)
+static void typec_src_removal(struct smb_charger *chg)
 {
 	int rc;
 	struct smb_irq_data *data;
 	struct storm_watch *wdata;
 
-	rc = smblib_request_dpdm(chg, false);
+	/* disable apsd */
+	rc = smblib_configure_hvdcp_apsd(chg, false);
 	if (rc < 0)
-		smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc);
+		smblib_err(chg, "Couldn't disable APSD rc=%d\n", rc);
+
+	smblib_update_usb_type(chg);
 
 	if (chg->wa_flags & BOOST_BACK_WA) {
 		data = chg->irq_info[SWITCHER_POWER_OK_IRQ].irq_data;
@@ -3052,7 +3377,8 @@
 	cancel_delayed_work_sync(&chg->pl_enable_work);
 
 	/* reset input current limit voters */
-	vote(chg->usb_icl_votable, DEFAULT_100MA_VOTER, true, USBIN_100MA);
+	vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true,
+			is_flash_active(chg) ? SDP_CURRENT_UA : SDP_100_MA);
 	vote(chg->usb_icl_votable, PD_VOTER, false, 0);
 	vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
 	vote(chg->usb_icl_votable, DCP_VOTER, false, 0);
@@ -3060,12 +3386,6 @@
 	vote(chg->usb_icl_votable, SW_QC3_VOTER, false, 0);
 	vote(chg->usb_icl_votable, OTG_VOTER, false, 0);
 	vote(chg->usb_icl_votable, CTM_VOTER, false, 0);
-	vote(chg->usb_icl_votable, DYNAMIC_RP_VOTER, false, 0);
-
-	/* reset power delivery voters */
-	vote(chg->pd_allowed_votable, PD_VOTER, false, 0);
-	vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER, true, 0);
-	vote(chg->pd_disallowed_votable_indirect, APSD_VOTER, true, 0);
 
 	/* reset usb irq voters */
 	vote(chg->usb_irq_enable_votable, PD_VOTER, false, 0);
@@ -3078,14 +3398,10 @@
 	vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
 	vote(chg->awake_votable, PL_DELAY_VOTER, false, 0);
 
-	chg->vconn_attempts = 0;
-	chg->otg_attempts = 0;
 	chg->pulse_cnt = 0;
 	chg->usb_icl_delta_ua = 0;
 	chg->voltage_min_uv = MICRO_5V;
 	chg->voltage_max_uv = MICRO_5V;
-	chg->pd_active = 0;
-	chg->pd_hard_reset = 0;
 
 	/* write back the default FLOAT charger configuration */
 	rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
@@ -3094,12 +3410,6 @@
 		smblib_err(chg, "Couldn't write float charger options rc=%d\n",
 			rc);
 
-	/* reset back to 103mS tCC debounce */
-	rc = smblib_masked_write(chg, TYPE_C_DEBOUNCE_OPTION_REG,
-					REDUCE_TCCDEBOUNCE_TO_2MS_BIT, 0);
-	if (rc < 0)
-		smblib_err(chg, "Couldn't set 120mS tCC debounce rc=%d\n", rc);
-
 	/* reconfigure allowed voltage for HVDCP */
 	rc = smblib_set_adapter_allowance(chg,
 			USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V);
@@ -3107,157 +3417,35 @@
 		smblib_err(chg, "Couldn't set USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V rc=%d\n",
 			rc);
 
-	/* enable DRP */
-	rc = smblib_masked_write(chg, TYPE_C_MODE_CFG_REG,
-				 TYPEC_POWER_ROLE_CMD_MASK, 0);
-	if (rc < 0)
-		smblib_err(chg, "Couldn't enable DRP rc=%d\n", rc);
+	if (chg->use_extcon)
+		smblib_notify_device_mode(chg, false);
 
-	/* HW controlled CC_OUT */
-	rc = smblib_masked_write(chg, TYPE_C_CCOUT_CONTROL_REG,
-				TYPEC_CCOUT_SRC_BIT, 0);
-	if (rc < 0)
-		smblib_err(chg, "Couldn't enable HW cc_out rc=%d\n", rc);
-
-	/* clear exit sink based on cc */
-	rc = smblib_masked_write(chg, TYPE_C_EXIT_STATE_CFG_REG,
-						EXIT_SNK_BASED_ON_CC_BIT, 0);
-	if (rc < 0)
-		smblib_err(chg, "Couldn't clear exit_sink_based_on_cc rc=%d\n",
-				rc);
-
-	typec_sink_removal(chg);
-	smblib_update_usb_type(chg);
-
-	if (chg->use_extcon) {
-		if (chg->otg_present)
-			smblib_notify_usb_host(chg, false);
-		else
-			smblib_notify_device_mode(chg, false);
-	}
-	chg->otg_present = false;
-}
-
-static void smblib_handle_typec_insertion(struct smb_charger *chg)
-{
-	int rc;
-	u8 stat;
-
-	vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER, false, 0);
-
-	rc = smblib_read(chg, TYPE_C_MISC_STATUS_REG, &stat);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
-		return;
-	}
-
-	if (stat & SNK_SRC_MODE_BIT) {
-		typec_sink_insertion(chg);
-	} else {
-		rc = smblib_request_dpdm(chg, true);
-		if (rc < 0)
-			smblib_err(chg, "Couldn't to enable DPDM rc=%d\n", rc);
-		typec_sink_removal(chg);
-	}
+	chg->typec_legacy = false;
 }
 
 static void smblib_handle_rp_change(struct smb_charger *chg, int typec_mode)
 {
-	int rp_ua;
 	const struct apsd_result *apsd = smblib_get_apsd_result(chg);
 
-	if ((apsd->pst != POWER_SUPPLY_TYPE_USB_DCP)
-		&& (apsd->pst != POWER_SUPPLY_TYPE_USB_FLOAT))
-		return;
-
-	/*
-	 * if APSD indicates FLOAT and the USB stack had detected SDP,
-	 * do not respond to Rp changes as we do not confirm that its
-	 * a legacy cable
-	 */
-	if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB)
-		return;
 	/*
 	 * We want the ICL vote @ 100mA for a FLOAT charger
 	 * until the detection by the USB stack is complete.
 	 * Ignore the Rp changes unless there is a
-	 * pre-existing valid vote.
+	 * pre-existing valid vote or FLOAT is configured for
+	 * SDP current.
 	 */
-	if (apsd->pst == POWER_SUPPLY_TYPE_USB_FLOAT &&
-		(get_client_vote(chg->usb_icl_votable, DEFAULT_100MA_VOTER)
-					<= USBIN_100MA))
+	if (apsd->pst == POWER_SUPPLY_TYPE_USB_FLOAT) {
+		if (get_client_vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER)
+					<= USBIN_100MA
+			|| (chg->float_cfg & FLOAT_OPTIONS_MASK)
+					== FORCE_FLOAT_SDP_CFG_BIT)
 		return;
+	}
 
-	/*
-	 * handle Rp change for DCP/FLOAT/OCP.
-	 * Update the current only if the Rp is different from
-	 * the last Rp value.
-	 */
+	update_sw_icl_max(chg, apsd->pst);
+
 	smblib_dbg(chg, PR_MISC, "CC change old_mode=%d new_mode=%d\n",
 						chg->typec_mode, typec_mode);
-
-	rp_ua = get_rp_based_dcp_current(chg, typec_mode);
-	vote(chg->usb_icl_votable, DYNAMIC_RP_VOTER, true, rp_ua);
-}
-
-static void smblib_handle_typec_cc_state_change(struct smb_charger *chg)
-{
-	u8 stat;
-	int typec_mode, rc;
-
-	if (chg->pr_swap_in_progress)
-		return;
-
-	typec_mode = smblib_get_prop_typec_mode(chg);
-	if (chg->typec_present && (typec_mode != chg->typec_mode))
-		smblib_handle_rp_change(chg, typec_mode);
-
-	chg->typec_mode = typec_mode;
-
-	if (!chg->typec_present && chg->typec_mode != POWER_SUPPLY_TYPEC_NONE) {
-		chg->typec_present = true;
-		smblib_dbg(chg, PR_MISC, "TypeC %s insertion\n",
-			smblib_typec_mode_name[chg->typec_mode]);
-		smblib_handle_typec_insertion(chg);
-	} else if (chg->typec_present &&
-				chg->typec_mode == POWER_SUPPLY_TYPEC_NONE) {
-		chg->typec_present = false;
-		smblib_dbg(chg, PR_MISC, "TypeC removal\n");
-		smblib_handle_typec_removal(chg);
-	}
-
-	rc = smblib_read(chg, TYPE_C_MISC_STATUS_REG, &stat);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
-		return;
-	}
-	/* suspend usb if sink */
-	if ((stat & SNK_SRC_MODE_BIT) && chg->typec_present)
-		vote(chg->usb_icl_votable, OTG_VOTER, true, 0);
-	else
-		vote(chg->usb_icl_votable, OTG_VOTER, false, 0);
-
-	smblib_dbg(chg, PR_INTERRUPT, "IRQ: cc-state-change; Type-C %s detected\n",
-				smblib_typec_mode_name[chg->typec_mode]);
-}
-
-static void smblib_usb_typec_change(struct smb_charger *chg)
-{
-	int rc;
-	u8 stat;
-
-	smblib_handle_typec_cc_state_change(chg);
-
-	rc = smblib_read(chg, TYPE_C_MISC_STATUS_REG, &stat);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
-		return;
-	}
-
-	if (stat & TYPEC_VBUS_ERROR_STATUS_BIT)
-		smblib_dbg(chg, PR_INTERRUPT, "IRQ: vbus-error\n");
-
-	power_supply_changed(chg->usb_psy);
 }
 
 irqreturn_t typec_or_rid_detection_change_irq_handler(int irq, void *data)
@@ -3281,17 +3469,81 @@
 {
 	struct smb_irq_data *irq_data = data;
 	struct smb_charger *chg = irq_data->parent_data;
+	int typec_mode;
 
-	if ((chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
-			|| chg->pr_swap_in_progress) {
+	if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB) {
 		smblib_dbg(chg, PR_INTERRUPT,
-				"Ignoring since pr_swap_in_progress\n");
+				"Ignoring for micro USB\n");
 		return IRQ_HANDLED;
 	}
 
-	mutex_lock(&chg->lock);
-	smblib_usb_typec_change(chg);
-	mutex_unlock(&chg->lock);
+	typec_mode = smblib_get_prop_typec_mode(chg);
+	if (chg->sink_src_mode != UNATTACHED_MODE
+			&& (typec_mode != chg->typec_mode))
+		smblib_handle_rp_change(chg, typec_mode);
+	chg->typec_mode = typec_mode;
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: cc-state-change; Type-C %s detected\n",
+				smblib_typec_mode_name[chg->typec_mode]);
+
+	power_supply_changed(chg->usb_psy);
+
+	return IRQ_HANDLED;
+}
+
+irqreturn_t typec_attach_detach_irq_handler(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+	u8 stat;
+	int rc;
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
+	rc = smblib_read(chg, TYPE_C_STATE_MACHINE_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATE_MACHINE_STATUS_REG rc=%d\n",
+			rc);
+		return IRQ_HANDLED;
+	}
+
+	if (stat & TYPEC_ATTACH_DETACH_STATE_BIT) {
+		rc = smblib_read(chg, TYPE_C_MISC_STATUS_REG, &stat);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't read TYPE_C_MISC_STATUS_REG rc=%d\n",
+				rc);
+			return IRQ_HANDLED;
+		}
+
+		if (stat & SNK_SRC_MODE_BIT) {
+			chg->sink_src_mode = SRC_MODE;
+			typec_sink_insertion(chg);
+		} else {
+			chg->sink_src_mode = SINK_MODE;
+			typec_src_insertion(chg);
+		}
+
+	} else {
+		switch (chg->sink_src_mode) {
+		case SRC_MODE:
+			typec_sink_removal(chg);
+			break;
+		case SINK_MODE:
+			typec_src_removal(chg);
+			break;
+		default:
+			break;
+		}
+
+		if (!chg->pr_swap_in_progress) {
+			chg->ok_to_pd = false;
+			chg->sink_src_mode = UNATTACHED_MODE;
+			chg->early_usb_attach = false;
+		}
+	}
+
+	power_supply_changed(chg->usb_psy);
+
 	return IRQ_HANDLED;
 }
 
@@ -3426,19 +3678,55 @@
 				const union power_supply_propval *val)
 {
 	int rc;
+	u8 stat = 0, orientation;
 
 	chg->pr_swap_in_progress = val->intval;
 
-	/*
-	 * call the cc changed irq to handle real removals while
-	 * PR_SWAP was in progress
-	 */
-	smblib_usb_typec_change(chg);
 	rc = smblib_masked_write(chg, TYPE_C_DEBOUNCE_OPTION_REG,
 			REDUCE_TCCDEBOUNCE_TO_2MS_BIT,
 			val->intval ? REDUCE_TCCDEBOUNCE_TO_2MS_BIT : 0);
 	if (rc < 0)
 		smblib_err(chg, "Couldn't set tCC debounce rc=%d\n", rc);
+
+	rc = smblib_masked_write(chg, TYPE_C_EXIT_STATE_CFG_REG,
+			BYPASS_VSAFE0V_DURING_ROLE_SWAP_BIT,
+			val->intval ? BYPASS_VSAFE0V_DURING_ROLE_SWAP_BIT : 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't set exit state cfg rc=%d\n", rc);
+
+	if (chg->pr_swap_in_progress) {
+		rc = smblib_read(chg, TYPE_C_MISC_STATUS_REG, &stat);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't read TYPE_C_MISC_STATUS_REG rc=%d\n",
+				rc);
+		}
+
+		orientation =
+			stat & CC_ORIENTATION_BIT ? TYPEC_CCOUT_VALUE_BIT : 0;
+		rc = smblib_masked_write(chg, TYPE_C_CCOUT_CONTROL_REG,
+			TYPEC_CCOUT_SRC_BIT | TYPEC_CCOUT_BUFFER_EN_BIT
+					| TYPEC_CCOUT_VALUE_BIT,
+			TYPEC_CCOUT_SRC_BIT | TYPEC_CCOUT_BUFFER_EN_BIT
+					| orientation);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't write TYPE_C_CCOUT_CONTROL_REG rc=%d\n",
+				rc);
+		}
+	} else {
+		rc = smblib_masked_write(chg, TYPE_C_CCOUT_CONTROL_REG,
+			TYPEC_CCOUT_SRC_BIT, 0);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't write TYPE_C_CCOUT_CONTROL_REG rc=%d\n",
+				rc);
+		}
+
+		/* enable DRP */
+		rc = smblib_masked_write(chg, TYPE_C_MODE_CFG_REG,
+				 TYPEC_POWER_ROLE_CMD_MASK, 0);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't enable DRP rc=%d\n", rc);
+	}
+
 	return 0;
 }
 
@@ -3461,6 +3749,9 @@
 	otg = !!(stat & U_USB_GROUND_NOVBUS_BIT);
 	if (chg->otg_present != otg)
 		smblib_notify_usb_host(chg, otg);
+	else
+		goto out;
+
 	chg->otg_present = otg;
 	if (!otg)
 		chg->boost_current_ua = 0;
@@ -3681,23 +3972,6 @@
 		return rc;
 	}
 
-	chg->pd_disallowed_votable_indirect
-		= create_votable("PD_DISALLOWED_INDIRECT", VOTE_SET_ANY,
-			smblib_pd_disallowed_votable_indirect_callback, chg);
-	if (IS_ERR(chg->pd_disallowed_votable_indirect)) {
-		rc = PTR_ERR(chg->pd_disallowed_votable_indirect);
-		chg->pd_disallowed_votable_indirect = NULL;
-		return rc;
-	}
-
-	chg->pd_allowed_votable = create_votable("PD_ALLOWED",
-					VOTE_SET_ANY, NULL, NULL);
-	if (IS_ERR(chg->pd_allowed_votable)) {
-		rc = PTR_ERR(chg->pd_allowed_votable);
-		chg->pd_allowed_votable = NULL;
-		return rc;
-	}
-
 	chg->awake_votable = create_votable("AWAKE", VOTE_SET_ANY,
 					smblib_awake_vote_callback,
 					chg);
@@ -3735,10 +4009,6 @@
 		destroy_votable(chg->dc_suspend_votable);
 	if (chg->usb_icl_votable)
 		destroy_votable(chg->usb_icl_votable);
-	if (chg->pd_disallowed_votable_indirect)
-		destroy_votable(chg->pd_disallowed_votable_indirect);
-	if (chg->pd_allowed_votable)
-		destroy_votable(chg->pd_allowed_votable);
 	if (chg->awake_votable)
 		destroy_votable(chg->awake_votable);
 	if (chg->chg_disable_votable)
@@ -3750,7 +4020,6 @@
 	int rc = 0;
 
 	mutex_init(&chg->lock);
-	mutex_init(&chg->otg_oc_lock);
 	INIT_WORK(&chg->bms_update_work, bms_update_work);
 	INIT_WORK(&chg->pl_update_work, pl_update_work);
 	INIT_WORK(&chg->jeita_update_work, jeita_update_work);
@@ -3763,6 +4032,7 @@
 	chg->fake_input_current_limited = -EINVAL;
 	chg->fake_batt_status = -EINVAL;
 	chg->jeita_configured = false;
+	chg->sink_src_mode = UNATTACHED_MODE;
 
 	switch (chg->mode) {
 	case PARALLEL_MASTER:
diff --git a/drivers/power/supply/qcom/smb5-lib.h b/drivers/power/supply/qcom/smb5-lib.h
index 5d6d3a6..e59b11b 100644
--- a/drivers/power/supply/qcom/smb5-lib.h
+++ b/drivers/power/supply/qcom/smb5-lib.h
@@ -56,6 +56,7 @@
 #define CTM_VOTER			"CTM_VOTER"
 #define SW_QC3_VOTER			"SW_QC3_VOTER"
 #define AICL_RERUN_VOTER		"AICL_RERUN_VOTER"
+#define SW_ICL_MAX_VOTER		"SW_ICL_MAX_VOTER"
 #define QNOVO_VOTER			"QNOVO_VOTER"
 #define BATT_PROFILE_VOTER		"BATT_PROFILE_VOTER"
 #define OTG_DELAY_VOTER			"OTG_DELAY_VOTER"
@@ -65,23 +66,43 @@
 #define PL_FCC_LOW_VOTER		"PL_FCC_LOW_VOTER"
 #define WBC_VOTER			"WBC_VOTER"
 #define HW_LIMIT_VOTER			"HW_LIMIT_VOTER"
-#define DYNAMIC_RP_VOTER		"DYNAMIC_RP_VOTER"
-#define DEFAULT_100MA_VOTER		"DEFAULT_100MA_VOTER"
 #define FORCE_RECHARGE_VOTER		"FORCE_RECHARGE_VOTER"
+#define AICL_THRESHOLD_VOTER		"AICL_THRESHOLD_VOTER"
 
 #define BOOST_BACK_STORM_COUNT	3
 #define WEAK_CHG_STORM_COUNT	8
 
 #define VBAT_TO_VRAW_ADC(v)		div_u64((u64)v * 1000000UL, 194637UL)
 
+#define SDP_100_MA			100000
+#define SDP_CURRENT_UA			500000
+#define CDP_CURRENT_UA			1500000
+#define DCP_CURRENT_UA			1500000
+#define HVDCP_CURRENT_UA		3000000
+#define TYPEC_DEFAULT_CURRENT_UA	900000
+#define TYPEC_MEDIUM_CURRENT_UA		1500000
+#define TYPEC_HIGH_CURRENT_UA		3000000
+
 enum smb_mode {
 	PARALLEL_MASTER = 0,
 	PARALLEL_SLAVE,
 	NUM_MODES,
 };
 
+enum sink_src_mode {
+	SINK_MODE,
+	SRC_MODE,
+	UNATTACHED_MODE,
+};
+
 enum {
 	BOOST_BACK_WA			= BIT(0),
+	WEAK_ADAPTER_WA			= BIT(1),
+};
+
+enum {
+	RERUN_AICL			= BIT(0),
+	RESTART_AICL			= BIT(1),
 };
 
 enum smb_irq_index {
@@ -228,11 +249,14 @@
 	struct smb_chg_param	fcc;
 	struct smb_chg_param	fv;
 	struct smb_chg_param	usb_icl;
+	struct smb_chg_param	icl_max_stat;
 	struct smb_chg_param	icl_stat;
 	struct smb_chg_param	otg_cl;
 	struct smb_chg_param	jeita_cc_comp_hot;
 	struct smb_chg_param	jeita_cc_comp_cold;
 	struct smb_chg_param	freq_switcher;
+	struct smb_chg_param	aicl_5v_threshold;
+	struct smb_chg_param	aicl_cont_threshold;
 };
 
 struct parallel_params {
@@ -259,17 +283,18 @@
 	struct smb_params	param;
 	struct smb_iio		iio;
 	int			*debug_mask;
+	int			*pd_disabled;
 	enum smb_mode		mode;
 	struct smb_chg_freq	chg_freq;
 	int			smb_version;
 	int			otg_delay_ms;
 	int			*weak_chg_icl_ua;
 	struct qpnp_vadc_chip	*vadc_dev;
+	bool			pd_not_supported;
 
 	/* locks */
 	struct mutex		lock;
 	struct mutex		ps_change_lock;
-	struct mutex		otg_oc_lock;
 
 	/* power supplies */
 	struct power_supply		*batt_psy;
@@ -296,8 +321,6 @@
 	struct votable		*fcc_votable;
 	struct votable		*fv_votable;
 	struct votable		*usb_icl_votable;
-	struct votable		*pd_disallowed_votable_indirect;
-	struct votable		*pd_allowed_votable;
 	struct votable		*awake_votable;
 	struct votable		*pl_disable_votable;
 	struct votable		*chg_disable_votable;
@@ -315,10 +338,17 @@
 	struct delayed_work	uusb_otg_work;
 	struct delayed_work	bb_removal_work;
 
-	/* cached status */
+	/* pd */
 	int			voltage_min_uv;
 	int			voltage_max_uv;
 	int			pd_active;
+	bool			pd_hard_reset;
+	bool			pr_swap_in_progress;
+	bool			early_usb_attach;
+	bool			ok_to_pd;
+	bool			typec_legacy;
+
+	/* cached status */
 	bool			system_suspend_supported;
 	int			boost_threshold_ua;
 	int			system_temp_level;
@@ -334,15 +364,10 @@
 	int			connector_type;
 	bool			otg_en;
 	bool			suspend_input_on_debug_batt;
-	int			otg_attempts;
-	int			vconn_attempts;
 	int			default_icl_ua;
 	int			otg_cl_ua;
 	bool			uusb_apsd_rerun_done;
-	bool			pd_hard_reset;
-	bool			typec_present;
 	int			fake_input_current_limited;
-	bool			pr_swap_in_progress;
 	int			typec_mode;
 	int			usb_icl_change_irq_enabled;
 	u32			jeita_status;
@@ -352,6 +377,15 @@
 	int			hw_max_icl_ua;
 	int			auto_recharge_soc;
 	bool			jeita_configured;
+	enum sink_src_mode	sink_src_mode;
+	bool			hw_die_temp_mitigation;
+	bool			hw_connector_mitigation;
+	int			connector_pull_up;
+	int			aicl_5v_threshold_mv;
+	int			default_aicl_5v_threshold_mv;
+	int			aicl_cont_threshold_mv;
+	int			default_aicl_cont_threshold_mv;
+	bool			aicl_max_reached;
 
 	/* workaround flag */
 	u32			wa_flags;
@@ -386,7 +420,7 @@
 int smblib_get_charge_param(struct smb_charger *chg,
 			    struct smb_chg_param *param, int *val_u);
 int smblib_get_usb_suspend(struct smb_charger *chg, int *suspend);
-
+int smblib_get_aicl_cont_threshold(struct smb_chg_param *param, u8 val_raw);
 int smblib_enable_charging(struct smb_charger *chg, bool enable);
 int smblib_set_charge_param(struct smb_charger *chg,
 			    struct smb_chg_param *param, int val_u);
@@ -403,6 +437,8 @@
 				int val_u, u8 *val_raw);
 int smblib_set_prop_boost_current(struct smb_charger *chg,
 				const union power_supply_propval *val);
+int smblib_set_aicl_cont_threshold(struct smb_chg_param *param,
+				int val_u, u8 *val_raw);
 int smblib_vbus_regulator_enable(struct regulator_dev *rdev);
 int smblib_vbus_regulator_disable(struct regulator_dev *rdev);
 int smblib_vbus_regulator_is_enabled(struct regulator_dev *rdev);
@@ -420,6 +456,7 @@
 irqreturn_t usb_source_change_irq_handler(int irq, void *data);
 irqreturn_t icl_change_irq_handler(int irq, void *data);
 irqreturn_t typec_state_change_irq_handler(int irq, void *data);
+irqreturn_t typec_attach_detach_irq_handler(int irq, void *data);
 irqreturn_t dc_plugin_irq_handler(int irq, void *data);
 irqreturn_t high_duty_cycle_irq_handler(int irq, void *data);
 irqreturn_t switcher_power_ok_irq_handler(int irq, void *data);
@@ -454,6 +491,8 @@
 				union power_supply_propval *val);
 int smblib_get_prop_batt_charge_counter(struct smb_charger *chg,
 				union power_supply_propval *val);
+int smblib_get_prop_batt_cycle_count(struct smb_charger *chg,
+				union power_supply_propval *val);
 int smblib_set_prop_input_suspend(struct smb_charger *chg,
 				const union power_supply_propval *val);
 int smblib_set_prop_batt_capacity(struct smb_charger *chg,
@@ -481,12 +520,12 @@
 				union power_supply_propval *val);
 int smblib_get_prop_usb_voltage_max(struct smb_charger *chg,
 				union power_supply_propval *val);
+int smblib_get_prop_usb_voltage_max_design(struct smb_charger *chg,
+				union power_supply_propval *val);
 int smblib_get_prop_typec_cc_orientation(struct smb_charger *chg,
 				union power_supply_propval *val);
 int smblib_get_prop_typec_power_role(struct smb_charger *chg,
 				union power_supply_propval *val);
-int smblib_get_prop_pd_allowed(struct smb_charger *chg,
-				union power_supply_propval *val);
 int smblib_get_prop_input_current_settled(struct smb_charger *chg,
 				union power_supply_propval *val);
 int smblib_get_prop_input_voltage_settled(struct smb_charger *chg,
@@ -519,7 +558,7 @@
 				union power_supply_propval *val);
 int smblib_dp_dm(struct smb_charger *chg, int val);
 int smblib_disable_hw_jeita(struct smb_charger *chg, bool disable);
-int smblib_rerun_aicl(struct smb_charger *chg);
+int smblib_run_aicl(struct smb_charger *chg, int type);
 int smblib_set_icl_current(struct smb_charger *chg, int icl_ua);
 int smblib_get_icl_current(struct smb_charger *chg, int *icl_ua);
 int smblib_get_charge_current(struct smb_charger *chg, int *total_current_ua);
@@ -530,6 +569,8 @@
 int smblib_stat_sw_override_cfg(struct smb_charger *chg, bool override);
 int smblib_configure_wdog(struct smb_charger *chg, bool enable);
 int smblib_force_vbus_voltage(struct smb_charger *chg, u8 val);
+int smblib_configure_hvdcp_apsd(struct smb_charger *chg, bool enable);
+int smblib_icl_override(struct smb_charger *chg, bool override);
 
 int smblib_init(struct smb_charger *chg);
 int smblib_deinit(struct smb_charger *chg);
diff --git a/drivers/power/supply/qcom/smb5-reg.h b/drivers/power/supply/qcom/smb5-reg.h
index e199087..e8cdda3 100644
--- a/drivers/power/supply/qcom/smb5-reg.h
+++ b/drivers/power/supply/qcom/smb5-reg.h
@@ -105,6 +105,8 @@
 /********************************
  *  DCDC Peripheral Registers  *
  ********************************/
+#define ICL_MAX_STATUS_REG			(DCDC_BASE + 0x06)
+#define ICL_STATUS_REG				(DCDC_BASE + 0x07)
 #define AICL_ICL_STATUS_REG			(DCDC_BASE + 0x08)
 
 #define AICL_STATUS_REG				(DCDC_BASE + 0x0A)
@@ -144,6 +146,19 @@
 #define SHIP_MODE_REG				(BATIF_BASE + 0x40)
 #define SHIP_MODE_EN_BIT			BIT(0)
 
+#define BATIF_ADC_CHANNEL_EN_REG		(BATIF_BASE + 0x82)
+#define IBATT_CHANNEL_EN_BIT			BIT(6)
+#define CONN_THM_CHANNEL_EN_BIT			BIT(4)
+#define DIE_TEMP_CHANNEL_EN_BIT			BIT(2)
+
+#define BATIF_ADC_INTERNAL_PULL_UP_REG		(BATIF_BASE + 0x86)
+#define INTERNAL_PULL_UP_CONN_THM_MASK		GENMASK(5, 4)
+#define CONN_THM_SHIFT				4
+#define INTERNAL_PULL_NO_PULL			0x00
+#define INTERNAL_PULL_30K_PULL			0x01
+#define INTERNAL_PULL_100K_PULL			0x02
+#define INTERNAL_PULL_400K_PULL			0x03
+
 /********************************
  *  USBIN Peripheral Registers  *
  ********************************/
@@ -215,6 +230,7 @@
 #define HVDCP_AUTH_ALG_EN_CFG_BIT		BIT(6)
 #define HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT	BIT(5)
 #define BC1P2_SRC_DETECT_BIT			BIT(3)
+#define HVDCP_EN_BIT				BIT(2)
 
 #define USBIN_OPTIONS_2_CFG_REG			(USBIN_BASE + 0x63)
 #define FLOAT_OPTIONS_MASK			GENMASK(2, 0)
@@ -235,6 +251,8 @@
 #define USBIN_AICL_OPTIONS_CFG_REG		(USBIN_BASE + 0x80)
 #define USBIN_AICL_ADC_EN_BIT			BIT(3)
 
+#define USBIN_5V_AICL_THRESHOLD_REG		(USBIN_BASE + 0x81)
+#define USBIN_CONT_AICL_THRESHOLD_REG		(USBIN_BASE + 0x84)
 /********************************
  *  DCIN Peripheral Registers   *
  ********************************/
@@ -262,6 +280,9 @@
 #define SRC_RA_OPEN_BIT				BIT(1)
 #define AUDIO_ACCESS_RA_RA_BIT			BIT(0)
 
+#define TYPE_C_STATE_MACHINE_STATUS_REG		(TYPEC_BASE + 0x09)
+#define TYPEC_ATTACH_DETACH_STATE_BIT		BIT(5)
+
 #define TYPE_C_MISC_STATUS_REG			(TYPEC_BASE + 0x0B)
 #define SNK_SRC_MODE_BIT			BIT(6)
 #define TYPEC_VBUS_ERROR_STATUS_BIT		BIT(4)
@@ -269,6 +290,7 @@
 #define CC_ATTACHED_BIT				BIT(0)
 
 #define LEGACY_CABLE_STATUS_REG			(TYPEC_BASE + 0x0D)
+#define TYPEC_LEGACY_CABLE_STATUS_BIT		BIT(1)
 #define TYPEC_NONCOMP_LEGACY_CABLE_STATUS_BIT	BIT(0)
 
 #define TYPEC_U_USB_STATUS_REG			(TYPEC_BASE + 0x0F)
@@ -276,7 +298,8 @@
 #define U_USB_GROUND_BIT			BIT(4)
 
 #define TYPE_C_MODE_CFG_REG			(TYPEC_BASE + 0x44)
-#define TYPEC_POWER_ROLE_CMD_MASK		GENMASK(2, 0)
+#define TYPEC_POWER_ROLE_CMD_MASK		GENMASK(2, 1)
+#define EN_TRY_SNK_BIT				BIT(4)
 #define EN_SRC_ONLY_BIT				BIT(2)
 #define EN_SNK_ONLY_BIT				BIT(1)
 #define TYPEC_DISABLE_CMD_BIT			BIT(0)
@@ -287,10 +310,12 @@
 #define VCONN_EN_SRC_BIT			BIT(0)
 
 #define TYPE_C_CCOUT_CONTROL_REG		(TYPEC_BASE + 0x48)
+#define TYPEC_CCOUT_BUFFER_EN_BIT		BIT(2)
 #define TYPEC_CCOUT_VALUE_BIT			BIT(1)
 #define TYPEC_CCOUT_SRC_BIT			BIT(0)
 
 #define TYPE_C_EXIT_STATE_CFG_REG		(TYPEC_BASE + 0x50)
+#define BYPASS_VSAFE0V_DURING_ROLE_SWAP_BIT	BIT(3)
 #define EXIT_SNK_BASED_ON_CC_BIT		BIT(0)
 
 #define TYPE_C_INTERRUPT_EN_CFG_1_REG			(TYPEC_BASE + 0x5E)
@@ -318,12 +343,12 @@
 #define TYPEC_U_USB_CFG_REG			(TYPEC_BASE + 0x70)
 #define EN_MICRO_USB_MODE_BIT			BIT(0)
 
-#define TYPEC_MICRO_USB_MODE_REG		(TYPEC_BASE + 0x70)
+#define TYPEC_MICRO_USB_MODE_REG		(TYPEC_BASE + 0x73)
 #define MICRO_USB_MODE_ONLY_BIT			BIT(0)
 /********************************
  *  MISC Peripheral Registers  *
  ********************************/
-#define TEMP_RANGE_STATUS_REG			(MISC_BASE + 0x06)
+#define MISC_TEMP_RANGE_STATUS_REG		(MISC_BASE + 0x06)
 #define THERM_REG_ACTIVE_BIT			BIT(6)
 #define TLIM_BIT				BIT(5)
 #define TEMP_RANGE_MASK				GENMASK(4, 1)
@@ -333,10 +358,17 @@
 #define TEMP_BELOW_RANGE_BIT			BIT(1)
 #define THERMREG_DISABLED_BIT			BIT(0)
 
+#define MISC_DIE_TEMP_STATUS_REG		(MISC_BASE + 0x07)
+#define DIE_TEMP_SHDN_BIT			BIT(3)
+#define DIE_TEMP_RST_BIT			BIT(2)
+#define DIE_TEMP_UB_BIT				BIT(1)
+#define DIE_TEMP_LB_BIT				BIT(0)
+
 #define BARK_BITE_WDOG_PET_REG			(MISC_BASE + 0x43)
 #define BARK_BITE_WDOG_PET_BIT			BIT(0)
 
 #define AICL_CMD_REG				(MISC_BASE + 0x44)
+#define RESTART_AICL_BIT			BIT(1)
 #define RERUN_AICL_BIT				BIT(0)
 
 #define MISC_SMB_EN_CMD_REG			(MISC_BASE + 0x48)
@@ -356,6 +388,16 @@
 #define BARK_WDOG_TIMEOUT_MASK			GENMASK(3, 2)
 #define BITE_WDOG_TIMEOUT_MASK			GENMASK(1, 0)
 
+#define MISC_THERMREG_SRC_CFG_REG		(MISC_BASE + 0x70)
+#define THERMREG_SW_ICL_ADJUST_BIT		BIT(7)
+#define DIE_ADC_SEL_BIT				BIT(6)
+#define THERMREG_SMB_ADC_SRC_EN_BIT		BIT(5)
+#define THERMREG_CONNECTOR_ADC_SRC_EN_BIT	BIT(4)
+#define SKIN_ADC_CFG_BIT			BIT(3)
+#define THERMREG_SKIN_ADC_SRC_EN_BIT		BIT(2)
+#define THERMREG_DIE_ADC_SRC_EN_BIT		BIT(1)
+#define THERMREG_DIE_CMP_SRC_EN_BIT		BIT(0)
+
 #define MISC_SMB_CFG_REG			(MISC_BASE + 0x90)
 #define SMB_EN_SEL_BIT				BIT(4)
 #define CP_EN_POLARITY_CFG_BIT			BIT(3)
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
index 14bde0d..5b10b50 100644
--- a/drivers/powercap/powercap_sys.c
+++ b/drivers/powercap/powercap_sys.c
@@ -538,6 +538,7 @@
 
 	power_zone->id = result;
 	idr_init(&power_zone->idr);
+	result = -ENOMEM;
 	power_zone->name = kstrdup(name, GFP_KERNEL);
 	if (!power_zone->name)
 		goto err_name_alloc;
diff --git a/drivers/pwm/pwm-qti-lpg.c b/drivers/pwm/pwm-qti-lpg.c
index 31f5204..d24bef1 100644
--- a/drivers/pwm/pwm-qti-lpg.c
+++ b/drivers/pwm/pwm-qti-lpg.c
@@ -19,10 +19,12 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/nvmem-consumer.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/platform_device.h>
 #include <linux/pwm.h>
+#include <linux/qpnp/qpnp-pbs.h>
 #include <linux/regmap.h>
 #include <linux/slab.h>
 #include <linux/types.h>
@@ -105,6 +107,34 @@
 #define LPG_LUT_VALUE_MSB_MASK		BIT(0)
 #define LPG_LUT_COUNT_MAX		47
 
+/* LPG config settings in SDAM */
+#define SDAM_REG_PBS_SEQ_EN			0x42
+#define PBS_SW_TRG_BIT				BIT(0)
+
+#define SDAM_REG_RAMP_STEP_DURATION		0x47
+
+#define SDAM_LUT_EN_OFFSET			0x0
+#define SDAM_PATTERN_CONFIG_OFFSET		0x1
+#define SDAM_END_INDEX_OFFSET			0x3
+#define SDAM_START_INDEX_OFFSET			0x4
+#define SDAM_PBS_SCRATCH_LUT_COUNTER_OFFSET	0x6
+
+/* SDAM_REG_LUT_EN */
+#define SDAM_LUT_EN_BIT				BIT(0)
+
+/* SDAM_REG_PATTERN_CONFIG */
+#define SDAM_PATTERN_LOOP_ENABLE		BIT(3)
+#define SDAM_PATTERN_RAMP_TOGGLE		BIT(2)
+#define SDAM_PATTERN_EN_PAUSE_END		BIT(1)
+#define SDAM_PATTERN_EN_PAUSE_START		BIT(0)
+
+/* SDAM_REG_PAUSE_MULTIPLIER */
+#define SDAM_PAUSE_START_SHIFT			4
+#define SDAM_PAUSE_START_MASK			GENMASK(7, 4)
+#define SDAM_PAUSE_END_MASK			GENMASK(3, 0)
+
+#define SDAM_LUT_COUNT_MAX			64
+
 enum lpg_src {
 	LUT_PATTERN = 0,
 	PWM_VALUE,
@@ -151,6 +181,7 @@
 	u32				lpg_idx;
 	u32				reg_base;
 	u32				max_pattern_length;
+	u32				lpg_sdam_base;
 	u8				src_sel;
 	u8				subtype;
 	bool				lut_written;
@@ -165,7 +196,11 @@
 	struct qpnp_lpg_channel	*lpgs;
 	struct qpnp_lpg_lut	*lut;
 	struct mutex		bus_lock;
+	struct nvmem_device	*sdam_nvmem;
+	struct device_node	*pbs_dev_node;
 	u32			num_lpgs;
+	unsigned long		pbs_en_bitmap;
+	bool			use_sdam;
 };
 
 static int qpnp_lpg_read(struct qpnp_lpg_channel *lpg, u16 addr, u8 *val)
@@ -192,7 +227,7 @@
 	mutex_lock(&lpg->chip->bus_lock);
 	rc = regmap_write(lpg->chip->regmap, lpg->reg_base + addr, val);
 	if (rc < 0)
-		dev_err(lpg->chip->dev, "Write addr 0x%x with value %d failed, rc=%d\n",
+		dev_err(lpg->chip->dev, "Write addr 0x%x with value 0x%x failed, rc=%d\n",
 				lpg->reg_base + addr, val, rc);
 	mutex_unlock(&lpg->chip->bus_lock);
 
@@ -245,6 +280,90 @@
 	return rc;
 }
 
+static int qpnp_sdam_write(struct qpnp_lpg_chip *chip, u16 addr, u8 val)
+{
+	int rc;
+
+	mutex_lock(&chip->bus_lock);
+	rc = nvmem_device_write(chip->sdam_nvmem, addr, 1, &val);
+	if (rc < 0)
+		dev_err(chip->dev, "write SDAM addr 0x%x failed, rc=%d\n",
+				addr, rc);
+
+	mutex_unlock(&chip->bus_lock);
+
+	return rc > 0 ? 0 : rc;
+}
+
+static int qpnp_lpg_sdam_write(struct qpnp_lpg_channel *lpg, u16 addr, u8 val)
+{
+	struct qpnp_lpg_chip *chip = lpg->chip;
+	int rc;
+
+	mutex_lock(&chip->bus_lock);
+	rc = nvmem_device_write(chip->sdam_nvmem,
+			lpg->lpg_sdam_base + addr, 1, &val);
+	if (rc < 0)
+		dev_err(chip->dev, "write SDAM addr 0x%x failed, rc=%d\n",
+				lpg->lpg_sdam_base + addr, rc);
+
+	mutex_unlock(&chip->bus_lock);
+
+	return rc > 0 ? 0 : rc;
+}
+
+static int qpnp_lpg_sdam_masked_write(struct qpnp_lpg_channel *lpg,
+					u16 addr, u8 mask, u8 val)
+{
+	int rc;
+	u8 tmp;
+	struct qpnp_lpg_chip *chip = lpg->chip;
+
+	mutex_lock(&chip->bus_lock);
+
+	rc = nvmem_device_read(chip->sdam_nvmem,
+			lpg->lpg_sdam_base + addr, 1, &tmp);
+	if (rc < 0) {
+		dev_err(chip->dev, "Read SDAM addr %d failed, rc=%d\n",
+				lpg->lpg_sdam_base + addr, rc);
+		goto unlock;
+	}
+
+	tmp = tmp & ~mask;
+	tmp |= val & mask;
+	rc = nvmem_device_write(chip->sdam_nvmem,
+			lpg->lpg_sdam_base + addr, 1, &tmp);
+	if (rc < 0)
+		dev_err(chip->dev, "write SDAM addr %d failed, rc=%d\n",
+				lpg->lpg_sdam_base + addr, rc);
+
+unlock:
+	mutex_unlock(&chip->bus_lock);
+
+	return rc > 0 ? 0 : rc;
+}
+
+static int qpnp_lut_sdam_write(struct qpnp_lpg_lut *lut,
+		u16 addr, u8 *val, size_t length)
+{
+	struct qpnp_lpg_chip *chip = lut->chip;
+	int rc;
+
+	if (addr >= SDAM_LUT_COUNT_MAX)
+		return -EINVAL;
+
+	mutex_lock(&chip->bus_lock);
+	rc = nvmem_device_write(chip->sdam_nvmem,
+			lut->reg_base + addr, length, val);
+	if (rc < 0)
+		dev_err(chip->dev, "write SDAM addr %d failed, rc=%d\n",
+				lut->reg_base + addr, rc);
+
+	mutex_unlock(&chip->bus_lock);
+
+	return rc > 0 ? 0 : rc;
+}
+
 static struct qpnp_lpg_channel *pwm_dev_to_qpnp_lpg(struct pwm_chip *pwm_chip,
 				struct pwm_device *pwm) {
 
@@ -365,14 +484,111 @@
 	return rc;
 }
 
-static int qpnp_lpg_set_lut_pattern(struct qpnp_lpg_channel *lpg,
+static int qpnp_lpg_set_sdam_lut_pattern(struct qpnp_lpg_channel *lpg,
 		unsigned int *pattern, unsigned int length)
 {
 	struct qpnp_lpg_lut *lut = lpg->chip->lut;
 	int i, rc = 0;
-	u16 full_duty_value, pwm_values[LPG_LUT_COUNT_MAX + 1] = {0};
+	u8 val[SDAM_LUT_COUNT_MAX + 1], addr;
+
+	if (length > lpg->max_pattern_length) {
+		dev_err(lpg->chip->dev, "new pattern length (%d) larger than predefined (%d)\n",
+				length, lpg->max_pattern_length);
+		return -EINVAL;
+	}
+
+	/* Program LUT pattern */
+	mutex_lock(&lut->lock);
+	addr = lpg->ramp_config.lo_idx;
+	for (i = 0; i < length; i++)
+		val[i] = pattern[i] * 255 / 100;
+
+	rc = qpnp_lut_sdam_write(lut, addr, val, length);
+	if (rc < 0) {
+		dev_err(lpg->chip->dev, "Write pattern in SDAM failed, rc=%d\n",
+				rc);
+		goto unlock;
+	}
+
+	lpg->ramp_config.pattern_length = length;
+unlock:
+	mutex_unlock(&lut->lock);
+
+	return rc;
+}
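
qpnp_lpg_set_sdam_lut_pattern() stores each entry as a single byte, so the duty-cycle percentages coming from devicetree are scaled onto the 0-255 range with integer truncation. A standalone sketch of that mapping with an illustrative pattern:

#include <stdio.h>

int main(void)
{
	unsigned int pattern[] = { 0, 25, 50, 75, 100 };	/* percent */
	unsigned char val[5];
	int i;

	for (i = 0; i < 5; i++)
		val[i] = pattern[i] * 255 / 100;

	/* prints: 0 63 127 191 255 */
	for (i = 0; i < 5; i++)
		printf("%d ", val[i]);
	printf("\n");
	return 0;
}
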
+
+static int qpnp_lpg_set_sdam_ramp_config(struct qpnp_lpg_channel *lpg)
+{
+	struct lpg_ramp_config *ramp = &lpg->ramp_config;
+	u8 addr, mask, val;
+	int rc = 0;
+
+	/* clear the PBS scratchpad register */
+	val = 0;
+	rc = qpnp_lpg_sdam_write(lpg,
+			SDAM_PBS_SCRATCH_LUT_COUNTER_OFFSET, val);
+	if (rc < 0) {
+		dev_err(lpg->chip->dev, "Write SDAM_PBS_SCRATCH_LUT_COUNTER_OFFSET failed, rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	/* Set ramp step duration, one WAIT_TICK is 7.8ms */
+	val = (ramp->step_ms * 1000 / 7800) & 0xff;
+	if (val > 0)
+		val--;
+	addr = SDAM_REG_RAMP_STEP_DURATION;
+	rc = qpnp_sdam_write(lpg->chip, addr, val);
+	if (rc < 0) {
+		dev_err(lpg->chip->dev, "Write SDAM_REG_RAMP_STEP_DURATION failed, rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	/* Set hi_idx and lo_idx */
+	rc = qpnp_lpg_sdam_write(lpg, SDAM_END_INDEX_OFFSET, ramp->hi_idx);
+	if (rc < 0) {
+		dev_err(lpg->chip->dev, "Write SDAM_REG_END_INDEX failed, rc=%d\n",
+					rc);
+		return rc;
+	}
+
+	rc = qpnp_lpg_sdam_write(lpg, SDAM_START_INDEX_OFFSET,
+						ramp->lo_idx);
+	if (rc < 0) {
+		dev_err(lpg->chip->dev, "Write SDAM_REG_START_INDEX failed, rc=%d\n",
+					rc);
+		return rc;
+	}
+
+	/* Set LPG_PATTERN_CONFIG */
+	addr = SDAM_PATTERN_CONFIG_OFFSET;
+	mask = SDAM_PATTERN_LOOP_ENABLE;
+	val = 0;
+	if (ramp->pattern_repeat)
+		val |= SDAM_PATTERN_LOOP_ENABLE;
+
+	rc = qpnp_lpg_sdam_masked_write(lpg, addr, mask, val);
+	if (rc < 0) {
+		dev_err(lpg->chip->dev, "Write SDAM_REG_PATTERN_CONFIG failed, rc=%d\n",
+					rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int qpnp_lpg_set_lut_pattern(struct qpnp_lpg_channel *lpg,
+		unsigned int *pattern, unsigned int length)
+{
+	struct qpnp_lpg_lut *lut = lpg->chip->lut;
+	u16 full_duty_value, pwm_values[SDAM_LUT_COUNT_MAX + 1] = {0};
+	int i, rc = 0;
 	u8 lsb, msb, addr;
 
+	if (lpg->chip->use_sdam)
+		return qpnp_lpg_set_sdam_lut_pattern(lpg, pattern, length);
+
 	if (length > lpg->max_pattern_length) {
 		dev_err(lpg->chip->dev, "new pattern length (%d) larger than predefined (%d)\n",
 				length, lpg->max_pattern_length);
@@ -426,6 +642,9 @@
 	u8 lsb, msb, addr, mask, val;
 	int rc = 0;
 
+	if (lpg->chip->use_sdam)
+		return qpnp_lpg_set_sdam_ramp_config(lpg);
+
 	/* Set ramp step duration */
 	lsb = ramp->step_ms & 0xff;
 	msb = ramp->step_ms >> 8;
@@ -507,6 +726,8 @@
 static void __qpnp_lpg_calc_pwm_period(int period_ns,
 			struct lpg_pwm_config *pwm_config)
 {
+	struct qpnp_lpg_channel *lpg = container_of(pwm_config,
+			struct qpnp_lpg_channel, pwm_config);
 	struct lpg_pwm_config configs[NUM_PWM_SIZE];
 	int i, j, m, n;
 	int tmp1, tmp2;
@@ -522,7 +743,12 @@
 	 *
 	 * Searching the closest settings for the requested PWM period.
 	 */
-	for (n = 0; n < ARRAY_SIZE(pwm_size); n++) {
+	if (lpg->chip->use_sdam)
+		/* SDAM pattern control can only use 9 bit resolution */
+		n = 1;
+	else
+		n = 0;
+	for (; n < ARRAY_SIZE(pwm_size); n++) {
 		pwm_clk_period_ns = period_ns >> pwm_size[n];
 		for (i = ARRAY_SIZE(clk_freq_hz) - 1; i >= 0; i--) {
 			for (j = 0; j < ARRAY_SIZE(clk_prediv); j++) {
@@ -654,6 +880,45 @@
 	return rc;
 }
 
+static int qpnp_lpg_pbs_trigger_enable(struct qpnp_lpg_channel *lpg, bool en)
+{
+	struct qpnp_lpg_chip *chip = lpg->chip;
+	int rc = 0;
+
+	if (en) {
+		if (chip->pbs_en_bitmap == 0) {
+			rc = qpnp_sdam_write(chip, SDAM_REG_PBS_SEQ_EN,
+					PBS_SW_TRG_BIT);
+			if (rc < 0) {
+				dev_err(chip->dev, "Write SDAM_REG_PBS_SEQ_EN failed, rc=%d\n",
+						rc);
+				return rc;
+			}
+
+			rc = qpnp_pbs_trigger_event(chip->pbs_dev_node,
+					PBS_SW_TRG_BIT);
+			if (rc < 0) {
+				dev_err(chip->dev, "Failed to trigger PBS, rc=%d\n",
+						rc);
+				return rc;
+			}
+		}
+		set_bit(lpg->lpg_idx, &chip->pbs_en_bitmap);
+	} else {
+		clear_bit(lpg->lpg_idx, &chip->pbs_en_bitmap);
+		if (chip->pbs_en_bitmap == 0) {
+			rc = qpnp_sdam_write(chip, SDAM_REG_PBS_SEQ_EN, 0);
+			if (rc < 0) {
+				dev_err(chip->dev, "Write SDAM_REG_PBS_SEQ_EN failed, rc=%d\n",
+						rc);
+				return rc;
+			}
+		}
+	}
+
+	return rc;
+}
+
 static int qpnp_lpg_pwm_src_enable(struct qpnp_lpg_channel *lpg, bool en)
 {
 	struct qpnp_lpg_chip *chip = lpg->chip;
@@ -665,7 +930,7 @@
 					LPG_EN_RAMP_GEN_MASK;
 	val = lpg->src_sel << LPG_PWM_SRC_SELECT_SHIFT;
 
-	if (lpg->src_sel == LUT_PATTERN)
+	if (lpg->src_sel == LUT_PATTERN && !chip->use_sdam)
 		val |= 1 << LPG_EN_RAMP_GEN_SHIFT;
 
 	if (en)
@@ -678,6 +943,27 @@
 		return rc;
 	}
 
+	if (chip->use_sdam) {
+		if (lpg->src_sel == LUT_PATTERN && en) {
+			val = SDAM_LUT_EN_BIT;
+			en = true;
+		} else {
+			val = 0;
+			en = false;
+		}
+
+		rc = qpnp_lpg_sdam_write(lpg, SDAM_LUT_EN_OFFSET, val);
+		if (rc < 0) {
+			dev_err(chip->dev, "Write SDAM_REG_LUT_EN failed, rc=%d\n",
+					rc);
+			return rc;
+		}
+
+		qpnp_lpg_pbs_trigger_enable(lpg, en);
+
+		return rc;
+	}
+
 	if (lpg->src_sel == LUT_PATTERN && en) {
 		mutex_lock(&lut->lock);
 		val = 1 << lpg->lpg_idx;
@@ -697,6 +983,7 @@
 	struct qpnp_lpg_channel *lpg;
 	enum lpg_src src_sel;
 	int rc;
+	bool is_enabled;
 
 	lpg = pwm_dev_to_qpnp_lpg(pwm_chip, pwm);
 	if (lpg == NULL) {
@@ -714,6 +1001,23 @@
 	if (src_sel == lpg->src_sel)
 		return 0;
 
+	is_enabled = pwm_is_enabled(pwm);
+	if (is_enabled) {
+		/*
+		 * Disable the channel first, then enable it later to make
+		 * sure the output type is changed successfully. This is
+		 * especially useful in the SDAM use case to stop the PBS
+		 * sequence when changing the PWM output type from
+		 * MODULATED to FIXED.
+		 */
+		rc = qpnp_lpg_pwm_src_enable(lpg, false);
+		if (rc < 0) {
+			dev_err(pwm_chip->dev, "Enable PWM output failed for channel %d, rc=%d\n",
+					lpg->lpg_idx, rc);
+			return rc;
+		}
+	}
+
 	if (src_sel == LUT_PATTERN) {
 		/* program LUT if it's never been programmed */
 		if (!lpg->lut_written) {
@@ -738,7 +1042,14 @@
 
 	lpg->src_sel = src_sel;
 
-	if (pwm_is_enabled(pwm)) {
+	if (is_enabled) {
+		rc = qpnp_lpg_set_pwm_config(lpg);
+		if (rc < 0) {
+			dev_err(pwm_chip->dev, "Config PWM failed for channel %d, rc=%d\n",
+							lpg->lpg_idx, rc);
+			return rc;
+		}
+
 		rc = qpnp_lpg_pwm_src_enable(lpg, true);
 		if (rc < 0) {
 			dev_err(pwm_chip->dev, "Enable PWM output failed for channel %d, rc=%d\n",
@@ -979,7 +1290,7 @@
 	struct qpnp_lpg_channel *lpg;
 	struct lpg_ramp_config *ramp;
 	int rc = 0, i;
-	u32 base, length, lpg_chan_id, tmp;
+	u32 base, length, lpg_chan_id, tmp, max_count;
 	const __be32 *addr;
 
 	addr = of_get_address(chip->dev->of_node, 0, NULL, NULL);
@@ -1010,18 +1321,47 @@
 		}
 	}
 
-	addr = of_get_address(chip->dev->of_node, 1, NULL, NULL);
-	if (!addr) {
-		pr_debug("NO LUT address assigned\n");
-		return 0;
-	}
-
 	chip->lut = devm_kmalloc(chip->dev, sizeof(*chip->lut), GFP_KERNEL);
 	if (!chip->lut)
 		return -ENOMEM;
 
+	chip->sdam_nvmem = devm_nvmem_device_get(chip->dev, "ppg_sdam");
+	if (IS_ERR_OR_NULL(chip->sdam_nvmem)) {
+		if (PTR_ERR(chip->sdam_nvmem) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+
+		addr = of_get_address(chip->dev->of_node, 1, NULL, NULL);
+		if (!addr) {
+			pr_debug("NO LUT address assigned\n");
+			devm_kfree(chip->dev, chip->lut);
+			chip->lut = NULL;
+			return 0;
+		}
+
+		chip->lut->reg_base = be32_to_cpu(*addr);
+		max_count = LPG_LUT_COUNT_MAX;
+	} else {
+		chip->use_sdam = true;
+		chip->pbs_dev_node = of_parse_phandle(chip->dev->of_node,
+				"qcom,pbs-client", 0);
+		if (!chip->pbs_dev_node) {
+			dev_err(chip->dev, "Missing qcom,pbs-client property\n");
+			return -EINVAL;
+		}
+
+		rc = of_property_read_u32(chip->dev->of_node,
+				"qcom,lut-sdam-base",
+				&chip->lut->reg_base);
+		if (rc < 0) {
+			dev_err(chip->dev, "Read qcom,lut-sdam-base failed, rc=%d\n",
+					rc);
+			return rc;
+		}
+
+		max_count = SDAM_LUT_COUNT_MAX;
+	}
+
 	chip->lut->chip = chip;
-	chip->lut->reg_base = be32_to_cpu(*addr);
 	mutex_init(&chip->lut->lock);
 
 	rc = of_property_count_elems_of_size(chip->dev->of_node,
@@ -1033,13 +1373,13 @@
 	}
 
 	length = rc;
-	if (length > LPG_LUT_COUNT_MAX) {
+	if (length > max_count) {
 		dev_err(chip->dev, "qcom,lut-patterns length %d exceed max %d\n",
-				length, LPG_LUT_COUNT_MAX);
+				length, max_count);
 		return -EINVAL;
 	}
 
-	chip->lut->pattern = devm_kcalloc(chip->dev, LPG_LUT_COUNT_MAX,
+	chip->lut->pattern = devm_kcalloc(chip->dev, max_count,
 			sizeof(*chip->lut->pattern), GFP_KERNEL);
 	if (!chip->lut->pattern)
 		return -ENOMEM;
@@ -1066,12 +1406,24 @@
 			return rc;
 		}
 
-		if (lpg_chan_id > chip->num_lpgs) {
+		if (lpg_chan_id < 1 || lpg_chan_id > chip->num_lpgs) {
 			dev_err(chip->dev, "lpg-chann-id %d is out of range 1~%d\n",
 					lpg_chan_id, chip->num_lpgs);
 			return -EINVAL;
 		}
 
+		if (chip->use_sdam) {
+			rc = of_property_read_u32(child,
+					"qcom,lpg-sdam-base",
+					&tmp);
+			if (rc < 0) {
+				dev_err(chip->dev, "get qcom,lpg-sdam-base failed for lpg%d, rc=%d\n",
+						lpg_chan_id, rc);
+				return rc;
+			}
+			chip->lpgs[lpg_chan_id - 1].lpg_sdam_base = tmp;
+		}
+
 		/* lpg channel id is indexed from 1 in hardware */
 		lpg = &chip->lpgs[lpg_chan_id - 1];
 		ramp = &lpg->ramp_config;
@@ -1091,9 +1443,9 @@
 			return rc;
 		}
 		ramp->lo_idx = (u8)tmp;
-		if (ramp->lo_idx >= LPG_LUT_COUNT_MAX) {
+		if (ramp->lo_idx >= max_count) {
 			dev_err(chip->dev, "qcom,ramp-low-index should less than max %d\n",
-					LPG_LUT_COUNT_MAX);
+						max_count);
 			return -EINVAL;
 		}
 
@@ -1105,14 +1457,14 @@
 		}
 		ramp->hi_idx = (u8)tmp;
 
-		if (ramp->hi_idx > LPG_LUT_COUNT_MAX) {
+		if (ramp->hi_idx > max_count) {
 			dev_err(chip->dev, "qcom,ramp-high-index shouldn't exceed max %d\n",
-						LPG_LUT_COUNT_MAX);
+						max_count);
 			return -EINVAL;
 		}
 
-		if (ramp->hi_idx <= ramp->lo_idx) {
-			dev_err(chip->dev, "high-index(%d) should be larger than low-index(%d)\n",
+		if (chip->use_sdam && ramp->hi_idx <= ramp->lo_idx) {
+			dev_err(chip->dev, "high-index(%d) should be larger than low-index(%d) when SDAM used\n",
 						ramp->hi_idx, ramp->lo_idx);
 			return -EINVAL;
 		}
@@ -1121,6 +1473,12 @@
 		ramp->pattern = &chip->lut->pattern[ramp->lo_idx];
 		lpg->max_pattern_length = ramp->pattern_length;
 
+		ramp->pattern_repeat = of_property_read_bool(child,
+				"qcom,ramp-pattern-repeat");
+
+		if (chip->use_sdam)
+			continue;
+
 		rc = of_property_read_u32(child,
 				"qcom,ramp-pause-hi-count", &tmp);
 		if (rc < 0)
@@ -1138,9 +1496,6 @@
 		ramp->ramp_dir_low_to_hi = of_property_read_bool(child,
 				"qcom,ramp-from-low-to-high");
 
-		ramp->pattern_repeat = of_property_read_bool(child,
-				"qcom,ramp-pattern-repeat");
-
 		ramp->toggle =  of_property_read_bool(child,
 				"qcom,ramp-toggle");
 	}
@@ -1148,6 +1503,36 @@
 	return 0;
 }
 
+static int qpnp_lpg_sdam_hw_init(struct qpnp_lpg_chip *chip)
+{
+	struct qpnp_lpg_channel *lpg;
+	int i, rc = 0;
+
+	if (!chip->use_sdam)
+		return 0;
+
+	for (i = 0; i < chip->num_lpgs; i++) {
+		lpg = &chip->lpgs[i];
+		if (lpg->lpg_sdam_base != 0) {
+			rc = qpnp_lpg_sdam_write(lpg, SDAM_LUT_EN_OFFSET, 0);
+			if (rc < 0) {
+				dev_err(chip->dev, "Write SDAM_REG_LUT_EN failed, rc=%d\n",
+						rc);
+				return rc;
+			}
+			rc = qpnp_lpg_sdam_write(lpg,
+					SDAM_PBS_SCRATCH_LUT_COUNTER_OFFSET, 0);
+			if (rc < 0) {
+				dev_err(lpg->chip->dev, "Write SDAM_REG_PBS_SCRATCH_LUT_COUNTER failed, rc=%d\n",
+						rc);
+				return rc;
+			}
+		}
+	}
+
+	return rc;
+}
+
 static int qpnp_lpg_probe(struct platform_device *pdev)
 {
 	int rc;
@@ -1172,6 +1557,13 @@
 		goto err_out;
 	}
 
+	rc = qpnp_lpg_sdam_hw_init(chip);
+	if (rc < 0) {
+		dev_err(chip->dev, "SDAM HW init failed, rc=%d\n",
+				rc);
+		goto err_out;
+	}
+
 	dev_set_drvdata(chip->dev, chip);
 	chip->pwm_chip.dev = chip->dev;
 	chip->pwm_chip.base = -1;
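The SDAM path added above stores each LUT entry as a raw byte and the ramp step as wait ticks of roughly 7.8 ms. A minimal user-space sketch of those two conversions follows; the helper names and sample values are illustrative, not part of the driver, but the arithmetic mirrors qpnp_lpg_set_sdam_lut_pattern() and qpnp_lpg_set_sdam_ramp_config().

#include <stdio.h>
#include <stdint.h>

/* Percent duty cycle -> raw SDAM LUT byte. */
static uint8_t percent_to_sdam_byte(unsigned int percent)
{
	return percent * 255 / 100;
}

/*
 * Ramp step in ms -> SDAM wait ticks (one tick is about 7.8 ms):
 * truncate to 8 bits, then subtract one because the register is zero-based.
 */
static uint8_t step_ms_to_ticks(unsigned int step_ms)
{
	uint8_t val = (step_ms * 1000 / 7800) & 0xff;

	if (val > 0)
		val--;
	return val;
}

int main(void)
{
	unsigned int pattern[] = { 0, 25, 50, 100 };	/* sample duty-cycle pattern */
	unsigned int i;

	for (i = 0; i < sizeof(pattern) / sizeof(pattern[0]); i++)
		printf("%u%% -> 0x%02x\n", pattern[i],
		       percent_to_sdam_byte(pattern[i]));

	printf("step 100 ms -> %u ticks\n", step_ms_to_ticks(100));	/* 11 */
	printf("step 1000 ms -> %u ticks\n", step_ms_to_ticks(1000));	/* 127 */
	return 0;
}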
diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
index 1c85ecc..0fcf94f 100644
--- a/drivers/pwm/pwm-rcar.c
+++ b/drivers/pwm/pwm-rcar.c
@@ -156,8 +156,12 @@
 	if (div < 0)
 		return div;
 
-	/* Let the core driver set pwm->period if disabled and duty_ns == 0 */
-	if (!pwm_is_enabled(pwm) && !duty_ns)
+	/*
+	 * Let the core driver set pwm->period if disabled and duty_ns == 0.
+	 * However, this driver should not set the new duty_ns if the current
+	 * duty_cycle is not set.
+	 */
+	if (!pwm_is_enabled(pwm) && !duty_ns && !pwm->state.duty_cycle)
 		return 0;
 
 	rcar_pwm_update(rp, RCAR_PWMCR_SYNC, RCAR_PWMCR_SYNC, RCAR_PWMCR);
diff --git a/drivers/regulator/cpr3-regulator.c b/drivers/regulator/cpr3-regulator.c
index 9510016..2eff0cf 100644
--- a/drivers/regulator/cpr3-regulator.c
+++ b/drivers/regulator/cpr3-regulator.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -178,6 +178,7 @@
 
 #define CPR4_REG_MISC				0x700
 #define CPR4_MISC_RESET_STEP_QUOT_LOOP_EN	BIT(2)
+#define CPR4_MISC_THREAD_HAS_ALWAYS_VOTE_EN	BIT(3)
 #define CPR4_MISC_MARGIN_TABLE_ROW_SELECT_MASK	GENMASK(23, 20)
 #define CPR4_MISC_MARGIN_TABLE_ROW_SELECT_SHIFT	20
 #define CPR4_MISC_TEMP_SENSOR_ID_START_MASK	GENMASK(27, 24)
@@ -733,6 +734,11 @@
 				CPR4_MISC_RESET_STEP_QUOT_LOOP_EN,
 				CPR4_MISC_RESET_STEP_QUOT_LOOP_EN);
 
+	if (ctrl->thread_has_always_vote_en)
+		cpr3_masked_write(ctrl, CPR4_REG_MISC,
+			CPR4_MISC_THREAD_HAS_ALWAYS_VOTE_EN,
+			CPR4_MISC_THREAD_HAS_ALWAYS_VOTE_EN);
+
 	if (ctrl->supports_hw_closed_loop) {
 		if (ctrl->saw_use_unit_mV)
 			pmic_step_size = ctrl->step_volt / 1000;
diff --git a/drivers/regulator/cpr3-regulator.h b/drivers/regulator/cpr3-regulator.h
index 778f482..39d3f82 100644
--- a/drivers/regulator/cpr3-regulator.h
+++ b/drivers/regulator/cpr3-regulator.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -766,6 +766,12 @@
  *			the CPR controller to first use the default step_quot
  *			and then later switch to the run-time calibrated
  *			step_quot.
+ * @thread_has_always_vote_en: Boolean value which indicates that this CPR
+ *			controller should be configured to keep thread vote
+ *			always enabled. This configuration allows the CPR
+ *			controller to not consider MID/DN recommendations from
+ *			other threads when all sensors mapped to a thread
+ *			have collapsed.
  *
  * This structure contains both configuration and runtime state data.  The
  * elements cpr_allowed_sw, use_hw_closed_loop, aggr_corner, cpr_enabled,
@@ -879,6 +885,7 @@
 	struct notifier_block	panic_notifier;
 	bool			support_ldo300_vreg;
 	bool			reset_step_quot_loop_en;
+	bool			thread_has_always_vote_en;
 };
 
 /* Used for rounding voltages to the closest physically available set point. */
diff --git a/drivers/regulator/cpr3-util.c b/drivers/regulator/cpr3-util.c
index 39ee3c5..9e138ce 100644
--- a/drivers/regulator/cpr3-util.c
+++ b/drivers/regulator/cpr3-util.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1241,6 +1241,16 @@
 					"qcom,cpr-reset-step-quot-loop-en");
 
 	/*
+	 * Configure the CPR controller to not consider MID/DN recommendations
+	 * from other threads when all sensors mapped to a thread have
+	 * collapsed in a multi-thread configuration.
+	 */
+	if (ctrl->thread_count > 1)
+		ctrl->thread_has_always_vote_en
+			= of_property_read_bool(ctrl->dev->of_node,
+					"qcom,cpr-thread-has-always-vote-en");
+
+	/*
 	 * Regulator device handles are not necessary for CPRh controllers
 	 * since communication with the regulators is completely managed
 	 * in hardware.
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 83e89e5..b73a237 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -268,8 +268,7 @@
 	drvdata->desc.name = kstrdup(config->supply_name, GFP_KERNEL);
 	if (drvdata->desc.name == NULL) {
 		dev_err(&pdev->dev, "Failed to allocate supply name\n");
-		ret = -ENOMEM;
-		goto err;
+		return -ENOMEM;
 	}
 
 	if (config->nr_gpios != 0) {
@@ -289,7 +288,7 @@
 				dev_err(&pdev->dev,
 					"Could not obtain regulator setting GPIOs: %d\n",
 					ret);
-			goto err_memstate;
+			goto err_memgpio;
 		}
 	}
 
@@ -300,7 +299,7 @@
 	if (drvdata->states == NULL) {
 		dev_err(&pdev->dev, "Failed to allocate state data\n");
 		ret = -ENOMEM;
-		goto err_memgpio;
+		goto err_stategpio;
 	}
 	drvdata->nr_states = config->nr_states;
 
@@ -321,7 +320,7 @@
 	default:
 		dev_err(&pdev->dev, "No regulator type set\n");
 		ret = -EINVAL;
-		goto err_memgpio;
+		goto err_memstate;
 	}
 
 	/* build initial state from gpio init data. */
@@ -358,22 +357,21 @@
 	if (IS_ERR(drvdata->dev)) {
 		ret = PTR_ERR(drvdata->dev);
 		dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
-		goto err_stategpio;
+		goto err_memstate;
 	}
 
 	platform_set_drvdata(pdev, drvdata);
 
 	return 0;
 
-err_stategpio:
-	gpio_free_array(drvdata->gpios, drvdata->nr_gpios);
 err_memstate:
 	kfree(drvdata->states);
+err_stategpio:
+	gpio_free_array(drvdata->gpios, drvdata->nr_gpios);
 err_memgpio:
 	kfree(drvdata->gpios);
 err_name:
 	kfree(drvdata->desc.name);
-err:
 	return ret;
 }
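The relabeling in the gpio-regulator error path above restores the usual unwind convention: each error label releases only what was acquired before the failing step, in reverse order of acquisition. A stand-alone sketch of that pattern follows; the allocations and sizes are placeholders, not the driver's own resources.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Placeholder setup mirroring the goto-unwind ordering, not the driver itself. */
static int setup_example(void)
{
	char *name, *gpios, *states;
	int ret;

	name = malloc(16);
	if (!name)
		return -ENOMEM;

	gpios = malloc(32);
	if (!gpios) {
		ret = -ENOMEM;
		goto err_name;
	}

	states = malloc(64);
	if (!states) {
		ret = -ENOMEM;
		goto err_gpios;
	}

	/* A later failure here would jump to a label that also frees states. */
	free(states);
	free(gpios);
	free(name);
	return 0;

err_gpios:
	free(gpios);
err_name:
	free(name);
	return ret;
}

int main(void)
{
	printf("setup_example() = %d\n", setup_example());
	return 0;
}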
 
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 4f613ec..037675b 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -282,6 +282,7 @@
 				dev_err(dev,
 					"failed to parse DT for regulator %s\n",
 					child->name);
+				of_node_put(child);
 				return -EINVAL;
 			}
 			match->of_node = of_node_get(child);
diff --git a/drivers/regulator/qpnp-lcdb-regulator.c b/drivers/regulator/qpnp-lcdb-regulator.c
index 07a6198..cc01399 100644
--- a/drivers/regulator/qpnp-lcdb-regulator.c
+++ b/drivers/regulator/qpnp-lcdb-regulator.c
@@ -167,6 +167,8 @@
 #define PM660_BST_HEADROOM_DEFAULT_MV	200
 #define BST_HEADROOM_DEFAULT_MV		150
 
+#define PMIC5_LCDB_OFF_ON_DELAY_US	20000
+
 struct ldo_regulator {
 	struct regulator_desc		rdesc;
 	struct regulator_dev		*rdev;
@@ -1340,22 +1342,27 @@
 
 static int qpnp_lcdb_regulator_register(struct qpnp_lcdb *lcdb, u8 type)
 {
-	int rc = 0;
+	int rc = 0, off_on_delay = 0;
 	struct regulator_init_data *init_data;
 	struct regulator_config cfg = {};
 	struct regulator_desc *rdesc;
 	struct regulator_dev *rdev;
 	struct device_node *node;
 
+	if (lcdb->pmic_rev_id->pmic_subtype != PM660L_SUBTYPE)
+		off_on_delay = PMIC5_LCDB_OFF_ON_DELAY_US;
+
 	if (type == LDO) {
 		node			= lcdb->ldo.node;
 		rdesc			= &lcdb->ldo.rdesc;
 		rdesc->ops		= &qpnp_lcdb_ldo_ops;
+		rdesc->off_on_delay	= off_on_delay;
 		rdev			= lcdb->ldo.rdev;
 	} else if (type == NCP) {
 		node			= lcdb->ncp.node;
 		rdesc			= &lcdb->ncp.rdesc;
 		rdesc->ops		= &qpnp_lcdb_ncp_ops;
+		rdesc->off_on_delay	= off_on_delay;
 		rdev			= lcdb->ncp.rdev;
 	} else {
 		pr_err("Invalid regulator type %d\n", type);
diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c
index e1cfa06..e79f2a1 100644
--- a/drivers/rtc/hctosys.c
+++ b/drivers/rtc/hctosys.c
@@ -49,6 +49,11 @@
 
 	tv64.tv_sec = rtc_tm_to_time64(&tm);
 
+#if BITS_PER_LONG == 32
+	if (tv64.tv_sec > INT_MAX)
+		goto err_read;
+#endif
+
 	err = do_settimeofday64(&tv64);
 
 	dev_info(rtc->dev.parent,
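The new guard above skips setting the system time when the RTC reports a second count that a 32-bit time_t cannot hold. A small illustration of the boundary check; the sample value is arbitrary.

#include <limits.h>
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t secs = 0x80000000;	/* one past INT_MAX */

	/* Mirrors the BITS_PER_LONG == 32 check: reject values a 32-bit
	 * time_t cannot represent instead of silently truncating them. */
	if (secs > INT_MAX)
		printf("%lld seconds does not fit in a 32-bit time_t\n",
		       (long long)secs);
	return 0;
}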
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 99b5e35..162afcc 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -227,6 +227,13 @@
 			missing = year;
 	}
 
+	/* Can't proceed if alarm is still invalid after replacing
+	 * missing fields.
+	 */
+	err = rtc_valid_tm(&alarm->time);
+	if (err)
+		goto done;
+
 	/* with luck, no rollover is needed */
 	t_now = rtc_tm_to_time64(&now);
 	t_alm = rtc_tm_to_time64(&alarm->time);
@@ -278,9 +285,9 @@
 		dev_warn(&rtc->dev, "alarm rollover not handled\n");
 	}
 
-done:
 	err = rtc_valid_tm(&alarm->time);
 
+done:
 	if (err) {
 		dev_warn(&rtc->dev, "invalid alarm value: %d-%d-%d %d:%d:%d\n",
 			alarm->time.tm_year + 1900, alarm->time.tm_mon + 1,
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 58698d2..c4ca6a3 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -168,6 +168,7 @@
 /* Sets the given date and time to the real time clock. */
 static int m41t80_set_datetime(struct i2c_client *client, struct rtc_time *tm)
 {
+	struct m41t80_data *clientdata = i2c_get_clientdata(client);
 	unsigned char buf[8];
 	int err, flags;
 
@@ -183,6 +184,17 @@
 	buf[M41T80_REG_YEAR] = bin2bcd(tm->tm_year - 100);
 	buf[M41T80_REG_WDAY] = tm->tm_wday;
 
+	/* If the square wave output is controlled in the weekday register */
+	if (clientdata->features & M41T80_FEATURE_SQ_ALT) {
+		int val;
+
+		val = i2c_smbus_read_byte_data(client, M41T80_REG_WDAY);
+		if (val < 0)
+			return val;
+
+		buf[M41T80_REG_WDAY] |= (val & 0xf0);
+	}
+
 	err = i2c_smbus_write_i2c_block_data(client, M41T80_REG_SSEC,
 					     sizeof(buf), buf);
 	if (err < 0) {
diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
index e4324dc..180ec1f 100644
--- a/drivers/rtc/rtc-opal.c
+++ b/drivers/rtc/rtc-opal.c
@@ -57,7 +57,7 @@
 
 static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
 {
-	long rc = OPAL_BUSY;
+	s64 rc = OPAL_BUSY;
 	int retries = 10;
 	u32 y_m_d;
 	u64 h_m_s_ms;
@@ -66,13 +66,17 @@
 
 	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
 		rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
-		if (rc == OPAL_BUSY_EVENT)
+		if (rc == OPAL_BUSY_EVENT) {
+			msleep(OPAL_BUSY_DELAY_MS);
 			opal_poll_events(NULL);
-		else if (retries-- && (rc == OPAL_HARDWARE
-				       || rc == OPAL_INTERNAL_ERROR))
-			msleep(10);
-		else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
-			break;
+		} else if (rc == OPAL_BUSY) {
+			msleep(OPAL_BUSY_DELAY_MS);
+		} else if (rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) {
+			if (retries--) {
+				msleep(10); /* Wait 10ms before retry */
+				rc = OPAL_BUSY; /* go around again */
+			}
+		}
 	}
 
 	if (rc != OPAL_SUCCESS)
@@ -87,21 +91,26 @@
 
 static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm)
 {
-	long rc = OPAL_BUSY;
+	s64 rc = OPAL_BUSY;
 	int retries = 10;
 	u32 y_m_d = 0;
 	u64 h_m_s_ms = 0;
 
 	tm_to_opal(tm, &y_m_d, &h_m_s_ms);
+
 	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
 		rc = opal_rtc_write(y_m_d, h_m_s_ms);
-		if (rc == OPAL_BUSY_EVENT)
+		if (rc == OPAL_BUSY_EVENT) {
+			msleep(OPAL_BUSY_DELAY_MS);
 			opal_poll_events(NULL);
-		else if (retries-- && (rc == OPAL_HARDWARE
-				       || rc == OPAL_INTERNAL_ERROR))
-			msleep(10);
-		else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
-			break;
+		} else if (rc == OPAL_BUSY) {
+			msleep(OPAL_BUSY_DELAY_MS);
+		} else if (rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) {
+			if (retries--) {
+				msleep(10); /* Wait 10ms before retry */
+				rc = OPAL_BUSY; /* go around again */
+			}
+		}
 	}
 
 	return rc == OPAL_SUCCESS ? 0 : -EIO;
@@ -150,6 +159,16 @@
 
 	y_m_d = be32_to_cpu(__y_m_d);
 	h_m_s_ms = ((u64)be32_to_cpu(__h_m) << 32);
+
+	/* check if no alarm is set */
+	if (y_m_d == 0 && h_m_s_ms == 0) {
+		pr_debug("No alarm is set\n");
+		rc = -ENOENT;
+		goto exit;
+	} else {
+		pr_debug("Alarm set to %x %llx\n", y_m_d, h_m_s_ms);
+	}
+
 	opal_to_tm(y_m_d, h_m_s_ms, &alarm->time);
 
 exit:
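The rewritten rtc-opal loops above treat three outcomes differently: OPAL_BUSY just sleeps and retries, OPAL_BUSY_EVENT also polls for events, and hardware/internal errors are retried a bounded number of times by forcing the code back to BUSY. A stand-alone sketch of that retry shape, with made-up return codes and a stubbed firmware call:

#include <stdio.h>

/* Made-up stand-ins for the OPAL return codes used above. */
enum { OP_SUCCESS = 0, OP_BUSY = -1, OP_BUSY_EVENT = -2, OP_HARDWARE = -3 };

static int calls;

/* Stub firmware call: busy twice, one hardware error, then success. */
static int fw_call(void)
{
	int script[] = { OP_BUSY, OP_BUSY_EVENT, OP_HARDWARE, OP_SUCCESS };

	return script[calls++];
}

int main(void)
{
	int rc = OP_BUSY;
	int retries = 10;

	while (rc == OP_BUSY || rc == OP_BUSY_EVENT) {
		rc = fw_call();
		if (rc == OP_BUSY_EVENT) {
			/* sleep and poll for events, then loop again */
		} else if (rc == OP_BUSY) {
			/* sleep, then loop again */
		} else if (rc == OP_HARDWARE) {
			if (retries--)
				rc = OP_BUSY;	/* go around again */
		}
	}

	printf("final rc = %d after %d calls\n", rc, calls);
	return 0;
}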
diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
index 0f11c2a..3e8fd33 100644
--- a/drivers/rtc/rtc-snvs.c
+++ b/drivers/rtc/rtc-snvs.c
@@ -132,20 +132,23 @@
 {
 	struct snvs_rtc_data *data = dev_get_drvdata(dev);
 	unsigned long time;
+	int ret;
 
 	rtc_tm_to_time(tm, &time);
 
 	/* Disable RTC first */
-	snvs_rtc_enable(data, false);
+	ret = snvs_rtc_enable(data, false);
+	if (ret)
+		return ret;
 
 	/* Write 32-bit time to 47-bit timer, leaving 15 LSBs blank */
 	regmap_write(data->regmap, data->offset + SNVS_LPSRTCLR, time << CNTR_TO_SECS_SH);
 	regmap_write(data->regmap, data->offset + SNVS_LPSRTCMR, time >> (32 - CNTR_TO_SECS_SH));
 
 	/* Enable RTC again */
-	snvs_rtc_enable(data, true);
+	ret = snvs_rtc_enable(data, true);
 
-	return 0;
+	return ret;
 }
 
 static int snvs_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
@@ -257,7 +260,7 @@
 		of_property_read_u32(pdev->dev.of_node, "offset", &data->offset);
 	}
 
-	if (!data->regmap) {
+	if (IS_ERR(data->regmap)) {
 		dev_err(&pdev->dev, "Can't find snvs syscon\n");
 		return -ENODEV;
 	}
@@ -287,7 +290,11 @@
 	regmap_write(data->regmap, data->offset + SNVS_LPSR, 0xffffffff);
 
 	/* Enable RTC */
-	snvs_rtc_enable(data, true);
+	ret = snvs_rtc_enable(data, true);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to enable rtc %d\n", ret);
+		goto error_rtc_device_register;
+	}
 
 	device_init_wakeup(&pdev->dev, true);
 
diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c
index 560d9a5..a952808 100644
--- a/drivers/rtc/rtc-tx4939.c
+++ b/drivers/rtc/rtc-tx4939.c
@@ -86,7 +86,8 @@
 	for (i = 2; i < 6; i++)
 		buf[i] = __raw_readl(&rtcreg->dat);
 	spin_unlock_irq(&pdata->lock);
-	sec = (buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2];
+	sec = ((unsigned long)buf[5] << 24) | (buf[4] << 16) |
+		(buf[3] << 8) | buf[2];
 	rtc_time_to_tm(sec, tm);
 	return rtc_valid_tm(tm);
 }
@@ -147,7 +148,8 @@
 	alrm->enabled = (ctl & TX4939_RTCCTL_ALME) ? 1 : 0;
 	alrm->pending = (ctl & TX4939_RTCCTL_ALMD) ? 1 : 0;
 	spin_unlock_irq(&pdata->lock);
-	sec = (buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2];
+	sec = ((unsigned long)buf[5] << 24) | (buf[4] << 16) |
+		(buf[3] << 8) | buf[2];
 	rtc_time_to_tm(sec, &alrm->time);
 	return rtc_valid_tm(&alrm->time);
 }
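The casts added in rtc-tx4939 above matter because the buffer holds bytes: without the cast the byte is promoted to a signed int, so a most-significant byte of 0x80 or more is shifted into the sign bit, and the negative intermediate sign-extends when widened to unsigned long on 64-bit. A compilable illustration with arbitrary sample register bytes:

#include <stdio.h>

int main(void)
{
	unsigned char buf[6] = { 0, 0, 0x12, 0x34, 0x56, 0x9a };
	unsigned long sec;

	/*
	 * Without the (unsigned long) cast, buf[5] is promoted to a signed
	 * int, so a value >= 0x80 shifted left by 24 lands in the sign bit;
	 * the negative intermediate then sign-extends when converted to
	 * unsigned long on 64-bit, corrupting the timestamp.
	 */
	sec = ((unsigned long)buf[5] << 24) | (buf[4] << 16) |
	      (buf[3] << 8) | buf[2];
	printf("sec = %#lx\n", sec);	/* 0x9a563412 */
	return 0;
}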
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 5ecd408..0da2465 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1950,8 +1950,12 @@
 {
 	int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM);
 
-	if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
-		/* dasd is being set offline. */
+	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
+	    !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+		/*
+		 * dasd is being set offline,
+		 * but it is not a safe offline where we have to allow I/O
+		 */
 		return 1;
 	}
 	if (device->stopped) {
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 1e56018..e453d2a 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -591,13 +591,22 @@
 int dasd_alias_add_device(struct dasd_device *device)
 {
 	struct dasd_eckd_private *private = device->private;
-	struct alias_lcu *lcu;
+	__u8 uaddr = private->uid.real_unit_addr;
+	struct alias_lcu *lcu = private->lcu;
 	unsigned long flags;
 	int rc;
 
-	lcu = private->lcu;
 	rc = 0;
 	spin_lock_irqsave(&lcu->lock, flags);
+	/*
+	 * Check if device and lcu type differ. If so, the uac data may be
+	 * outdated and needs to be updated.
+	 */
+	if (private->uid.type != lcu->uac->unit[uaddr].ua_type) {
+		lcu->flags |= UPDATE_PENDING;
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			      "uid type mismatch - trigger rescan");
+	}
 	if (!(lcu->flags & UPDATE_PENDING)) {
 		rc = _add_device_to_lcu(lcu, device, device);
 		if (rc)
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 41e28b2..8ac27ef 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -2,6 +2,8 @@
 # S/390 character devices
 #
 
+CFLAGS_REMOVE_sclp_early_core.o	+= $(CC_FLAGS_EXPOLINE)
+
 obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
 	 sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
 	 sclp_early.o
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 1167469..67903c9 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -451,6 +451,7 @@
 
 static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
 {
+	struct channel_path *chp;
 	struct chp_link link;
 	struct chp_id chpid;
 	int status;
@@ -463,10 +464,17 @@
 	chpid.id = sei_area->rsid;
 	/* allocate a new channel path structure, if needed */
 	status = chp_get_status(chpid);
-	if (status < 0)
-		chp_new(chpid);
-	else if (!status)
+	if (!status)
 		return;
+
+	if (status < 0) {
+		chp_new(chpid);
+	} else {
+		chp = chpid_to_chp(chpid);
+		mutex_lock(&chp->lock);
+		chp_update_desc(chp);
+		mutex_unlock(&chp->lock);
+	}
 	memset(&link, 0, sizeof(struct chp_link));
 	link.chpid = chpid;
 	if ((sei_area->vf & 0xc0) != 0) {
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 8327d47..c46e31e 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -822,6 +822,7 @@
 
 	ccw_device_set_timeout(cdev, 0);
 	cdev->private->iretry = 255;
+	cdev->private->async_kill_io_rc = -ETIMEDOUT;
 	ret = ccw_device_cancel_halt_clear(cdev);
 	if (ret == -EBUSY) {
 		ccw_device_set_timeout(cdev, 3*HZ);
@@ -898,7 +899,7 @@
 	/* OK, i/o is dead now. Call interrupt handler. */
 	if (cdev->handler)
 		cdev->handler(cdev, cdev->private->intparm,
-			      ERR_PTR(-EIO));
+			      ERR_PTR(cdev->private->async_kill_io_rc));
 }
 
 static void
@@ -915,14 +916,16 @@
 	ccw_device_online_verify(cdev, 0);
 	if (cdev->handler)
 		cdev->handler(cdev, cdev->private->intparm,
-			      ERR_PTR(-EIO));
+			      ERR_PTR(cdev->private->async_kill_io_rc));
 }
 
 void ccw_device_kill_io(struct ccw_device *cdev)
 {
 	int ret;
 
+	ccw_device_set_timeout(cdev, 0);
 	cdev->private->iretry = 255;
+	cdev->private->async_kill_io_rc = -EIO;
 	ret = ccw_device_cancel_halt_clear(cdev);
 	if (ret == -EBUSY) {
 		ccw_device_set_timeout(cdev, 3*HZ);
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 877d9f6..85b2896 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -158,7 +158,7 @@
 }
 
 /**
- * ccw_device_start_key() - start a s390 channel program with key
+ * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key
  * @cdev: target ccw device
  * @cpa: logical start address of channel program
  * @intparm: user specific interruption parameter; will be presented back to
@@ -169,10 +169,15 @@
  * @key: storage key to be used for the I/O
  * @flags: additional flags; defines the action to be performed for I/O
  *	   processing.
+ * @expires: timeout value in jiffies
  *
  * Start a S/390 channel program. When the interrupt arrives, the
  * IRQ handler is called, either immediately, delayed (dev-end missing,
  * or sense required) or never (no IRQ handler registered).
+ * This function notifies the device driver if the channel program has not
+ * completed during the time specified by @expires. If a timeout occurs, the
+ * channel program is terminated via xsch, hsch or csch, and the device's
+ * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
  * Returns:
  *  %0, if the operation was successful;
  *  -%EBUSY, if the device is busy, or status pending;
@@ -181,9 +186,9 @@
  * Context:
  *  Interrupts disabled, ccw device lock held
  */
-int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
-			 unsigned long intparm, __u8 lpm, __u8 key,
-			 unsigned long flags)
+int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
+				 unsigned long intparm, __u8 lpm, __u8 key,
+				 unsigned long flags, int expires)
 {
 	struct subchannel *sch;
 	int ret;
@@ -223,6 +228,8 @@
 	switch (ret) {
 	case 0:
 		cdev->private->intparm = intparm;
+		if (expires)
+			ccw_device_set_timeout(cdev, expires);
 		break;
 	case -EACCES:
 	case -ENODEV:
@@ -233,7 +240,7 @@
 }
 
 /**
- * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key
+ * ccw_device_start_key() - start a s390 channel program with key
  * @cdev: target ccw device
  * @cpa: logical start address of channel program
  * @intparm: user specific interruption parameter; will be presented back to
@@ -244,15 +251,10 @@
  * @key: storage key to be used for the I/O
  * @flags: additional flags; defines the action to be performed for I/O
  *	   processing.
- * @expires: timeout value in jiffies
  *
  * Start a S/390 channel program. When the interrupt arrives, the
  * IRQ handler is called, either immediately, delayed (dev-end missing,
  * or sense required) or never (no IRQ handler registered).
- * This function notifies the device driver if the channel program has not
- * completed during the time specified by @expires. If a timeout occurs, the
- * channel program is terminated via xsch, hsch or csch, and the device's
- * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
  * Returns:
  *  %0, if the operation was successful;
  *  -%EBUSY, if the device is busy, or status pending;
@@ -261,19 +263,12 @@
  * Context:
  *  Interrupts disabled, ccw device lock held
  */
-int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
-				 unsigned long intparm, __u8 lpm, __u8 key,
-				 unsigned long flags, int expires)
+int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
+			 unsigned long intparm, __u8 lpm, __u8 key,
+			 unsigned long flags)
 {
-	int ret;
-
-	if (!cdev)
-		return -ENODEV;
-	ccw_device_set_timeout(cdev, expires);
-	ret = ccw_device_start_key(cdev, cpa, intparm, lpm, key, flags);
-	if (ret != 0)
-		ccw_device_set_timeout(cdev, 0);
-	return ret;
+	return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm, key,
+					    flags, 0);
 }
 
 /**
@@ -488,18 +483,20 @@
 EXPORT_SYMBOL(ccw_device_get_id);
 
 /**
- * ccw_device_tm_start_key() - perform start function
+ * ccw_device_tm_start_timeout_key() - perform start function
  * @cdev: ccw device on which to perform the start function
  * @tcw: transport-command word to be started
  * @intparm: user defined parameter to be passed to the interrupt handler
  * @lpm: mask of paths to use
  * @key: storage key to use for storage access
+ * @expires: time span in jiffies after which to abort request
  *
  * Start the tcw on the given ccw device. Return zero on success, non-zero
  * otherwise.
  */
-int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
-			    unsigned long intparm, u8 lpm, u8 key)
+int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
+				    unsigned long intparm, u8 lpm, u8 key,
+				    int expires)
 {
 	struct subchannel *sch;
 	int rc;
@@ -526,37 +523,32 @@
 			return -EACCES;
 	}
 	rc = cio_tm_start_key(sch, tcw, lpm, key);
-	if (rc == 0)
+	if (rc == 0) {
 		cdev->private->intparm = intparm;
+		if (expires)
+			ccw_device_set_timeout(cdev, expires);
+	}
 	return rc;
 }
-EXPORT_SYMBOL(ccw_device_tm_start_key);
+EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);
 
 /**
- * ccw_device_tm_start_timeout_key() - perform start function
+ * ccw_device_tm_start_key() - perform start function
  * @cdev: ccw device on which to perform the start function
  * @tcw: transport-command word to be started
  * @intparm: user defined parameter to be passed to the interrupt handler
  * @lpm: mask of paths to use
  * @key: storage key to use for storage access
- * @expires: time span in jiffies after which to abort request
  *
  * Start the tcw on the given ccw device. Return zero on success, non-zero
  * otherwise.
  */
-int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
-				    unsigned long intparm, u8 lpm, u8 key,
-				    int expires)
+int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
+			    unsigned long intparm, u8 lpm, u8 key)
 {
-	int ret;
-
-	ccw_device_set_timeout(cdev, expires);
-	ret = ccw_device_tm_start_key(cdev, tcw, intparm, lpm, key);
-	if (ret != 0)
-		ccw_device_set_timeout(cdev, 0);
-	return ret;
+	return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm, key, 0);
 }
-EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);
+EXPORT_SYMBOL(ccw_device_tm_start_key);
 
 /**
  * ccw_device_tm_start() - perform start function
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 220f491..1d98434 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -154,6 +154,7 @@
 	unsigned long intparm;	/* user interruption parameter */
 	struct qdio_irq *qdio_data;
 	struct irb irb;		/* device status */
+	int async_kill_io_rc;
 	struct senseid senseid;	/* SenseID info */
 	struct pgid pgid[8];	/* path group IDs per chpid*/
 	struct ccw1 iccws[2];	/* ccws for SNID/SID/SPGID commands */
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 71bf9bd..66e9bb0 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -126,7 +126,7 @@
 static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
 			int start, int count, int auto_ack)
 {
-	int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
+	int rc, tmp_count = count, tmp_start = start, nr = q->nr;
 	unsigned int ccq = 0;
 
 	qperf_inc(q, eqbs);
@@ -149,14 +149,7 @@
 		qperf_inc(q, eqbs_partial);
 		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
 			tmp_count);
-		/*
-		 * Retry once, if that fails bail out and process the
-		 * extracted buffers before trying again.
-		 */
-		if (!retried++)
-			goto again;
-		else
-			return count - tmp_count;
+		return count - tmp_count;
 	}
 
 	DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
@@ -212,7 +205,10 @@
 	return 0;
 }
 
-/* returns number of examined buffers and their common state in *state */
+/*
+ * Returns number of examined buffers and their common state in *state.
+ * Requested number of buffers-to-examine must be > 0.
+ */
 static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
 				 unsigned char *state, unsigned int count,
 				 int auto_ack, int merge_pending)
@@ -223,17 +219,23 @@
 	if (is_qebsm(q))
 		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
 
-	for (i = 0; i < count; i++) {
-		if (!__state) {
-			__state = q->slsb.val[bufnr];
-			if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
-				__state = SLSB_P_OUTPUT_EMPTY;
-		} else if (merge_pending) {
-			if ((q->slsb.val[bufnr] & __state) != __state)
-				break;
-		} else if (q->slsb.val[bufnr] != __state)
-			break;
+	/* get initial state: */
+	__state = q->slsb.val[bufnr];
+	if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
+		__state = SLSB_P_OUTPUT_EMPTY;
+
+	for (i = 1; i < count; i++) {
 		bufnr = next_buf(bufnr);
+
+		/* merge PENDING into EMPTY: */
+		if (merge_pending &&
+		    q->slsb.val[bufnr] == SLSB_P_OUTPUT_PENDING &&
+		    __state == SLSB_P_OUTPUT_EMPTY)
+			continue;
+
+		/* stop if next state differs from initial state: */
+		if (q->slsb.val[bufnr] != __state)
+			break;
 	}
 	*state = __state;
 	return i;
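The rewritten get_buf_states() above reads the first buffer's state once and then counts how many following buffers share it, optionally treating OUTPUT_PENDING as OUTPUT_EMPTY. A stand-alone sketch of that scan over a plain array; the state names and ring size are simplified stand-ins.

#include <stdio.h>

enum buf_state { EMPTY, PENDING, PRIMED };

#define RING_SIZE 8
#define next_buf(n) (((n) + 1) & (RING_SIZE - 1))

/* Count consecutive buffers sharing the first buffer's state; with
 * merge_pending, PENDING buffers count as EMPTY. */
static int count_buf_states(const enum buf_state *slsb, unsigned int bufnr,
			    enum buf_state *state, unsigned int count,
			    int merge_pending)
{
	enum buf_state first = slsb[bufnr];
	unsigned int i;

	if (merge_pending && first == PENDING)
		first = EMPTY;

	for (i = 1; i < count; i++) {
		bufnr = next_buf(bufnr);

		if (merge_pending && slsb[bufnr] == PENDING && first == EMPTY)
			continue;
		if (slsb[bufnr] != first)
			break;
	}
	*state = first;
	return i;
}

int main(void)
{
	enum buf_state ring[RING_SIZE] =
		{ EMPTY, PENDING, EMPTY, PRIMED, EMPTY, EMPTY, EMPTY, EMPTY };
	enum buf_state state;
	int n = count_buf_states(ring, 0, &state, RING_SIZE, 1);

	printf("%d buffers in state %d\n", n, state);	/* 3 buffers, EMPTY */
	return 0;
}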
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 48b3866..3528690 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -140,7 +140,7 @@
 	int i;
 
 	for (i = 0; i < nr_queues; i++) {
-		q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
+		q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL);
 		if (!q)
 			return -ENOMEM;
 
@@ -456,7 +456,6 @@
 {
 	struct ciw *ciw;
 	struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
-	int rc;
 
 	memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
 	memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
@@ -493,16 +492,14 @@
 	ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
 	if (!ciw) {
 		DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
-		rc = -EINVAL;
-		goto out_err;
+		return -EINVAL;
 	}
 	irq_ptr->equeue = *ciw;
 
 	ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
 	if (!ciw) {
 		DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
-		rc = -EINVAL;
-		goto out_err;
+		return -EINVAL;
 	}
 	irq_ptr->aqueue = *ciw;
 
@@ -510,9 +507,6 @@
 	irq_ptr->orig_handler = init_data->cdev->handler;
 	init_data->cdev->handler = qdio_int_handler;
 	return 0;
-out_err:
-	qdio_release_memory(irq_ptr);
-	return rc;
 }
 
 void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 34367d1..4534a7c 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -3,7 +3,7 @@
  *
  * Debug traces for zfcp.
  *
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2018
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -287,6 +287,27 @@
 	spin_unlock_irqrestore(&dbf->rec_lock, flags);
 }
 
+/**
+ * zfcp_dbf_rec_trig_lock - trace event related to triggered recovery with lock
+ * @tag: identifier for event
+ * @adapter: adapter on which the erp_action should run
+ * @port: remote port involved in the erp_action
+ * @sdev: scsi device involved in the erp_action
+ * @want: wanted erp_action
+ * @need: required erp_action
+ *
+ * The adapter->erp_lock must not be held.
+ */
+void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
+			    struct zfcp_port *port, struct scsi_device *sdev,
+			    u8 want, u8 need)
+{
+	unsigned long flags;
+
+	read_lock_irqsave(&adapter->erp_lock, flags);
+	zfcp_dbf_rec_trig(tag, adapter, port, sdev, want, need);
+	read_unlock_irqrestore(&adapter->erp_lock, flags);
+}
 
 /**
  * zfcp_dbf_rec_run_lvl - trace event related to running recovery
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 21c8c68..7a7984a 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -3,7 +3,7 @@
  *
  * External function declarations.
  *
- * Copyright IBM Corp. 2002, 2016
+ * Copyright IBM Corp. 2002, 2018
  */
 
 #ifndef ZFCP_EXT_H
@@ -34,6 +34,9 @@
 extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
 extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
 			      struct zfcp_port *, struct scsi_device *, u8, u8);
+extern void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
+				   struct zfcp_port *port,
+				   struct scsi_device *sdev, u8 want, u8 need);
 extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
 extern void zfcp_dbf_rec_run_lvl(int level, char *tag,
 				 struct zfcp_erp_action *erp);
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index a9b8104..bb99db2 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,7 +3,7 @@
  *
  * Interface to Linux SCSI midlayer.
  *
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2018
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -616,9 +616,9 @@
 	ids.port_id = port->d_id;
 	ids.roles = FC_RPORT_ROLE_FCP_TARGET;
 
-	zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL,
-			  ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
-			  ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
+	zfcp_dbf_rec_trig_lock("scpaddy", port->adapter, port, NULL,
+			       ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
+			       ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
 	rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
 	if (!rport) {
 		dev_err(&port->adapter->ccw_device->dev,
@@ -640,9 +640,9 @@
 	struct fc_rport *rport = port->rport;
 
 	if (rport) {
-		zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL,
-				  ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
-				  ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
+		zfcp_dbf_rec_trig_lock("scpdely", port->adapter, port, NULL,
+				       ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
+				       ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
 		fc_remote_port_delete(rport);
 		port->rport = NULL;
 	}
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index e2962f1..fe670b6 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1374,9 +1374,10 @@
 	host = aac->scsi_host_ptr;
 	scsi_block_requests(host);
 	aac_adapter_disable_int(aac);
-	if (aac->thread->pid != current->pid) {
+	if (aac->thread && aac->thread->pid != current->pid) {
 		spin_unlock_irq(host->host_lock);
 		kthread_stop(aac->thread);
+		aac->thread = NULL;
 		jafo = 1;
 	}
 
@@ -1445,6 +1446,7 @@
 					  aac->name);
 		if (IS_ERR(aac->thread)) {
 			retval = PTR_ERR(aac->thread);
+			aac->thread = NULL;
 			goto out;
 		}
 	}
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index d5b26fa..ad902a6 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1083,6 +1083,7 @@
 				up(&fib->event_wait);
 		}
 		kthread_stop(aac->thread);
+		aac->thread = NULL;
 	}
 	aac_adapter_disable_int(aac);
 	cpu = cpumask_first(cpu_online_mask);
@@ -1203,8 +1204,10 @@
 	 *	Map in the registers from the adapter.
 	 */
 	aac->base_size = AAC_MIN_FOOTPRINT_SIZE;
-	if ((*aac_drivers[index].init)(aac))
+	if ((*aac_drivers[index].init)(aac)) {
+		error = -ENODEV;
 		goto out_unmap;
+	}
 
 	if (aac->sync_mode) {
 		if (aac_sync_mode)
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 2438879..936e8c7 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2011,7 +2011,7 @@
 		 * have valid data in the sense buffer that could
 		 * confuse the higher levels.
 		 */
-		memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer));
+		memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
 //printk("scsi%d.%c: sense buffer: ", info->host->host_no, '0' + SCpnt->device->id);
 //{ int i; for (i = 0; i < 32; i++) printk("%02x ", SCpnt->sense_buffer[i]); printk("\n"); }
 	/*
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index fdd4eb4..e58786f 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -191,6 +191,7 @@
 	struct bnx2fc_cmd_mgr *cmd_mgr;
 	spinlock_t hba_lock;
 	struct mutex hba_mutex;
+	struct mutex hba_stats_mutex;
 	unsigned long adapter_state;
 		#define ADAPTER_STATE_UP		0
 		#define ADAPTER_STATE_GOING_DOWN	1
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index f9ddb61..bee7d37 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -670,15 +670,17 @@
 	if (!fw_stats)
 		return NULL;
 
+	mutex_lock(&hba->hba_stats_mutex);
+
 	bnx2fc_stats = fc_get_host_stats(shost);
 
 	init_completion(&hba->stat_req_done);
 	if (bnx2fc_send_stat_req(hba))
-		return bnx2fc_stats;
+		goto unlock_stats_mutex;
 	rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ));
 	if (!rc) {
 		BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
-		return bnx2fc_stats;
+		goto unlock_stats_mutex;
 	}
 	BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt);
 	bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt;
@@ -700,6 +702,9 @@
 
 	memcpy(&hba->prev_stats, hba->stats_buffer,
 	       sizeof(struct fcoe_statistics_params));
+
+unlock_stats_mutex:
+	mutex_unlock(&hba->hba_stats_mutex);
 	return bnx2fc_stats;
 }
 
@@ -1348,6 +1353,7 @@
 	}
 	spin_lock_init(&hba->hba_lock);
 	mutex_init(&hba->hba_mutex);
+	mutex_init(&hba->hba_stats_mutex);
 
 	hba->cnic = cnic;
 
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index f501095..bd39590 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1869,6 +1869,7 @@
 		/* we will not receive ABTS response for this IO */
 		BNX2FC_IO_DBG(io_req, "Timer context finished processing "
 			   "this scsi cmd\n");
+		return;
 	}
 
 	/* Cancel the timeout_work, as we received IO completion */
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index 622bdab..dab195f 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -1769,7 +1769,6 @@
 		goto bye;
 	}
 
-	mempool_free(mbp, hw->mb_mempool);
 	if (finicsum != cfcsum) {
 		csio_warn(hw,
 		      "Config File checksum mismatch: csum=%#x, computed=%#x\n",
@@ -1780,6 +1779,10 @@
 	rv = csio_hw_validate_caps(hw, mbp);
 	if (rv != 0)
 		goto bye;
+
+	mempool_free(mbp, hw->mb_mempool);
+	mbp = NULL;
+
 	/*
 	 * Note that we're operating with parameters
 	 * not supplied by the driver, rather than from hard-wired
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 4abd3fc..c2b6829 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1695,6 +1695,15 @@
 		 */
 		switch (session->state) {
 		case ISCSI_STATE_FAILED:
+			/*
+			 * cmds should fail during shutdown if the session
+			 * state is bad, allowing completion to happen
+			 */
+			if (unlikely(system_state != SYSTEM_RUNNING)) {
+				reason = FAILURE_SESSION_FAILED;
+				sc->result = DID_NO_CONNECT << 16;
+				break;
+			}
 		case ISCSI_STATE_IN_RECOVERY:
 			reason = FAILURE_SESSION_IN_RECOVERY;
 			sc->result = DID_IMM_RETRY << 16;
@@ -1980,6 +1989,19 @@
 
 	if (session->state != ISCSI_STATE_LOGGED_IN) {
 		/*
+		 * During shutdown, if the session is prematurely disconnected,
+		 * recovery won't happen and there will be hung cmds. Not
+		 * handling cmds would trigger EH, which is also bad in this
+		 * case. Instead, handle the cmd, allow completion to happen
+		 * and let the upper layer deal with the result.
+		 */
+		if (unlikely(system_state != SYSTEM_RUNNING)) {
+			sc->result = DID_NO_CONNECT << 16;
+			ISCSI_DBG_EH(session, "sc on shutdown, handled\n");
+			rc = BLK_EH_HANDLED;
+			goto done;
+		}
+		/*
 		 * We are probably in the middle of iscsi recovery so let
 		 * that complete and handle the error.
 		 */
@@ -2083,7 +2105,7 @@
 		task->last_timeout = jiffies;
 	spin_unlock(&session->frwd_lock);
 	ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
-		     "timer reset" : "nh");
+		     "timer reset" : "shutdown or nh");
 	return rc;
 }
 
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 022bb6e..12886f9 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -282,6 +282,7 @@
 	phy->phy->minimum_linkrate = dr->pmin_linkrate;
 	phy->phy->maximum_linkrate = dr->pmax_linkrate;
 	phy->phy->negotiated_linkrate = phy->linkrate;
+	phy->phy->enabled = (phy->linkrate != SAS_PHY_DISABLED);
 
  skip:
 	if (new_phy)
@@ -675,7 +676,7 @@
 	res = smp_execute_task(dev, req, RPEL_REQ_SIZE,
 			            resp, RPEL_RESP_SIZE);
 
-	if (!res)
+	if (res)
 		goto out;
 
 	phy->invalid_dword_count = scsi_to_u32(&resp[12]);
@@ -684,6 +685,7 @@
 	phy->phy_reset_problem_count = scsi_to_u32(&resp[24]);
 
  out:
+	kfree(req);
 	kfree(resp);
 	return res;
 
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 519dac4..9a8c2f9 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -222,6 +222,7 @@
 static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
 {
 	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
+	struct domain_device *dev = cmd_to_domain_dev(cmd);
 	struct sas_task *task = TO_SAS_TASK(cmd);
 
 	/* At this point, we only get called following an actual abort
@@ -230,6 +231,14 @@
 	 */
 	sas_end_task(cmd, task);
 
+	if (dev_is_sata(dev)) {
+		/* defer commands to libata so that libata EH can
+		 * handle ata qcs correctly
+		 */
+		list_move_tail(&cmd->eh_entry, &sas_ha->eh_ata_q);
+		return;
+	}
+
 	/* now finish the command and move it on to the error
 	 * handler done list, this also takes it off the
 	 * error handler pending list.
@@ -237,22 +246,6 @@
 	scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
 }
 
-static void sas_eh_defer_cmd(struct scsi_cmnd *cmd)
-{
-	struct domain_device *dev = cmd_to_domain_dev(cmd);
-	struct sas_ha_struct *ha = dev->port->ha;
-	struct sas_task *task = TO_SAS_TASK(cmd);
-
-	if (!dev_is_sata(dev)) {
-		sas_eh_finish_cmd(cmd);
-		return;
-	}
-
-	/* report the timeout to libata */
-	sas_end_task(cmd, task);
-	list_move_tail(&cmd->eh_entry, &ha->eh_ata_q);
-}
-
 static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
 {
 	struct scsi_cmnd *cmd, *n;
@@ -260,7 +253,7 @@
 	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
 		if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
 		    cmd->device->lun == my_cmd->device->lun)
-			sas_eh_defer_cmd(cmd);
+			sas_eh_finish_cmd(cmd);
 	}
 }
 
@@ -622,12 +615,12 @@
 		case TASK_IS_DONE:
 			SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
 				    task);
-			sas_eh_defer_cmd(cmd);
+			sas_eh_finish_cmd(cmd);
 			continue;
 		case TASK_IS_ABORTED:
 			SAS_DPRINTK("%s: task 0x%p is aborted\n",
 				    __func__, task);
-			sas_eh_defer_cmd(cmd);
+			sas_eh_finish_cmd(cmd);
 			continue;
 		case TASK_IS_AT_LU:
 			SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
@@ -638,7 +631,7 @@
 					    "recovered\n",
 					    SAS_ADDR(task->dev),
 					    cmd->device->lun);
-				sas_eh_defer_cmd(cmd);
+				sas_eh_finish_cmd(cmd);
 				sas_scsi_clear_queue_lu(work_q, cmd);
 				goto Again;
 			}
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 4532990..cf15b97 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -635,7 +635,12 @@
 	LPFC_MBOXQ_t *pmboxq;
 	int mbxstatus = MBXERR_ERROR;
 
+	/*
+	 * If the link is offline, disabled or BLOCK_MGMT_IO
+	 * it doesn't make any sense to allow issue_lip
+	 */
 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
+	    (phba->hba_flag & LINK_DISABLED) ||
 	    (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO))
 		return -EPERM;
 
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 7d2ad63..8173645 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -690,8 +690,9 @@
 	    (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
 		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
 			pring->flag |= LPFC_DEFERRED_RING_EVENT;
-			/* Set the lpfc data pending flag */
-			set_bit(LPFC_DATA_READY, &phba->data_flags);
+			/* Preserve legacy behavior. */
+			if (!(phba->hba_flag & HBA_SP_QUEUE_EVT))
+				set_bit(LPFC_DATA_READY, &phba->data_flags);
 		} else {
 			if (phba->link_state >= LPFC_LINK_UP) {
 				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 0902ed2..6df06e7 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -116,6 +116,8 @@
 	/* set consumption flag every once in a while */
 	if (!((q->host_index + 1) % q->entry_repost))
 		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
+	else
+		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
 	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
 		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
 	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 7568e06..caa0045 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -4065,19 +4065,6 @@
 		return 0;
 	}
 
-	/*
-	 * Bug work around for firmware SATL handling.  The loop
-	 * is based on atomic operations and ensures consistency
-	 * since we're lockless at this point
-	 */
-	do {
-		if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
-			scmd->result = SAM_STAT_BUSY;
-			scmd->scsi_done(scmd);
-			return 0;
-		}
-	} while (_scsih_set_satl_pending(scmd, true));
-
 	sas_target_priv_data = sas_device_priv_data->sas_target;
 
 	/* invalid device handle */
@@ -4103,6 +4090,19 @@
 	    sas_device_priv_data->block)
 		return SCSI_MLQUEUE_DEVICE_BUSY;
 
+	/*
+	 * Bug work around for firmware SATL handling.  The loop
+	 * is based on atomic operations and ensures consistency
+	 * since we're lockless at this point
+	 */
+	do {
+		if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
+			scmd->result = SAM_STAT_BUSY;
+			scmd->scsi_done(scmd);
+			return 0;
+		}
+	} while (_scsih_set_satl_pending(scmd, true));
+
 	if (scmd->sc_data_direction == DMA_FROM_DEVICE)
 		mpi_control = MPI2_SCSIIO_CONTROL_READ;
 	else if (scmd->sc_data_direction == DMA_TO_DEVICE)
@@ -4124,6 +4124,7 @@
 	if (!smid) {
 		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
 		    ioc->name, __func__);
+		_scsih_set_satl_pending(scmd, false);
 		goto out;
 	}
 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
@@ -4154,6 +4155,7 @@
 	if (mpi_request->DataLength) {
 		if (ioc->build_sg_scmd(ioc, scmd, smid)) {
 			mpt3sas_base_free_smid(ioc, smid);
+			_scsih_set_satl_pending(scmd, false);
 			goto out;
 		}
 	} else
@@ -8851,7 +8853,7 @@
 	snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
 	    "fw_event_%s%d", ioc->driver_name, ioc->id);
 	ioc->firmware_event_thread = alloc_ordered_workqueue(
-	    ioc->firmware_event_name, WQ_MEM_RECLAIM);
+	    ioc->firmware_event_name, 0);
 	if (!ioc->firmware_event_thread) {
 		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
 		    ioc->name, __FILE__, __LINE__, __func__);
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
index 7de5d8d..eb5471b 100644
--- a/drivers/scsi/mvsas/mv_94xx.c
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -1080,16 +1080,16 @@
 			void __iomem *regs = mvi->regs_ex - 0x10200;
 
 			int drive = (i/3) & (4-1); /* drive number on host */
-			u32 block = mr32(MVS_SGPIO_DCTRL +
+			int driveshift = drive * 8; /* bit offset of drive */
+			u32 block = ioread32be(regs + MVS_SGPIO_DCTRL +
 				MVS_SGPIO_HOST_OFFSET * mvi->id);
 
-
 			/*
 			* if bit is set then create a mask with the first
 			* bit of the drive set in the mask ...
 			*/
-			u32 bit = (write_data[i/8] & (1 << (i&(8-1)))) ?
-				1<<(24-drive*8) : 0;
+			u32 bit = get_unaligned_be32(write_data) & (1 << i) ?
+				1 << driveshift : 0;
 
 			/*
 			* ... and then shift it to the right position based
@@ -1098,26 +1098,27 @@
 			switch (i%3) {
 			case 0: /* activity */
 				block &= ~((0x7 << MVS_SGPIO_DCTRL_ACT_SHIFT)
-					<< (24-drive*8));
+					<< driveshift);
 					/* hardwire activity bit to SOF */
 				block |= LED_BLINKA_SOF << (
 					MVS_SGPIO_DCTRL_ACT_SHIFT +
-					(24-drive*8));
+					driveshift);
 				break;
 			case 1: /* id */
 				block &= ~((0x3 << MVS_SGPIO_DCTRL_LOC_SHIFT)
-					<< (24-drive*8));
+					<< driveshift);
 				block |= bit << MVS_SGPIO_DCTRL_LOC_SHIFT;
 				break;
 			case 2: /* fail */
 				block &= ~((0x7 << MVS_SGPIO_DCTRL_ERR_SHIFT)
-					<< (24-drive*8));
+					<< driveshift);
 				block |= bit << MVS_SGPIO_DCTRL_ERR_SHIFT;
 				break;
 			}
 
-			mw32(MVS_SGPIO_DCTRL + MVS_SGPIO_HOST_OFFSET * mvi->id,
-				block);
+			iowrite32be(block,
+				regs + MVS_SGPIO_DCTRL +
+				MVS_SGPIO_HOST_OFFSET * mvi->id);
 
 		}
 
@@ -1132,7 +1133,7 @@
 			void __iomem *regs = mvi->regs_ex - 0x10200;
 
 			mw32(MVS_SGPIO_DCTRL + MVS_SGPIO_HOST_OFFSET * mvi->id,
-				be32_to_cpu(((u32 *) write_data)[i]));
+				((u32 *) write_data)[i]);
 		}
 		return reg_count;
 	}
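
A quick host-side sanity check, plain C rather than driver code, that the new driveshift arithmetic picks the same register byte as before: shifting a big-endian (ioread32be) view of the DCTRL register by drive * 8 selects the byte the old code reached by shifting the little-endian mr32() value by 24 - drive * 8. This assumes mr32() is a plain little-endian readl(), which holds on the little-endian hosts this driver normally runs on.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Bytes of the SGPIO DCTRL register as laid out in device memory
	 * (values are arbitrary placeholders). */
	const uint8_t b[4] = { 0xA0, 0xB1, 0xC2, 0xD3 };

	/* What a little-endian readl()/mr32() returns. */
	uint32_t le = b[0] | b[1] << 8 | b[2] << 16 | (uint32_t)b[3] << 24;
	/* What ioread32be() returns. */
	uint32_t be = (uint32_t)b[0] << 24 | b[1] << 16 | b[2] << 8 | b[3];

	for (int drive = 0; drive < 4; drive++) {
		unsigned old_field = (le >> (24 - drive * 8)) & 0xFF;
		unsigned new_field = (be >> (drive * 8)) & 0xFF;

		printf("drive %d: old 0x%02X, new 0x%02X  %s\n", drive,
		       old_field, new_field,
		       old_field == new_field ? "match" : "MISMATCH");
	}
	return 0;
}

The write side mirrors this: the modified block is written back with iowrite32be() instead of mw32(), so the per-drive fields stay at drive * 8 in both directions.
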
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index bddaabb..73c99f2 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -272,7 +272,8 @@
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 
 	/* Read all mbox registers? */
-	mboxes = (1 << ha->mbx_count) - 1;
+	WARN_ON_ONCE(ha->mbx_count > 32);
+	mboxes = (1ULL << ha->mbx_count) - 1;
 	if (!ha->mcp)
 		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
 	else
@@ -2516,7 +2517,8 @@
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 
 	/* Read all mbox registers? */
-	mboxes = (1 << ha->mbx_count) - 1;
+	WARN_ON_ONCE(ha->mbx_count > 32);
+	mboxes = (1ULL << ha->mbx_count) - 1;
 	if (!ha->mcp)
 		ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
 	else
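
The widened shift matters because mbx_count can legitimately reach 32 on newer ISPs, and 1 << 32 on a 32-bit int is undefined behaviour in C rather than the intended all-ones mask; the added WARN_ON_ONCE documents the assumption that it never exceeds 32. A minimal standalone illustration of the fixed expression:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int mbx_count = 32;	/* worst case the WARN_ON_ONCE guards */

	/* (1 << mbx_count) - 1 would shift a 32-bit int by its full width,
	 * which is undefined behaviour; in practice the mask often ends up
	 * as 0 and no mailbox registers get captured. */

	/* Doing the shift in 64 bits first gives 0xFFFFFFFF after the
	 * subtraction, i.e. "read all 32 mailbox registers". */
	uint32_t mboxes = (uint32_t)((1ULL << mbx_count) - 1);

	printf("mbx_count=%u -> mboxes mask 0x%08x\n",
	       mbx_count, (unsigned)mboxes);
	return 0;
}
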
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index a7cfc27..ce1d063f 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -168,6 +168,8 @@
 #define DEV_DB_NON_PERSISTENT	0
 #define DEV_DB_PERSISTENT	1
 
+#define QL4_ISP_REG_DISCONNECT 0xffffffffU
+
 #define COPY_ISID(dst_isid, src_isid) {			\
 	int i, j;					\
 	for (i = 0, j = ISID_SIZE - 1; i < ISID_SIZE;)	\
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 01c3610..d8c0343 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -262,6 +262,24 @@
 
 static struct scsi_transport_template *qla4xxx_scsi_transport;
 
+static int qla4xxx_isp_check_reg(struct scsi_qla_host *ha)
+{
+	u32 reg_val = 0;
+	int rval = QLA_SUCCESS;
+
+	if (is_qla8022(ha))
+		reg_val = readl(&ha->qla4_82xx_reg->host_status);
+	else if (is_qla8032(ha) || is_qla8042(ha))
+		reg_val = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
+	else
+		reg_val = readw(&ha->reg->ctrl_status);
+
+	if (reg_val == QL4_ISP_REG_DISCONNECT)
+		rval = QLA_ERROR;
+
+	return rval;
+}
+
 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
 			     uint32_t iface_type, uint32_t payload_size,
 			     uint32_t pid, struct sockaddr *dst_addr)
@@ -9196,10 +9214,17 @@
 	struct srb *srb = NULL;
 	int ret = SUCCESS;
 	int wait = 0;
+	int rval;
 
 	ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n",
 		   ha->host_no, id, lun, cmd, cmd->cmnd[0]);
 
+	rval = qla4xxx_isp_check_reg(ha);
+	if (rval != QLA_SUCCESS) {
+		ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+		return FAILED;
+	}
+
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	srb = (struct srb *) CMD_SP(cmd);
 	if (!srb) {
@@ -9251,6 +9276,7 @@
 	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
 	struct ddb_entry *ddb_entry = cmd->device->hostdata;
 	int ret = FAILED, stat;
+	int rval;
 
 	if (!ddb_entry)
 		return ret;
@@ -9270,6 +9296,12 @@
 		      cmd, jiffies, cmd->request->timeout / HZ,
 		      ha->dpc_flags, cmd->result, cmd->allowed));
 
+	rval = qla4xxx_isp_check_reg(ha);
+	if (rval != QLA_SUCCESS) {
+		ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+		return FAILED;
+	}
+
 	/* FIXME: wait for hba to go online */
 	stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
 	if (stat != QLA_SUCCESS) {
@@ -9313,6 +9345,7 @@
 	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
 	struct ddb_entry *ddb_entry = cmd->device->hostdata;
 	int stat, ret;
+	int rval;
 
 	if (!ddb_entry)
 		return FAILED;
@@ -9330,6 +9363,12 @@
 		      ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
 		      ha->dpc_flags, cmd->result, cmd->allowed));
 
+	rval = qla4xxx_isp_check_reg(ha);
+	if (rval != QLA_SUCCESS) {
+		ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+		return FAILED;
+	}
+
 	stat = qla4xxx_reset_target(ha, ddb_entry);
 	if (stat != QLA_SUCCESS) {
 		starget_printk(KERN_INFO, scsi_target(cmd->device),
@@ -9384,9 +9423,16 @@
 {
 	int return_status = FAILED;
 	struct scsi_qla_host *ha;
+	int rval;
 
 	ha = to_qla_host(cmd->device->host);
 
+	rval = qla4xxx_isp_check_reg(ha);
+	if (rval != QLA_SUCCESS) {
+		ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+		return FAILED;
+	}
+
 	if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba)
 		qla4_83xx_set_idc_dontreset(ha);
 
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 57a3ee0..3ccc858 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2044,6 +2044,8 @@
 	if (!shost->use_clustering)
 		q->limits.cluster = 0;
 
+	if (shost->inlinecrypt_support)
+		queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, q);
 	/*
 	 * Set a reasonable default alignment:  The larger of 32-byte (dword),
 	 * which is a common minimum for HBAs, and the minimum DMA alignment,
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 4fb494a..1cb0403 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1860,6 +1860,8 @@
 				break;	/* standby */
 			if (sshdr.asc == 4 && sshdr.ascq == 0xc)
 				break;	/* unavailable */
+			if (sshdr.asc == 4 && sshdr.ascq == 0x1b)
+				break;	/* sanitize in progress */
 			/*
 			 * Issue command to spin up drive when not ready
 			 */
@@ -2319,6 +2321,7 @@
 	int res;
 	struct scsi_device *sdp = sdkp->device;
 	struct scsi_mode_data data;
+	int disk_ro = get_disk_ro(sdkp->disk);
 
 	set_disk_ro(sdkp->disk, 0);
 	if (sdp->skip_ms_page_3f) {
@@ -2358,7 +2361,7 @@
 			  "Test WP failed, assume Write Enabled\n");
 	} else {
 		sdkp->write_prot = ((data.device_specific & 0x80) != 0);
-		set_disk_ro(sdkp->disk, sdkp->write_prot);
+		set_disk_ro(sdkp->disk, sdkp->write_prot || disk_ro);
 	}
 }
 
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 3d9ad4c..15d5af0 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1897,7 +1897,7 @@
 		num = (rem_sz > scatter_elem_sz_prev) ?
 			scatter_elem_sz_prev : rem_sz;
 
-		schp->pages[k] = alloc_pages(gfp_mask, order);
+		schp->pages[k] = alloc_pages(gfp_mask | __GFP_ZERO, order);
 		if (!schp->pages[k])
 			goto out;
 
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index e635973..0169984 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -522,6 +522,8 @@
 	struct scsi_cd *cd;
 	int ret = -ENXIO;
 
+	check_disk_change(bdev);
+
 	mutex_lock(&sr_mutex);
 	cd = scsi_cd_get(bdev->bd_disk);
 	if (cd) {
@@ -582,18 +584,28 @@
 static unsigned int sr_block_check_events(struct gendisk *disk,
 					  unsigned int clearing)
 {
-	struct scsi_cd *cd = scsi_cd(disk);
+	unsigned int ret = 0;
+	struct scsi_cd *cd;
 
-	if (atomic_read(&cd->device->disk_events_disable_depth))
+	cd = scsi_cd_get(disk);
+	if (!cd)
 		return 0;
 
-	return cdrom_check_events(&cd->cdi, clearing);
+	if (!atomic_read(&cd->device->disk_events_disable_depth))
+		ret = cdrom_check_events(&cd->cdi, clearing);
+
+	scsi_cd_put(cd);
+	return ret;
 }
 
 static int sr_block_revalidate_disk(struct gendisk *disk)
 {
-	struct scsi_cd *cd = scsi_cd(disk);
 	struct scsi_sense_hdr sshdr;
+	struct scsi_cd *cd;
+
+	cd = scsi_cd_get(disk);
+	if (!cd)
+		return -ENXIO;
 
 	/* if the unit is not ready, nothing more to do */
 	if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr))
@@ -602,6 +614,7 @@
 	sr_cd_check(&cd->cdi);
 	get_sectorsize(cd);
 out:
+	scsi_cd_put(cd);
 	return 0;
 }
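
The sr changes above make the block-layer callbacks take a reference with scsi_cd_get() and drop it with scsi_cd_put(), instead of dereferencing scsi_cd(disk) directly, so they hold the device alive while they run and bail out if it is already gone. The snippet below is only a generic userspace sketch of that get/put discipline using C11 atomics; the real helpers live in sr.c and also pin the SCSI host:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy object with a reference count, mirroring the scsi_cd_get()/
 * scsi_cd_put() pattern the patch adds around the block callbacks. */
struct cd {
	atomic_int refs;
};

static struct cd *cd_get(struct cd *cd)
{
	/* Refuse to hand out a reference once the object is going away. */
	int old = atomic_load(&cd->refs);

	do {
		if (old == 0)
			return NULL;
	} while (!atomic_compare_exchange_weak(&cd->refs, &old, old + 1));
	return cd;
}

static void cd_put(struct cd *cd)
{
	if (atomic_fetch_sub(&cd->refs, 1) == 1)
		free(cd);
}

int main(void)
{
	struct cd *cd = calloc(1, sizeof(*cd));

	atomic_init(&cd->refs, 1);

	struct cd *ref = cd_get(cd);	/* callback path takes a reference */
	if (ref) {
		puts("do work against a pinned object");
		cd_put(ref);		/* drop it when done */
	}
	cd_put(cd);			/* original owner drops the last ref */
	return 0;
}

The important property is that the get side refuses to hand out a reference once the count has reached zero, which is what protects sr_block_check_events() and sr_block_revalidate_disk() from racing with device removal.
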
 
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 0dd1984..d92b280 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1580,7 +1580,7 @@
 	.eh_timed_out =		storvsc_eh_timed_out,
 	.slave_alloc =		storvsc_device_alloc,
 	.slave_configure =	storvsc_device_configure,
-	.cmd_per_lun =		255,
+	.cmd_per_lun =		2048,
 	.this_id =		-1,
 	.use_clustering =	ENABLE_CLUSTERING,
 	/* Make sure we dont get a sg segment crosses a page boundary */
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 6b349e3..c6425e3 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -536,7 +536,7 @@
 	 *  Look for the greatest clock divisor that allows an 
 	 *  input speed faster than the period.
 	 */
-	while (div-- > 0)
+	while (--div > 0)
 		if (kpc >= (div_10M[div] << 2)) break;
 
 	/*
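
This one-character change is an off-by-one fix: with the post-decrement form the loop can fall through with div == -1, and the subsequent div_10M[div] lookup reads before the start of the table. A tiny standalone comparison of the two loop forms in the case where no divisor matches:

#include <stdio.h>

int main(void)
{
	int div;

	/* Old form: the test uses the value before the decrement, so when
	 * nothing breaks out early the loop only stops after div has been
	 * decremented past zero, leaving div == -1 for a later
	 * div_10M[div] access. */
	div = 7;
	while (div-- > 0)
		;	/* body never breaks in this scenario */
	printf("post-decrement loop ends with div = %d\n", div);	/* -1 */

	/* New form: decrement first, then test, so the loop ends with
	 * div == 0 and the index stays within the table. */
	div = 7;
	while (--div > 0)
		;
	printf("pre-decrement loop ends with div = %d\n", div);		/* 0 */

	return 0;
}
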
diff --git a/drivers/scsi/ufs/ufs-qcom-ice.c b/drivers/scsi/ufs/ufs-qcom-ice.c
index b1c86d4..d4fe6ee 100644
--- a/drivers/scsi/ufs/ufs-qcom-ice.c
+++ b/drivers/scsi/ufs/ufs-qcom-ice.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -172,17 +172,15 @@
 static void ufs_qcom_ice_cfg_work(struct work_struct *work)
 {
 	unsigned long flags;
-	struct ice_data_setting ice_set;
 	struct ufs_qcom_host *qcom_host =
 		container_of(work, struct ufs_qcom_host, ice_cfg_work);
-	struct request *req_pending = NULL;
 
 	if (!qcom_host->ice.vops->config_start)
 		return;
 
 	spin_lock_irqsave(&qcom_host->ice_work_lock, flags);
-	req_pending = qcom_host->req_pending;
-	if (!req_pending) {
+	if (!qcom_host->req_pending) {
+		qcom_host->work_pending = false;
 		spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
 		return;
 	}
@@ -191,24 +189,15 @@
 	/*
 	 * config_start is called again as previous attempt returned -EAGAIN,
 	 * this call shall now take care of the necessary key setup.
-	 * 'ice_set' will not actually be used, instead the next call to
-	 * config_start() for this request, in the normal call flow, will
-	 * succeed as the key has now been setup.
 	 */
 	qcom_host->ice.vops->config_start(qcom_host->ice.pdev,
-		qcom_host->req_pending, &ice_set, false);
+		qcom_host->req_pending, NULL, false);
 
 	spin_lock_irqsave(&qcom_host->ice_work_lock, flags);
 	qcom_host->req_pending = NULL;
+	qcom_host->work_pending = false;
 	spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
 
-	/*
-	 * Resume with requests processing. We assume config_start has been
-	 * successful, but even if it wasn't we still must resume in order to
-	 * allow for the request to be retried.
-	 */
-	ufshcd_scsi_unblock_requests(qcom_host->hba);
-
 }
 
 /**
@@ -294,18 +283,14 @@
 			 * requires a non-atomic context, this means we should
 			 * call the function again from the worker thread to do
 			 * the configuration. For this request the error will
-			 * propagate so it will be re-queued and until the
-			 * configuration is is completed we block further
-			 * request processing.
+			 * propagate so it will be re-queued.
 			 */
 			if (err == -EAGAIN) {
 				dev_dbg(qcom_host->hba->dev,
 					"%s: scheduling task for ice setup\n",
 					__func__);
 
-				if (!qcom_host->req_pending) {
-					ufshcd_scsi_block_requests(
-						qcom_host->hba);
+				if (!qcom_host->work_pending) {
 					qcom_host->req_pending = cmd->request;
 
 					if (!queue_work(ice_workqueue,
@@ -316,10 +301,9 @@
 						&qcom_host->ice_work_lock,
 						flags);
 
-						ufshcd_scsi_unblock_requests(
-							qcom_host->hba);
 						return err;
 					}
+					qcom_host->work_pending = true;
 				}
 
 			} else {
@@ -418,9 +402,7 @@
 			 * requires a non-atomic context, this means we should
 			 * call the function again from the worker thread to do
 			 * the configuration. For this request the error will
-			 * propagate so it will be re-queued and until the
-			 * configuration is is completed we block further
-			 * request processing.
+			 * propagate so it will be re-queued.
 			 */
 			if (err == -EAGAIN) {
 
@@ -428,9 +410,8 @@
 					"%s: scheduling task for ice setup\n",
 					__func__);
 
-				if (!qcom_host->req_pending) {
-					ufshcd_scsi_block_requests(
-						qcom_host->hba);
+				if (!qcom_host->work_pending) {
+
 					qcom_host->req_pending = cmd->request;
 					if (!queue_work(ice_workqueue,
 						&qcom_host->ice_cfg_work)) {
@@ -440,10 +421,9 @@
 						&qcom_host->ice_work_lock,
 						flags);
 
-						ufshcd_scsi_unblock_requests(
-							qcom_host->hba);
 						return err;
 					}
+					qcom_host->work_pending = true;
 				}
 
 			} else {
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 39ab28a..93c3af0 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -897,11 +897,18 @@
 		req = lrbp->cmd->request;
 	else
 		return 0;
-
-	/* Use request LBA as the DUN value */
-	if (req->bio)
-		*dun = (req->bio->bi_iter.bi_sector) >>
-				UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
+	/*
+	 * Right now ICE does not support a variable DUN; this could be
+	 * added as a future enhancement:
+	 * if (bio_dun(req->bio)) {
+	 *      the bio can be split, so the DUN offset has to be
+	 *      adjusted: *dun = bio_dun(req->bio);
+	 * } else
+	 */
+	if (req->bio) {
+		*dun = req->bio->bi_iter.bi_sector;
+		*dun >>= UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
+	}
 
 	ret = ufs_qcom_ice_req_setup(host, lrbp->cmd, cc_index, enable);
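
The DUN programmed into ICE here is the request's starting LBA expressed in 4 KB crypto data units; shifting the 512-byte sector number right by UFS_QCOM_ICE_TR_DATA_UNIT_4_KB converts between the two (the shift is assumed to be 3, since 4096 / 512 = 8, but the actual constant lives in the driver headers). A trivial standalone check of that conversion:

#include <stdint.h>
#include <stdio.h>

/* Assumed value of UFS_QCOM_ICE_TR_DATA_UNIT_4_KB: 512-byte sectors per
 * 4 KB data unit is 8, i.e. a shift of 3. */
#define DATA_UNIT_4KB_SHIFT 3

int main(void)
{
	uint64_t bi_sector = 1000000;	/* example request start sector */
	uint64_t dun = bi_sector >> DATA_UNIT_4KB_SHIFT;

	printf("sector %llu -> DUN %llu\n",
	       (unsigned long long)bi_sector, (unsigned long long)dun);
	return 0;
}
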
 
@@ -2133,6 +2140,8 @@
 		dev_err(dev, "%s: ufs_qcom_ice_get_dev failed %d\n",
 			__func__, err);
 		goto out_host_free;
+	} else {
+		hba->host->inlinecrypt_support = 1;
 	}
 
 	host->generic_phy = devm_phy_get(dev, "ufsphy");
@@ -2740,7 +2749,7 @@
 	 * the regulators.
 	 */
 	if (of_property_read_bool(np, "non-removable") &&
-	    strlen(android_boot_dev) &&
+	    !of_property_read_bool(np, "force-ufshc-probe") &&
 	    strcmp(android_boot_dev, dev_name(dev)))
 		return -ENODEV;
 
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index a03ecb0..27f6a05 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -375,6 +375,7 @@
 	struct work_struct ice_cfg_work;
 	struct request *req_pending;
 	struct ufs_vreg *vddp_ref_clk;
+	bool work_pending;
 };
 
 static inline u32
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index d427fb3..c6cfd18 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -5433,7 +5433,7 @@
 	/* REPORT SUPPORTED OPERATION CODES is not supported */
 	sdev->no_report_opcodes = 1;
 
-	/* WRITE_SAME command is not supported*/
+	/* WRITE_SAME command is not supported */
 	sdev->no_write_same = 1;
 
 	ufshcd_set_queue_depth(sdev);
@@ -6559,8 +6559,8 @@
 	u32 mode;
 
 	hba = container_of(work, struct ufs_hba, rls_work);
-	ufshcd_scsi_block_requests(hba);
 	pm_runtime_get_sync(hba->dev);
+	ufshcd_scsi_block_requests(hba);
 	down_write(&hba->lock);
 	ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
 	if (ret) {
@@ -7090,7 +7090,10 @@
 	 * To avoid these unnecessary/illegal step we skip to the last error
 	 * handling stage: reset and restore.
 	 */
-	if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
+	if ((lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) ||
+	    (lrbp->lun == UFS_UPIU_REPORT_LUNS_WLUN) ||
+	    (lrbp->lun == UFS_UPIU_BOOT_WLUN) ||
+	    (lrbp->lun == UFS_UPIU_RPMB_WLUN))
 		return ufshcd_eh_host_reset_handler(cmd);
 
 	ufshcd_hold_all(hba);
@@ -8093,9 +8096,16 @@
 	/*
 	 * If we failed to initialize the device or the device is not
 	 * present, turn off the power/clocks etc.
+	 * In cases where both UFS and eMMC are present and regulators
+	 * are shared between the two, this should not turn off the
+	 * regulators without giving eMMC a chance to send PON.
+	 * Hence schedule a delayed suspend, giving eMMC enough time
+	 * to vote for the shared regulator.
 	 */
-	if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress)
-		pm_runtime_put_sync(hba->dev);
+	if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
+		pm_runtime_put_noidle(hba->dev);
+		pm_schedule_suspend(hba->dev, MSEC_PER_SEC * 10);
+	}
 
 	trace_ufshcd_init(dev_name(hba->dev), ret,
 		ktime_to_us(ktime_sub(ktime_get(), start)),
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 8f4adc1..cbc8e93 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -819,6 +819,7 @@
 	.change_queue_depth = virtscsi_change_queue_depth,
 	.eh_abort_handler = virtscsi_abort,
 	.eh_device_reset_handler = virtscsi_device_reset,
+	.slave_alloc = virtscsi_device_alloc,
 
 	.can_queue = 1024,
 	.dma_boundary = UINT_MAX,
diff --git a/drivers/slimbus/slim-msm-ngd.c b/drivers/slimbus/slim-msm-ngd.c
index 26dfd3f..34be230 100644
--- a/drivers/slimbus/slim-msm-ngd.c
+++ b/drivers/slimbus/slim-msm-ngd.c
@@ -214,6 +214,8 @@
 						dsp);
 	struct pd_qmi_client_data *reg;
 
+	/* Resetting the log level */
+	SLIM_RST_LOGLVL(dev);
 	SLIM_INFO(dev, "SLIM DSP SSR/PDR notify cb:0x%lx, type:%d\n",
 			code, dsp->dom_t);
 	switch (code) {
diff --git a/drivers/slimbus/slim-msm.c b/drivers/slimbus/slim-msm.c
index 24a3ccf..8f4851e 100644
--- a/drivers/slimbus/slim-msm.c
+++ b/drivers/slimbus/slim-msm.c
@@ -182,8 +182,12 @@
 	if (!ctrl_dev->iommu_desc.cb_dev)
 		return 0;
 
-	if (!IS_ERR_OR_NULL(ctrl_dev->iommu_desc.iommu_map))
-		return 0;
+	if (!IS_ERR_OR_NULL(ctrl_dev->iommu_desc.iommu_map)) {
+		arm_iommu_detach_device(ctrl_dev->iommu_desc.cb_dev);
+		arm_iommu_release_mapping(ctrl_dev->iommu_desc.iommu_map);
+		ctrl_dev->iommu_desc.iommu_map = NULL;
+		SLIM_INFO(ctrl_dev, "NGD IOMMU detach complete\n");
+	}
 
 	dev = ctrl_dev->iommu_desc.cb_dev;
 	iommu_map = arm_iommu_create_mapping(&platform_bus_type,
@@ -1300,12 +1304,6 @@
 			msm_slim_disconn_pipe_port(dev, i);
 	}
 
-	if (!IS_ERR_OR_NULL(dev->iommu_desc.iommu_map)) {
-		arm_iommu_detach_device(dev->iommu_desc.cb_dev);
-		arm_iommu_release_mapping(dev->iommu_desc.iommu_map);
-		dev->iommu_desc.iommu_map = NULL;
-	}
-
 	if (dereg) {
 		for (i = 0; i < dev->port_nums; i++) {
 			if (dev->pipes[i].connected)
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 5fafaca..3ecca59 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -684,6 +684,17 @@
 	  determines last CPU to call into PSCI for cluster Low power
 	  modes.
 
+config MSM_PM_LEGACY
+	depends on PM
+	select MSM_IDLE_STATS if DEBUG_FS
+	select CPU_IDLE_MULTIPLE_DRIVERS
+	bool "Qualcomm platform specific Legacy PM driver"
+	help
+	  Platform-specific legacy power driver to manage
+	  core and L2 low power modes. It interfaces with
+	  various system drivers and puts the cores into
+	  low power modes.
+
 config MSM_NOPM
 	default y if !PM
 	bool
@@ -700,7 +711,7 @@
 	  trusted apps, unloading them and marshalling buffers to the
 	  trusted fingerprint app.
 
-if MSM_PM
+if (MSM_PM || MSM_PM_LEGACY)
 menuconfig MSM_IDLE_STATS
 	bool "Collect idle statistics"
 	help
@@ -733,7 +744,7 @@
 	  histogram.  This is for collecting statistics on suspend.
 
 endif # MSM_IDLE_STATS
-endif # MSM_PM
+endif # MSM_PM || MSM_PM_LEGACY
 
 config QCOM_DCC_V2
 	bool "Qualcomm Technologies Data Capture and Compare engine support for V2"
@@ -868,3 +879,11 @@
 		interrupt event and event data.
 
 source "drivers/soc/qcom/wcnss/Kconfig"
+
+config BIG_CLUSTER_MIN_FREQ_ADJUST
+	bool "Adjust BIG cluster min frequency based on power collapse state"
+	default n
+	help
+	  This driver sets the floor of the big cluster's min frequency to the
+	  user-specified value when the cluster is not power collapsed. When the
+	  cluster is power collapsed, it resets the floor to the physical limits.
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index b83f554..0b71121 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -14,6 +14,7 @@
 obj-$(CONFIG_QCOM_SMD) +=	smd.o
 obj-$(CONFIG_QCOM_SMD_RPM)	+= smd-rpm.o
 obj-$(CONFIG_QCOM_SMEM) +=	smem.o
+obj-$(CONFIG_MSM_PM_LEGACY) += pm-boot.o msm-pm.o
 obj-$(CONFIG_MSM_SPM) += msm-spm.o spm_devices.o
 obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o
 obj-$(CONFIG_QCOM_SMP2P)	+= smp2p.o
@@ -105,3 +106,4 @@
 obj-$(CONFIG_QCOM_QDSS_BRIDGE) += qdss_bridge.o
 obj-$(CONFIG_MSM_BAM_DMUX) += bam_dmux.o
 obj-$(CONFIG_WCNSS_CORE) += wcnss/
+obj-$(CONFIG_BIG_CLUSTER_MIN_FREQ_ADJUST) += big_cluster_min_freq_adjust.o
diff --git a/drivers/soc/qcom/bg_rsb.c b/drivers/soc/qcom/bg_rsb.c
index fdfd7b7..ffba372 100644
--- a/drivers/soc/qcom/bg_rsb.c
+++ b/drivers/soc/qcom/bg_rsb.c
@@ -33,7 +33,7 @@
 
 #define BGRSB_GLINK_INTENT_SIZE 0x04
 #define BGRSB_MSG_SIZE 0x08
-#define TIMEOUT_MS 500
+#define TIMEOUT_MS 2000
 
 #define BGRSB_LDO15_VTG_MIN_UV 3300000
 #define BGRSB_LDO15_VTG_MAX_UV 3300000
@@ -122,6 +122,8 @@
 
 	struct device *ldev;
 
+	struct wakeup_source bgrsb_ws;
+
 	wait_queue_head_t link_state_wait;
 
 	uint32_t calbrtion_intrvl;
@@ -462,6 +464,7 @@
 	if (!dev->chnl_state)
 		return -ENODEV;
 
+	__pm_stay_awake(&dev->bgrsb_ws);
 	mutex_lock(&dev->glink_mutex);
 	init_completion(&dev->tx_done);
 	init_completion(&dev->bg_resp_cmplt);
@@ -507,6 +510,7 @@
 
 err_ret:
 	mutex_unlock(&dev->glink_mutex);
+	__pm_relax(&dev->bgrsb_ws);
 	return rc;
 }
 
@@ -544,7 +548,7 @@
 
 		rc = wait_event_timeout(dev->link_state_wait,
 				(dev->chnl_state == true),
-					msecs_to_jiffies(TIMEOUT_MS*2));
+					msecs_to_jiffies(TIMEOUT_MS));
 		if (rc == 0) {
 			pr_err("Glink channel connection time out\n");
 			return;
@@ -574,7 +578,7 @@
 
 		rc = wait_event_timeout(dev->link_state_wait,
 					(dev->chnl_state == true),
-						msecs_to_jiffies(TIMEOUT_MS*2));
+						msecs_to_jiffies(TIMEOUT_MS));
 		if (rc == 0) {
 			pr_err("Glink channel connection time out\n");
 			return;
@@ -904,6 +908,9 @@
 	if (!dev)
 		return -ENOMEM;
 
+	/* Add wake lock for PM suspend */
+	wakeup_source_init(&dev->bgrsb_ws, "BGRSB_wake_lock");
+
 	dev->bgrsb_current_state = BGRSB_STATE_UNKNOWN;
 	rc = bgrsb_init(dev);
 	if (rc)
@@ -964,13 +971,14 @@
 	destroy_workqueue(dev->bgrsb_event_wq);
 	destroy_workqueue(dev->bgrsb_wq);
 	input_free_device(dev->input);
+	wakeup_source_trash(&dev->bgrsb_ws);
 
 	return 0;
 }
 
-static int bg_rsb_resume(struct platform_device *pdev)
+static int bg_rsb_resume(struct device *pldev)
 {
-	int rc;
+	struct platform_device *pdev = to_platform_device(pldev);
 	struct bgrsb_priv *dev = platform_get_drvdata(pdev);
 
 	if (dev->bgrsb_current_state == BGRSB_STATE_RSB_CONFIGURED)
@@ -978,12 +986,6 @@
 
 	if (dev->bgrsb_current_state == BGRSB_STATE_INIT) {
 		if (bgrsb_ldo_work(dev, BGRSB_ENABLE_LDO11) == 0) {
-			rc = bgrsb_configr_rsb(dev, true);
-			if (rc != 0) {
-				pr_err("BG failed to configure RSB %d\n", rc);
-				bgrsb_ldo_work(dev, BGRSB_DISABLE_LDO11);
-				return rc;
-			}
 			dev->bgrsb_current_state = BGRSB_STATE_RSB_CONFIGURED;
 			pr_debug("RSB Cofigured\n");
 			return 0;
@@ -993,8 +995,9 @@
 	return -EINVAL;
 }
 
-static int bg_rsb_suspend(struct platform_device *pdev, pm_message_t state)
+static int bg_rsb_suspend(struct device *pldev)
 {
+	struct platform_device *pdev = to_platform_device(pldev);
 	struct bgrsb_priv *dev = platform_get_drvdata(pdev);
 
 	if (dev->bgrsb_current_state == BGRSB_STATE_INIT)
@@ -1021,15 +1024,19 @@
 	{ }
 };
 
+static const struct dev_pm_ops pm_rsb = {
+	.resume		= bg_rsb_resume,
+	.suspend	= bg_rsb_suspend,
+};
+
 static struct platform_driver bg_rsb_driver = {
 	.driver = {
 		.name = "bg-rsb",
 		.of_match_table = bg_rsb_of_match,
+		.pm = &pm_rsb,
 	},
 	.probe		= bg_rsb_probe,
 	.remove		= bg_rsb_remove,
-	.resume		= bg_rsb_resume,
-	.suspend	= bg_rsb_suspend,
 };
 
 module_platform_driver(bg_rsb_driver);
diff --git a/drivers/soc/qcom/bgcom_interface.c b/drivers/soc/qcom/bgcom_interface.c
index efef26d..1cde8c6 100644
--- a/drivers/soc/qcom/bgcom_interface.c
+++ b/drivers/soc/qcom/bgcom_interface.c
@@ -43,7 +43,8 @@
 #define BGDAEMON_LDO03_LPM_VTG 0
 #define BGDAEMON_LDO03_NPM_VTG 10000
 
-#define MPPS_DOWN_EVENT_TO_BG_TIMEOUT 100
+#define MPPS_DOWN_EVENT_TO_BG_TIMEOUT 3000
+#define SLEEP_FOR_SPI_BUS 2000
 
 enum {
 	SSR_DOMAIN_BG,
@@ -93,6 +94,7 @@
 static  dev_t                    bg_dev;
 static  int                      device_open;
 static  void                     *handle;
+static	bool                     twm_exit;
 static  struct   bgcom_open_config_type   config_type;
 static DECLARE_COMPLETION(bg_modem_down_wait);
 
@@ -353,6 +355,8 @@
 		break;
 	case SET_SPI_BUSY:
 		ret = bgcom_set_spi_state(BGCOM_SPI_BUSY);
+		/* Allow time for the SPI bus to be released */
+		msleep(SLEEP_FOR_SPI_BUS);
 		break;
 	case BG_SOFT_RESET:
 		ret = bg_soft_reset();
@@ -360,6 +364,10 @@
 	case BG_MODEM_DOWN2_BG_DONE:
 		ret = modem_down2_bg();
 		break;
+	case BG_TWM_EXIT:
+		twm_exit = true;
+		ret = 0;
+		break;
 	default:
 		ret = -ENOIOCTLCMD;
 	}
@@ -515,6 +523,10 @@
 		bgcom_set_spi_state(BGCOM_SPI_BUSY);
 		send_uevent(&bge);
 		break;
+	case SUBSYS_AFTER_SHUTDOWN:
+		/* Allow time for the SPI bus to be released */
+		msleep(SLEEP_FOR_SPI_BUS);
+		break;
 	case SUBSYS_AFTER_POWERUP:
 		bge.e_type = BG_AFTER_POWER_UP;
 		bgdaemon_ldowork(DISABLE_LDO03);
@@ -555,6 +567,16 @@
 	return NOTIFY_DONE;
 }
 
+bool is_twm_exit(void)
+{
+	if (twm_exit) {
+		twm_exit = false;
+		return true;
+	}
+	return false;
+}
+EXPORT_SYMBOL(is_twm_exit);
+
 static struct notifier_block ssr_modem_nb = {
 	.notifier_call = ssr_modem_cb,
 	.priority = 0,
diff --git a/drivers/soc/qcom/bgcom_interface.h b/drivers/soc/qcom/bgcom_interface.h
index 500ca6d..235995e 100644
--- a/drivers/soc/qcom/bgcom_interface.h
+++ b/drivers/soc/qcom/bgcom_interface.h
@@ -13,10 +13,17 @@
 #ifndef BGCOM_INTERFACE_H
 #define BGCOM_INTERFACE_H
 
-/**
+/*
  * bg_soft_reset() - soft reset Blackghost
  * Return 0 on success or -Ve on error
  */
 int bg_soft_reset(void);
 
+/*
+ * is_twm_exit()
+ * Return true if device is booting up on TWM exit.
+ * value is auto cleared once read.
+ */
+bool is_twm_exit(void);
+
 #endif /* BGCOM_INTERFACE_H */
diff --git a/drivers/soc/qcom/bgcom_spi.c b/drivers/soc/qcom/bgcom_spi.c
index b8e3b84..d2dc05f 100644
--- a/drivers/soc/qcom/bgcom_spi.c
+++ b/drivers/soc/qcom/bgcom_spi.c
@@ -25,6 +25,7 @@
 #include <linux/gpio.h>
 #include <linux/of_gpio.h>
 #include <linux/kthread.h>
+#include <linux/dma-mapping.h>
 #include "bgcom.h"
 #include "bgrsb.h"
 #include "bgcom_interface.h"
@@ -43,12 +44,11 @@
 
 #define BG_SPI_MAX_WORDS (0x3FFFFFFD)
 #define BG_SPI_MAX_REGS (0x0A)
-#define SLEEP_IN_STATE_CHNG 2000
 #define HED_EVENT_ID_LEN (0x02)
 #define HED_EVENT_SIZE_LEN (0x02)
 #define HED_EVENT_DATA_STRT_LEN (0x05)
 
-#define MAX_RETRY 200
+#define MAX_RETRY 500
 
 enum bgcom_state {
 	/*BGCOM Staus ready*/
@@ -63,6 +63,7 @@
 	BGCOM_READ_REG = 0,
 	BGCOM_READ_FIFO = 1,
 	BGCOM_READ_AHB = 2,
+	BGCOM_WRITE_REG = 3,
 };
 
 struct bg_spi_priv {
@@ -112,6 +113,17 @@
 
 static struct mutex bg_resume_mutex;
 
+static atomic_t  bg_is_spi_active;
+static int bg_irq;
+
+static struct spi_device *get_spi_device(void)
+{
+	struct bg_spi_priv *bg_spi = container_of(bg_com_drv,
+						struct bg_spi_priv, lhandle);
+	struct spi_device *spi = bg_spi->spi;
+	return spi;
+}
+
 static void augmnt_fifo(uint8_t *data, int pos)
 {
 	data[pos] = '\0';
@@ -149,8 +161,6 @@
 
 	mutex_lock(&bg_spi->xfer_mutex);
 	spi_state = state;
-	if (spi_state == BGCOM_SPI_BUSY)
-		msleep(SLEEP_IN_STATE_CHNG);
 	mutex_unlock(&bg_spi->xfer_mutex);
 	return 0;
 }
@@ -197,6 +207,10 @@
 	case BGCOM_READ_FIFO:
 		ret = bgcom_fifo_read(&clnt_handle, no_of_words, buf);
 		break;
+	case BGCOM_WRITE_REG:
+		ret = bgcom_reg_write(&clnt_handle, BG_CMND_REG,
+					no_of_words, buf);
+		break;
 	case BGCOM_READ_AHB:
 		break;
 	}
@@ -232,6 +246,9 @@
 	tx_xfer = &bg_spi->xfer1;
 	spi = bg_spi->spi;
 
+	if (!atomic_read(&bg_is_spi_active))
+		return -ECANCELED;
+
 	mutex_lock(&bg_spi->xfer_mutex);
 	bg_spi_reinit_xfer(tx_xfer);
 	tx_xfer->tx_buf = tx_buf;
@@ -443,6 +460,7 @@
 int bgcom_ahb_read(void *handle, uint32_t ahb_start_addr,
 	uint32_t num_words, void *read_buf)
 {
+	dma_addr_t dma_hndl_tx, dma_hndl_rx;
 	uint32_t txn_len;
 	uint8_t *tx_buf;
 	uint8_t *rx_buf;
@@ -450,6 +468,7 @@
 	int ret;
 	uint8_t cmnd = 0;
 	uint32_t ahb_addr = 0;
+	struct spi_device *spi = get_spi_device();
 
 	if (!handle || !read_buf || num_words == 0
 		|| num_words > BG_SPI_MAX_WORDS) {
@@ -472,15 +491,16 @@
 	size = num_words*BG_SPI_WORD_SIZE;
 	txn_len = BG_SPI_AHB_READ_CMD_LEN + size;
 
-	tx_buf = kzalloc(txn_len, GFP_KERNEL);
 
+	tx_buf = dma_zalloc_coherent(&spi->dev, txn_len,
+					&dma_hndl_tx, GFP_KERNEL);
 	if (!tx_buf)
 		return -ENOMEM;
 
-	rx_buf = kzalloc(txn_len, GFP_KERNEL);
-
+	rx_buf = dma_zalloc_coherent(&spi->dev, txn_len,
+					&dma_hndl_rx, GFP_KERNEL);
 	if (!rx_buf) {
-		kfree(tx_buf);
+		dma_free_coherent(&spi->dev, txn_len, tx_buf, dma_hndl_tx);
 		return -ENOMEM;
 	}
 
@@ -495,8 +515,8 @@
 	if (!ret)
 		memcpy(read_buf, rx_buf+BG_SPI_AHB_READ_CMD_LEN, size);
 
-	kfree(tx_buf);
-	kfree(rx_buf);
+	dma_free_coherent(&spi->dev, txn_len, tx_buf, dma_hndl_tx);
+	dma_free_coherent(&spi->dev, txn_len, rx_buf, dma_hndl_rx);
 	return ret;
 }
 EXPORT_SYMBOL(bgcom_ahb_read);
@@ -504,12 +524,14 @@
 int bgcom_ahb_write(void *handle, uint32_t ahb_start_addr,
 	uint32_t num_words, void *write_buf)
 {
+	dma_addr_t dma_hndl;
 	uint32_t txn_len;
 	uint8_t *tx_buf;
 	uint32_t size;
 	int ret;
 	uint8_t cmnd = 0;
 	uint32_t ahb_addr = 0;
+	struct spi_device *spi = get_spi_device();
 
 	if (!handle || !write_buf || num_words == 0
 		|| num_words > BG_SPI_MAX_WORDS) {
@@ -532,9 +554,7 @@
 
 	size = num_words*BG_SPI_WORD_SIZE;
 	txn_len = BG_SPI_AHB_CMD_LEN + size;
-
-	tx_buf = kzalloc(txn_len, GFP_KERNEL);
-
+	tx_buf = dma_zalloc_coherent(&spi->dev, txn_len, &dma_hndl, GFP_KERNEL);
 	if (!tx_buf)
 		return -ENOMEM;
 
@@ -546,7 +566,7 @@
 	memcpy(tx_buf+BG_SPI_AHB_CMD_LEN, write_buf, size);
 
 	ret = bgcom_transfer(handle, tx_buf, NULL, txn_len);
-	kfree(tx_buf);
+	dma_free_coherent(&spi->dev, txn_len, tx_buf, dma_hndl);
 	return ret;
 }
 EXPORT_SYMBOL(bgcom_ahb_write);
@@ -621,6 +641,11 @@
 		return -EBUSY;
 	}
 
+	if (bgcom_resume(handle)) {
+		pr_err("Failed to resume\n");
+		return -EBUSY;
+	}
+
 	size = num_words*BG_SPI_WORD_SIZE;
 	txn_len = BG_SPI_READ_LEN + size;
 	tx_buf = kzalloc(txn_len, GFP_KERNEL | GFP_ATOMIC);
@@ -671,6 +696,11 @@
 		return -EBUSY;
 	}
 
+	if (bgcom_resume(handle)) {
+		pr_err("Failed to resume\n");
+		return -EBUSY;
+	}
+
 	size = num_regs*BG_SPI_WORD_SIZE;
 	txn_len = BG_SPI_WRITE_CMND_LEN + size;
 
@@ -749,11 +779,18 @@
 	uint8_t rx_buf[8] = {0};
 	uint32_t cmnd_reg = 0;
 
+	if (spi_state == BGCOM_SPI_BUSY) {
+		printk_ratelimited("SPI is held by TZ\n");
+		goto ret_err;
+	}
+
 	txn_len = 0x08;
 	tx_buf[0] = 0x05;
 	ret = bgcom_transfer(handle, tx_buf, rx_buf, txn_len);
 	if (!ret)
 		memcpy(&cmnd_reg, rx_buf+BG_SPI_READ_LEN, 0x04);
+
+ret_err:
 	return cmnd_reg & BIT(31);
 }
 
@@ -766,6 +803,9 @@
 	if (handle == NULL)
 		return -EINVAL;
 
+	if (!atomic_read(&bg_is_spi_active))
+		return -ECANCELED;
+
 	cntx = (struct bg_context *)handle;
 	bg_spi = cntx->bg_spi;
 
@@ -789,36 +829,15 @@
 		bg_soft_reset();
 		return -ETIMEDOUT;
 	}
-	pr_info("BG retries for wake up : %d\n", retry);
 	return 0;
 }
 EXPORT_SYMBOL(bgcom_resume);
 
 int bgcom_suspend(void *handle)
 {
-	struct bg_spi_priv *bg_spi;
-	struct bg_context *cntx;
-	uint32_t cmnd_reg = 0;
-	int ret = 0;
-
-	if (handle == NULL)
+	if (!handle)
 		return -EINVAL;
-
-	cntx = (struct bg_context *)handle;
-	bg_spi = cntx->bg_spi;
-	mutex_lock(&bg_resume_mutex);
-	if (bg_spi->bg_state == BGCOM_STATE_SUSPEND)
-		goto unlock;
-
-	cmnd_reg |= BIT(31);
-	ret = bgcom_reg_write(handle, BG_CMND_REG, 1, &cmnd_reg);
-	if (ret == 0)
-		bg_spi->bg_state = BGCOM_STATE_SUSPEND;
-
-unlock:
-	mutex_unlock(&bg_resume_mutex);
-	pr_info("suspended with : %d\n", ret);
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL(bgcom_suspend);
 
@@ -931,7 +950,6 @@
 	struct bg_spi_priv *bg_spi;
 	struct device_node *node;
 	int irq_gpio = 0;
-	int bg_irq = 0;
 	int ret;
 
 	bg_spi = devm_kzalloc(&spi->dev, sizeof(*bg_spi),
@@ -969,6 +987,8 @@
 	if (ret)
 		goto err_ret;
 
+	atomic_set(&bg_is_spi_active, 1);
+	dma_set_coherent_mask(&spi->dev, DMA_BIT_MASK(64));
 	pr_info("Bgcom Probed successfully\n");
 	return ret;
 
@@ -990,6 +1010,46 @@
 	return 0;
 }
 
+static int bgcom_pm_suspend(struct device *dev)
+{
+	uint32_t cmnd_reg = 0;
+	struct spi_device *s_dev = to_spi_device(dev);
+	struct bg_spi_priv *bg_spi = spi_get_drvdata(s_dev);
+	int ret = 0;
+
+	if (bg_spi->bg_state == BGCOM_STATE_SUSPEND)
+		return 0;
+
+	cmnd_reg |= BIT(31);
+	ret = read_bg_locl(BGCOM_WRITE_REG, 1, &cmnd_reg);
+	if (ret == 0) {
+		bg_spi->bg_state = BGCOM_STATE_SUSPEND;
+		atomic_set(&bg_is_spi_active, 0);
+	}
+	pr_info("suspended with : %d\n", ret);
+	return ret;
+}
+
+static int bgcom_pm_resume(struct device *dev)
+{
+	struct bg_context clnt_handle;
+	int ret;
+	struct bg_spi_priv *spi =
+		container_of(bg_com_drv, struct bg_spi_priv, lhandle);
+
+	clnt_handle.bg_spi = spi;
+	atomic_set(&bg_is_spi_active, 1);
+	ret = bgcom_resume(&clnt_handle);
+	if (ret == 0)
+		pr_info("Bgcom resumed\n");
+	return ret;
+}
+
+static const struct dev_pm_ops bgcom_pm = {
+	.suspend = bgcom_pm_suspend,
+	.resume = bgcom_pm_resume,
+};
+
 static const struct of_device_id bg_spi_of_match[] = {
 	{ .compatible = "qcom,bg-spi", },
 	{ }
@@ -1000,6 +1060,7 @@
 	.driver = {
 		.name = "bg-spi",
 		.of_match_table = bg_spi_of_match,
+		.pm = &bgcom_pm,
 	},
 	.probe = bg_spi_probe,
 	.remove = bg_spi_remove,
diff --git a/drivers/soc/qcom/big_cluster_min_freq_adjust.c b/drivers/soc/qcom/big_cluster_min_freq_adjust.c
new file mode 100644
index 0000000..979dd81
--- /dev/null
+++ b/drivers/soc/qcom/big_cluster_min_freq_adjust.c
@@ -0,0 +1,324 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)     "big_min_freq_adjust: " fmt
+
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/cpu_pm.h>
+#include <linux/types.h>
+#include <linux/smp.h>
+#include <linux/moduleparam.h>
+
+enum min_freq_adjust {
+	ADJUST_MIN_FLOOR,	/* Set min floor to user supplied value */
+	RESET_MIN_FLOOR,	/* Reset min floor cpuinfo value */
+};
+
+struct big_min_freq_adjust_data {
+	struct cpumask cluster_cpumask;
+	unsigned int min_freq_floor;
+	struct delayed_work min_freq_work;
+	unsigned long min_down_delay_jiffies;
+	enum min_freq_adjust min_freq_state;
+	enum min_freq_adjust min_freq_request;
+	spinlock_t lock;
+	bool big_min_freq_on;
+	bool is_init;
+};
+static struct big_min_freq_adjust_data big_min_freq_adjust_data;
+
+static void cpufreq_min_freq_work(struct work_struct *work)
+{
+	struct big_min_freq_adjust_data *p = &big_min_freq_adjust_data;
+
+	spin_lock(&p->lock);
+	if (p->min_freq_state == p->min_freq_request) {
+		spin_unlock(&p->lock);
+		return;
+	}
+	spin_unlock(&p->lock);
+	cpufreq_update_policy(cpumask_first(&p->cluster_cpumask));
+}
+
+static int cpufreq_callback(struct notifier_block *nb, unsigned long val,
+				void *data)
+{
+	struct big_min_freq_adjust_data *p = &big_min_freq_adjust_data;
+	struct cpufreq_policy *policy = data;
+	unsigned int min_freq_floor;
+
+	if (p->big_min_freq_on == false)
+		return NOTIFY_DONE;
+
+	if (val != CPUFREQ_ADJUST)
+		return NOTIFY_DONE;
+
+	if (!cpumask_test_cpu(cpumask_first(&p->cluster_cpumask),
+				policy->related_cpus))
+		return NOTIFY_DONE;
+
+	spin_lock(&p->lock);
+	if (p->min_freq_request == ADJUST_MIN_FLOOR)
+		min_freq_floor = p->min_freq_floor;
+	else
+		min_freq_floor = policy->cpuinfo.min_freq;
+	cpufreq_verify_within_limits(policy, min_freq_floor,
+			policy->cpuinfo.max_freq);
+	p->min_freq_state = p->min_freq_request;
+	spin_unlock(&p->lock);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cpufreq_nb = {
+	.notifier_call = cpufreq_callback
+};
+
+#define AFFINITY_LEVEL_L2 1
+static int cpu_pm_callback(struct notifier_block *self,
+			       unsigned long cmd, void *v)
+{
+	struct big_min_freq_adjust_data *p = &big_min_freq_adjust_data;
+	unsigned long aff_level = (unsigned long) v;
+	unsigned long delay;
+	int cpu;
+
+	if (p->big_min_freq_on == false)
+		return NOTIFY_DONE;
+
+	if (aff_level != AFFINITY_LEVEL_L2)
+		return NOTIFY_DONE;
+
+	cpu = smp_processor_id();
+
+	if (!cpumask_test_cpu(cpu, &p->cluster_cpumask))
+		return NOTIFY_DONE;
+
+	spin_lock(&p->lock);
+	switch (cmd) {
+	case CPU_CLUSTER_PM_ENTER:
+		p->min_freq_request = RESET_MIN_FLOOR;
+		delay = p->min_down_delay_jiffies;
+		break;
+	case CPU_CLUSTER_PM_ENTER_FAILED:
+	case CPU_CLUSTER_PM_EXIT:
+		p->min_freq_request = ADJUST_MIN_FLOOR;
+		/* To avoid unnecessary oscillations between exit and idle */
+		delay = 1;
+		break;
+	default:
+		spin_unlock(&p->lock);
+		return NOTIFY_DONE;
+	}
+
+	cancel_delayed_work(&p->min_freq_work);
+
+	if (p->min_freq_state != p->min_freq_request) {
+		if (p->min_freq_request == ADJUST_MIN_FLOOR) {
+			if (p->min_freq_floor > cpufreq_quick_get(cpu))
+				delay = 0;
+		}
+		queue_delayed_work(system_unbound_wq, &p->min_freq_work, delay);
+	}
+	spin_unlock(&p->lock);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cpu_pm_nb = {
+	.notifier_call = cpu_pm_callback
+};
+
+static unsigned long __read_mostly big_min_down_delay_ms;
+#define MIN_DOWN_DELAY_MSEC 80 /* Default big_min_down_delay in msec */
+#define POLICY_MIN 1094400 /* Default min_freq_floor in KHz */
+
+static void trigger_state_machine(void *d)
+{
+	struct big_min_freq_adjust_data *p = &big_min_freq_adjust_data;
+	bool *update_policy = d;
+
+	if (p->min_freq_request != ADJUST_MIN_FLOOR) {
+		p->min_freq_request = ADJUST_MIN_FLOOR;
+		*update_policy = true;
+	}
+}
+
+static int enable_big_min_freq_adjust(void)
+{
+	struct big_min_freq_adjust_data *p = &big_min_freq_adjust_data;
+	int ret;
+	bool update_policy = false;
+
+	if (p->big_min_freq_on == true)
+		return 0;
+
+	if (!cpumask_weight(&p->cluster_cpumask)) {
+		pr_err("Cluster CPU IDs not set\n");
+		return -EPERM;
+	}
+
+	INIT_DEFERRABLE_WORK(&p->min_freq_work, cpufreq_min_freq_work);
+
+	if (!big_min_down_delay_ms) {
+		big_min_down_delay_ms = MIN_DOWN_DELAY_MSEC;
+		p->min_down_delay_jiffies = msecs_to_jiffies(
+				big_min_down_delay_ms);
+	}
+	if (!p->min_freq_floor)
+		p->min_freq_floor = POLICY_MIN;
+
+	ret = cpu_pm_register_notifier(&cpu_pm_nb);
+	if (ret) {
+		pr_err("Failed to register for PM notification\n");
+		return ret;
+	}
+
+	ret = cpufreq_register_notifier(&cpufreq_nb, CPUFREQ_POLICY_NOTIFIER);
+	if (ret) {
+		pr_err("Failed to register for CPUFREQ POLICY notification\n");
+		cpu_pm_unregister_notifier(&cpu_pm_nb);
+		return ret;
+	}
+
+	p->min_freq_state = RESET_MIN_FLOOR;
+	p->min_freq_request = RESET_MIN_FLOOR;
+	spin_lock_init(&p->lock);
+	p->big_min_freq_on = true;
+
+	/* If the BIG cluster is active at this point and stays active
+	 * indefinitely, its min frequency would never be set to the floor
+	 * value. This call triggers the state machine and sets the min
+	 * frequency and min_freq_state to the appropriate values.
+	 *
+	 * Two possibilities here:
+	 * 1) If the cluster was idle before this, the wakeup is unnecessary
+	 * but the state machine is set to the proper state.
+	 * 2) If the cluster was active before this, the wakeup is necessary
+	 * and the state machine is set to the proper state.
+	 */
+	smp_call_function_any(&p->cluster_cpumask,
+			trigger_state_machine, &update_policy, true);
+	if (update_policy)
+		cpufreq_update_policy(cpumask_first(&p->cluster_cpumask));
+
+	pr_info("big min freq adjustment enabled\n");
+
+	return 0;
+}
+
+static bool __read_mostly big_min_freq_adjust_enabled;
+
+static int set_big_min_freq_adjust(const char *buf,
+		const struct kernel_param *kp)
+{
+	int ret;
+
+	ret = param_set_bool_enable_only(buf, kp);
+	if (ret) {
+		pr_err("Unable to set big_min_freq_adjust_enabled: %d\n", ret);
+		return ret;
+	}
+
+	if (!big_min_freq_adjust_data.is_init)
+		return ret;
+
+	return enable_big_min_freq_adjust();
+}
+
+static const struct kernel_param_ops param_ops_big_min_freq_adjust = {
+	.set = set_big_min_freq_adjust,
+	.get = param_get_bool,
+};
+module_param_cb(min_freq_adjust, &param_ops_big_min_freq_adjust,
+		&big_min_freq_adjust_enabled, 0644);
+
+module_param_named(min_freq_floor, big_min_freq_adjust_data.min_freq_floor,
+		uint, 0644);
+
+static int set_min_down_delay_ms(const char *buf, const struct kernel_param *kp)
+{
+	int ret;
+
+	ret = param_set_ulong(buf, kp);
+	if (ret) {
+		pr_err("Unable to set big_min_down_delay_ms: %d\n", ret);
+		return ret;
+	}
+
+	big_min_freq_adjust_data.min_down_delay_jiffies = msecs_to_jiffies(
+			big_min_down_delay_ms);
+
+	return 0;
+}
+
+static const struct kernel_param_ops param_ops_big_min_down_delay_ms = {
+	.set = set_min_down_delay_ms,
+	.get = param_get_ulong,
+};
+module_param_cb(min_down_delay_ms, &param_ops_big_min_down_delay_ms,
+		&big_min_down_delay_ms, 0644);
+
+#define MAX_STR_LEN 16
+static char big_min_freq_cluster[MAX_STR_LEN];
+static struct kparam_string big_min_freq_cluster_kps = {
+	.string = big_min_freq_cluster,
+	.maxlen = MAX_STR_LEN,
+};
+
+static int set_big_min_freq_cluster(const char *buf,
+		const struct kernel_param *kp)
+{
+	struct big_min_freq_adjust_data *p = &big_min_freq_adjust_data;
+	int ret;
+
+	if (p->big_min_freq_on == true) {
+		ret = -EPERM;
+		goto err;
+	}
+
+	ret = param_set_copystring(buf, kp);
+	if (ret)
+		goto err;
+
+	ret = cpulist_parse(big_min_freq_cluster_kps.string,
+			&p->cluster_cpumask);
+	if (ret) {
+		cpumask_clear(&p->cluster_cpumask);
+		goto err;
+	}
+
+	return 0;
+err:
+	pr_err("Unable to set big_min_freq_cluster: %d\n", ret);
+	return ret;
+}
+
+static const struct kernel_param_ops param_ops_big_min_freq_cluster = {
+	.set = set_big_min_freq_cluster,
+	.get = param_get_string,
+};
+module_param_cb(min_freq_cluster, &param_ops_big_min_freq_cluster,
+		&big_min_freq_cluster_kps, 0644);
+
+static int __init big_min_freq_adjust_init(void)
+{
+	big_min_freq_adjust_data.is_init = true;
+	if (!big_min_freq_adjust_enabled)
+		return 0;
+
+	return enable_big_min_freq_adjust();
+}
+late_initcall(big_min_freq_adjust_init);
diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
index c9dc547..23fde2e 100644
--- a/drivers/soc/qcom/cmd-db.c
+++ b/drivers/soc/qcom/cmd-db.c
@@ -365,6 +365,7 @@
 	res.start = readl_relaxed(dict);
 	res.end = res.start + readl_relaxed(dict + 0x4);
 	res.flags = IORESOURCE_MEM;
+	res.name = NULL;
 	iounmap(dict);
 
 	start_addr = devm_ioremap_resource(&pdev->dev, &res);
diff --git a/drivers/soc/qcom/dcc.c b/drivers/soc/qcom/dcc.c
index a687286..bef0757 100644
--- a/drivers/soc/qcom/dcc.c
+++ b/drivers/soc/qcom/dcc.c
@@ -731,7 +731,8 @@
 
 	mutex_lock(&drvdata->mutex);
 
-	if (!len) {
+	/* Check len to avoid allocating a huge amount of memory */
+	if (!len || len > (drvdata->ram_size / 8)) {
 		dev_err(drvdata->dev, "DCC: Invalid length!\n");
 		ret = -EINVAL;
 		goto err;
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index b8deec1..c976f17 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -1775,6 +1775,18 @@
 			return tx_pkt;
 		}
 	}
+	list_for_each_entry(tx_pkt, &ctx->tx_active, list_node) {
+		if (tx_pkt->riid == riid) {
+			if (tx_pkt->size_remaining) {
+				GLINK_ERR_CH(ctx, "%s: R[%u] TX not complete",
+						__func__, riid);
+				tx_pkt = NULL;
+			}
+			spin_unlock_irqrestore(
+				&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+			return tx_pkt;
+		}
+	}
 	spin_unlock_irqrestore(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
 
 	GLINK_ERR_CH(ctx, "%s: R[%u] Tx packet for intent not found.\n",
@@ -1817,6 +1829,20 @@
 			return;
 		}
 	}
+	list_for_each_entry_safe(local_tx_pkt, tmp_tx_pkt,
+				&ctx->tx_active, list_node) {
+		if (tx_pkt == local_tx_pkt) {
+			list_del_init(&tx_pkt->list_done);
+			GLINK_DBG_CH(ctx,
+				"%s: R[%u] Removed Tx packet for intent\n",
+				__func__,
+				tx_pkt->riid);
+			rwref_put(&tx_pkt->pkt_ref);
+			spin_unlock_irqrestore(
+				&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+			return;
+		}
+	}
 	spin_unlock_irqrestore(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
 
 	GLINK_ERR_CH(ctx, "%s: R[%u] Tx packet for intent not found", __func__,
@@ -5578,12 +5604,6 @@
 		tx_info = list_first_entry(&ctx->tx_active,
 				struct glink_core_tx_pkt, list_node);
 		rwref_get(&tx_info->pkt_ref);
-
-		spin_lock(&ctx->tx_pending_rmt_done_lock_lhc4);
-		if (list_empty(&tx_info->list_done))
-			list_add(&tx_info->list_done,
-				 &ctx->tx_pending_remote_done);
-		spin_unlock(&ctx->tx_pending_rmt_done_lock_lhc4);
 		spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
 
 		if (unlikely(tx_info->tracer_pkt)) {
@@ -5648,9 +5668,16 @@
 		txd_len += tx_len;
 		if (!tx_info->size_remaining) {
 			num_pkts++;
+			spin_lock(&ctx->tx_pending_rmt_done_lock_lhc4);
 			list_del_init(&tx_info->list_node);
+			if (list_empty(&tx_info->list_done))
+				list_add(&tx_info->list_done,
+					&ctx->tx_pending_remote_done);
+			rwref_put(&tx_info->pkt_ref);
+			spin_unlock(&ctx->tx_pending_rmt_done_lock_lhc4);
+		} else {
+			rwref_put(&tx_info->pkt_ref);
 		}
-		rwref_put(&tx_info->pkt_ref);
 	}
 
 	ctx->txd_len += txd_len;
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
index ca9953a..5640666 100644
--- a/drivers/soc/qcom/glink_smem_native_xprt.c
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -949,6 +949,12 @@
 		return;
 	}
 
+	if (!einfo->rx_fifo) {
+		if (!get_rx_fifo(einfo))
+			return;
+		einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);
+	}
+
 	if ((atomic_ctx) && ((einfo->tx_resume_needed) ||
 		(waitqueue_active(&einfo->tx_blocked_queue)))) /* tx waiting ?*/
 		tx_wakeup_worker(einfo);
@@ -1554,10 +1560,10 @@
 	struct edge_info *einfo;
 
 	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+	einfo->in_ssr = false;
 	if (!einfo->rx_fifo) {
 		if (!get_rx_fifo(einfo))
 			return;
-		einfo->in_ssr = false;
 		einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);
 	}
 }
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 683b074..b8e9268 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -2113,6 +2113,7 @@
 
 	set_bit(ICNSS_WLFW_EXISTS, &penv->state);
 	clear_bit(ICNSS_FW_DOWN, &penv->state);
+	icnss_ignore_qmi_timeout(false);
 
 	penv->wlfw_clnt = qmi_handle_create(icnss_qmi_wlfw_clnt_notify, penv);
 	if (!penv->wlfw_clnt) {
@@ -2450,8 +2451,13 @@
 	int ret = 0;
 	struct icnss_event_pd_service_down_data *event_data = data;
 
-	if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state))
+	if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state)) {
+		icnss_ignore_qmi_timeout(false);
 		goto out;
+	}
+
+	if (priv->force_err_fatal)
+		ICNSS_ASSERT(0);
 
 	if (priv->early_crash_ind) {
 		icnss_pr_dbg("PD Down ignored as early indication is processed: %d, state: 0x%lx\n",
@@ -2467,16 +2473,11 @@
 		goto out;
 	}
 
-	if (priv->force_err_fatal)
-		ICNSS_ASSERT(0);
-
 	icnss_fw_crashed(priv, event_data);
 
 out:
 	kfree(data);
 
-	icnss_ignore_qmi_timeout(false);
-
 	return ret;
 }
 
@@ -2485,15 +2486,16 @@
 {
 	int ret = 0;
 
-	if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state))
+	if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state)) {
+		icnss_ignore_qmi_timeout(false);
 		goto out;
+	}
 
 	priv->early_crash_ind = true;
 	icnss_fw_crashed(priv, NULL);
 
 out:
 	kfree(data);
-	icnss_ignore_qmi_timeout(false);
 
 	return ret;
 }
@@ -3178,7 +3180,8 @@
 	if (!dev)
 		return -ENODEV;
 
-	if (test_bit(ICNSS_FW_DOWN, &penv->state)) {
+	if (test_bit(ICNSS_FW_DOWN, &penv->state) ||
+	    !test_bit(ICNSS_FW_READY, &penv->state)) {
 		icnss_pr_err("FW down, ignoring fw_log_mode state: 0x%lx\n",
 			     penv->state);
 		return -EINVAL;
@@ -3277,7 +3280,8 @@
 	if (!dev)
 		return -ENODEV;
 
-	if (test_bit(ICNSS_FW_DOWN, &penv->state)) {
+	if (test_bit(ICNSS_FW_DOWN, &penv->state) ||
+	    !test_bit(ICNSS_FW_READY, &penv->state)) {
 		icnss_pr_err("FW down, ignoring wlan_enable state: 0x%lx\n",
 			     penv->state);
 		return -EINVAL;
diff --git a/drivers/soc/qcom/idle.h b/drivers/soc/qcom/idle.h
new file mode 100644
index 0000000..2f852c3
--- /dev/null
+++ b/drivers/soc/qcom/idle.h
@@ -0,0 +1,29 @@
+/* Copyright (c) 2007-2009,2012-2014, 2018, The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_IDLE_H_
+#define _ARCH_ARM_MACH_MSM_IDLE_H_
+
+#define MAX_CPUS_PER_CLUSTER	4
+#define MAX_NUM_CLUSTER	4
+
+#ifndef __ASSEMBLY__
+#if defined(CONFIG_CPU_V7) || defined(CONFIG_ARM64)
+extern unsigned long msm_pm_boot_vector[MAX_NUM_CLUSTER * MAX_CPUS_PER_CLUSTER];
+void msm_pm_boot_entry(void);
+#else
+static inline void msm_pm_boot_entry(void) {}
+#endif
+#endif
+#endif
diff --git a/drivers/soc/qcom/jtagv8-etm.c b/drivers/soc/qcom/jtagv8-etm.c
index 3f4b8bc..23cbb7b 100644
--- a/drivers/soc/qcom/jtagv8-etm.c
+++ b/drivers/soc/qcom/jtagv8-etm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -345,7 +345,7 @@
 						       TRCCNTVRn(j));
 		}
 		/* resource selection registers */
-		for (j = 0; j < etmdata->nr_resource; j++)
+		for (j = 0; j < etmdata->nr_resource * 2; j++)
 			etmdata->state[i++] = etm_readl(etmdata, TRCRSCTLRn(j));
 		/* comparator registers */
 		for (j = 0; j < etmdata->nr_addr_cmp * 2; j++) {
@@ -448,7 +448,7 @@
 			etm_writel(etmdata, etmdata->state[i++], TRCCNTVRn(j));
 		}
 		/* resource selection registers */
-		for (j = 0; j < etmdata->nr_resource; j++)
+		for (j = 0; j < etmdata->nr_resource * 2; j++)
 			etm_writel(etmdata, etmdata->state[i++], TRCRSCTLRn(j));
 		/* comparator registers */
 		for (j = 0; j < etmdata->nr_addr_cmp * 2; j++) {
@@ -932,7 +932,7 @@
 		for (j = 0; j < etmdata->nr_cntr; j++)
 			i = etm_read_crxr(etmdata->state, i, j);
 		/* resource selection registers */
-		for (j = 0; j < etmdata->nr_resource; j++)
+		for (j = 0; j < etmdata->nr_resource * 2; j++)
 			i = etm_read_rsxr(etmdata->state, i, j + 2);
 		/* comparator registers */
 		for (j = 0; j < etmdata->nr_addr_cmp * 2; j++)
@@ -1387,7 +1387,7 @@
 		for (j = 0; j < etmdata->nr_cntr; j++)
 			i = etm_write_crxr(etmdata->state, i, j);
 		/* resource selection registers */
-		for (j = 0; j < etmdata->nr_resource; j++)
+		for (j = 0; j < etmdata->nr_resource * 2; j++)
 			i = etm_write_rsxr(etmdata->state, i, j + 2);
 		/* comparator registers */
 		for (j = 0; j < etmdata->nr_addr_cmp * 2; j++)
@@ -1496,7 +1496,7 @@
 	val = etm_readl(etmdata, TRCIDR4);
 	etmdata->nr_addr_cmp = BMVAL(val, 0, 3);
 	etmdata->nr_data_cmp = BMVAL(val, 4, 7);
-	etmdata->nr_resource = BMVAL(val, 16, 19);
+	etmdata->nr_resource = BMVAL(val, 16, 19) + 1;
 	etmdata->nr_ss_cmp = BMVAL(val, 20, 23);
 	etmdata->nr_ctxid_cmp = BMVAL(val, 24, 27);
 	etmdata->nr_vmid_cmp = BMVAL(val, 28, 31);
diff --git a/drivers/soc/qcom/memshare/msm_memshare.c b/drivers/soc/qcom/memshare/msm_memshare.c
index 696c043..6542861 100644
--- a/drivers/soc/qcom/memshare/msm_memshare.c
+++ b/drivers/soc/qcom/memshare/msm_memshare.c
@@ -406,6 +406,7 @@
 				memblock[i].peripheral ==
 				DHMS_MEM_PROC_MPSS_V01 &&
 				!memblock[i].guarantee &&
+				!memblock[i].client_request &&
 				memblock[i].allotted &&
 				!memblock[i].alloc_request) {
 				pr_debug("memshare: hypervisor unmapping  for client id: %d\n",
@@ -665,9 +666,10 @@
 					__func__);
 		flag = 1;
 	} else if (!memblock[client_id].guarantee &&
-					memblock[client_id].allotted) {
-		pr_debug("memshare: %s: size: %d",
-				__func__, memblock[client_id].size);
+				!memblock[client_id].client_request &&
+				memblock[client_id].allotted) {
+		pr_debug("memshare: %s:client_id:%d - size: %d",
+				__func__, client_id, memblock[client_id].size);
 		ret = hyp_assign_phys(memblock[client_id].phy_addr,
 				memblock[client_id].size, source_vmlist, 1,
 				dest_vmids, dest_perms, 1);
@@ -676,8 +678,8 @@
 		 * This is an error case as hyp mapping was successful
 		 * earlier but during unmap it lead to failure.
 		 */
-			pr_err("memshare: %s, failed to unmap the region\n",
-				__func__);
+			pr_err("memshare: %s, failed to unmap the region for client id:%d\n",
+				__func__, client_id);
 		}
 		size = memblock[client_id].size;
 		if (memblock[client_id].client_id == 1) {
@@ -696,8 +698,8 @@
 			attrs);
 		free_client(client_id);
 	} else {
-		pr_err("memshare: %s, Request came for a guaranteed client cannot free up the memory\n",
-						__func__);
+		pr_err("memshare: %s, Request came for a guaranteed client (client_id: %d) cannot free up the memory\n",
+						__func__, client_id);
 	}
 
 	if (flag) {
@@ -992,6 +994,10 @@
 							pdev->dev.of_node,
 							"qcom,allocate-boot-time");
 
+	memblock[num_clients].client_request = of_property_read_bool(
+							pdev->dev.of_node,
+							"qcom,allocate-on-request");
+
 	rc = of_property_read_string(pdev->dev.of_node, "label",
 						&name);
 	if (rc) {
diff --git a/drivers/soc/qcom/memshare/msm_memshare.h b/drivers/soc/qcom/memshare/msm_memshare.h
index 6b54652..908f091 100644
--- a/drivers/soc/qcom/memshare/msm_memshare.h
+++ b/drivers/soc/qcom/memshare/msm_memshare.h
@@ -41,6 +41,8 @@
 	uint32_t allotted;
 	/* Memory allocation request received or not */
 	uint32_t alloc_request;
+	/* Allocation on request from a client */
+	uint32_t client_request;
 	/* Size required for client */
 	uint32_t size;
 	/*
diff --git a/drivers/soc/qcom/microdump_collector.c b/drivers/soc/qcom/microdump_collector.c
index 4a22b4d..6e9da60 100644
--- a/drivers/soc/qcom/microdump_collector.c
+++ b/drivers/soc/qcom/microdump_collector.c
@@ -48,9 +48,9 @@
 		crash_reason = smem_get_entry(SMEM_SSR_REASON_MSS0, &size_reason
 				, 0, SMEM_ANY_HOST_FLAG);
 		if (IS_ERR_OR_NULL(crash_reason)) {
-			pr_err("%s: Error in getting SMEM_reason pointer\n",
-				__func__);
-			return -ENODEV;
+			pr_info("%s: smem %d not available\n",
+				__func__, SMEM_SSR_REASON_MSS0);
+			goto out;
 		}
 
 		segment[0].v_address = crash_reason;
@@ -58,9 +58,9 @@
 
 		crash_data = smem_get_entry(smem_id, &size_data, SMEM_MODEM, 0);
 		if (IS_ERR_OR_NULL(crash_data)) {
-			pr_err("%s: Error in getting SMEM_data pointer\n",
-				__func__);
-			return -ENODEV;
+			pr_info("%s: smem %d not available\n",
+				__func__, smem_id);
+			goto out;
 		}
 
 		segment[1].v_address = crash_data;
@@ -68,10 +68,11 @@
 
 		ret = do_ramdump(drv->microdump_dev, segment, 2);
 		if (ret)
-			pr_err("%s: do_ramdump() failed\n", __func__);
+			pr_info("%s: do_ramdump() failed\n", __func__);
 	}
 
-	return ret;
+out:
+	return NOTIFY_OK;
 }
 
 static int microdump_modem_ssr_register_notifier(struct microdump_data *drv)
diff --git a/drivers/soc/qcom/msm-pm.c b/drivers/soc/qcom/msm-pm.c
new file mode 100644
index 0000000..129ebce
--- /dev/null
+++ b/drivers/soc/qcom/msm-pm.c
@@ -0,0 +1,921 @@
+/* Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/ktime.h>
+#include <linux/smp.h>
+#include <linux/tick.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/msm-bus.h>
+#include <linux/uaccess.h>
+#include <linux/dma-mapping.h>
+#include <soc/qcom/spm.h>
+#include <soc/qcom/pm-legacy.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/scm-boot.h>
+#include <asm/suspend.h>
+#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <asm/system_misc.h>
+#ifdef CONFIG_VFP
+#include <asm/vfp.h>
+#endif
+#include <soc/qcom/jtag.h>
+#include "pm-boot.h"
+#include "idle.h"
+
+#define SCM_CMD_TERMINATE_PC	(0x2)
+#define SCM_CMD_CORE_HOTPLUGGED (0x10)
+#define SCM_FLUSH_FLAG_MASK	(0x3)
+
+#define SCLK_HZ (32768)
+
+#define MAX_BUF_SIZE  1024
+
+static int msm_pm_debug_mask = 1;
+module_param_named(
+	debug_mask, msm_pm_debug_mask, int, 0664
+);
+
+enum {
+	MSM_PM_DEBUG_SUSPEND = BIT(0),
+	MSM_PM_DEBUG_POWER_COLLAPSE = BIT(1),
+	MSM_PM_DEBUG_SUSPEND_LIMITS = BIT(2),
+	MSM_PM_DEBUG_CLOCK = BIT(3),
+	MSM_PM_DEBUG_RESET_VECTOR = BIT(4),
+	MSM_PM_DEBUG_IDLE = BIT(5),
+	MSM_PM_DEBUG_IDLE_LIMITS = BIT(6),
+	MSM_PM_DEBUG_HOTPLUG = BIT(7),
+};
+
+enum msm_pc_count_offsets {
+	MSM_PC_ENTRY_COUNTER,
+	MSM_PC_EXIT_COUNTER,
+	MSM_PC_FALLTHRU_COUNTER,
+	MSM_PC_UNUSED,
+	MSM_PC_NUM_COUNTERS,
+};
+
+static bool msm_pm_ldo_retention_enabled = true;
+static bool msm_pm_tz_flushes_cache;
+static bool msm_pm_ret_no_pll_switch;
+static bool msm_no_ramp_down_pc;
+static struct msm_pm_sleep_status_data *msm_pm_slp_sts;
+static DEFINE_PER_CPU(struct clk *, cpu_clks);
+static struct clk *l2_clk;
+
+static long *msm_pc_debug_counters;
+
+static cpumask_t retention_cpus;
+static DEFINE_SPINLOCK(retention_lock);
+static DEFINE_MUTEX(msm_pc_debug_mutex);
+
+static bool msm_pm_is_L1_writeback(void)
+{
+	u32 cache_id = 0;
+
+#if defined(CONFIG_CPU_V7)
+	u32 sel = 0;
+
+	asm volatile ("mcr p15, 2, %[ccselr], c0, c0, 0\n\t"
+		      "isb\n\t"
+		      "mrc p15, 1, %[ccsidr], c0, c0, 0\n\t"
+		      :[ccsidr]"=r" (cache_id)
+		      :[ccselr]"r" (sel)
+		     );
+	return cache_id & BIT(30);
+#elif defined(CONFIG_ARM64)
+	u32 sel = 0;
+
+	asm volatile("msr csselr_el1, %[ccselr]\n\t"
+		     "isb\n\t"
+		     "mrs %[ccsidr],ccsidr_el1\n\t"
+		     :[ccsidr]"=r" (cache_id)
+		     :[ccselr]"r" (sel)
+		    );
+	return cache_id & BIT(30);
+#else
+#error No valid CPU arch selected
+#endif
+}
+
+static bool msm_pm_swfi(bool from_idle)
+{
+	msm_arch_idle();
+	return true;
+}
+
+static bool msm_pm_retention(bool from_idle)
+{
+	int ret = 0;
+	unsigned int cpu = smp_processor_id();
+	struct clk *cpu_clk = per_cpu(cpu_clks, cpu);
+
+	spin_lock(&retention_lock);
+
+	if (!msm_pm_ldo_retention_enabled)
+		goto bailout;
+
+	cpumask_set_cpu(cpu, &retention_cpus);
+	spin_unlock(&retention_lock);
+
+	if (!msm_pm_ret_no_pll_switch)
+		clk_disable(cpu_clk);
+
+	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_RETENTION, false);
+	WARN_ON(ret);
+
+	msm_arch_idle();
+
+	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
+	WARN_ON(ret);
+
+	if (!msm_pm_ret_no_pll_switch)
+		if (clk_enable(cpu_clk))
+			pr_err("%s(): Error restoring cpu clk\n", __func__);
+
+	spin_lock(&retention_lock);
+	cpumask_clear_cpu(cpu, &retention_cpus);
+bailout:
+	spin_unlock(&retention_lock);
+	return true;
+}
+
+static inline void msm_pc_inc_debug_count(uint32_t cpu,
+		enum msm_pc_count_offsets offset)
+{
+	int cntr_offset;
+	uint32_t cluster_id = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
+	uint32_t cpu_id = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 0);
+
+	if (cluster_id >= MAX_NUM_CLUSTER || cpu_id >= MAX_CPUS_PER_CLUSTER)
+		WARN_ON(cpu);
+
+	cntr_offset = (cluster_id * MAX_CPUS_PER_CLUSTER * MSM_PC_NUM_COUNTERS)
+			 + (cpu_id * MSM_PC_NUM_COUNTERS) + offset;
+
+	if (!msm_pc_debug_counters)
+		return;
+
+	msm_pc_debug_counters[cntr_offset]++;
+}
+
+static bool msm_pm_pc_hotplug(void)
+{
+	uint32_t cpu = smp_processor_id();
+	enum msm_pm_l2_scm_flag flag;
+	struct scm_desc desc;
+
+	flag = lpm_cpu_pre_pc_cb(cpu);
+
+	if (!msm_pm_tz_flushes_cache) {
+		if (flag == MSM_SCM_L2_OFF)
+			flush_cache_all();
+		else if (msm_pm_is_L1_writeback())
+			flush_cache_louis();
+	}
+
+	msm_pc_inc_debug_count(cpu, MSM_PC_ENTRY_COUNTER);
+
+	if (is_scm_armv8()) {
+		desc.args[0] = SCM_CMD_CORE_HOTPLUGGED |
+			       (flag & SCM_FLUSH_FLAG_MASK);
+		desc.arginfo = SCM_ARGS(1);
+		scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_BOOT,
+				 SCM_CMD_TERMINATE_PC), &desc);
+	} else {
+		scm_call_atomic1(SCM_SVC_BOOT, SCM_CMD_TERMINATE_PC,
+		SCM_CMD_CORE_HOTPLUGGED | (flag & SCM_FLUSH_FLAG_MASK));
+	}
+
+	/* Should not return here */
+	msm_pc_inc_debug_count(cpu, MSM_PC_FALLTHRU_COUNTER);
+	return 0;
+}
+
+static bool msm_pm_fastpc(bool from_idle)
+{
+	int ret = 0;
+	unsigned int cpu = smp_processor_id();
+
+	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_FASTPC, false);
+	WARN_ON(ret);
+
+	if (from_idle || cpu_online(cpu))
+		msm_arch_idle();
+	else
+		msm_pm_pc_hotplug();
+
+	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
+	WARN_ON(ret);
+
+	return true;
+}
+
+int msm_pm_collapse(unsigned long unused)
+{
+	uint32_t cpu = smp_processor_id();
+	enum msm_pm_l2_scm_flag flag;
+	struct scm_desc desc;
+
+	flag = lpm_cpu_pre_pc_cb(cpu);
+
+	if (!msm_pm_tz_flushes_cache) {
+		if (flag == MSM_SCM_L2_OFF)
+			flush_cache_all();
+		else if (msm_pm_is_L1_writeback())
+			flush_cache_louis();
+	}
+	msm_pc_inc_debug_count(cpu, MSM_PC_ENTRY_COUNTER);
+
+	if (is_scm_armv8()) {
+		desc.args[0] = flag;
+		desc.arginfo = SCM_ARGS(1);
+		scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_BOOT,
+				 SCM_CMD_TERMINATE_PC), &desc);
+	} else {
+		scm_call_atomic1(SCM_SVC_BOOT, SCM_CMD_TERMINATE_PC, flag);
+	}
+
+	msm_pc_inc_debug_count(cpu, MSM_PC_FALLTHRU_COUNTER);
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_pm_collapse);
+
+static bool __ref msm_pm_spm_power_collapse(
+	unsigned int cpu, int mode, bool from_idle, bool notify_rpm)
+{
+	void *entry;
+	bool collapsed = 0;
+	int ret;
+	bool save_cpu_regs = (cpu_online(cpu) || from_idle);
+
+	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
+		pr_info("CPU%u: %s: notify_rpm %d\n",
+			cpu, __func__, (int) notify_rpm);
+
+	ret = msm_spm_set_low_power_mode(mode, notify_rpm);
+	WARN_ON(ret);
+
+	entry = save_cpu_regs ?  cpu_resume : msm_secondary_startup;
+
+	msm_pm_boot_config_before_pc(cpu, virt_to_phys(entry));
+
+	if (MSM_PM_DEBUG_RESET_VECTOR & msm_pm_debug_mask)
+		pr_info("CPU%u: %s: program vector to %pk\n",
+			cpu, __func__, entry);
+
+	msm_jtag_save_state();
+
+	collapsed = save_cpu_regs ?
+		!cpu_suspend(0, msm_pm_collapse) : msm_pm_pc_hotplug();
+
+	msm_jtag_restore_state();
+
+	if (collapsed)
+		local_fiq_enable();
+
+	msm_pm_boot_config_after_pc(cpu);
+
+	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
+		pr_info("CPU%u: %s: msm_pm_collapse returned, collapsed %d\n",
+			cpu, __func__, collapsed);
+
+	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
+	WARN_ON(ret);
+	return collapsed;
+}
+
+static bool msm_pm_power_collapse_standalone(
+		bool from_idle)
+{
+	unsigned int cpu = smp_processor_id();
+	bool collapsed;
+
+	collapsed = msm_pm_spm_power_collapse(cpu,
+			MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE,
+			from_idle, false);
+
+	return collapsed;
+}
+
+static int ramp_down_last_cpu(int cpu)
+{
+	struct clk *cpu_clk = per_cpu(cpu_clks, cpu);
+	int ret = 0;
+
+	clk_disable(cpu_clk);
+	clk_disable(l2_clk);
+
+	return ret;
+}
+
+static int ramp_up_first_cpu(int cpu, int saved_rate)
+{
+	struct clk *cpu_clk = per_cpu(cpu_clks, cpu);
+	int rc = 0;
+
+	if (MSM_PM_DEBUG_CLOCK & msm_pm_debug_mask)
+		pr_info("CPU%u: %s: restore clock rate\n",
+				cpu, __func__);
+
+	clk_enable(l2_clk);
+
+	if (cpu_clk) {
+		int ret = clk_enable(cpu_clk);
+
+		if (ret) {
+			pr_err("%s(): Error restoring cpu clk\n",
+					__func__);
+			return ret;
+		}
+	}
+
+	return rc;
+}
+
+static bool msm_pm_power_collapse(bool from_idle)
+{
+	unsigned int cpu = smp_processor_id();
+	unsigned long saved_acpuclk_rate = 0;
+	bool collapsed;
+
+	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
+		pr_info("CPU%u: %s: idle %d\n",
+			cpu, __func__, (int)from_idle);
+
+	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
+		pr_info("CPU%u: %s: pre power down\n", cpu, __func__);
+
+	if (cpu_online(cpu) && !msm_no_ramp_down_pc)
+		saved_acpuclk_rate = ramp_down_last_cpu(cpu);
+
+	collapsed = msm_pm_spm_power_collapse(cpu, MSM_SPM_MODE_POWER_COLLAPSE,
+			from_idle, true);
+
+	if (cpu_online(cpu) && !msm_no_ramp_down_pc)
+		ramp_up_first_cpu(cpu, saved_acpuclk_rate);
+
+	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
+		pr_info("CPU%u: %s: post power up\n", cpu, __func__);
+
+	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
+		pr_info("CPU%u: %s: return\n", cpu, __func__);
+	return collapsed;
+}
+/******************************************************************************
+ * External Idle/Suspend Functions
+ *****************************************************************************/
+
+static void arch_idle(void) {}
+
+static bool (*execute[MSM_PM_SLEEP_MODE_NR])(bool idle) = {
+	[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT] = msm_pm_swfi,
+	[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE] =
+		msm_pm_power_collapse_standalone,
+	[MSM_PM_SLEEP_MODE_RETENTION] = msm_pm_retention,
+	[MSM_PM_SLEEP_MODE_FASTPC] = msm_pm_fastpc,
+	[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = msm_pm_power_collapse,
+};
+
+/**
+ * msm_cpu_pm_enter_sleep(): Enter a low power mode on current cpu
+ *
+ * @mode - sleep mode to enter
+ * @from_idle - true when the mode is entered from the idle path, false for suspend
+ *
+ * Returns the exit status reported by the executed low power mode function.
+ *
+ * Must be called with interrupts disabled, on the core that is to enter the
+ * low power mode.
+ *
+ */
+bool msm_cpu_pm_enter_sleep(enum msm_pm_sleep_mode mode, bool from_idle)
+{
+	bool exit_stat = false;
+	unsigned int cpu = smp_processor_id();
+
+	if ((!from_idle  && cpu_online(cpu))
+			|| (MSM_PM_DEBUG_IDLE & msm_pm_debug_mask))
+		pr_info("CPU%u:%s mode:%d during %s\n", cpu, __func__,
+				mode, from_idle ? "idle" : "suspend");
+
+	if (execute[mode])
+		exit_stat = execute[mode](from_idle);
+
+	return exit_stat;
+}
+
+/**
+ * msm_pm_wait_cpu_shutdown() - Wait for a core to be power collapsed during
+ *				hotplug
+ *
+ * @cpu - cpu to wait on.
+ *
+ * Blocking call that waits for the core to be power collapsed. It is called
+ * from platform_cpu_die to ensure that a core is power collapsed before the
+ * CPU_DEAD notification is sent, so that drivers can remove the resource
+ * votes for this CPU (regulator and clock).
+ */
+int msm_pm_wait_cpu_shutdown(unsigned int cpu)
+{
+	int timeout = 0;
+
+	if (!msm_pm_slp_sts)
+		return 0;
+	if (!msm_pm_slp_sts[cpu].base_addr)
+		return 0;
+	while (1) {
+		/*
+		 * Check whether the SPM of the core being hotplugged has set
+		 * its sleep state. The SPM sleep state indicates that the
+		 * core has been power collapsed.
+		 */
+		int acc_sts = __raw_readl(msm_pm_slp_sts[cpu].base_addr);
+
+		if (acc_sts & msm_pm_slp_sts[cpu].mask)
+			return 0;
+
+		udelay(100);
+		/*
+		 * Dump spm registers for debugging
+		 */
+		if (++timeout == 20) {
+			msm_spm_dump_regs(cpu);
+			__WARN_printf(
+			"CPU%u didn't collapse in 2ms, sleep status: 0x%x\n",
+								cpu, acc_sts);
+		}
+	}
+
+	return -EBUSY;
+}
+
+static void msm_pm_ack_retention_disable(void *data)
+{
+	/*
+	 * Empty callback: the cross-CPU call itself ensures the core has
+	 * woken up, after which it is safe to disable retention.
+	 */
+}
+/**
+ * msm_pm_enable_retention() - Disable/Enable retention on all cores
+ * @enable: Enable/Disable retention
+ *
+ */
+void msm_pm_enable_retention(bool enable)
+{
+	if (enable == msm_pm_ldo_retention_enabled)
+		return;
+
+	msm_pm_ldo_retention_enabled = enable;
+
+	/*
+	 * If retention is being disabled, wake up all online cores to ensure
+	 * that they aren't executing retention. Offlined cores need not be
+	 * woken up as they enter the deepest sleep mode, namely RPM assisted
+	 * power collapse.
+	 */
+	if (!enable) {
+		preempt_disable();
+		smp_call_function_many(&retention_cpus,
+				msm_pm_ack_retention_disable,
+				NULL, true);
+		preempt_enable();
+	}
+}
+EXPORT_SYMBOL(msm_pm_enable_retention);
+
+/**
+ * msm_pm_retention_enabled() - Check if retention is enabled
+ *
+ * returns true if retention is enabled
+ */
+bool msm_pm_retention_enabled(void)
+{
+	return msm_pm_ldo_retention_enabled;
+}
+EXPORT_SYMBOL(msm_pm_retention_enabled);
+
+static int msm_pm_snoc_client_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	static struct msm_bus_scale_pdata *msm_pm_bus_pdata;
+	static uint32_t msm_pm_bus_client;
+
+	msm_pm_bus_pdata = msm_bus_cl_get_pdata(pdev);
+
+	if (msm_pm_bus_pdata) {
+		msm_pm_bus_client =
+			msm_bus_scale_register_client(msm_pm_bus_pdata);
+
+		if (!msm_pm_bus_client) {
+			pr_err("%s: Failed to register SNOC client", __func__);
+			rc = -ENXIO;
+			goto snoc_cl_probe_done;
+		}
+
+		rc = msm_bus_scale_client_update_request(msm_pm_bus_client, 1);
+
+		if (rc)
+			pr_err("%s: Error setting bus rate", __func__);
+	}
+
+snoc_cl_probe_done:
+	return rc;
+}
+
+static int msm_cpu_status_probe(struct platform_device *pdev)
+{
+	u32 cpu;
+	int rc;
+
+	if (!pdev || !pdev->dev.of_node)
+		return -EFAULT;
+
+	msm_pm_slp_sts = devm_kzalloc(&pdev->dev,
+			sizeof(*msm_pm_slp_sts) * num_possible_cpus(),
+			GFP_KERNEL);
+
+	if (!msm_pm_slp_sts)
+		return -ENOMEM;
+
+
+	for_each_possible_cpu(cpu) {
+		struct device_node *cpun, *node;
+		char *key;
+
+		cpun = of_get_cpu_node(cpu, NULL);
+
+		if (!cpun) {
+			__WARN();
+			continue;
+		}
+
+		node = of_parse_phandle(cpun, "qcom,sleep-status", 0);
+		if (!node)
+			return -ENODEV;
+
+		msm_pm_slp_sts[cpu].base_addr = of_iomap(node, 0);
+		if (!msm_pm_slp_sts[cpu].base_addr) {
+			pr_err("%s: Can't find base addr\n", __func__);
+			return -ENODEV;
+		}
+
+		key = "qcom,sleep-status-mask";
+		rc = of_property_read_u32(node, key, &msm_pm_slp_sts[cpu].mask);
+		if (rc) {
+			pr_err("%s: Can't find %s property\n", __func__, key);
+			iounmap(msm_pm_slp_sts[cpu].base_addr);
+			return rc;
+		}
+	}
+
+	return 0;
+};
+
+static const struct of_device_id msm_slp_sts_match_tbl[] = {
+	{.compatible = "qcom,cpu-sleep-status"},
+	{},
+};
+
+static struct platform_driver msm_cpu_status_driver = {
+	.probe = msm_cpu_status_probe,
+	.driver = {
+		.name = "cpu_slp_status",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_slp_sts_match_tbl,
+	},
+};
+
+static const struct of_device_id msm_snoc_clnt_match_tbl[] = {
+	{.compatible = "qcom,pm-snoc-client"},
+	{},
+};
+
+static struct platform_driver msm_cpu_pm_snoc_client_driver = {
+	.probe = msm_pm_snoc_client_probe,
+	.driver = {
+		.name = "pm_snoc_client",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_snoc_clnt_match_tbl,
+	},
+};
+
+struct msm_pc_debug_counters_buffer {
+	long *reg;
+	u32 len;
+	char buf[MAX_BUF_SIZE];
+};
+
+static char *counter_name[MSM_PC_NUM_COUNTERS] = {
+		"PC Entry Counter",
+		"Warmboot Entry Counter",
+		"PC Bailout Counter"
+};
+
+static int msm_pc_debug_counters_copy(
+		struct msm_pc_debug_counters_buffer *data)
+{
+	int j;
+	u32 stat;
+	unsigned int cpu;
+	unsigned int len;
+	uint32_t cluster_id;
+	uint32_t cpu_id;
+	uint32_t offset;
+
+	for_each_possible_cpu(cpu) {
+		len = scnprintf(data->buf + data->len,
+				sizeof(data->buf)-data->len,
+				"CPU%d\n", cpu);
+		cluster_id = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
+		cpu_id = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 0);
+		offset = (cluster_id * MAX_CPUS_PER_CLUSTER
+				 * MSM_PC_NUM_COUNTERS)
+				 + (cpu_id * MSM_PC_NUM_COUNTERS);
+
+		data->len += len;
+
+		for (j = 0; j < MSM_PC_NUM_COUNTERS - 1; j++) {
+			stat = data->reg[offset + j];
+			len = scnprintf(data->buf + data->len,
+					 sizeof(data->buf) - data->len,
+					"\t%s: %d", counter_name[j], stat);
+
+			data->len += len;
+		}
+		len = scnprintf(data->buf + data->len,
+			 sizeof(data->buf) - data->len,
+			"\n");
+
+		data->len += len;
+	}
+
+	return data->len;
+}
+
+static ssize_t msm_pc_debug_counters_file_read(struct file *file,
+		char __user *bufu, size_t count, loff_t *ppos)
+{
+	struct msm_pc_debug_counters_buffer *data;
+	ssize_t ret;
+
+	mutex_lock(&msm_pc_debug_mutex);
+	data = file->private_data;
+
+	if (!data) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	if (!bufu) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	if (!access_ok(VERIFY_WRITE, bufu, count)) {
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	if (*ppos >= data->len && data->len == 0)
+		data->len = msm_pc_debug_counters_copy(data);
+
+	ret = simple_read_from_buffer(bufu, count, ppos,
+			data->buf, data->len);
+exit:
+	mutex_unlock(&msm_pc_debug_mutex);
+	return ret;
+}
+
+static int msm_pc_debug_counters_file_open(struct inode *inode,
+		struct file *file)
+{
+	struct msm_pc_debug_counters_buffer *buf;
+	int ret = 0;
+
+	mutex_lock(&msm_pc_debug_mutex);
+
+	if (!inode->i_private) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	file->private_data = kzalloc(
+		sizeof(struct msm_pc_debug_counters_buffer), GFP_KERNEL);
+
+	if (!file->private_data) {
+		pr_err("%s: ERROR kmalloc failed to allocate %zu bytes\n",
+		__func__, sizeof(struct msm_pc_debug_counters_buffer));
+
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	buf = file->private_data;
+	buf->reg = (long *)inode->i_private;
+
+exit:
+	mutex_unlock(&msm_pc_debug_mutex);
+	return ret;
+}
+
+static int msm_pc_debug_counters_file_close(struct inode *inode,
+		struct file *file)
+{
+	mutex_lock(&msm_pc_debug_mutex);
+	kfree(file->private_data);
+	mutex_unlock(&msm_pc_debug_mutex);
+	return 0;
+}
+
+static const struct file_operations msm_pc_debug_counters_fops = {
+	.open = msm_pc_debug_counters_file_open,
+	.read = msm_pc_debug_counters_file_read,
+	.release = msm_pc_debug_counters_file_close,
+	.llseek = no_llseek,
+};
+
+static int msm_pm_clk_init(struct platform_device *pdev)
+{
+	bool synced_clocks;
+	u32 cpu;
+	char clk_name[] = "cpu??_clk";
+	char *key;
+
+	key = "qcom,saw-turns-off-pll";
+	if (of_property_read_bool(pdev->dev.of_node, key))
+		return 0;
+
+	key = "qcom,synced-clocks";
+	synced_clocks = of_property_read_bool(pdev->dev.of_node, key);
+
+	for_each_possible_cpu(cpu) {
+		struct clk *clk;
+
+		snprintf(clk_name, sizeof(clk_name), "cpu%d_clk", cpu);
+		clk = clk_get(&pdev->dev, clk_name);
+		if (IS_ERR(clk)) {
+			if (cpu && synced_clocks)
+				return 0;
+			clk = NULL;
+		}
+		per_cpu(cpu_clks, cpu) = clk;
+	}
+
+	if (synced_clocks)
+		return 0;
+
+	l2_clk = clk_get(&pdev->dev, "l2_clk");
+	if (IS_ERR(l2_clk))
+		pr_warn("%s: Could not get l2_clk (-%ld)\n", __func__,
+			PTR_ERR(l2_clk));
+
+	return 0;
+}
+
+static int msm_cpu_pm_probe(struct platform_device *pdev)
+{
+	struct dentry *dent = NULL;
+	struct resource *res = NULL;
+	int ret = 0;
+	void __iomem *msm_pc_debug_counters_imem;
+	char *key;
+	int alloc_size = (MAX_NUM_CLUSTER * MAX_CPUS_PER_CLUSTER
+					* MSM_PC_NUM_COUNTERS
+					* sizeof(*msm_pc_debug_counters));
+
+	msm_pc_debug_counters = dma_alloc_coherent(&pdev->dev, alloc_size,
+				&msm_pc_debug_counters_phys, GFP_KERNEL);
+
+	if (msm_pc_debug_counters) {
+		memset(msm_pc_debug_counters, 0, alloc_size);
+		dent = debugfs_create_file("pc_debug_counter", 0444, NULL,
+				msm_pc_debug_counters,
+				&msm_pc_debug_counters_fops);
+		if (!dent)
+			pr_err("%s: ERROR debugfs_create_file failed\n",
+					__func__);
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		if (!res)
+			goto skip_save_imem;
+		msm_pc_debug_counters_imem = devm_ioremap(&pdev->dev,
+						res->start, resource_size(res));
+		if (msm_pc_debug_counters_imem) {
+			writel_relaxed(msm_pc_debug_counters_phys,
+					msm_pc_debug_counters_imem);
+			/* memory barrier */
+			mb();
+			devm_iounmap(&pdev->dev,
+					msm_pc_debug_counters_imem);
+		}
+	} else {
+		msm_pc_debug_counters = NULL;
+		msm_pc_debug_counters_phys = 0;
+	}
+skip_save_imem:
+	if (pdev->dev.of_node) {
+		key = "qcom,tz-flushes-cache";
+		msm_pm_tz_flushes_cache =
+				of_property_read_bool(pdev->dev.of_node, key);
+
+		key = "qcom,no-pll-switch-for-retention";
+		msm_pm_ret_no_pll_switch =
+				of_property_read_bool(pdev->dev.of_node, key);
+
+		ret = msm_pm_clk_init(pdev);
+		if (ret) {
+			pr_info("msm_pm_clk_init returned error\n");
+			return ret;
+		}
+	}
+
+	if (pdev->dev.of_node)
+		of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+
+	return ret;
+}
+
+static const struct of_device_id msm_cpu_pm_table[] = {
+	{.compatible = "qcom,pm"},
+	{},
+};
+
+static struct platform_driver msm_cpu_pm_driver = {
+	.probe = msm_cpu_pm_probe,
+	.driver = {
+		.name = "msm-pm",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_cpu_pm_table,
+	},
+};
+
+static int __init msm_pm_drv_init(void)
+{
+	int rc;
+
+	cpumask_clear(&retention_cpus);
+
+	rc = platform_driver_register(&msm_cpu_pm_snoc_client_driver);
+
+	if (rc)
+		pr_err("%s(): failed to register driver %s\n", __func__,
+				msm_cpu_pm_snoc_client_driver.driver.name);
+	return rc;
+}
+late_initcall(msm_pm_drv_init);
+
+static int __init msm_pm_debug_counters_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&msm_cpu_pm_driver);
+
+	if (rc)
+		pr_err("%s(): failed to register driver %s\n", __func__,
+				msm_cpu_pm_driver.driver.name);
+	return rc;
+}
+fs_initcall(msm_pm_debug_counters_init);
+
+int __init msm_pm_sleep_status_init(void)
+{
+	static bool registered;
+
+	if (registered)
+		return 0;
+	registered = true;
+
+	return platform_driver_register(&msm_cpu_status_driver);
+}
+arch_initcall(msm_pm_sleep_status_init);
+
+#ifdef CONFIG_ARM
+static int idle_initialize(void)
+{
+	arm_pm_idle = arch_idle;
+	return 0;
+}
+early_initcall(idle_initialize);
+#endif
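msm_pc_inc_debug_count() and msm_pc_debug_counters_copy() above both flatten a (cluster, cpu, counter) triple into a single index of the shared counter buffer. A standalone sketch of that indexing, with the array sizes chosen only for the example:

#include <stdio.h>

#define EX_MAX_CLUSTERS		4	/* example value */
#define EX_CPUS_PER_CLUSTER	4	/* example value */
#define EX_NUM_COUNTERS		4	/* entry, exit, fallthrough, unused */

static int counter_index(int cluster, int cpu, int counter)
{
	return cluster * EX_CPUS_PER_CLUSTER * EX_NUM_COUNTERS
		+ cpu * EX_NUM_COUNTERS + counter;
}

int main(void)
{
	long counters[EX_MAX_CLUSTERS * EX_CPUS_PER_CLUSTER * EX_NUM_COUNTERS] = { 0 };

	/* bump the "PC entry" counter (slot 0) for cluster 1, cpu 2 */
	counters[counter_index(1, 2, 0)]++;
	printf("cluster 1, cpu 2, counter 0 -> index %d\n",
	       counter_index(1, 2, 0));
	return 0;
}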
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c
index ee9b7af..eb54119 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c
@@ -971,10 +971,8 @@
 
 	bus_node = kzalloc(sizeof(struct msm_bus_node_device_type), GFP_KERNEL);
 	if (!bus_node) {
-		MSM_BUS_ERR("%s:Bus node alloc failed\n", __func__);
-		kfree(bus_dev);
-		bus_dev = NULL;
-		goto exit_device_init;
+		ret = -ENOMEM;
+		goto err_device_init;
 	}
 	bus_dev = &bus_node->dev;
 	device_initialize(bus_dev);
@@ -982,47 +980,37 @@
 	node_info = devm_kzalloc(bus_dev,
 			sizeof(struct msm_bus_node_info_type), GFP_KERNEL);
 	if (!node_info) {
-		MSM_BUS_ERR("%s:Bus node info alloc failed\n", __func__);
-		devm_kfree(bus_dev, bus_node);
-		kfree(bus_dev);
-		bus_dev = NULL;
-		goto exit_device_init;
+		ret = -ENOMEM;
+		goto err_put_device;
 	}
 
 	bus_node->node_info = node_info;
 	bus_node->ap_owned = pdata->ap_owned;
 	bus_dev->of_node = pdata->of_node;
 
-	if (msm_bus_copy_node_info(pdata, bus_dev) < 0) {
-		devm_kfree(bus_dev, bus_node);
-		devm_kfree(bus_dev, node_info);
-		kfree(bus_dev);
-		bus_dev = NULL;
-		goto exit_device_init;
-	}
+	ret = msm_bus_copy_node_info(pdata, bus_dev);
+	if (ret)
+		goto err_put_device;
 
 	bus_dev->bus = &msm_bus_type;
 	dev_set_name(bus_dev, bus_node->node_info->name);
 
 	ret = device_add(bus_dev);
-	if (ret < 0) {
+	if (ret) {
 		MSM_BUS_ERR("%s: Error registering device %d",
 				__func__, pdata->node_info->id);
-		devm_kfree(bus_dev, bus_node);
-		devm_kfree(bus_dev, node_info->dev_connections);
-		devm_kfree(bus_dev, node_info->connections);
-		devm_kfree(bus_dev, node_info->black_connections);
-		devm_kfree(bus_dev, node_info->black_listed_connections);
-		devm_kfree(bus_dev, node_info);
-		kfree(bus_dev);
-		bus_dev = NULL;
-		goto exit_device_init;
+		goto err_put_device;
 	}
 	device_create_file(bus_dev, &dev_attr_bw);
 	INIT_LIST_HEAD(&bus_node->devlist);
-
-exit_device_init:
 	return bus_dev;
+
+err_put_device:
+	put_device(bus_dev);
+	bus_dev = NULL;
+	kfree(bus_node);
+err_device_init:
+	return ERR_PTR(ret);
 }
 
 static int msm_bus_setup_dev_conn(struct device *bus_dev, void *data)
@@ -1169,10 +1157,10 @@
 
 		node_dev = msm_bus_device_init(&pdata->info[i]);
 
-		if (!node_dev) {
+		if (IS_ERR(node_dev)) {
 			MSM_BUS_ERR("%s: Error during dev init for %d",
 				__func__, pdata->info[i].node_info->id);
-			ret = -ENXIO;
+			ret = PTR_ERR(node_dev);
 			goto exit_device_probe;
 		}
 
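Both msm_bus fabric changes replace the open-coded devm_kfree()/kfree() unwinding with a single error path that drops the device reference via put_device() and reports the failure as an ERR_PTR(), which the caller now decodes with IS_ERR()/PTR_ERR(). A hedged, simplified sketch of that pattern (names are illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>

struct example_node {
	struct device dev;
};

static int example_fill_info(struct example_node *node)
{
	return 0;	/* stand-in for copying platform data into the node */
}

static struct device *example_device_init(void)
{
	struct example_node *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return ERR_PTR(-ENOMEM);

	device_initialize(&node->dev);
	dev_set_name(&node->dev, "example-node");

	ret = example_fill_info(node);
	if (ret)
		goto err_put_device;

	ret = device_add(&node->dev);
	if (ret)
		goto err_put_device;

	return &node->dev;

err_put_device:
	/* drop the reference taken by device_initialize(), then free */
	put_device(&node->dev);
	kfree(node);
	return ERR_PTR(ret);
}

The caller then checks IS_ERR() on the returned pointer and propagates PTR_ERR(), as the probe hunks above now do.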
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
index c00749c..5a28e98 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
@@ -1471,7 +1471,7 @@
 	struct device *bus_dev = NULL;
 	struct msm_bus_node_device_type *bus_node = NULL;
 	struct msm_bus_node_info_type *node_info = NULL;
-	int ret = 0, i = 0;
+	int ret = -ENODEV, i = 0;
 
 	/**
 	* Init here so we can use devm calls
@@ -1479,10 +1479,8 @@
 
 	bus_node = kzalloc(sizeof(struct msm_bus_node_device_type), GFP_KERNEL);
 	if (!bus_node) {
-		MSM_BUS_ERR("%s:Bus node alloc failed\n", __func__);
-		kfree(bus_dev);
-		bus_dev = NULL;
-		goto exit_device_init;
+		ret = -ENOMEM;
+		goto err_device_init;
 	}
 	bus_dev = &bus_node->dev;
 	device_initialize(bus_dev);
@@ -1490,11 +1488,8 @@
 	node_info = devm_kzalloc(bus_dev,
 			sizeof(struct msm_bus_node_info_type), GFP_KERNEL);
 	if (!node_info) {
-		MSM_BUS_ERR("%s:Bus node info alloc failed\n", __func__);
-		devm_kfree(bus_dev, bus_node);
-		kfree(bus_dev);
-		bus_dev = NULL;
-		goto exit_device_init;
+		ret = -ENOMEM;
+		goto err_put_device;
 	}
 
 	bus_node->node_info = node_info;
@@ -1505,8 +1500,10 @@
 		bus_node->qos_bcms = devm_kzalloc(bus_dev,
 					(sizeof(struct qos_bcm_type) *
 					bus_node->num_qos_bcms), GFP_KERNEL);
-		if (!bus_node->qos_bcms)
-			goto exit_device_init;
+		if (!bus_node->qos_bcms) {
+			ret = -ENOMEM;
+			goto err_put_device;
+		}
 		for (i = 0; i < bus_node->num_qos_bcms; i++) {
 			bus_node->qos_bcms[i].qos_bcm_id =
 					pdata->qos_bcms[i].qos_bcm_id;
@@ -1519,36 +1516,29 @@
 
 	bus_dev->of_node = pdata->of_node;
 
-	if (msm_bus_copy_node_info(pdata, bus_dev) < 0) {
-		devm_kfree(bus_dev, bus_node);
-		devm_kfree(bus_dev, node_info);
-		kfree(bus_dev);
-		bus_dev = NULL;
-		goto exit_device_init;
-	}
+	ret = msm_bus_copy_node_info(pdata, bus_dev);
+	if (ret)
+		goto err_put_device;
 
 	bus_dev->bus = &msm_bus_type;
 	dev_set_name(bus_dev, bus_node->node_info->name);
 
 	ret = device_add(bus_dev);
-	if (ret < 0) {
+	if (ret) {
 		MSM_BUS_ERR("%s: Error registering device %d",
 				__func__, pdata->node_info->id);
-		devm_kfree(bus_dev, bus_node);
-		devm_kfree(bus_dev, node_info->dev_connections);
-		devm_kfree(bus_dev, node_info->connections);
-		devm_kfree(bus_dev, node_info->black_connections);
-		devm_kfree(bus_dev, node_info->black_listed_connections);
-		devm_kfree(bus_dev, node_info);
-		kfree(bus_dev);
-		bus_dev = NULL;
-		goto exit_device_init;
+		goto err_put_device;
 	}
 	device_create_file(bus_dev, &dev_attr_bw);
 	INIT_LIST_HEAD(&bus_node->devlist);
-
-exit_device_init:
 	return bus_dev;
+
+err_put_device:
+	put_device(bus_dev);
+	bus_dev = NULL;
+	kfree(bus_node);
+err_device_init:
+	return ERR_PTR(ret);
 }
 
 static int msm_bus_setup_dev_conn(struct device *bus_dev, void *data)
@@ -1736,10 +1726,10 @@
 
 		node_dev = msm_bus_device_init(&pdata->info[i]);
 
-		if (!node_dev) {
+		if (IS_ERR(node_dev)) {
 			MSM_BUS_ERR("%s: Error during dev init for %d",
 				__func__, pdata->info[i].node_info->id);
-			ret = -ENXIO;
+			ret = PTR_ERR(node_dev);
 			goto exit_device_probe;
 		}
 
diff --git a/drivers/soc/qcom/msm_glink_pkt.c b/drivers/soc/qcom/msm_glink_pkt.c
index 9a9b4df..a292879 100644
--- a/drivers/soc/qcom/msm_glink_pkt.c
+++ b/drivers/soc/qcom/msm_glink_pkt.c
@@ -29,6 +29,7 @@
 #include <asm/ioctls.h>
 #include <linux/mm.h>
 #include <linux/of.h>
+#include <linux/vmalloc.h>
 #include <linux/ipc_logging.h>
 #include <linux/termios.h>
 
@@ -397,7 +398,7 @@
 	GLINK_PKT_INFO("%s(): priv[%p] pkt_priv[%p] ptr[%p]\n",
 					__func__, priv, pkt_priv, ptr);
 	/* Free Tx buffer allocated in glink_pkt_write */
-	kfree(ptr);
+	kvfree(ptr);
 }
 
 /**
@@ -775,8 +776,10 @@
 		__func__, devp->i, count);
 	data = kzalloc(count, GFP_KERNEL);
 	if (!data) {
-		GLINK_PKT_ERR("%s buffer allocation failed\n", __func__);
-		return -ENOMEM;
+		if (!strcmp(devp->open_cfg.edge, "bg"))
+			data = vzalloc(count);
+		if (!data)
+			return -ENOMEM;
 	}
 
 	ret = copy_from_user(data, buf, count);
@@ -784,14 +787,14 @@
 		GLINK_PKT_ERR(
 		"%s copy_from_user failed ret[%d] on dev id:%d size %zu\n",
 		 __func__, ret, devp->i, count);
-		kfree(data);
+		kvfree(data);
 		return -EFAULT;
 	}
 
 	ret = glink_tx(devp->handle, data, data, count, GLINK_TX_REQ_INTENT);
 	if (ret) {
 		GLINK_PKT_ERR("%s glink_tx failed ret[%d]\n", __func__, ret);
-		kfree(data);
+		kvfree(data);
 		return ret;
 	}
 
diff --git a/drivers/soc/qcom/msm_smem.c b/drivers/soc/qcom/msm_smem.c
index 1bbd751..959aab9 100644
--- a/drivers/soc/qcom/msm_smem.c
+++ b/drivers/soc/qcom/msm_smem.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -26,6 +26,7 @@
 #include <soc/qcom/subsystem_notif.h>
 #include <soc/qcom/subsystem_restart.h>
 #include <soc/qcom/ramdump.h>
+#include <soc/qcom/scm.h>
 
 #include <soc/qcom/smem.h>
 
@@ -1085,12 +1086,15 @@
 	void *handle;
 	struct restart_notifier_block *nb;
 
-	if (smem_dev)
-		smem_ramdump_dev = create_ramdump_device("smem", smem_dev);
-	if (IS_ERR_OR_NULL(smem_ramdump_dev)) {
-		LOG_ERR("%s: Unable to create smem ramdump device.\n",
-			__func__);
-		smem_ramdump_dev = NULL;
+	if (scm_is_secure_device()) {
+		if (smem_dev)
+			smem_ramdump_dev = create_ramdump_device("smem",
+								 smem_dev);
+		if (IS_ERR_OR_NULL(smem_ramdump_dev)) {
+			LOG_ERR("%s: Unable to create smem ramdump device.\n",
+				__func__);
+			smem_ramdump_dev = NULL;
+		}
 	}
 
 	for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index 360dbcf..8776379 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -263,6 +263,7 @@
 	struct pil_priv *priv = desc->priv;
 	struct pil_seg *seg;
 	int count = 0, ret;
+	u32 encryption_status = 0;
 
 	if (desc->minidump_ss) {
 		pr_info("Minidump : md_ss_toc->md_ss_toc_init is 0x%x\n",
@@ -280,12 +281,14 @@
 		 * Collect minidump if SS ToC is valid and segment table
 		 * is initialized in memory and encryption status is set.
 		 */
+		encryption_status = desc->minidump_ss->encryption_status;
+
 		if ((desc->minidump_ss->md_ss_smem_regions_baseptr != 0) &&
 			(desc->minidump_ss->md_ss_toc_init == true) &&
 			(desc->minidump_ss->md_ss_enable_status ==
 				MD_SS_ENABLED)) {
-			if (desc->minidump_ss->encryption_status ==
-				MD_SS_ENCR_DONE) {
+			if (encryption_status == MD_SS_ENCR_DONE ||
+				encryption_status == MD_SS_ENCR_NOTREQ) {
 				pr_info("Minidump : Dumping for %s\n",
 					desc->name);
 				return pil_do_minidump(desc, minidump_dev);
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
index 6283477..2a46d0b 100644
--- a/drivers/soc/qcom/pil-msa.c
+++ b/drivers/soc/qcom/pil-msa.c
@@ -797,14 +797,18 @@
 int pil_mss_debug_reset(struct pil_desc *pil)
 {
 	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	u32 encryption_status;
 	int ret;
 
+
 	if (!pil->minidump_ss)
 		return 0;
-	if (pil->minidump_ss) {
-		if (pil->minidump_ss->md_ss_enable_status != MD_SS_ENABLED)
-			return 0;
-	}
+
+	encryption_status = pil->minidump_ss->encryption_status;
+
+	if ((pil->minidump_ss->md_ss_enable_status != MD_SS_ENABLED) ||
+		encryption_status == MD_SS_ENCR_NOTREQ)
+		return 0;
 
 	/*
 	 * Bring subsystem out of reset and enable required
@@ -836,7 +840,7 @@
 	 * complete before returning
 	 */
 	pr_info("Minidump: waiting encryption to complete\n");
-	msleep(10000);
+	msleep(13000);
 	if (pil->minidump_ss) {
 		writel_relaxed(0x2, drv->reg_base + QDSP6SS_NMI_CFG);
 		/* Let write complete before proceeding */
diff --git a/drivers/soc/qcom/pil_bg_intf.h b/drivers/soc/qcom/pil_bg_intf.h
index 722024b..46aed25 100644
--- a/drivers/soc/qcom/pil_bg_intf.h
+++ b/drivers/soc/qcom/pil_bg_intf.h
@@ -36,7 +36,7 @@
 __packed struct tzapp_bg_rsp {
 	uint32_t tzapp_bg_cmd;
 	uint32_t bg_info_len;
-	uint32_t status;
+	int32_t status;
 	uint32_t bg_info[100];
 };
 
diff --git a/drivers/soc/qcom/pm-boot.c b/drivers/soc/qcom/pm-boot.c
new file mode 100644
index 0000000..f0daeba
--- /dev/null
+++ b/drivers/soc/qcom/pm-boot.c
@@ -0,0 +1,98 @@
+/* Copyright (c) 2011-2014, 2016, 2018, The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <soc/qcom/scm-boot.h>
+#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <asm/smp_plat.h>
+#include "pm-boot.h"
+#include "idle.h"
+
+#define CPU_INDEX(cluster, cpu) (cluster * MAX_CPUS_PER_CLUSTER + cpu)
+
+static void (*msm_pm_boot_before_pc)(unsigned int cpu, unsigned long entry);
+static void (*msm_pm_boot_after_pc)(unsigned int cpu);
+
+static int msm_pm_tz_boot_init(void)
+{
+	int ret;
+	phys_addr_t warmboot_addr = virt_to_phys(msm_pm_boot_entry);
+
+	if (scm_is_mc_boot_available())
+		ret = scm_set_warm_boot_addr_mc_for_all(warmboot_addr);
+	else {
+		unsigned int flag = 0;
+
+		if (num_possible_cpus() == 1)
+			flag = SCM_FLAG_WARMBOOT_CPU0;
+		else if (num_possible_cpus() == 2)
+			flag = SCM_FLAG_WARMBOOT_CPU0 | SCM_FLAG_WARMBOOT_CPU1;
+		else if (num_possible_cpus() == 4)
+			flag = SCM_FLAG_WARMBOOT_CPU0 | SCM_FLAG_WARMBOOT_CPU1 |
+				SCM_FLAG_WARMBOOT_CPU2 | SCM_FLAG_WARMBOOT_CPU3;
+		else
+			pr_warn("%s: set warmboot address failed\n",
+								__func__);
+
+		ret = scm_set_boot_addr(virt_to_phys(msm_pm_boot_entry), flag);
+	}
+	return ret;
+}
+static void msm_pm_write_boot_vector(unsigned int cpu, unsigned long address)
+{
+	uint32_t clust_id = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
+	uint32_t cpu_id = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 0);
+	unsigned long *start_address;
+	unsigned long *end_address;
+
+	if (clust_id >= MAX_NUM_CLUSTER || cpu_id >= MAX_CPUS_PER_CLUSTER)
+		WARN_ON(cpu);
+
+	msm_pm_boot_vector[CPU_INDEX(clust_id, cpu_id)] = address;
+	start_address = &msm_pm_boot_vector[CPU_INDEX(clust_id, cpu_id)];
+	end_address = &msm_pm_boot_vector[CPU_INDEX(clust_id, cpu_id + 1)];
+	dmac_clean_range((void *)start_address, (void *)end_address);
+}
+
+static void msm_pm_config_tz_before_pc(unsigned int cpu,
+		unsigned long entry)
+{
+	msm_pm_write_boot_vector(cpu, entry);
+}
+
+void msm_pm_boot_config_before_pc(unsigned int cpu, unsigned long entry)
+{
+	if (msm_pm_boot_before_pc)
+		msm_pm_boot_before_pc(cpu, entry);
+}
+
+void msm_pm_boot_config_after_pc(unsigned int cpu)
+{
+	if (msm_pm_boot_after_pc)
+		msm_pm_boot_after_pc(cpu);
+}
+
+static int __init msm_pm_boot_init(void)
+{
+	int ret = 0;
+
+	ret = msm_pm_tz_boot_init();
+	msm_pm_boot_before_pc = msm_pm_config_tz_before_pc;
+	msm_pm_boot_after_pc = NULL;
+
+	return ret;
+}
+postcore_initcall(msm_pm_boot_init);
diff --git a/drivers/soc/qcom/pm-boot.h b/drivers/soc/qcom/pm-boot.h
new file mode 100644
index 0000000..7ec053a
--- /dev/null
+++ b/drivers/soc/qcom/pm-boot.h
@@ -0,0 +1,19 @@
+/* Copyright (c) 2011-2014, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _ARCH_ARM_MACH_MSM_PM_BOOT_H
+#define _ARCH_ARM_MACH_MSM_PM_BOOT_H
+
+void msm_pm_boot_config_before_pc(unsigned int cpu, unsigned long entry);
+void msm_pm_boot_config_after_pc(unsigned int cpu);
+
+#endif
diff --git a/drivers/soc/qcom/qdss_bridge.c b/drivers/soc/qcom/qdss_bridge.c
index 8668155..fb70a07 100644
--- a/drivers/soc/qcom/qdss_bridge.c
+++ b/drivers/soc/qcom/qdss_bridge.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -245,7 +245,7 @@
 
 	switch (event) {
 	case USB_QDSS_CONNECT:
-		usb_qdss_alloc_req(drvdata->usb_ch, poolsize, 0);
+		usb_qdss_alloc_req(ch, poolsize, 0);
 		mhi_queue_read(drvdata);
 		break;
 
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index 036990c..4f3c264 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -258,21 +258,28 @@
 	switch (state) {
 	case RPMH_ACTIVE_ONLY_STATE:
 	case RPMH_AWAKE_STATE:
-		if (req->sleep_val != UINT_MAX)
+		if (req->sleep_val != UINT_MAX) {
 			req->wake_val = cmd->data;
+			rpm->dirty = true;
+		}
 		break;
 	case RPMH_WAKE_ONLY_STATE:
-		req->wake_val = cmd->data;
+		if (req->wake_val != cmd->data) {
+			req->wake_val = cmd->data;
+			rpm->dirty = true;
+		}
 		break;
 	case RPMH_SLEEP_STATE:
-		req->sleep_val = cmd->data;
+		if (req->sleep_val != cmd->data) {
+			req->sleep_val = cmd->data;
+			rpm->dirty = true;
+		}
 		break;
 	default:
 		break;
 	};
 
 unlock:
-	rpm->dirty = true;
 	spin_unlock_irqrestore(&rpm->lock, flags);
 
 	return req;
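The rpmh change moves the rpm->dirty assignment into the individual cases so the cache is marked dirty only when a stored sleep/wake value actually changes, sparing redundant flushes. The core idea, as a small standalone sketch:

#include <stdbool.h>
#include <stdint.h>

struct example_req {
	uint32_t sleep_val;
	uint32_t wake_val;
};

struct example_cache {
	struct example_req req;
	bool dirty;	/* true when cached values must be re-flushed */
};

static void example_cache_wake(struct example_cache *c, uint32_t val)
{
	if (c->req.wake_val != val) {
		c->req.wake_val = val;
		c->dirty = true;
	}
	/* identical value: cache stays clean, no flush is forced */
}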
diff --git a/drivers/soc/qcom/rpmh_master_stat.c b/drivers/soc/qcom/rpmh_master_stat.c
index 3a77ed8..80589de 100644
--- a/drivers/soc/qcom/rpmh_master_stat.c
+++ b/drivers/soc/qcom/rpmh_master_stat.c
@@ -265,6 +265,7 @@
 	kobject_put(prvdata->kobj);
 	platform_set_drvdata(pdev, NULL);
 	iounmap(rpmh_unit_base);
+	rpmh_unit_base = NULL;
 
 	return 0;
 }
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index e02bf84..fffef10 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -593,12 +593,18 @@
 	/* QCS605 ID */
 	[347] = {MSM_CPU_QCS605, "QCS605"},
 
+	/* SXR1130 ID */
+	[371] = {MSM_CPU_SXR1130, "SXR1130"},
+
 	/* SDA670 ID */
 	[337] = {MSM_CPU_SDA670, "SDA670"},
 
 	/* SDM710 ID */
 	[360] = {MSM_CPU_SDM710, "SDM710"},
 
+	/* SXR1120 ID */
+	[370] = {MSM_CPU_SXR1120, "SXR1120"},
+
 	/* 8953 ID */
 	[293] = {MSM_CPU_8953, "MSM8953"},
 	[304] = {MSM_CPU_8953, "APQ8053"},
@@ -615,6 +621,12 @@
 	[294] = {MSM_CPU_8937, "MSM8937"},
 	[295] = {MSM_CPU_8937, "APQ8937"},
 
+	/* MSM8917 IDs */
+	[303] = {MSM_CPU_8917, "MSM8917"},
+	[307] = {MSM_CPU_8917, "APQ8017"},
+	[308] = {MSM_CPU_8917, "MSM8217"},
+	[309] = {MSM_CPU_8917, "MSM8617"},
+
 	/* SDM429 and SDM439 ID*/
 	[353] = {MSM_CPU_SDM439, "SDM439"},
 	[354] = {MSM_CPU_SDM429, "SDM429"},
@@ -1532,6 +1544,10 @@
 		dummy_socinfo.id = 336;
 		strlcpy(dummy_socinfo.build_id, "sdm670 - ",
 			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_sxr1130()) {
+		dummy_socinfo.id = 371;
+		strlcpy(dummy_socinfo.build_id, "sxr1130 - ",
+			sizeof(dummy_socinfo.build_id));
 	} else if (early_machine_is_sdm710()) {
 		dummy_socinfo.id = 360;
 		strlcpy(dummy_socinfo.build_id, "sdm710 - ",
@@ -1544,6 +1560,10 @@
 		dummy_socinfo.id = 347;
 		strlcpy(dummy_socinfo.build_id, "qcs605 - ",
 			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_sxr1120()) {
+		dummy_socinfo.id = 370;
+		strlcpy(dummy_socinfo.build_id, "sxr1120 - ",
+			sizeof(dummy_socinfo.build_id));
 	} else if (early_machine_is_mdm9650()) {
 		dummy_socinfo.id = 286;
 		strlcpy(dummy_socinfo.build_id, "mdm9650 - ",
@@ -1560,6 +1580,10 @@
 		dummy_socinfo.id = 294;
 		strlcpy(dummy_socinfo.build_id, "msm8937 - ",
 			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msm8917()) {
+		dummy_socinfo.id = 303;
+		strlcpy(dummy_socinfo.build_id, "msm8917 - ",
+			sizeof(dummy_socinfo.build_id));
 	} else if (early_machine_is_sdm450()) {
 		dummy_socinfo.id = 338;
 		strlcpy(dummy_socinfo.build_id, "sdm450 - ",
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index d568014..f2597e3 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -2764,7 +2764,7 @@
 	ret = spcom_register_chardev();
 	if (ret) {
 		pr_err("create character device failed.\n");
-		goto fail_reg_chardev;
+		goto fail_while_chardev_reg;
 	}
 
 	link_info.glink_link_state_notif_cb = spcom_link_state_notif_cb;
@@ -2802,6 +2802,7 @@
 fail_reg_chardev:
 	pr_err("Failed to init driver.\n");
 	spcom_unregister_chrdev();
+fail_while_chardev_reg:
 	kfree(dev);
 	spcom_dev = NULL;
 
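The spcom fix introduces a second label so a chardev registration failure skips spcom_unregister_chrdev() and only releases the allocation; each label now undoes exactly the steps completed before the failure. A hedged sketch of that goto-ladder ordering (the helpers are illustrative stand-ins):

#include <linux/slab.h>

static int example_register_chardev(void)
{
	return 0;	/* stand-in for the real registration */
}

static void example_unregister_chardev(void)
{
}

static int example_register_link(void)
{
	return 0;
}

static int example_init(void *dev)
{
	int ret;

	ret = example_register_chardev();
	if (ret)
		goto fail_free_dev;	/* chardev was never registered */

	ret = example_register_link();
	if (ret)
		goto fail_unreg_chardev;

	return 0;

fail_unreg_chardev:
	example_unregister_chardev();
fail_free_dev:
	kfree(dev);
	return ret;
}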
diff --git a/drivers/soc/qcom/spm_devices.c b/drivers/soc/qcom/spm_devices.c
index 5f1ac4e..268a8fe 100644
--- a/drivers/soc/qcom/spm_devices.c
+++ b/drivers/soc/qcom/spm_devices.c
@@ -234,7 +234,7 @@
 }
 
 static int msm_spm_dev_set_low_power_mode(struct msm_spm_device *dev,
-		unsigned int mode, bool notify_rpm)
+		unsigned int mode, bool notify_rpm, bool set_spm_enable)
 {
 	uint32_t i;
 	int ret = -EINVAL;
@@ -251,9 +251,11 @@
 	if (!dev->num_modes)
 		return 0;
 
-	if (mode == MSM_SPM_MODE_DISABLED) {
+	if (mode == MSM_SPM_MODE_DISABLED && set_spm_enable) {
 		ret = msm_spm_drv_set_spm_enable(&dev->reg_data, false);
-	} else if (!msm_spm_drv_set_spm_enable(&dev->reg_data, true)) {
+	} else {
+		if (set_spm_enable)
+			ret = msm_spm_drv_set_spm_enable(&dev->reg_data, true);
 		for (i = 0; i < dev->num_modes; i++) {
 			if (dev->modes[i].mode != mode)
 				continue;
@@ -539,10 +541,24 @@
 {
 	struct msm_spm_device *dev = this_cpu_ptr(&msm_cpu_spm_device);
 
-	return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm);
+	return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm, true);
 }
 EXPORT_SYMBOL(msm_spm_set_low_power_mode);
 
+void msm_spm_set_rpm_hs(bool allow_rpm_hs)
+{
+	struct msm_spm_device *dev = this_cpu_ptr(&msm_cpu_spm_device);
+
+	dev->allow_rpm_hs = allow_rpm_hs;
+}
+EXPORT_SYMBOL(msm_spm_set_rpm_hs);
+
+int msm_spm_config_low_power_mode_addr(struct msm_spm_device *dev,
+		unsigned int mode, bool notify_rpm)
+{
+	return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm, false);
+}
+
 /**
  * msm_spm_init(): Board initalization function
  * @data: platform specific SPM register configuration data
@@ -586,7 +602,7 @@
 int msm_spm_config_low_power_mode(struct msm_spm_device *dev,
 		unsigned int mode, bool notify_rpm)
 {
-	return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm);
+	return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm, true);
 }
 #ifdef CONFIG_MSM_L2_SPM
 
diff --git a/drivers/soc/qcom/subsys-pil-bg.c b/drivers/soc/qcom/subsys-pil-bg.c
index 75c3666..070733e 100644
--- a/drivers/soc/qcom/subsys-pil-bg.c
+++ b/drivers/soc/qcom/subsys-pil-bg.c
@@ -30,6 +30,7 @@
 #include "peripheral-loader.h"
 #include "../../misc/qseecom_kernel.h"
 #include "pil_bg_intf.h"
+#include "bgcom_interface.h"
 
 #define INVALID_GPIO	-1
 #define NUM_GPIOS	4
@@ -37,7 +38,7 @@
 #define desc_to_data(d)	container_of(d, struct pil_bg_data, desc)
 #define subsys_to_data(d) container_of(d, struct pil_bg_data, subsys_desc)
 #define BG_RAMDUMP_SZ	0x00102000
-#define BG_CRASH_IN_TWM	2
+#define BG_CRASH_IN_TWM	-2
 /**
  * struct pil_bg_data
  * @qseecom_handle: handle of TZ app
@@ -90,9 +91,18 @@
 static void bg_app_shutdown_notify(const struct subsys_desc *subsys)
 {
 	struct pil_bg_data *bg_data = subsys_to_data(subsys);
+
+	/* Disable irqs if BG is already up */
+	if (bg_data->is_ready) {
+		disable_irq(bg_data->status_irq);
+		disable_irq(bg_data->errfatal_irq);
+		bg_data->is_ready = false;
+	}
 	/* Toggle AP2BG err fatal gpio here to inform apps err fatal event */
-	if (gpio_is_valid(bg_data->gpios[2]))
+	if (gpio_is_valid(bg_data->gpios[2])) {
+		pr_debug("Sending Apps shutdown signal\n");
 		gpio_set_value(bg_data->gpios[2], 1);
+	}
 }
 
 /**
@@ -106,9 +116,18 @@
 {
 	struct pil_bg_data *bg_data = container_of(nb,
 					struct pil_bg_data, reboot_blk);
+
+	/* Disable irqs if BG is already up */
+	if (bg_data->is_ready) {
+		disable_irq(bg_data->status_irq);
+		disable_irq(bg_data->errfatal_irq);
+		bg_data->is_ready = false;
+	}
 	/* Toggle AP2BG err fatal gpio here to inform apps err fatal event */
-	if (gpio_is_valid(bg_data->gpios[2]))
+	if (gpio_is_valid(bg_data->gpios[2])) {
+		pr_debug("Sending reboot signal\n");
 		gpio_set_value(bg_data->gpios[2], 1);
+	}
 	return NOTIFY_DONE;
 }
 
@@ -266,7 +285,6 @@
 		return ret;
 	}
 	enable_irq(bg_data->status_irq);
-	enable_irq(bg_data->errfatal_irq);
 	ret = wait_for_err_ready(bg_data);
 	if (ret) {
 		dev_err(bg_data->desc.dev,
@@ -289,10 +307,12 @@
 {
 	struct pil_bg_data *bg_data = subsys_to_data(subsys);
 
-	disable_irq(bg_data->status_irq);
-	devm_free_irq(bg_data->desc.dev, bg_data->status_irq, bg_data);
-	disable_irq(bg_data->errfatal_irq);
-	bg_data->is_ready = false;
+	if (bg_data->is_ready) {
+		disable_irq(bg_data->status_irq);
+		devm_free_irq(bg_data->desc.dev, bg_data->status_irq, bg_data);
+		disable_irq(bg_data->errfatal_irq);
+		bg_data->is_ready = false;
+	}
 	return 0;
 }
 
@@ -393,7 +413,9 @@
 	ret = bgpil_tzapp_comm(bg_data, &bg_tz_req);
 	if (bg_data->cmd_status == BG_CRASH_IN_TWM) {
 		/* Do ramdump and resend boot cmd */
-		bg_data->subsys_desc.ramdump(true, &bg_data->subsys_desc);
+		if (is_twm_exit())
+			bg_data->subsys_desc.ramdump(true,
+				&bg_data->subsys_desc);
 		bg_tz_req.tzapp_bg_cmd = BGPIL_DLOAD_CONT;
 		ret = bgpil_tzapp_comm(bg_data, &bg_tz_req);
 	}
@@ -524,7 +546,6 @@
 	} else if (value == false && drvdata->is_ready) {
 		dev_err(drvdata->desc.dev,
 			"BG got unexpected reset: irq state changed 1->0\n");
-			drvdata->is_ready = false;
 		queue_work(drvdata->bg_queue, &drvdata->restart_work);
 	} else {
 		dev_err(drvdata->desc.dev,
@@ -586,7 +607,6 @@
 		goto err;
 	}
 	drvdata->errfatal_irq = irq;
-	enable_irq(drvdata->errfatal_irq);
 	/* Configure outgoing GPIO's */
 	if (gpio_request(drvdata->gpios[2], "AP2BG_ERRFATAL")) {
 		dev_err(&pdev->dev,
diff --git a/drivers/soc/qcom/system_pm.c b/drivers/soc/qcom/system_pm.c
index 9c6edbf..1befdcf 100644
--- a/drivers/soc/qcom/system_pm.c
+++ b/drivers/soc/qcom/system_pm.c
@@ -22,6 +22,13 @@
 #define PDC_TIME_VALID_SHIFT	31
 #define PDC_TIME_UPPER_MASK	0xFFFFFF
 
+#ifdef CONFIG_ARM_GIC_V3
+#include <linux/irqchip/arm-gic-v3.h>
+#else
+static inline void gic_v3_dist_restore(void) {}
+static inline void gic_v3_dist_save(void) {}
+#endif
+
 static struct rpmh_client *rpmh_client;
 
 static int setup_wakeup(uint32_t lo, uint32_t hi)
@@ -61,6 +68,7 @@
  */
 static int system_sleep_enter(struct cpumask *mask)
 {
+	gic_v3_dist_save();
 	return rpmh_flush(rpmh_client);
 }
 
@@ -70,6 +78,7 @@
 static void system_sleep_exit(void)
 {
 	msm_rpmh_master_stats_update();
+	gic_v3_dist_restore();
 }
 
 static struct system_pm_ops pm_ops = {
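The system_pm change keeps the gic_v3_dist_save()/gic_v3_dist_restore() call sites unconditional by supplying empty static inline stubs when CONFIG_ARM_GIC_V3 is not set. The same compile-time stub pattern in generic form (CONFIG_EXAMPLE_FEATURE is a placeholder Kconfig symbol, not a real option):

#ifdef CONFIG_EXAMPLE_FEATURE
/* real implementations are provided when the feature is built in */
void example_feature_save(void);
void example_feature_restore(void);
#else
static inline void example_feature_save(void) {}
static inline void example_feature_restore(void) {}
#endif

static int example_sleep_enter(void)
{
	example_feature_save();	/* compiles to nothing without the feature */
	return 0;
}

static void example_sleep_exit(void)
{
	example_feature_restore();
}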
diff --git a/drivers/soc/qcom/wcnss/wcnss_vreg.c b/drivers/soc/qcom/wcnss/wcnss_vreg.c
index 9d24ce1..1e61250 100644
--- a/drivers/soc/qcom/wcnss/wcnss_vreg.c
+++ b/drivers/soc/qcom/wcnss/wcnss_vreg.c
@@ -206,7 +206,7 @@
 					 voltage_levels,
 					 ARRAY_SIZE(voltage_levels));
 	if (ret) {
-		dev_err(dev, "error reading %s property\n", vreg_name);
+		wcnss_log(ERR, "error reading %s property\n", vreg_name);
 		return ret;
 	}
 
@@ -217,7 +217,8 @@
 	ret = of_property_read_u32(dev->of_node, current_vreg_name,
 				   &current_vreg);
 	if (ret) {
-		dev_err(dev, "error reading %s property\n", current_vreg_name);
+		wcnss_log(ERR, "error reading %s property\n",
+			  current_vreg_name);
 		return ret;
 	}
 
@@ -240,12 +241,14 @@
 		if (IS_ERR(pronto_vregs[vreg_i].regulator)) {
 			if (pronto_vregs[vreg_i].required) {
 				rc = PTR_ERR(pronto_vregs[vreg_i].regulator);
-				dev_err(dev, "regulator get of %s failed (%d)\n",
+				wcnss_log(ERR,
+					"regulator get of %s failed (%d)\n",
 					pronto_vregs[vreg_i].name, rc);
 				return rc;
 			} else {
-				dev_dbg(dev, "Skip optional regulator configuration: %s\n",
-					pronto_vregs[vreg_i].name);
+				wcnss_log(DBG,
+				"Skip optional regulator configuration: %s\n",
+				pronto_vregs[vreg_i].name);
 				continue;
 			}
 		}
@@ -255,7 +258,8 @@
 					       pronto_vregs[vreg_i].volt,
 					       wlan_config->pronto_vlevel);
 		if (rc) {
-			dev_err(dev, "error reading voltage-level property\n");
+			wcnss_log(ERR,
+				  "error reading voltage-level property\n");
 			return rc;
 		}
 		pronto_vregs[vreg_i].state |= VREG_GET_REGULATOR_MASK;
@@ -269,12 +273,14 @@
 		if (IS_ERR(iris_vregs[vreg_i].regulator)) {
 			if (iris_vregs[vreg_i].required) {
 				rc = PTR_ERR(iris_vregs[vreg_i].regulator);
-				dev_err(dev, "regulator get of %s failed (%d)\n",
+				wcnss_log(ERR,
+					"regulator get of %s failed (%d)\n",
 					iris_vregs[vreg_i].name, rc);
 				return rc;
 			} else {
-				dev_dbg(dev, "Skip optional regulator configuration: %s\n",
-					iris_vregs[vreg_i].name);
+				wcnss_log(DBG,
+				"Skip optional regulator configuration: %s\n",
+				iris_vregs[vreg_i].name);
 				continue;
 			}
 		}
@@ -284,7 +290,8 @@
 					       iris_vregs[vreg_i].volt,
 					       wlan_config->iris_vlevel);
 		if (rc) {
-			dev_err(dev, "error reading voltage-level property\n");
+			wcnss_log(ERR,
+				  "error reading voltage-level property\n");
 			return rc;
 		}
 		iris_vregs[vreg_i].state |= VREG_GET_REGULATOR_MASK;
@@ -334,7 +341,7 @@
 
 		clk = clk_get(dev, "xo");
 		if (IS_ERR(clk)) {
-			pr_err("Couldn't get xo clock\n");
+			wcnss_log(ERR, "Couldn't get xo clock\n");
 			return PTR_ERR(clk);
 		}
 
@@ -344,7 +351,7 @@
 
 		clk = clk_get(dev, "cxo");
 		if (IS_ERR(clk)) {
-			pr_err("Couldn't get cxo clock\n");
+			wcnss_log(ERR, "Couldn't get cxo clock\n");
 			return PTR_ERR(clk);
 		}
 	}
@@ -352,21 +359,21 @@
 	if (on) {
 		msm_wcnss_base = cfg->msm_wcnss_base;
 		if (!msm_wcnss_base) {
-			pr_err("ioremap wcnss physical failed\n");
+			wcnss_log(ERR, "ioremap wcnss physical failed\n");
 			goto fail;
 		}
 
 		/* Enable IRIS XO */
 		rc = clk_prepare_enable(clk);
 		if (rc) {
-			pr_err("clk enable failed\n");
+			wcnss_log(ERR, "clk enable failed\n");
 			goto fail;
 		}
 
 		/* NV bit is set to indicate that platform driver is capable
 		 * of doing NV download.
 		 */
-		pr_debug("wcnss: Indicate NV bin download\n");
+		wcnss_log(DBG, "Indicate NV bin download\n");
 		spare_reg = msm_wcnss_base + spare_offset;
 		reg = readl_relaxed(spare_reg);
 		reg |= NVBIN_DLND_BIT;
@@ -403,10 +410,11 @@
 					cpu_relax();
 
 				iris_reg = readl_relaxed(iris_read_reg);
-				pr_info("wcnss: IRIS Reg: %08x\n", iris_reg);
+				wcnss_log(INFO, "IRIS Reg: %08x\n", iris_reg);
 
 				if (validate_iris_chip_id(iris_reg) && i >= 4) {
-					pr_info("wcnss: IRIS Card absent/invalid\n");
+					wcnss_log(INFO,
+						"IRIS Card absent/invalid\n");
 					auto_detect = WCNSS_XO_INVALID;
 					/* Reset iris read bit */
 					reg &= ~WCNSS_PMU_CFG_IRIS_XO_READ;
@@ -416,7 +424,8 @@
 					reg &= ~(WCNSS_PMU_CFG_IRIS_XO_MODE);
 					goto xo_configure;
 				} else if (!validate_iris_chip_id(iris_reg)) {
-					pr_debug("wcnss: IRIS Card is present\n");
+					wcnss_log(DBG,
+						  "IRIS Card is present\n");
 					break;
 				}
 				reg &= ~WCNSS_PMU_CFG_IRIS_XO_READ;
@@ -472,13 +481,13 @@
 		    auto_detect ==  WCNSS_XO_19MHZ) {
 			clk_rf = clk_get(dev, "rf_clk");
 			if (IS_ERR(clk_rf)) {
-				pr_err("Couldn't get rf_clk\n");
+				wcnss_log(ERR, "Couldn't get rf_clk\n");
 				goto fail;
 			}
 
 			rc = clk_prepare_enable(clk_rf);
 			if (rc) {
-				pr_err("clk_rf enable failed\n");
+				wcnss_log(ERR, "clk_rf enable failed\n");
 				goto fail;
 			}
 			if (iris_xo_set)
@@ -489,7 +498,7 @@
 		    auto_detect ==  WCNSS_XO_19MHZ) {
 		clk_rf = clk_get(dev, "rf_clk");
 		if (IS_ERR(clk_rf)) {
-			pr_err("Couldn't get rf_clk\n");
+			wcnss_log(ERR, "Couldn't get rf_clk\n");
 			goto fail;
 		}
 		clk_disable_unprepare(clk_rf);
@@ -517,7 +526,7 @@
 	cfg = wcnss_get_wlan_config();
 
 	if (!cfg) {
-		pr_err("Failed to get WLAN configuration\n");
+		wcnss_log(ERR, "Failed to get WLAN configuration\n");
 		return;
 	}
 
@@ -530,7 +539,8 @@
 		if (regulators[i].state & VREG_OPTIMUM_MODE_MASK) {
 			rc = regulator_set_load(regulators[i].regulator, 0);
 			if (rc < 0) {
-				pr_err("regulator set load(%s) failed (%d)\n",
+				wcnss_log(ERR,
+					"regulator set load(%s) failed (%d)\n",
 				       regulators[i].name, rc);
 			}
 		}
@@ -553,15 +563,16 @@
 						   max_voltage);
 
 			if (rc)
-				pr_err("regulator_set_voltage(%s) failed (%d)\n",
-				       regulators[i].name, rc);
+				wcnss_log(ERR,
+				"regulator_set_voltage(%s) failed (%d)\n",
+				regulators[i].name, rc);
 		}
 
 		/* Disable regulator */
 		if (regulators[i].state & VREG_ENABLE_MASK) {
 			rc = regulator_disable(regulators[i].regulator);
 			if (rc < 0)
-				pr_err("vreg %s disable failed (%d)\n",
+				wcnss_log(ERR, "vreg %s disable failed (%d)\n",
 				       regulators[i].name, rc);
 		}
 	}
@@ -579,7 +590,7 @@
 	cfg = wcnss_get_wlan_config();
 
 	if (!cfg) {
-		pr_err("Failed to get WLAN configuration\n");
+		wcnss_log(ERR, "Failed to get WLAN configuration\n");
 		return -EINVAL;
 	}
 
@@ -608,7 +619,8 @@
 						   max_voltage);
 
 			if (rc) {
-				pr_err("regulator_set_voltage(%s) failed (%d)\n",
+				wcnss_log(ERR,
+				"regulator_set_voltage(%s) failed (%d)\n",
 				       regulators[i].name, rc);
 				goto fail;
 			}
@@ -620,7 +632,8 @@
 			rc = regulator_set_load(regulators[i].regulator,
 						voltage_level[i].uA_load);
 			if (rc < 0) {
-				pr_err("regulator set load(%s) failed (%d)\n",
+				wcnss_log(ERR,
+				"regulator set load(%s) failed (%d)\n",
 				       regulators[i].name, rc);
 				goto fail;
 			}
@@ -630,7 +643,7 @@
 		/* Enable the regulator */
 		rc = regulator_enable(regulators[i].regulator);
 		if (rc) {
-			pr_err("vreg %s enable failed (%d)\n",
+			wcnss_log(ERR, "vreg %s enable failed (%d)\n",
 			       regulators[i].name, rc);
 			goto fail;
 		}
@@ -653,7 +666,7 @@
 				cfg->iris_vlevel);
 		break;
 	default:
-		pr_err("%s invalid hardware %d\n", __func__, hw_type);
+		wcnss_log(ERR, "%s invalid hardware %d\n", __func__, hw_type);
 	}
 }
 
@@ -669,7 +682,7 @@
 				     cfg->iris_vlevel);
 		break;
 	default:
-		pr_err("%s invalid hardware %d\n", __func__, hw_type);
+		wcnss_log(ERR, "%s invalid hardware %d\n", __func__, hw_type);
 	}
 	return ret;
 }
@@ -683,7 +696,7 @@
 				ARRAY_SIZE(pronto_vregs), cfg->pronto_vlevel);
 		break;
 	default:
-		pr_err("%s invalid hardware %d\n", __func__, hw_type);
+		wcnss_log(ERR, "%s invalid hardware %d\n", __func__, hw_type);
 	}
 }
 
@@ -700,7 +713,7 @@
 				     cfg->pronto_vlevel);
 		break;
 	default:
-		pr_err("%s invalid hardware %d\n", __func__, hw_type);
+		wcnss_log(ERR, "%s invalid hardware %d\n", __func__, hw_type);
 	}
 
 	return ret;
diff --git a/drivers/soc/qcom/wcnss/wcnss_wlan.c b/drivers/soc/qcom/wcnss/wcnss_wlan.c
index c3fb30b..08bd78b 100644
--- a/drivers/soc/qcom/wcnss/wcnss_wlan.c
+++ b/drivers/soc/qcom/wcnss/wcnss_wlan.c
@@ -38,6 +38,7 @@
 #include <linux/pm_qos.h>
 #include <linux/bitops.h>
 #include <linux/cdev.h>
+#include <linux/ipc_logging.h>
 #include <soc/qcom/socinfo.h>
 
 #include <soc/qcom/subsystem_restart.h>
@@ -452,6 +453,53 @@
 	struct cdev ctrl_dev, node_dev;
 } *penv = NULL;
 
+static void *wcnss_ipc_log;
+
+#define IPC_NUM_LOG_PAGES	12
+#define wcnss_ipc_log_string(_x...) do {		\
+	if (wcnss_ipc_log)				\
+		ipc_log_string(wcnss_ipc_log, _x);	\
+	} while (0)
+
+void wcnss_log(enum wcnss_log_type type, const char *_fmt, ...)
+{
+	struct va_format vaf = {
+		.fmt = _fmt,
+	};
+	va_list args;
+
+	va_start(args, _fmt);
+	vaf.va = &args;
+	switch (type) {
+	case ERR:
+		pr_err("wcnss: %pV", &vaf);
+		wcnss_ipc_log_string("wcnss: %pV", &vaf);
+		break;
+	case WARN:
+		pr_warn("wcnss: %pV", &vaf);
+		wcnss_ipc_log_string("wcnss: %pV", &vaf);
+		break;
+	case INFO:
+		pr_info("wcnss: %pV", &vaf);
+		wcnss_ipc_log_string("wcnss: %pV", &vaf);
+		break;
+	case DBG:
+#if defined(CONFIG_DYNAMIC_DEBUG)
+		pr_debug("wcnss: %pV", &vaf);
+		wcnss_ipc_log_string("wcnss: %pV", &vaf);
+#elif defined(DEBUG)
+		pr_debug("wcnss: %pV", &vaf);
+		wcnss_ipc_log_string("wcnss: %pV", &vaf);
+#else
+		pr_devel("wcnss: %pV", &vaf);
+		wcnss_ipc_log_string("wcnss: %pV", &vaf);
+#endif
+		break;
+	}
+	va_end(args);
+}
+EXPORT_SYMBOL(wcnss_log);
+
 static ssize_t wcnss_wlan_macaddr_store(struct device *dev,
 					struct device_attribute *attr,
 					const char *buf, size_t count)
@@ -463,14 +511,14 @@
 		return -ENODEV;
 
 	if (strlen(buf) != WCNSS_USER_MAC_ADDR_LENGTH) {
-		dev_err(dev, "%s: Invalid MAC addr length\n", __func__);
+		wcnss_log(ERR, "%s: Invalid MAC addr length\n", __func__);
 		return -EINVAL;
 	}
 
 	if (sscanf(buf, MAC_ADDRESS_STR, &mac_addr[0], &mac_addr[1],
 		   &mac_addr[2], &mac_addr[3], &mac_addr[4],
 		   &mac_addr[5]) != WLAN_MAC_ADDR_SIZE) {
-		pr_err("%s: Failed to Copy MAC\n", __func__);
+		wcnss_log(ERR, "%s: Failed to Copy MAC\n", __func__);
 		return -EINVAL;
 	}
 
@@ -479,7 +527,7 @@
 		       (char *)&mac_addr[index], sizeof(char));
 	}
 
-	pr_info("%s: Write MAC Addr:" MAC_ADDRESS_STR "\n", __func__,
+	wcnss_log(INFO, "%s: Write MAC Addr:" MAC_ADDRESS_STR "\n", __func__,
 		penv->wlan_nv_mac_addr[0], penv->wlan_nv_mac_addr[1],
 		penv->wlan_nv_mac_addr[2], penv->wlan_nv_mac_addr[3],
 		penv->wlan_nv_mac_addr[4], penv->wlan_nv_mac_addr[5]);
@@ -555,19 +603,19 @@
 
 	ccu_reg = penv->riva_ccu_base + CCU_RIVA_INVALID_ADDR_OFFSET;
 	reg = readl_relaxed(ccu_reg);
-	pr_info_ratelimited("%s: CCU_CCPU_INVALID_ADDR %08x\n", __func__, reg);
+	wcnss_log(INFO, "%s: CCU_CCPU_INVALID_ADDR %08x\n", __func__, reg);
 
 	ccu_reg = penv->riva_ccu_base + CCU_RIVA_LAST_ADDR0_OFFSET;
 	reg = readl_relaxed(ccu_reg);
-	pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR0 %08x\n", __func__, reg);
+	wcnss_log(INFO, "%s: CCU_CCPU_LAST_ADDR0 %08x\n", __func__, reg);
 
 	ccu_reg = penv->riva_ccu_base + CCU_RIVA_LAST_ADDR1_OFFSET;
 	reg = readl_relaxed(ccu_reg);
-	pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR1 %08x\n", __func__, reg);
+	wcnss_log(INFO, "%s: CCU_CCPU_LAST_ADDR1 %08x\n", __func__, reg);
 
 	ccu_reg = penv->riva_ccu_base + CCU_RIVA_LAST_ADDR2_OFFSET;
 	reg = readl_relaxed(ccu_reg);
-	pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR2 %08x\n", __func__, reg);
+	wcnss_log(INFO, "%s: CCU_CCPU_LAST_ADDR2 %08x\n", __func__, reg);
 }
 EXPORT_SYMBOL(wcnss_riva_log_debug_regs);
 
@@ -586,13 +634,12 @@
 			break;
 	}
 
-	if (iter == A2XB_FIFO_COUNTER) {
-		pr_err("%s data FIFO testbus possibly stalled reg%08x\n",
-		       type, reg);
-	} else {
-		pr_err("%s data FIFO tstbus not stalled reg%08x\n",
-		       type, reg);
-	}
+	if (iter == A2XB_FIFO_COUNTER)
+		wcnss_log(ERR,
+		"%s data FIFO testbus possibly stalled reg%08x\n", type, reg);
+	else
+		wcnss_log(ERR,
+		"%s data FIFO tstbus not stalled reg%08x\n", type, reg);
 }
 
 int wcnss_get_dual_band_capability_info(struct platform_device *pdev)
@@ -627,71 +674,72 @@
 
 	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_SPARE_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("PRONTO_PMU_SPARE %08x\n", reg);
+	wcnss_log(ERR, "PRONTO_PMU_SPARE %08x\n", reg);
 
 	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_COM_CPU_CBCR_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("PRONTO_PMU_COM_CPU_CBCR %08x\n", reg);
+	wcnss_log(ERR, "PRONTO_PMU_COM_CPU_CBCR %08x\n", reg);
 
 	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_COM_AHB_CBCR_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("PRONTO_PMU_COM_AHB_CBCR %08x\n", reg);
+	wcnss_log(ERR, "PRONTO_PMU_COM_AHB_CBCR %08x\n", reg);
 
 	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_CFG_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("PRONTO_PMU_CFG %08x\n", reg);
+	wcnss_log(ERR, "PRONTO_PMU_CFG %08x\n", reg);
 
 	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_COM_CSR_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("PRONTO_PMU_COM_CSR %08x\n", reg);
+	wcnss_log(ERR, "PRONTO_PMU_COM_CSR %08x\n", reg);
 
 	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_SOFT_RESET_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("PRONTO_PMU_SOFT_RESET %08x\n", reg);
+	wcnss_log(ERR, "PRONTO_PMU_SOFT_RESET %08x\n", reg);
 
 	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_WDOG_CTL;
 	reg = readl_relaxed(reg_addr);
-	pr_err("PRONTO_PMU_WDOG_CTL %08x\n", reg);
+	wcnss_log(ERR, "PRONTO_PMU_WDOG_CTL %08x\n", reg);
 
 	reg_addr = penv->pronto_saw2_base + PRONTO_SAW2_SPM_STS_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("PRONTO_SAW2_SPM_STS %08x\n", reg);
+	wcnss_log(ERR, "PRONTO_SAW2_SPM_STS %08x\n", reg);
 
 	reg_addr = penv->pronto_saw2_base + PRONTO_SAW2_SPM_CTL;
 	reg = readl_relaxed(reg_addr);
-	pr_err("PRONTO_SAW2_SPM_CTL %08x\n", reg);
+	wcnss_log(ERR, "PRONTO_SAW2_SPM_CTL %08x\n", reg);
 
 	if (penv->is_a2xb_split_reg) {
 		reg_addr = penv->msm_wcnss_base +
 			   PMU_A2XB_CFG_HSPLIT_RESP_LIMIT_OFFSET;
 		reg = readl_relaxed(reg_addr);
-		pr_err("PMU_A2XB_CFG_HSPLIT_RESP_LIMIT %08x\n", reg);
+		wcnss_log(ERR, "PMU_A2XB_CFG_HSPLIT_RESP_LIMIT %08x\n", reg);
 	}
 
 	reg_addr = penv->pronto_saw2_base + PRONTO_SAW2_SAW2_VERSION;
 	reg = readl_relaxed(reg_addr);
-	pr_err("PRONTO_SAW2_SAW2_VERSION %08x\n", reg);
+	wcnss_log(ERR, "PRONTO_SAW2_SAW2_VERSION %08x\n", reg);
 	reg >>= PRONTO_SAW2_MAJOR_VER_OFFSET;
 
 	reg_addr = penv->msm_wcnss_base  + PRONTO_PMU_CCPU_BOOT_REMAP_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("PRONTO_PMU_CCPU_BOOT_REMAP %08x\n", reg);
+	wcnss_log(ERR, "PRONTO_PMU_CCPU_BOOT_REMAP %08x\n", reg);
 
 	reg_addr = penv->pronto_pll_base + PRONTO_PLL_STATUS_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("PRONTO_PLL_STATUS %08x\n", reg);
+	wcnss_log(ERR, "PRONTO_PLL_STATUS %08x\n", reg);
 
 	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_CPU_AHB_CMD_RCGR_OFFSET;
 	reg4 = readl_relaxed(reg_addr);
-	pr_err("PMU_CPU_CMD_RCGR %08x\n", reg4);
+	wcnss_log(ERR, "PMU_CPU_CMD_RCGR %08x\n", reg4);
 
 	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_COM_GDSCR_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("PRONTO_PMU_COM_GDSCR %08x\n", reg);
+	wcnss_log(ERR, "PRONTO_PMU_COM_GDSCR %08x\n", reg);
 	reg >>= 31;
 
 	if (!reg) {
-		pr_err("Cannot log, Pronto common SS is power collapsed\n");
+		wcnss_log(ERR,
+		"Cannot log, Pronto common SS is power collapsed\n");
 		return;
 	}
 	reg &= ~(PRONTO_PMU_COM_GDSCR_SW_COLLAPSE
@@ -705,47 +753,47 @@
 
 	reg_addr = penv->pronto_a2xb_base + A2XB_CFG_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("A2XB_CFG_OFFSET %08x\n", reg);
+	wcnss_log(ERR, "A2XB_CFG_OFFSET %08x\n", reg);
 
 	reg_addr = penv->pronto_a2xb_base + A2XB_INT_SRC_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("A2XB_INT_SRC_OFFSET %08x\n", reg);
+	wcnss_log(ERR, "A2XB_INT_SRC_OFFSET %08x\n", reg);
 
 	reg_addr = penv->pronto_a2xb_base + A2XB_ERR_INFO_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("A2XB_ERR_INFO_OFFSET %08x\n", reg);
+	wcnss_log(ERR, "A2XB_ERR_INFO_OFFSET %08x\n", reg);
 
 	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_INVALID_ADDR_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("CCU_CCPU_INVALID_ADDR %08x\n", reg);
+	wcnss_log(ERR, "CCU_CCPU_INVALID_ADDR %08x\n", reg);
 
 	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_LAST_ADDR0_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("CCU_CCPU_LAST_ADDR0 %08x\n", reg);
+	wcnss_log(ERR, "CCU_CCPU_LAST_ADDR0 %08x\n", reg);
 
 	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_LAST_ADDR1_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("CCU_CCPU_LAST_ADDR1 %08x\n", reg);
+	wcnss_log(ERR, "CCU_CCPU_LAST_ADDR1 %08x\n", reg);
 
 	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_LAST_ADDR2_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("CCU_CCPU_LAST_ADDR2 %08x\n", reg);
+	wcnss_log(ERR, "CCU_CCPU_LAST_ADDR2 %08x\n", reg);
 
 	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_AOWBR_ERR_ADDR_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("CCU_PRONTO_AOWBR_ERR_ADDR_OFFSET %08x\n", reg);
+	wcnss_log(ERR, "CCU_PRONTO_AOWBR_ERR_ADDR_OFFSET %08x\n", reg);
 
 	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_AOWBR_TIMEOUT_REG_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("CCU_PRONTO_AOWBR_TIMEOUT_REG_OFFSET %08x\n", reg);
+	wcnss_log(ERR, "CCU_PRONTO_AOWBR_TIMEOUT_REG_OFFSET %08x\n", reg);
 
 	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_AOWBR_ERR_TIMEOUT_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("CCU_PRONTO_AOWBR_ERR_TIMEOUT_OFFSET %08x\n", reg);
+	wcnss_log(ERR, "CCU_PRONTO_AOWBR_ERR_TIMEOUT_OFFSET %08x\n", reg);
 
 	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_A2AB_ERR_ADDR_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("CCU_PRONTO_A2AB_ERR_ADDR_OFFSET %08x\n", reg);
+	wcnss_log(ERR, "CCU_PRONTO_A2AB_ERR_ADDR_OFFSET %08x\n", reg);
 
 	tst_addr = penv->pronto_a2xb_base + A2XB_TSTBUS_OFFSET;
 	tst_ctrl_addr = penv->pronto_a2xb_base + A2XB_TSTBUS_CTRL_OFFSET;
@@ -760,7 +808,7 @@
 					       A2XB_READ_FIFO_FILL_MASK,
 					       "Read");
 	} else {
-		pr_err("Read data FIFO testbus %08x\n", reg);
+		wcnss_log(ERR, "Read data FIFO testbus %08x\n", reg);
 	}
 	/*  command FIFO */
 	reg = 0;
@@ -771,7 +819,7 @@
 		wcnss_pronto_is_a2xb_bus_stall(tst_addr,
 					       A2XB_CMD_FIFO_FILL_MASK, "Cmd");
 	} else {
-		pr_err("Command FIFO testbus %08x\n", reg);
+		wcnss_log(ERR, "Command FIFO testbus %08x\n", reg);
 	}
 
 	/*  write data FIFO */
@@ -784,7 +832,7 @@
 					       A2XB_WRITE_FIFO_FILL_MASK,
 					       "Write");
 	} else {
-		pr_err("Write data FIFO testbus %08x\n", reg);
+		wcnss_log(ERR, "Write data FIFO testbus %08x\n", reg);
 	}
 
 	/*   AXIM SEL CFG0 */
@@ -793,7 +841,7 @@
 				WCNSS_TSTBUS_CTRL_AXIM_CFG0;
 	writel_relaxed(reg, tst_ctrl_addr);
 	reg = readl_relaxed(tst_addr);
-	pr_err("AXIM SEL CFG0 testbus %08x\n", reg);
+	wcnss_log(ERR, "AXIM SEL CFG0 testbus %08x\n", reg);
 
 	/*   AXIM SEL CFG1 */
 	reg = 0;
@@ -801,7 +849,7 @@
 				WCNSS_TSTBUS_CTRL_AXIM_CFG1;
 	writel_relaxed(reg, tst_ctrl_addr);
 	reg = readl_relaxed(tst_addr);
-	pr_err("AXIM SEL CFG1 testbus %08x\n", reg);
+	wcnss_log(ERR, "AXIM SEL CFG1 testbus %08x\n", reg);
 
 	/*   CTRL SEL CFG0 */
 	reg = 0;
@@ -809,7 +857,7 @@
 		WCNSS_TSTBUS_CTRL_CTRL_CFG0;
 	writel_relaxed(reg, tst_ctrl_addr);
 	reg = readl_relaxed(tst_addr);
-	pr_err("CTRL SEL CFG0 testbus %08x\n", reg);
+	wcnss_log(ERR, "CTRL SEL CFG0 testbus %08x\n", reg);
 
 	/*   CTRL SEL CFG1 */
 	reg = 0;
@@ -817,7 +865,7 @@
 		WCNSS_TSTBUS_CTRL_CTRL_CFG1;
 	writel_relaxed(reg, tst_ctrl_addr);
 	reg = readl_relaxed(tst_addr);
-	pr_err("CTRL SEL CFG1 testbus %08x\n", reg);
+	wcnss_log(ERR, "CTRL SEL CFG1 testbus %08x\n", reg);
 
 	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_WLAN_BCR_OFFSET;
 	reg = readl_relaxed(reg_addr);
@@ -827,7 +875,7 @@
 
 	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_WLAN_AHB_CBCR_OFFSET;
 	reg3 = readl_relaxed(reg_addr);
-	pr_err("PMU_WLAN_AHB_CBCR %08x\n", reg3);
+	wcnss_log(ERR, "PMU_WLAN_AHB_CBCR %08x\n", reg3);
 
 	msleep(50);
 
@@ -836,76 +884,76 @@
 	    (!(reg4 & PRONTO_PMU_CPU_AHB_CMD_RCGR_ROOT_EN)) ||
 	    (reg3 & PRONTO_PMU_WLAN_AHB_CBCR_CLK_OFF) ||
 	    (!(reg3 & PRONTO_PMU_WLAN_AHB_CBCR_CLK_EN))) {
-		pr_err("Cannot log, wlan domain is power collapsed\n");
+		wcnss_log(ERR, "Cannot log, wlan domain is power collapsed\n");
 		return;
 	}
 
 	reg = readl_relaxed(penv->wlan_tx_phy_aborts);
-	pr_err("WLAN_TX_PHY_ABORTS %08x\n", reg);
+	wcnss_log(ERR, "WLAN_TX_PHY_ABORTS %08x\n", reg);
 
 	reg_addr = penv->pronto_mcu_base + MCU_APB2PHY_STATUS_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("MCU_APB2PHY_STATUS %08x\n", reg);
+	wcnss_log(ERR, "MCU_APB2PHY_STATUS %08x\n", reg);
 
 	reg_addr = penv->pronto_mcu_base + MCU_CBR_CCAHB_ERR_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("MCU_CBR_CCAHB_ERR %08x\n", reg);
+	wcnss_log(ERR, "MCU_CBR_CCAHB_ERR %08x\n", reg);
 
 	reg_addr = penv->pronto_mcu_base + MCU_CBR_CAHB_ERR_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("MCU_CBR_CAHB_ERR %08x\n", reg);
+	wcnss_log(ERR, "MCU_CBR_CAHB_ERR %08x\n", reg);
 
 	reg_addr = penv->pronto_mcu_base + MCU_CBR_CCAHB_TIMEOUT_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("MCU_CBR_CCAHB_TIMEOUT %08x\n", reg);
+	wcnss_log(ERR, "MCU_CBR_CCAHB_TIMEOUT %08x\n", reg);
 
 	reg_addr = penv->pronto_mcu_base + MCU_CBR_CAHB_TIMEOUT_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("MCU_CBR_CAHB_TIMEOUT %08x\n", reg);
+	wcnss_log(ERR, "MCU_CBR_CAHB_TIMEOUT %08x\n", reg);
 
 	reg_addr = penv->pronto_mcu_base + MCU_DBR_CDAHB_ERR_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("MCU_DBR_CDAHB_ERR %08x\n", reg);
+	wcnss_log(ERR, "MCU_DBR_CDAHB_ERR %08x\n", reg);
 
 	reg_addr = penv->pronto_mcu_base + MCU_DBR_DAHB_ERR_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("MCU_DBR_DAHB_ERR %08x\n", reg);
+	wcnss_log(ERR, "MCU_DBR_DAHB_ERR %08x\n", reg);
 
 	reg_addr = penv->pronto_mcu_base + MCU_DBR_CDAHB_TIMEOUT_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("MCU_DBR_CDAHB_TIMEOUT %08x\n", reg);
+	wcnss_log(ERR, "MCU_DBR_CDAHB_TIMEOUT %08x\n", reg);
 
 	reg_addr = penv->pronto_mcu_base + MCU_DBR_DAHB_TIMEOUT_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("MCU_DBR_DAHB_TIMEOUT %08x\n", reg);
+	wcnss_log(ERR, "MCU_DBR_DAHB_TIMEOUT %08x\n", reg);
 
 	reg_addr = penv->pronto_mcu_base + MCU_FDBR_CDAHB_ERR_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("MCU_FDBR_CDAHB_ERR %08x\n", reg);
+	wcnss_log(ERR, "MCU_FDBR_CDAHB_ERR %08x\n", reg);
 
 	reg_addr = penv->pronto_mcu_base + MCU_FDBR_FDAHB_ERR_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("MCU_FDBR_FDAHB_ERR %08x\n", reg);
+	wcnss_log(ERR, "MCU_FDBR_FDAHB_ERR %08x\n", reg);
 
 	reg_addr = penv->pronto_mcu_base + MCU_FDBR_CDAHB_TIMEOUT_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("MCU_FDBR_CDAHB_TIMEOUT %08x\n", reg);
+	wcnss_log(ERR, "MCU_FDBR_CDAHB_TIMEOUT %08x\n", reg);
 
 	reg_addr = penv->pronto_mcu_base + MCU_FDBR_FDAHB_TIMEOUT_OFFSET;
 	reg = readl_relaxed(reg_addr);
-	pr_err("MCU_FDBR_FDAHB_TIMEOUT %08x\n", reg);
+	wcnss_log(ERR, "MCU_FDBR_FDAHB_TIMEOUT %08x\n", reg);
 
 	reg = readl_relaxed(penv->wlan_brdg_err_source);
-	pr_err("WLAN_BRDG_ERR_SOURCE %08x\n", reg);
+	wcnss_log(ERR, "WLAN_BRDG_ERR_SOURCE %08x\n", reg);
 
 	reg = readl_relaxed(penv->wlan_tx_status);
-	pr_err("WLAN_TXP_STATUS %08x\n", reg);
+	wcnss_log(ERR, "WLAN_TXP_STATUS %08x\n", reg);
 
 	reg = readl_relaxed(penv->alarms_txctl);
-	pr_err("ALARMS_TXCTL %08x\n", reg);
+	wcnss_log(ERR, "ALARMS_TXCTL %08x\n", reg);
 
 	reg = readl_relaxed(penv->alarms_tactl);
-	pr_err("ALARMS_TACTL %08x\n", reg);
+	wcnss_log(ERR, "ALARMS_TACTL %08x\n", reg);
 }
 EXPORT_SYMBOL(wcnss_pronto_log_debug_regs);
 
@@ -930,13 +978,13 @@
 	if (!IS_ERR_OR_NULL(pin_state)) {
 		ret = pinctrl_select_state(penv->pinctrl, pin_state);
 		if (ret < 0) {
-			pr_err("%s: can not set gpio pins err: %d\n",
+			wcnss_log(ERR, "%s: can not set gpio pins err: %d\n",
 			       __func__, ret);
 			goto pinctrl_set_err;
 		}
 
 	} else {
-		pr_err("%s: invalid gpio pinstate err: %lu\n",
+		wcnss_log(ERR, "%s: invalid gpio pinstate err: %lu\n",
 		       __func__, PTR_ERR(pin_state));
 		goto pinctrl_set_err;
 	}
@@ -945,7 +993,7 @@
 		ret = gpio_request_one(penv->gpios[i],
 				       GPIOF_DIR_IN, NULL);
 		if (ret) {
-			pr_err("%s: request failed for gpio:%d\n",
+			wcnss_log(ERR, "%s: request failed for gpio:%d\n",
 			       __func__, penv->gpios[i]);
 			i--;
 			goto gpio_req_err;
@@ -956,7 +1004,7 @@
 		ret = gpio_request_one(penv->gpios[i],
 				       GPIOF_OUT_INIT_LOW, NULL);
 		if (ret) {
-			pr_err("%s: request failed for gpio:%d\n",
+			wcnss_log(ERR, "%s: request failed for gpio:%d\n",
 			       __func__, penv->gpios[i]);
 			i--;
 			goto gpio_req_err;
@@ -1072,12 +1120,13 @@
 		0x04, 0x05, 0x11, 0x1e, 0x40, 0x48,
 		0x49, 0x4b, 0x00, 0x01, 0x4d};
 
-	pr_info("%s: IRIS Registers [address] : value\n", __func__);
+	wcnss_log(INFO, "%s: IRIS Registers [address] : value\n", __func__);
 
 	for (i = 0; i < ARRAY_SIZE(regs_array); i++) {
 		reg_val = wcnss_rf_read_reg(regs_array[i]);
 
-		pr_info("[0x%08x] : 0x%08x\n", regs_array[i], reg_val);
+		wcnss_log(INFO, "IRIS Reg Addr: [0x%08x] : Reg val: 0x%08x\n",
+			  regs_array[i], reg_val);
 	}
 }
 
@@ -1111,24 +1160,25 @@
 
 	if (!IS_ERR(measure) && !IS_ERR(wcnss_debug_mux)) {
 		if (clk_set_parent(measure, wcnss_debug_mux)) {
-			pr_err("Setting measure clk parent failed\n");
+			wcnss_log(ERR, "Setting measure clk parent failed\n");
 			return;
 		}
 
 		if (clk_prepare_enable(measure)) {
-			pr_err("measure clk enable failed\n");
+			wcnss_log(ERR, "measure clk enable failed\n");
 			return;
 		}
 
 		clk_rate = clk_get_rate(measure);
-		pr_debug("wcnss: clock frequency is: %luHz\n", clk_rate);
+		wcnss_log(DBG, "clock frequency is: %luHz\n", clk_rate);
 
 		if (clk_rate) {
 			wcnss_pronto_log_debug_regs();
 			if (wcnss_get_mux_control())
 				wcnss_log_iris_regs();
 		} else {
-			pr_err("clock frequency is zero, cannot access PMU or other registers\n");
+			wcnss_log(ERR, "clock frequency is zero, cannot");
+			wcnss_log(ERR, " access PMU or other registers\n");
 			wcnss_log_iris_regs();
 		}
 
@@ -1153,8 +1203,8 @@
 			wmb();
 			__raw_writel(1 << 16, penv->fiq_reg);
 		} else {
-			pr_info("%s: Block FIQ during power up sequence\n",
-				__func__);
+			wcnss_log(INFO,
+			"%s: Block FIQ during power up sequence\n", __func__);
 		}
 	} else {
 		wcnss_riva_log_debug_regs();
@@ -1202,20 +1252,20 @@
 
 static void wcnss_pm_qos_add_request(void)
 {
-	pr_info("%s: add request\n", __func__);
+	wcnss_log(INFO, "%s: add request\n", __func__);
 	pm_qos_add_request(&penv->wcnss_pm_qos_request, PM_QOS_CPU_DMA_LATENCY,
 			   PM_QOS_DEFAULT_VALUE);
 }
 
 static void wcnss_pm_qos_remove_request(void)
 {
-	pr_info("%s: remove request\n", __func__);
+	wcnss_log(INFO, "%s: remove request\n", __func__);
 	pm_qos_remove_request(&penv->wcnss_pm_qos_request);
 }
 
 void wcnss_pm_qos_update_request(int val)
 {
-	pr_info("%s: update request %d\n", __func__, val);
+	wcnss_log(INFO, "%s: update request %d\n", __func__, val);
 	pm_qos_update_request(&penv->wcnss_pm_qos_request, val);
 }
 
@@ -1248,21 +1298,21 @@
 	int len = 0;
 
 	if (penv != data) {
-		pr_err("wcnss: invalid env pointer in smd callback\n");
+		wcnss_log(ERR, "invalid env pointer in smd callback\n");
 		return;
 	}
 	switch (event) {
 	case SMD_EVENT_DATA:
 		len = smd_read_avail(penv->smd_ch);
 		if (len < 0) {
-			pr_err("wcnss: failed to read from smd %d\n", len);
+			wcnss_log(ERR, "failed to read from smd %d\n", len);
 			return;
 		}
 		schedule_work(&penv->wcnssctrl_rx_work);
 		break;
 
 	case SMD_EVENT_OPEN:
-		pr_debug("wcnss: opening WCNSS SMD channel :%s",
+		wcnss_log(DBG, "opening WCNSS SMD channel :%s",
 			 WCNSS_CTRL_CHANNEL);
 		schedule_work(&penv->wcnssctrl_version_work);
 		schedule_work(&penv->wcnss_pm_config_work);
@@ -1273,7 +1323,7 @@
 		break;
 
 	case SMD_EVENT_CLOSE:
-		pr_debug("wcnss: closing WCNSS SMD channel :%s",
+		wcnss_log(DBG, "closing WCNSS SMD channel :%s",
 			 WCNSS_CTRL_CHANNEL);
 		penv->nv_downloaded = 0;
 		penv->is_cbc_done = 0;
@@ -1290,7 +1340,7 @@
 	struct pinctrl_state *pin_state;
 	int ret;
 
-	pr_debug("%s: Set GPIO state : %d\n", __func__, active);
+	wcnss_log(DBG, "%s: Set GPIO state : %d\n", __func__, active);
 
 	pin_state = active ? penv->wcnss_5wire_active
 			: penv->wcnss_5wire_suspend;
@@ -1298,13 +1348,13 @@
 	if (!IS_ERR_OR_NULL(pin_state)) {
 		ret = pinctrl_select_state(penv->pinctrl, pin_state);
 		if (ret < 0) {
-			pr_err("%s: can not set %s pins\n", __func__,
+			wcnss_log(ERR, "%s: can not set %s pins\n", __func__,
 			       active ? WCNSS_PINCTRL_STATE_DEFAULT
 			       : WCNSS_PINCTRL_STATE_SLEEP);
 			return ret;
 		}
 	} else {
-		pr_err("%s: invalid '%s' pinstate\n", __func__,
+		wcnss_log(ERR, "%s: invalid '%s' pinstate\n", __func__,
 		       active ? WCNSS_PINCTRL_STATE_DEFAULT
 		       : WCNSS_PINCTRL_STATE_SLEEP);
 		return PTR_ERR(pin_state);
@@ -1323,7 +1373,7 @@
 	penv->pinctrl = devm_pinctrl_get(&pdev->dev);
 
 	if (IS_ERR_OR_NULL(penv->pinctrl)) {
-		pr_err("%s: failed to get pinctrl\n", __func__);
+		wcnss_log(ERR, "%s: failed to get pinctrl\n", __func__);
 		return PTR_ERR(penv->pinctrl);
 	}
 
@@ -1332,7 +1382,7 @@
 			WCNSS_PINCTRL_STATE_DEFAULT);
 
 	if (IS_ERR_OR_NULL(penv->wcnss_5wire_active)) {
-		pr_err("%s: can not get default pinstate\n", __func__);
+		wcnss_log(ERR, "%s: can not get default pinstate\n", __func__);
 		return PTR_ERR(penv->wcnss_5wire_active);
 	}
 
@@ -1341,19 +1391,20 @@
 			WCNSS_PINCTRL_STATE_SLEEP);
 
 	if (IS_ERR_OR_NULL(penv->wcnss_5wire_suspend)) {
-		pr_warn("%s: can not get sleep pinstate\n", __func__);
+		wcnss_log(WARN, "%s: can not get sleep pinstate\n", __func__);
 		return PTR_ERR(penv->wcnss_5wire_suspend);
 	}
 
 	penv->wcnss_gpio_active = pinctrl_lookup_state(penv->pinctrl,
 					WCNSS_PINCTRL_GPIO_STATE_DEFAULT);
 	if (IS_ERR_OR_NULL(penv->wcnss_gpio_active))
-		pr_warn("%s: can not get gpio default pinstate\n", __func__);
+		wcnss_log(WARN, "%s: can not get gpio default pinstate\n",
+			  __func__);
 
 	for (i = 0; i < WCNSS_WLAN_MAX_GPIO; i++) {
 		penv->gpios[i] = of_get_gpio(node, i);
 		if (penv->gpios[i] < 0)
-			pr_warn("%s: Fail to get 5wire gpio: %d\n",
+			wcnss_log(WARN, "%s: Fail to get 5wire gpio: %d\n",
 				__func__, i);
 	}
 
@@ -1370,13 +1421,13 @@
 	/* Use Pinctrl to configure 5 wire GPIOs */
 	rc = wcnss_pinctrl_init(pdev);
 	if (rc) {
-		pr_err("%s: failed to get pin resources\n", __func__);
+		wcnss_log(ERR, "%s: failed to get pin resources\n", __func__);
 		penv->pinctrl = NULL;
 		goto gpio_probe;
 	} else {
 		rc = wcnss_pinctrl_set_state(true);
 		if (rc)
-			pr_err("%s: failed to set pin state\n",
+			wcnss_log(ERR, "%s: failed to set pin state\n",
 			       __func__);
 		penv->use_pinctrl = true;
 		return rc;
@@ -1389,7 +1440,7 @@
 		if (enable) {
 			rc = gpio_request(gpio, "wcnss_wlan");
 			if (rc) {
-				pr_err("WCNSS gpio_request %d err %d\n",
+				wcnss_log(ERR, "WCNSS gpio_request %d err %d\n",
 				       gpio, rc);
 				goto fail;
 			}
@@ -1418,7 +1469,8 @@
 		if (enable) {
 			rc = gpio_request(i, gpios_5wire->name);
 			if (rc) {
-				pr_err("WCNSS gpio_request %d err %d\n", i, rc);
+				wcnss_log(ERR,
+					  "gpio_request %d err %d\n", i, rc);
 				goto fail;
 			}
 		} else {
@@ -1442,7 +1494,7 @@
 
 	penv->smd_channel_ready = 1;
 
-	pr_info("%s: SMD ctrl channel up\n", __func__);
+	wcnss_log(INFO, "%s: SMD ctrl channel up\n", __func__);
 	return 0;
 }
 
@@ -1452,7 +1504,7 @@
 	if (penv)
 		penv->smd_channel_ready = 0;
 
-	pr_info("%s: SMD ctrl channel down\n", __func__);
+	wcnss_log(INFO, "%s: SMD ctrl channel down\n", __func__);
 
 	return 0;
 }
@@ -1487,7 +1539,7 @@
 				     &penv->smd_ch, penv,
 				     wcnss_smd_notify_event);
 	if (ret < 0) {
-		pr_err("wcnss: cannot open the smd command channel %s: %d\n",
+		wcnss_log(ERR, "cannot open the smd command channel %s: %d\n",
 		       WCNSS_CTRL_CHANNEL, ret);
 		return -ENODEV;
 	}
@@ -1611,14 +1663,15 @@
 {
 	if (penv && dev && (dev == &penv->pdev->dev) && pm_ops) {
 		if (!penv->pm_ops) {
-			pr_err("%s: pm_ops is already unregistered.\n",
+			wcnss_log(ERR, "%s: pm_ops is already unregistered.\n",
 			       __func__);
 			return;
 		}
 
 		if (pm_ops->suspend != penv->pm_ops->suspend ||
 		    pm_ops->resume != penv->pm_ops->resume)
-			pr_err("PM APIs dont match with registered APIs\n");
+			wcnss_log(ERR,
+			"PM APIs dont match with registered APIs\n");
 		penv->pm_ops = NULL;
 	}
 }
@@ -1637,7 +1690,7 @@
 {
 	if (penv && tm_notify) {
 		if (tm_notify != penv->tm_notify)
-			pr_err("tm_notify doesn't match registered\n");
+			wcnss_log(ERR, "tm_notify doesn't match registered\n");
 		penv->tm_notify = NULL;
 	}
 }
@@ -1647,7 +1700,7 @@
 {
 	if (penv) {
 		penv->serial_number = socinfo_get_serial_number();
-		pr_info("%s: Device serial number: %u\n",
+		wcnss_log(INFO, "%s: Device serial number: %u\n",
 			__func__, penv->serial_number);
 		return penv->serial_number;
 	}
@@ -1662,7 +1715,7 @@
 		return -ENODEV;
 
 	memcpy(mac_addr, penv->wlan_nv_mac_addr, WLAN_MAC_ADDR_SIZE);
-	pr_debug("%s: Get MAC Addr:" MAC_ADDRESS_STR "\n", __func__,
+	wcnss_log(DBG, "%s: Get MAC Addr:" MAC_ADDRESS_STR "\n", __func__,
 		 penv->wlan_nv_mac_addr[0], penv->wlan_nv_mac_addr[1],
 		 penv->wlan_nv_mac_addr[2], penv->wlan_nv_mac_addr[3],
 		 penv->wlan_nv_mac_addr[4], penv->wlan_nv_mac_addr[5]);
@@ -1682,7 +1735,7 @@
 		return ret;
 
 	if (enable_wcnss_suspend_notify)
-		pr_debug("Suspend notification activated for wcnss\n");
+		wcnss_log(DBG, "Suspend notification activated for wcnss\n");
 
 	return 0;
 }
@@ -1875,12 +1928,12 @@
 
 	ret = smd_write_avail(penv->smd_ch);
 	if (ret < len) {
-		pr_err("wcnss: no space available for smd frame\n");
+		wcnss_log(ERR, "no space available for smd frame\n");
 		return -ENOSPC;
 	}
 	ret = smd_write(penv->smd_ch, data, len);
 	if (ret < len) {
-		pr_err("wcnss: failed to write Command %d", len);
+		wcnss_log(ERR, "failed to write Command %d", len);
 		ret = -ENODEV;
 	}
 	return ret;
@@ -1892,19 +1945,19 @@
 	struct qpnp_vadc_result adc_result;
 
 	if (!penv->vadc_dev) {
-		pr_err("wcnss: not setting up vadc\n");
+		wcnss_log(ERR, "not setting up vadc\n");
 		return rc;
 	}
 
 	rc = qpnp_vadc_read(penv->vadc_dev, VBAT_SNS, &adc_result);
 	if (rc) {
-		pr_err("error reading adc channel = %d, rc = %d\n",
+		wcnss_log(ERR, "error reading adc channel = %d, rc = %d\n",
 		       VBAT_SNS, rc);
 		return rc;
 	}
 
-	pr_info("Battery mvolts phy=%lld meas=0x%llx\n", adc_result.physical,
-		adc_result.measurement);
+	wcnss_log(INFO, "Battery mvolts phy=%lld meas=0x%llx\n",
+		  adc_result.physical, adc_result.measurement);
 	*result_uv = (int)adc_result.physical;
 
 	return 0;
@@ -1918,7 +1971,7 @@
 	cancel_delayed_work_sync(&penv->vbatt_work);
 
 	if (state == ADC_TM_LOW_STATE) {
-		pr_debug("wcnss: low voltage notification triggered\n");
+		wcnss_log(DBG, "low voltage notification triggered\n");
 		penv->vbat_monitor_params.state_request =
 			ADC_TM_HIGH_THR_ENABLE;
 		penv->vbat_monitor_params.high_thr = WCNSS_VBATT_THRESHOLD +
@@ -1930,14 +1983,14 @@
 		penv->vbat_monitor_params.low_thr = WCNSS_VBATT_THRESHOLD -
 		WCNSS_VBATT_GUARD;
 		penv->vbat_monitor_params.high_thr = 0;
-		pr_debug("wcnss: high voltage notification triggered\n");
+		wcnss_log(DBG, "high voltage notification triggered\n");
 	} else {
-		pr_debug("wcnss: unknown voltage notification state: %d\n",
+		wcnss_log(DBG, "unknown voltage notification state: %d\n",
 			 state);
 		mutex_unlock(&penv->vbat_monitor_mutex);
 		return;
 	}
-	pr_debug("wcnss: set low thr to %d and high to %d\n",
+	wcnss_log(DBG, "set low thr to %d and high to %d\n",
 		 penv->vbat_monitor_params.low_thr,
 		 penv->vbat_monitor_params.high_thr);
 
@@ -1945,7 +1998,7 @@
 					 &penv->vbat_monitor_params);
 
 	if (rc)
-		pr_err("%s: tm setup failed: %d\n", __func__, rc);
+		wcnss_log(ERR, "%s: tm setup failed: %d\n", __func__, rc);
 	else
 		schedule_delayed_work(&penv->vbatt_work,
 				      msecs_to_jiffies(2000));
@@ -1958,7 +2011,7 @@
 	int rc = -1;
 
 	if (!penv->adc_tm_dev) {
-		pr_err("wcnss: not setting up vbatt\n");
+		wcnss_log(ERR, "not setting up vbatt\n");
 		return rc;
 	}
 	penv->vbat_monitor_params.low_thr = WCNSS_VBATT_THRESHOLD;
@@ -1973,14 +2026,14 @@
 	penv->vbat_monitor_params.btm_ctx = (void *)penv;
 	penv->vbat_monitor_params.timer_interval = ADC_MEAS1_INTERVAL_1S;
 	penv->vbat_monitor_params.threshold_notification = &wcnss_notify_vbat;
-	pr_debug("wcnss: set low thr to %d and high to %d\n",
+	wcnss_log(DBG, "set low thr to %d and high to %d\n",
 		 penv->vbat_monitor_params.low_thr,
 		 penv->vbat_monitor_params.high_thr);
 
 	rc = qpnp_adc_tm_channel_measure(penv->adc_tm_dev,
 					 &penv->vbat_monitor_params);
 	if (rc)
-		pr_err("%s: tm setup failed: %d\n", __func__, rc);
+		wcnss_log(ERR, "%s: tm setup failed: %d\n", __func__, rc);
 
 	return rc;
 }
@@ -1997,12 +2050,12 @@
 	mutex_lock(&penv->vbat_monitor_mutex);
 	vbatt_msg.vbatt.curr_volt = penv->wlan_config.vbatt;
 	mutex_unlock(&penv->vbat_monitor_mutex);
-	pr_debug("wcnss: send curr_volt: %d to FW\n",
+	wcnss_log(DBG, "send curr_volt: %d to FW\n",
 		 vbatt_msg.vbatt.curr_volt);
 
 	ret = wcnss_smd_tx(&vbatt_msg, vbatt_msg.hdr.msg_len);
 	if (ret < 0)
-		pr_err("wcnss: smd tx failed\n");
+		wcnss_log(ERR, "smd tx failed\n");
 }
 
 static void wcnss_update_vbatt(struct work_struct *work)
@@ -2020,13 +2073,13 @@
 	     penv->fw_vbatt_state == WCNSS_CONFIG_UNSPECIFIED)) {
 		vbatt_msg.vbatt.curr_volt = WCNSS_VBATT_HIGH;
 		penv->fw_vbatt_state = WCNSS_VBATT_HIGH;
-		pr_debug("wcnss: send HIGH BATT to FW\n");
+		wcnss_log(DBG, "send HIGH BATT to FW\n");
 	} else if (!penv->vbat_monitor_params.low_thr &&
 		   (penv->fw_vbatt_state == WCNSS_VBATT_HIGH ||
 		    penv->fw_vbatt_state == WCNSS_CONFIG_UNSPECIFIED)){
 		vbatt_msg.vbatt.curr_volt = WCNSS_VBATT_LOW;
 		penv->fw_vbatt_state = WCNSS_VBATT_LOW;
-		pr_debug("wcnss: send LOW BATT to FW\n");
+		wcnss_log(DBG, "send LOW BATT to FW\n");
 	} else {
 		mutex_unlock(&penv->vbat_monitor_mutex);
 		return;
@@ -2034,7 +2087,7 @@
 	mutex_unlock(&penv->vbat_monitor_mutex);
 	ret = wcnss_smd_tx(&vbatt_msg, vbatt_msg.hdr.msg_len);
 	if (ret < 0)
-		pr_err("wcnss: smd tx failed\n");
+		wcnss_log(ERR, "smd tx failed\n");
 }
 
 static unsigned char wcnss_fw_status(void)
@@ -2046,13 +2099,13 @@
 
 	len = smd_read_avail(penv->smd_ch);
 	if (len < 1) {
-		pr_err("%s: invalid firmware status", __func__);
+		wcnss_log(ERR, "%s: invalid firmware status", __func__);
 		return fw_status;
 	}
 
 	rc = smd_read(penv->smd_ch, &fw_status, 1);
 	if (rc < 0) {
-		pr_err("%s: incomplete data read from smd\n", __func__);
+		wcnss_log(ERR, "%s: incomplete data read from smd\n", __func__);
 		return fw_status;
 	}
 	return fw_status;
@@ -2075,7 +2128,7 @@
 
 	rc = wcnss_smd_tx(msg, rsphdr->msg_len);
 	if (rc < 0)
-		pr_err("wcnss: smd tx failed\n");
+		wcnss_log(ERR, "smd tx failed\n");
 
 	kfree(msg);
 }
@@ -2088,7 +2141,7 @@
 	unsigned char fw_status = WCNSS_RESP_FAIL;
 
 	if (len < sizeof(struct cal_data_params)) {
-		pr_err("wcnss: incomplete cal header length\n");
+		wcnss_log(ERR, "incomplete cal header length\n");
 		return;
 	}
 
@@ -2096,18 +2149,18 @@
 	rc = smd_read(penv->smd_ch, (unsigned char *)&calhdr,
 		      sizeof(struct cal_data_params));
 	if (rc < sizeof(struct cal_data_params)) {
-		pr_err("wcnss: incomplete cal header read from smd\n");
+		wcnss_log(ERR, "incomplete cal header read from smd\n");
 		mutex_unlock(&penv->dev_lock);
 		return;
 	}
 
 	if (penv->fw_cal_exp_frag != calhdr.frag_number) {
-		pr_err("wcnss: Invalid frgament");
+		wcnss_log(ERR, "Invalid frgament");
 		goto unlock_exit;
 	}
 
 	if (calhdr.frag_size > WCNSS_MAX_FRAME_SIZE) {
-		pr_err("wcnss: Invalid fragment size");
+		wcnss_log(ERR, "Invalid fragment size");
 		goto unlock_exit;
 	}
 
@@ -2125,7 +2178,7 @@
 
 	if (calhdr.frag_number == 0) {
 		if (calhdr.total_size > MAX_CALIBRATED_DATA_SIZE) {
-			pr_err("wcnss: Invalid cal data size %d",
+			wcnss_log(ERR, "Invalid cal data size %d",
 			       calhdr.total_size);
 			goto unlock_exit;
 		}
@@ -2141,7 +2194,7 @@
 
 	if (penv->fw_cal_rcvd + calhdr.frag_size >
 			MAX_CALIBRATED_DATA_SIZE) {
-		pr_err("calibrated data size is more than expected %d",
+		wcnss_log(ERR, "calibrated data size is more than expected %d",
 		       penv->fw_cal_rcvd + calhdr.frag_size);
 		penv->fw_cal_exp_frag = 0;
 		penv->fw_cal_rcvd = 0;
@@ -2160,7 +2213,7 @@
 	if (calhdr.msg_flags & LAST_FRAGMENT) {
 		penv->fw_cal_exp_frag = 0;
 		penv->fw_cal_available = true;
-		pr_info("wcnss: cal data collection completed\n");
+		wcnss_log(INFO, "cal data collection completed\n");
 	}
 	mutex_unlock(&penv->dev_lock);
 	wake_up(&penv->read_wait);
@@ -2190,18 +2243,18 @@
 
 	len = smd_read_avail(penv->smd_ch);
 	if (len > WCNSS_MAX_FRAME_SIZE) {
-		pr_err("wcnss: frame larger than the allowed size\n");
+		wcnss_log(ERR, "frame larger than the allowed size\n");
 		smd_read(penv->smd_ch, NULL, len);
 		return;
 	}
 	if (len < sizeof(struct smd_msg_hdr)) {
-		pr_debug("wcnss: incomplete header available len = %d\n", len);
+		wcnss_log(DBG, "incomplete header available len = %d\n", len);
 		return;
 	}
 
 	rc = smd_read(penv->smd_ch, buf, sizeof(struct smd_msg_hdr));
 	if (rc < sizeof(struct smd_msg_hdr)) {
-		pr_err("wcnss: incomplete header read from smd\n");
+		wcnss_log(ERR, "incomplete header read from smd\n");
 		return;
 	}
 	len -= sizeof(struct smd_msg_hdr);
@@ -2212,14 +2265,14 @@
 	case WCNSS_VERSION_RSP:
 		if (len != sizeof(struct wcnss_version)
 				- sizeof(struct smd_msg_hdr)) {
-			pr_err("wcnss: invalid version data from wcnss %d\n",
+			wcnss_log(ERR, "invalid version data from wcnss %d\n",
 			       len);
 			return;
 		}
 		rc = smd_read(penv->smd_ch, buf + sizeof(struct smd_msg_hdr),
 			      len);
 		if (rc < len) {
-			pr_err("wcnss: incomplete data read from smd\n");
+			wcnss_log(ERR, "incomplete data read from smd\n");
 			return;
 		}
 		pversion = (struct wcnss_version *)buf;
@@ -2228,14 +2281,15 @@
 		snprintf(penv->wcnss_version, WCNSS_VERSION_LEN,
 			 "%02x%02x%02x%02x", pversion->major, pversion->minor,
 			 pversion->version, pversion->revision);
-		pr_info("wcnss: version %s\n", penv->wcnss_version);
+		wcnss_log(INFO, "version %s\n", penv->wcnss_version);
 		/* schedule work to download nvbin to ccpu */
 		hw_type = wcnss_hardware_type();
 		switch (hw_type) {
 		case WCNSS_RIVA_HW:
 			/* supported only if riva major >= 1 and minor >= 4 */
 			if ((pversion->major >= 1) && (pversion->minor >= 4)) {
-				pr_info("wcnss: schedule dnld work for riva\n");
+				wcnss_log(INFO,
+					  "schedule download work for riva\n");
 				schedule_work(&penv->wcnssctrl_nvbin_dnld_work);
 			}
 			break;
@@ -2245,41 +2299,43 @@
 			smd_msg.msg_len = sizeof(smd_msg);
 			rc = wcnss_smd_tx(&smd_msg, smd_msg.msg_len);
 			if (rc < 0)
-				pr_err("wcnss: smd tx failed: %s\n", __func__);
+				wcnss_log(ERR, "smd tx failed: %s\n", __func__);
 
 			/* supported only if pronto major >= 1 and minor >= 4 */
 			if ((pversion->major >= 1) && (pversion->minor >= 4)) {
-				pr_info("wcnss: schedule dnld work for pronto\n");
+				wcnss_log(INFO,
+					  "schedule dnld work for pronto\n");
 				schedule_work(&penv->wcnssctrl_nvbin_dnld_work);
 			}
 			break;
 
 		default:
-			pr_info("wcnss: unknown hw type (%d), will not schedule dnld work\n",
-				hw_type);
+			wcnss_log(INFO,
+			"unknown hw type (%d) will not schedule dnld work\n",
+			hw_type);
 			break;
 		}
 		break;
 
 	case WCNSS_BUILD_VER_RSP:
 		if (len > WCNSS_MAX_BUILD_VER_LEN) {
-			pr_err("wcnss: invalid build version data from wcnss %d\n",
-			       len);
+			wcnss_log(ERR,
+			"invalid build version data from wcnss %d\n", len);
 			return;
 		}
 		rc = smd_read(penv->smd_ch, build, len);
 		if (rc < len) {
-			pr_err("wcnss: incomplete data read from smd\n");
+			wcnss_log(ERR, "incomplete data read from smd\n");
 			return;
 		}
 		build[len] = 0;
-		pr_info("wcnss: build version %s\n", build);
+		wcnss_log(INFO, "build version %s\n", build);
 		break;
 
 	case WCNSS_NVBIN_DNLD_RSP:
 		penv->nv_downloaded = true;
 		fw_status = wcnss_fw_status();
-		pr_debug("wcnss: received WCNSS_NVBIN_DNLD_RSP from ccpu %u\n",
+		wcnss_log(DBG, "received WCNSS_NVBIN_DNLD_RSP from ccpu %u\n",
 			 fw_status);
 		if (fw_status != WAIT_FOR_CBC_IND)
 			penv->is_cbc_done = 1;
@@ -2289,12 +2345,12 @@
 	case WCNSS_CALDATA_DNLD_RSP:
 		penv->nv_downloaded = true;
 		fw_status = wcnss_fw_status();
-		pr_debug("wcnss: received WCNSS_CALDATA_DNLD_RSP from ccpu %u\n",
+		wcnss_log(DBG, "received WCNSS_CALDATA_DNLD_RSP from ccpu %u\n",
 			 fw_status);
 		break;
 	case WCNSS_CBC_COMPLETE_IND:
 		penv->is_cbc_done = 1;
-		pr_debug("wcnss: received WCNSS_CBC_COMPLETE_IND from FW\n");
+		wcnss_log(DBG, "received WCNSS_CBC_COMPLETE_IND from FW\n");
 		break;
 
 	case WCNSS_CALDATA_UPLD_REQ:
@@ -2302,7 +2358,7 @@
 		break;
 
 	default:
-		pr_err("wcnss: invalid message type %d\n", phdr->msg_type);
+		wcnss_log(ERR, "invalid message type %d\n", phdr->msg_type);
 	}
 }
 
@@ -2315,7 +2371,7 @@
 	smd_msg.msg_len = sizeof(smd_msg);
 	ret = wcnss_smd_tx(&smd_msg, smd_msg.msg_len);
 	if (ret < 0)
-		pr_err("wcnss: smd tx failed\n");
+		wcnss_log(ERR, "smd tx failed\n");
 }
 
 static void wcnss_send_pm_config(struct work_struct *worker)
@@ -2340,12 +2396,12 @@
 	rc = of_property_read_u32_array(penv->pdev->dev.of_node,
 					"qcom,wcnss-pm", payload, prop_len);
 	if (rc < 0) {
-		pr_err("wcnss: property read failed\n");
+		wcnss_log(ERR, "property read failed\n");
 		kfree(msg);
 		return;
 	}
 
-	pr_debug("%s:size=%d: <%d, %d, %d, %d, %d %d>\n", __func__,
+	wcnss_log(DBG, "%s:size=%d: <%d, %d, %d, %d, %d %d>\n", __func__,
 		 prop_len, *payload, *(payload + 1), *(payload + 2),
 		 *(payload + 3), *(payload + 4), *(payload + 5));
 
@@ -2355,7 +2411,7 @@
 
 	rc = wcnss_smd_tx(msg, hdr->msg_len);
 	if (rc < 0)
-		pr_err("wcnss: smd tx failed\n");
+		wcnss_log(ERR, "smd tx failed\n");
 
 	kfree(msg);
 }
@@ -2386,7 +2442,8 @@
 	ret = request_firmware(&nv, NVBIN_FILE, dev);
 
 	if (ret || !nv || !nv->data || !nv->size) {
-		pr_err("wcnss: %s: request_firmware failed for %s (ret = %d)\n",
+		wcnss_log(ERR,
+			  "%s: request_firmware failed for %s (ret = %d)\n",
 		       __func__, NVBIN_FILE, ret);
 		goto out;
 	}
@@ -2399,7 +2456,7 @@
 
 	total_fragments = TOTALFRAGMENTS(nv_blob_size);
 
-	pr_info("wcnss: NV bin size: %d, total_fragments: %d\n",
+	wcnss_log(INFO, "NV bin size: %d, total_fragments: %d\n",
 		nv_blob_size, total_fragments);
 
 	/* get buffer for nv bin dnld req message */
@@ -2447,11 +2504,12 @@
 
 		retry_count = 0;
 		while ((ret == -ENOSPC) && (retry_count <= 3)) {
-			pr_debug("wcnss: %s: smd tx failed, ENOSPC\n",
+			wcnss_log(DBG, "%s: smd tx failed, ENOSPC\n",
 				 __func__);
-			pr_debug("fragment: %d, len: %d, TotFragments: %d, retry_count: %d\n",
-				 count, dnld_req_msg->hdr.msg_len,
-				 total_fragments, retry_count);
+			wcnss_log(DBG, "fragment %d, len: %d,", count,
+				  dnld_req_msg->hdr.msg_len);
+			wcnss_log(DBG, "TotFragments: %d, retry_count: %d\n",
+				  total_fragments, retry_count);
 
 			/* wait and try again */
 			msleep(20);
@@ -2461,10 +2519,11 @@
 		}
 
 		if (ret < 0) {
-			pr_err("wcnss: %s: smd tx failed\n", __func__);
-			pr_err("fragment %d, len: %d, TotFragments: %d, retry_count: %d\n",
-			       count, dnld_req_msg->hdr.msg_len,
-			       total_fragments, retry_count);
+			wcnss_log(ERR, "%s: smd tx failed\n", __func__);
+			wcnss_log(ERR, "fragment %d, len: %d,", count,
+				  dnld_req_msg->hdr.msg_len);
+			wcnss_log(ERR, "TotFragments: %d, retry_count: %d\n",
+				  total_fragments, retry_count);
 			goto err_dnld;
 		}
 	}
@@ -2538,11 +2597,12 @@
 
 		retry_count = 0;
 		while ((ret == -ENOSPC) && (retry_count <= 3)) {
-			pr_debug("wcnss: %s: smd tx failed, ENOSPC\n",
+			wcnss_log(DBG, "%s: smd tx failed, ENOSPC\n",
 				 __func__);
-			pr_debug("fragment: %d, len: %d, TotFragments: %d, retry_count: %d\n",
-				 count, cal_msg->hdr.msg_len,
-				 total_fragments, retry_count);
+			wcnss_log(DBG, "fragment: %d, len: %d",
+				  count, cal_msg->hdr.msg_len);
+			wcnss_log(DBG, " TotFragments: %d, retry_count: %d\n",
+				  total_fragments, retry_count);
 
 			/* wait and try again */
 			msleep(20);
@@ -2552,10 +2612,10 @@
 		}
 
 		if (ret < 0) {
-			pr_err("wcnss: %s: smd tx failed\n", __func__);
-			pr_err("fragment %d, len: %d, TotFragments: %d, retry_count: %d\n",
-			       count, cal_msg->hdr.msg_len,
-				total_fragments, retry_count);
+			wcnss_log(ERR, "%s: smd tx failed: fragment %d, len:%d",
+				  count, cal_msg->hdr.msg_len, __func__);
+			wcnss_log(ERR, " TotFragments: %d, retry_count: %d\n",
+				  total_fragments, retry_count);
 			goto err_dnld;
 		}
 	}
@@ -2578,17 +2638,17 @@
 			msleep(500);
 	}
 	if (penv->fw_cal_available) {
-		pr_info_ratelimited("wcnss: cal download, using fw cal");
+		wcnss_log(INFO, "cal download, using fw cal");
 		wcnss_caldata_dnld(penv->fw_cal_data, penv->fw_cal_rcvd, true);
 
 	} else if (penv->user_cal_available) {
-		pr_info_ratelimited("wcnss: cal download, using user cal");
+		wcnss_log(INFO, "cal download, using user cal");
 		wcnss_caldata_dnld(penv->user_cal_data,
 				   penv->user_cal_rcvd, true);
 	}
 
 nv_download:
-	pr_info_ratelimited("wcnss: NV download");
+	wcnss_log(INFO, "NV download");
 	wcnss_nvbin_dnld();
 }
 
@@ -2631,20 +2691,20 @@
 	switch (cmd) {
 	case WCNSS_USR_HAS_CAL_DATA:
 		if (buf[2] > 1)
-			pr_err("%s: Invalid data for cal %d\n", __func__,
-			       buf[2]);
+			wcnss_log(ERR, "%s: Invalid data for cal %d\n",
+				  __func__, buf[2]);
 		has_calibrated_data = buf[2];
 		break;
 	case WCNSS_USR_WLAN_MAC_ADDR:
 		memcpy(&penv->wlan_nv_mac_addr,  &buf[2],
 		       sizeof(penv->wlan_nv_mac_addr));
-		pr_debug("%s: MAC Addr:" MAC_ADDRESS_STR "\n", __func__,
+		wcnss_log(DBG, "%s: MAC Addr:" MAC_ADDRESS_STR "\n", __func__,
 			 penv->wlan_nv_mac_addr[0], penv->wlan_nv_mac_addr[1],
 			 penv->wlan_nv_mac_addr[2], penv->wlan_nv_mac_addr[3],
 			 penv->wlan_nv_mac_addr[4], penv->wlan_nv_mac_addr[5]);
 		break;
 	default:
-		pr_err("%s: Invalid command %d\n", __func__, cmd);
+		wcnss_log(ERR, "%s: Invalid command %d\n", __func__, cmd);
 		break;
 	}
 }
@@ -2703,7 +2763,7 @@
 
 	rc = wcnss_parse_voltage_regulator(&penv->wlan_config, &pdev->dev);
 	if (rc) {
-		dev_err(&pdev->dev, "Failed to parse voltage regulators\n");
+		wcnss_log(ERR, "Failed to parse voltage regulators\n");
 		goto fail;
 	}
 
@@ -2742,7 +2802,7 @@
 
 		/* allocate 5-wire GPIO resources */
 		if (!penv->gpios_5wire) {
-			dev_err(&pdev->dev, "insufficient IO resources\n");
+			wcnss_log(ERR, "insufficient IO resources\n");
 			ret = -ENOENT;
 			goto fail_gpio_res;
 		}
@@ -2752,7 +2812,7 @@
 	}
 
 	if (ret) {
-		dev_err(&pdev->dev, "WCNSS gpios config failed.\n");
+		wcnss_log(ERR, "gpios config failed.\n");
 		goto fail_gpio_res;
 	}
 
@@ -2765,7 +2825,7 @@
 							"wcnss_wlanrx_irq");
 
 	if (!(penv->mmio_res && penv->tx_irq_res && penv->rx_irq_res)) {
-		dev_err(&pdev->dev, "insufficient resources\n");
+		wcnss_log(ERR, "insufficient resources\n");
 		ret = -ENOENT;
 		goto fail_res;
 	}
@@ -2785,7 +2845,7 @@
 						   "pronto_phy_base");
 		if (!res) {
 			ret = -EIO;
-			pr_err("%s: resource pronto_phy_base failed\n",
+			wcnss_log(ERR, "%s: resource pronto_phy_base failed\n",
 			       __func__);
 			goto fail_ioremap;
 		}
@@ -2797,7 +2857,7 @@
 						   "riva_phy_base");
 		if (!res) {
 			ret = -EIO;
-			pr_err("%s: resource riva_phy_base failed\n",
+			wcnss_log(ERR, "%s: resource riva_phy_base failed\n",
 			       __func__);
 			goto fail_ioremap;
 		}
@@ -2807,7 +2867,7 @@
 
 	if (!penv->msm_wcnss_base) {
 		ret = -ENOMEM;
-		pr_err("%s: ioremap wcnss physical failed\n", __func__);
+		wcnss_log(ERR, "%s: ioremap wcnss physical failed\n", __func__);
 		goto fail_ioremap;
 	}
 
@@ -2818,7 +2878,7 @@
 						   "riva_ccu_base");
 		if (!res) {
 			ret = -EIO;
-			pr_err("%s: resource riva_ccu_base failed\n",
+			wcnss_log(ERR, "%s: resource riva_ccu_base failed\n",
 			       __func__);
 			goto fail_ioremap;
 		}
@@ -2827,7 +2887,7 @@
 
 		if (!penv->riva_ccu_base) {
 			ret = -ENOMEM;
-			pr_err("%s: ioremap riva ccu physical failed\n",
+			wcnss_log(ERR, "%s: ioremap riva ccu physical failed\n",
 			       __func__);
 			goto fail_ioremap;
 		}
@@ -2837,7 +2897,7 @@
 						   "pronto_a2xb_base");
 		if (!res) {
 			ret = -EIO;
-			pr_err("%s: resource pronto_a2xb_base failed\n",
+			wcnss_log(ERR, "%s: resource pronto_a2xb_base failed\n",
 			       __func__);
 			goto fail_ioremap;
 		}
@@ -2846,8 +2906,8 @@
 
 		if (!penv->pronto_a2xb_base) {
 			ret = -ENOMEM;
-			pr_err("%s: ioremap pronto a2xb physical failed\n",
-			       __func__);
+			wcnss_log(ERR,
+			"%s: ioremap pronto a2xb physical failed\n", __func__);
 			goto fail_ioremap;
 		}
 
@@ -2856,7 +2916,7 @@
 						   "pronto_ccpu_base");
 		if (!res) {
 			ret = -EIO;
-			pr_err("%s: resource pronto_ccpu_base failed\n",
+			wcnss_log(ERR, "%s: resource pronto_ccpu_base failed\n",
 			       __func__);
 			goto fail_ioremap;
 		}
@@ -2865,8 +2925,8 @@
 
 		if (!penv->pronto_ccpu_base) {
 			ret = -ENOMEM;
-			pr_err("%s: ioremap pronto ccpu physical failed\n",
-			       __func__);
+			wcnss_log(ERR,
+			"%s: ioremap pronto ccpu physical failed\n", __func__);
 			goto fail_ioremap;
 		}
 
@@ -2874,14 +2934,15 @@
 		res = platform_get_resource_byname(penv->pdev,
 						   IORESOURCE_MEM, "wcnss_fiq");
 		if (!res) {
-			dev_err(&pdev->dev, "insufficient irq mem resources\n");
+			wcnss_log(ERR, "insufficient irq mem resources\n");
 			ret = -ENOENT;
 			goto fail_ioremap;
 		}
 		penv->fiq_reg = ioremap_nocache(res->start, resource_size(res));
 		if (!penv->fiq_reg) {
-			pr_err("wcnss: %s: ioremap_nocache() failed fiq_reg addr:%pr\n",
-			       __func__, &res->start);
+			wcnss_log(ERR, "%s", __func__,
+			"ioremap_nocache() failed fiq_reg addr:%pr\n",
+			&res->start);
 			ret = -ENOMEM;
 			goto fail_ioremap;
 		}
@@ -2891,7 +2952,7 @@
 						   "pronto_saw2_base");
 		if (!res) {
 			ret = -EIO;
-			pr_err("%s: resource pronto_saw2_base failed\n",
+			wcnss_log(ERR, "%s: resource pronto_saw2_base failed\n",
 			       __func__);
 			goto fail_ioremap2;
 		}
@@ -2899,8 +2960,8 @@
 			devm_ioremap_resource(&pdev->dev, res);
 
 		if (!penv->pronto_saw2_base) {
-			pr_err("%s: ioremap wcnss physical(saw2) failed\n",
-			       __func__);
+			wcnss_log(ERR,
+			"%s: ioremap wcnss physical(saw2) failed\n", __func__);
 			ret = -ENOMEM;
 			goto fail_ioremap2;
 		}
@@ -2908,8 +2969,8 @@
 		penv->pronto_pll_base =
 			penv->msm_wcnss_base + PRONTO_PLL_MODE_OFFSET;
 		if (!penv->pronto_pll_base) {
-			pr_err("%s: ioremap wcnss physical(pll) failed\n",
-			       __func__);
+			wcnss_log(ERR,
+			"%s: ioremap wcnss physical(pll) failed\n", __func__);
 			ret = -ENOMEM;
 			goto fail_ioremap2;
 		}
@@ -2919,8 +2980,8 @@
 						   "wlan_tx_phy_aborts");
 		if (!res) {
 			ret = -EIO;
-			pr_err("%s: resource wlan_tx_phy_aborts failed\n",
-			       __func__);
+			wcnss_log(ERR,
+			"%s: resource wlan_tx_phy_aborts failed\n", __func__);
 			goto fail_ioremap2;
 		}
 		penv->wlan_tx_phy_aborts =
@@ -2928,7 +2989,8 @@
 
 		if (!penv->wlan_tx_phy_aborts) {
 			ret = -ENOMEM;
-			pr_err("%s: ioremap wlan TX PHY failed\n", __func__);
+			wcnss_log(ERR, "%s: ioremap wlan TX PHY failed\n",
+				  __func__);
 			goto fail_ioremap2;
 		}
 
@@ -2937,8 +2999,8 @@
 						   "wlan_brdg_err_source");
 		if (!res) {
 			ret = -EIO;
-			pr_err("%s: resource wlan_brdg_err_source failed\n",
-			       __func__);
+			wcnss_log(ERR,
+			"%s: get wlan_brdg_err_source res failed\n", __func__);
 			goto fail_ioremap2;
 		}
 		penv->wlan_brdg_err_source =
@@ -2946,7 +3008,8 @@
 
 		if (!penv->wlan_brdg_err_source) {
 			ret = -ENOMEM;
-			pr_err("%s: ioremap wlan BRDG ERR failed\n", __func__);
+			wcnss_log(ERR, "%s: ioremap wlan BRDG ERR failed\n",
+				  __func__);
 			goto fail_ioremap2;
 		}
 
@@ -2955,7 +3018,7 @@
 						   "wlan_tx_status");
 		if (!res) {
 			ret = -EIO;
-			pr_err("%s: resource wlan_tx_status failed\n",
+			wcnss_log(ERR, "%s: resource wlan_tx_status failed\n",
 			       __func__);
 			goto fail_ioremap2;
 		}
@@ -2964,7 +3027,8 @@
 
 		if (!penv->wlan_tx_status) {
 			ret = -ENOMEM;
-			pr_err("%s: ioremap wlan TX STATUS failed\n", __func__);
+			wcnss_log(ERR, "%s: ioremap wlan TX STATUS failed\n",
+				   __func__);
 			goto fail_ioremap2;
 		}
 
@@ -2973,8 +3037,8 @@
 						   "alarms_txctl");
 		if (!res) {
 			ret = -EIO;
-			pr_err("%s: resource alarms_txctl failed\n",
-			       __func__);
+			wcnss_log(ERR,
+			"%s: resource alarms_txctl failed\n", __func__);
 			goto fail_ioremap2;
 		}
 		penv->alarms_txctl =
@@ -2982,7 +3046,8 @@
 
 		if (!penv->alarms_txctl) {
 			ret = -ENOMEM;
-			pr_err("%s: ioremap alarms TXCTL failed\n", __func__);
+			wcnss_log(ERR,
+			"%s: ioremap alarms TXCTL failed\n", __func__);
 			goto fail_ioremap2;
 		}
 
@@ -2991,8 +3056,8 @@
 						   "alarms_tactl");
 		if (!res) {
 			ret = -EIO;
-			pr_err("%s: resource alarms_tactl failed\n",
-			       __func__);
+			wcnss_log(ERR,
+			"%s: resource alarms_tactl failed\n", __func__);
 			goto fail_ioremap2;
 		}
 		penv->alarms_tactl =
@@ -3000,7 +3065,8 @@
 
 		if (!penv->alarms_tactl) {
 			ret = -ENOMEM;
-			pr_err("%s: ioremap alarms TACTL failed\n", __func__);
+			wcnss_log(ERR,
+			"%s: ioremap alarms TACTL failed\n", __func__);
 			goto fail_ioremap2;
 		}
 
@@ -3009,8 +3075,8 @@
 						   "pronto_mcu_base");
 		if (!res) {
 			ret = -EIO;
-			pr_err("%s: resource pronto_mcu_base failed\n",
-			       __func__);
+			wcnss_log(ERR,
+			"%s: resource pronto_mcu_base failed\n", __func__);
 			goto fail_ioremap2;
 		}
 		penv->pronto_mcu_base =
@@ -3018,8 +3084,8 @@
 
 		if (!penv->pronto_mcu_base) {
 			ret = -ENOMEM;
-			pr_err("%s: ioremap pronto mcu physical failed\n",
-			       __func__);
+			wcnss_log(ERR,
+			"%s: ioremap pronto mcu physical failed\n", __func__);
 			goto fail_ioremap2;
 		}
 
@@ -3027,8 +3093,8 @@
 					  "qcom,is-dual-band-disabled")) {
 			ret = wcnss_get_dual_band_capability_info(pdev);
 			if (ret) {
-				pr_err("%s: failed to get dual band info\n",
-				       __func__);
+				wcnss_log(ERR,
+				"%s: failed to get dual band info\n", __func__);
 				goto fail_ioremap2;
 			}
 		}
@@ -3036,7 +3102,7 @@
 
 	penv->adc_tm_dev = qpnp_get_adc_tm(&penv->pdev->dev, "wcnss");
 	if (IS_ERR(penv->adc_tm_dev)) {
-		pr_err("%s:  adc get failed\n", __func__);
+		wcnss_log(ERR, "%s: adc get failed\n", __func__);
 		penv->adc_tm_dev = NULL;
 	} else {
 		INIT_DELAYED_WORK(&penv->vbatt_work, wcnss_update_vbatt);
@@ -3045,14 +3111,14 @@
 
 	penv->snoc_wcnss = devm_clk_get(&penv->pdev->dev, "snoc_wcnss");
 	if (IS_ERR(penv->snoc_wcnss)) {
-		pr_err("%s: couldn't get snoc_wcnss\n", __func__);
+		wcnss_log(ERR, "%s: couldn't get snoc_wcnss\n", __func__);
 		penv->snoc_wcnss = NULL;
 	} else {
 		if (of_property_read_u32(pdev->dev.of_node,
 					 "qcom,snoc-wcnss-clock-freq",
 					 &penv->snoc_wcnss_clock_freq)) {
-			pr_debug("%s: wcnss snoc clock frequency is not defined\n",
-				 __func__);
+			wcnss_log(DBG,
+			"%s: snoc clock frequency is not defined\n", __func__);
 			devm_clk_put(&penv->pdev->dev, penv->snoc_wcnss);
 			penv->snoc_wcnss = NULL;
 		}
@@ -3062,7 +3128,7 @@
 		penv->vadc_dev = qpnp_get_vadc(&penv->pdev->dev, "wcnss");
 
 		if (IS_ERR(penv->vadc_dev)) {
-			pr_debug("%s:  vadc get failed\n", __func__);
+			wcnss_log(DBG, "%s: vadc get failed\n", __func__);
 			penv->vadc_dev = NULL;
 		} else {
 			rc = wcnss_get_battery_volt(&penv->wlan_config.vbatt);
@@ -3070,8 +3136,8 @@
 				  wcnss_send_vbatt_indication);
 
 			if (rc < 0)
-				pr_err("Failed to get battery voltage with error= %d\n",
-				       rc);
+				wcnss_log(ERR,
+				"battery voltage get failed:err=%d\n", rc);
 		}
 	}
 
@@ -3079,7 +3145,7 @@
 		/* trigger initialization of the WCNSS */
 		penv->pil = subsystem_get(WCNSS_PIL_DEVICE);
 		if (IS_ERR(penv->pil)) {
-			dev_err(&pdev->dev, "Peripheral Loader failed on WCNSS.\n");
+			wcnss_log(ERR, "Peripheral Loader failed on WCNSS.\n");
 			ret = PTR_ERR(penv->pil);
 			wcnss_disable_pc_add_req();
 			wcnss_pronto_log_debug_regs();
@@ -3129,7 +3195,7 @@
 	int rc;
 
 	if (!penv->snoc_wcnss) {
-		pr_err("%s: couldn't get clk snoc_wcnss\n", __func__);
+		wcnss_log(ERR, "%s: couldn't get clk snoc_wcnss\n", __func__);
 		return;
 	}
 
@@ -3137,13 +3203,15 @@
 		rc = clk_set_rate(penv->snoc_wcnss,
 				  penv->snoc_wcnss_clock_freq);
 		if (rc) {
-			pr_err("%s: snoc_wcnss_clk-clk_set_rate failed =%d\n",
-			       __func__, rc);
+			wcnss_log(ERR,
+				  "%s: snoc_wcnss_clk-clk_set_rate failed=%d\n",
+				__func__, rc);
 			return;
 		}
 
 		if (clk_prepare_enable(penv->snoc_wcnss)) {
-			pr_err("%s: snoc_wcnss clk enable failed\n", __func__);
+			wcnss_log(ERR, "%s: snoc_wcnss clk enable failed\n",
+				  __func__);
 			return;
 		}
 	} else {
@@ -3219,7 +3287,7 @@
 		return -EFAULT;
 
 	if (!penv->triggered) {
-		pr_info(DEVICE " triggered by userspace\n");
+		wcnss_log(INFO, DEVICE " triggered by userspace\n");
 		pdev = penv->pdev;
 		rc = wcnss_trigger_config(pdev);
 		if (rc)
@@ -3282,8 +3350,8 @@
 				    user_buffer, 4);
 		if (!penv->user_cal_exp_size ||
 		    penv->user_cal_exp_size > MAX_CALIBRATED_DATA_SIZE) {
-			pr_err(DEVICE " invalid size to write %d\n",
-			       penv->user_cal_exp_size);
+			wcnss_log(ERR, DEVICE " invalid size to write %d\n",
+				  penv->user_cal_exp_size);
 			penv->user_cal_exp_size = 0;
 			mutex_unlock(&penv->dev_lock);
 			return -EFAULT;
@@ -3297,8 +3365,8 @@
 	mutex_lock(&penv->dev_lock);
 	if ((UINT32_MAX - count < penv->user_cal_rcvd) ||
 	    (penv->user_cal_exp_size < count + penv->user_cal_rcvd)) {
-		pr_err(DEVICE " invalid size to write %zu\n", count +
-		       penv->user_cal_rcvd);
+		wcnss_log(ERR, DEVICE " invalid size to write %zu\n",
+			  count + penv->user_cal_rcvd);
 		mutex_unlock(&penv->dev_lock);
 		return -ENOMEM;
 	}
@@ -3320,7 +3388,7 @@
 	kfree(cal_data);
 	if (penv->user_cal_rcvd == penv->user_cal_exp_size) {
 		penv->user_cal_available = true;
-		pr_info_ratelimited("wcnss: user cal written");
+		wcnss_log(INFO, "user cal written");
 	}
 	mutex_unlock(&penv->dev_lock);
 
@@ -3342,12 +3410,12 @@
 
 	if (!(code >= SUBSYS_NOTIF_MIN_INDEX) &&
 	    (code <= SUBSYS_NOTIF_MAX_INDEX)) {
-		pr_debug("%s: Invaild subsystem notification code: %lu\n",
-			 __func__, code);
+		wcnss_log(DBG, "%s: Invaild subsystem notification code: %lu\n",
+			  __func__, code);
 		return NOTIFY_DONE;
 	}
 
-	pr_info("%s: wcnss notification event: %lu : %s\n",
+	wcnss_log(INFO, "%s: notification event: %lu : %s\n",
 		 __func__, code, wcnss_subsys_notif_type[code]);
 
 	if (code == SUBSYS_PROXY_VOTE) {
@@ -3356,7 +3424,7 @@
 					       WCNSS_WLAN_SWITCH_ON, &xo_mode);
 			wcnss_set_iris_xo_mode(xo_mode);
 			if (ret)
-				pr_err("Failed to execute wcnss_wlan_power\n");
+				wcnss_log(ERR, "wcnss_wlan_power failed\n");
 		}
 	} else if (code == SUBSYS_PROXY_UNVOTE) {
 		if (pdev && pwlanconfig) {
@@ -3407,30 +3475,30 @@
 
 	ret = alloc_chrdev_region(&penv->dev_ctrl, 0, 1, CTRL_DEVICE);
 	if (ret < 0) {
-		dev_err(&pdev->dev, "CTRL Device Registration failed\n");
+		wcnss_log(ERR, "CTRL Device Registration failed\n");
 		goto alloc_region_ctrl;
 	}
 	ret = alloc_chrdev_region(&penv->dev_node, 0, 1, DEVICE);
 	if (ret < 0) {
-		dev_err(&pdev->dev, "NODE Device Registration failed\n");
+		wcnss_log(ERR, "NODE Device Registration failed\n");
 		goto alloc_region_node;
 	}
 
 	penv->node_class = class_create(THIS_MODULE, "wcnss");
 	if (!penv->node_class) {
-		dev_err(&pdev->dev, "NODE Device Class Creation failed\n");
+		wcnss_log(ERR, "NODE Device Class Creation failed\n");
 		goto class_create_node;
 	}
 
 	if (device_create(penv->node_class, NULL, penv->dev_ctrl, NULL,
 			 CTRL_DEVICE) == NULL) {
-		dev_err(&pdev->dev, "CTRL Device Creation failed\n");
+		wcnss_log(ERR, "CTRL Device Creation failed\n");
 		goto device_create_ctrl;
 	}
 
 	if (device_create(penv->node_class, NULL, penv->dev_node, NULL,
 			  DEVICE) == NULL) {
-		dev_err(&pdev->dev, "NODE Device Creation failed\n");
+		wcnss_log(ERR, "NODE Device Creation failed\n");
 		goto device_create_node;
 	}
 
@@ -3438,11 +3506,11 @@
 	cdev_init(&penv->node_dev, &wcnss_node_fops);
 
 	if (cdev_add(&penv->ctrl_dev, penv->dev_ctrl, 1) == -1) {
-		dev_err(&pdev->dev, "CTRL Device addition failed\n");
+		wcnss_log(ERR, "CTRL Device addition failed\n");
 		goto cdev_add_ctrl;
 	}
 	if (cdev_add(&penv->node_dev, penv->dev_node, 1) == -1) {
-		dev_err(&pdev->dev, "NODE Device addition failed\n");
+		wcnss_log(ERR, "NODE Device addition failed\n");
 		goto cdev_add_node;
 	}
 
@@ -3466,7 +3534,7 @@
 
 static void wcnss_cdev_unregister(struct platform_device *pdev)
 {
-	dev_err(&pdev->dev, "Unregistering cdev devices\n");
+	wcnss_log(ERR, "Unregistering cdev devices\n");
 	cdev_del(&penv->ctrl_dev);
 	cdev_del(&penv->node_dev);
 	device_destroy(penv->node_class, penv->dev_ctrl);
@@ -3483,7 +3551,7 @@
 
 	/* verify we haven't been called more than once */
 	if (penv) {
-		dev_err(&pdev->dev, "cannot handle multiple devices.\n");
+		wcnss_log(ERR, "cannot handle multiple devices.\n");
 		return -ENODEV;
 	}
 
@@ -3497,7 +3565,7 @@
 	penv->user_cal_data =
 		devm_kzalloc(&pdev->dev, MAX_CALIBRATED_DATA_SIZE, GFP_KERNEL);
 	if (!penv->user_cal_data) {
-		dev_err(&pdev->dev, "Failed to alloc memory for cal data.\n");
+		wcnss_log(ERR, "Failed to alloc memory for cal data.\n");
 		return -ENOMEM;
 	}
 
@@ -3511,7 +3579,7 @@
 	/* register wcnss event notification */
 	penv->wcnss_notif_hdle = subsys_notif_register_notifier("wcnss", &wnb);
 	if (IS_ERR(penv->wcnss_notif_hdle)) {
-		pr_err("wcnss: register event notification failed!\n");
+		wcnss_log(ERR, "register event notification failed!\n");
 		return PTR_ERR(penv->wcnss_notif_hdle);
 	}
 
@@ -3534,7 +3602,7 @@
 	 * device so that we know that WCNSS configuration can take
 	 * place
 	 */
-	pr_info(DEVICE " probed in built-in mode\n");
+	wcnss_log(INFO, DEVICE " probed in built-in mode\n");
 
 	return wcnss_cdev_register(pdev);
 }
@@ -3579,6 +3647,11 @@
 
 static int __init wcnss_wlan_init(void)
 {
+
+	wcnss_ipc_log = ipc_log_context_create(IPC_NUM_LOG_PAGES, "wcnss", 0);
+	if (!wcnss_ipc_log)
+		wcnss_log(ERR, "Unable to create log context\n");
+
 	platform_driver_register(&wcnss_wlan_driver);
 	platform_driver_register(&wcnss_wlan_ctrl_driver);
 	platform_driver_register(&wcnss_ctrl_driver);
@@ -3599,6 +3672,8 @@
 	platform_driver_unregister(&wcnss_ctrl_driver);
 	platform_driver_unregister(&wcnss_wlan_ctrl_driver);
 	platform_driver_unregister(&wcnss_wlan_driver);
+	ipc_log_context_destroy(wcnss_ipc_log);
+	wcnss_ipc_log = NULL;
 }
 
 module_init(wcnss_wlan_init);
diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c
index 520aedd..78d3dba 100644
--- a/drivers/soc/qcom/wcnss_ctrl.c
+++ b/drivers/soc/qcom/wcnss_ctrl.c
@@ -247,7 +247,7 @@
 		/* Increment for next fragment */
 		req->seq++;
 
-		data += req->hdr.len;
+		data += NV_FRAGMENT_SIZE;
 		left -= NV_FRAGMENT_SIZE;
 	} while (left > 0);
 
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 6ee1da0..fc96f62 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -156,7 +156,6 @@
 config SPI_BCM_QSPI
 	tristate "Broadcom BSPI and MSPI controller support"
 	depends on ARCH_BRCMSTB || ARCH_BCM || ARCH_BCM_IPROC || COMPILE_TEST
-	depends on MTD_NORFLASH
 	default ARCH_BCM_IPROC
 	help
 	  Enables support for the Broadcom SPI flash and MSPI controller.
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 7d629b4..6323176 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -514,7 +514,7 @@
 
 static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
 {
-	if (!has_bspi(qspi) || (qspi->bspi_enabled))
+	if (!has_bspi(qspi))
 		return;
 
 	qspi->bspi_enabled = 1;
@@ -529,7 +529,7 @@
 
 static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
 {
-	if (!has_bspi(qspi) || (!qspi->bspi_enabled))
+	if (!has_bspi(qspi))
 		return;
 
 	qspi->bspi_enabled = 0;
@@ -543,16 +543,19 @@
 
 static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs)
 {
-	u32 data = 0;
+	u32 rd = 0;
+	u32 wr = 0;
 
-	if (qspi->curr_cs == cs)
-		return;
 	if (qspi->base[CHIP_SELECT]) {
-		data = bcm_qspi_read(qspi, CHIP_SELECT, 0);
-		data = (data & ~0xff) | (1 << cs);
-		bcm_qspi_write(qspi, CHIP_SELECT, 0, data);
+		rd = bcm_qspi_read(qspi, CHIP_SELECT, 0);
+		wr = (rd & ~0xff) | (1 << cs);
+		if (rd == wr)
+			return;
+		bcm_qspi_write(qspi, CHIP_SELECT, 0, wr);
 		usleep_range(10, 20);
 	}
+
+	dev_dbg(&qspi->pdev->dev, "using cs:%d\n", cs);
 	qspi->curr_cs = cs;
 }
 
@@ -770,8 +773,13 @@
 			dev_dbg(&qspi->pdev->dev, "WR %04x\n", val);
 		}
 		mspi_cdram = MSPI_CDRAM_CONT_BIT;
-		mspi_cdram |= (~(1 << spi->chip_select) &
-			       MSPI_CDRAM_PCS);
+
+		if (has_bspi(qspi))
+			mspi_cdram &= ~1;
+		else
+			mspi_cdram |= (~(1 << spi->chip_select) &
+				       MSPI_CDRAM_PCS);
+
 		mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 :
 				MSPI_CDRAM_BITSE_BIT);
 
@@ -1212,7 +1220,7 @@
 		qspi->base[MSPI]  = devm_ioremap_resource(dev, res);
 		if (IS_ERR(qspi->base[MSPI])) {
 			ret = PTR_ERR(qspi->base[MSPI]);
-			goto qspi_probe_err;
+			goto qspi_resource_err;
 		}
 	} else {
 		goto qspi_resource_err;
@@ -1223,7 +1231,7 @@
 		qspi->base[BSPI]  = devm_ioremap_resource(dev, res);
 		if (IS_ERR(qspi->base[BSPI])) {
 			ret = PTR_ERR(qspi->base[BSPI]);
-			goto qspi_probe_err;
+			goto qspi_resource_err;
 		}
 		qspi->bspi_mode = true;
 	} else {
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 02fb967..0d8f43a 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -646,7 +646,7 @@
 			buf = t->rx_buf;
 		t->rx_dma = dma_map_single(&spi->dev, buf,
 				t->len, DMA_FROM_DEVICE);
-		if (dma_mapping_error(&spi->dev, !t->rx_dma)) {
+		if (dma_mapping_error(&spi->dev, t->rx_dma)) {
 			ret = -EFAULT;
 			goto err_rx_map;
 		}
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index a074763..22884ae 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -89,7 +89,7 @@
 #define TIMESTAMP_AFTER		BIT(3)
 #define POST_CMD_DELAY		BIT(4)
 
-#define SPI_CORE2X_VOTE		(10000)
+#define SPI_CORE2X_VOTE		(7600)
 /* GSI CONFIG0 TRE Params */
 /* Flags bit fields */
 #define GSI_LOOPBACK_EN		(BIT(0))
@@ -155,6 +155,7 @@
 	int num_xfers;
 	void *ipc;
 	bool shared_se;
+	bool dis_autosuspend;
 };
 
 static struct spi_master *get_spi_master(struct device *dev)
@@ -311,7 +312,7 @@
 				struct spi_message *spi_msg)
 {
 	struct spi_geni_master *mas = spi_master_get_devdata(spi);
-	int mode = FIFO_MODE;
+	int mode = SE_DMA;
 	int fifo_disable = (geni_read_reg(mas->base, GENI_IF_FIFO_DISABLE_RO) &
 							FIFO_IF_DISABLE);
 	bool dma_chan_valid =
@@ -325,10 +326,10 @@
 	 */
 	if (fifo_disable && !dma_chan_valid)
 		mode = -EINVAL;
+	else if (!fifo_disable)
+		mode = SE_DMA;
 	else if (dma_chan_valid)
 		mode = GSI_DMA;
-	else
-		mode = FIFO_MODE;
 	return mode;
 }
 
@@ -356,9 +357,6 @@
 	if (mode & SPI_CPHA)
 		flags |= GSI_CPHA;
 
-	if (xfer->cs_change)
-		flags |= GSI_CS_TOGGLE;
-
 	word_len = xfer->bits_per_word - MIN_WORD_LEN;
 	pack |= (GSI_TX_PACK_EN | GSI_RX_PACK_EN);
 	ret = get_spi_clk_cfg(mas->cur_speed_hz, mas, &idx, &div);
@@ -586,8 +584,11 @@
 	}
 
 	cs |= spi_slv->chip_select;
-	if (!list_is_last(&xfer->transfer_list, &spi->cur_msg->transfers))
-		go_flags |= FRAGMENTATION;
+	if (!xfer->cs_change) {
+		if (!list_is_last(&xfer->transfer_list,
+					&spi->cur_msg->transfers))
+			go_flags |= FRAGMENTATION;
+	}
 	go_tre = setup_go_tre(cmd, cs, rx_len, go_flags, mas);
 
 	sg_init_table(xfer_tx_sg, tx_nent);
@@ -715,25 +716,20 @@
 
 	mas->cur_xfer_mode = select_xfer_mode(spi, spi_msg);
 
-	if (mas->cur_xfer_mode == FIFO_MODE) {
-		geni_se_select_mode(mas->base, FIFO_MODE);
-		reinit_completion(&mas->xfer_done);
-		ret = setup_fifo_params(spi_msg->spi, spi);
+	if (mas->cur_xfer_mode < 0) {
+		dev_err(mas->dev, "%s: Couldn't select mode %d", __func__,
+							mas->cur_xfer_mode);
+		ret = -EINVAL;
 	} else if (mas->cur_xfer_mode == GSI_DMA) {
-		mas->num_tx_eot = 0;
-		mas->num_rx_eot = 0;
-		mas->num_xfers = 0;
-		reinit_completion(&mas->tx_cb);
-		reinit_completion(&mas->rx_cb);
 		memset(mas->gsi, 0,
 				(sizeof(struct spi_geni_gsi) * NUM_SPI_XFER));
 		geni_se_select_mode(mas->base, GSI_DMA);
 		ret = spi_geni_map_buf(mas, spi_msg);
 	} else {
-		dev_err(mas->dev, "%s: Couldn't select mode %d", __func__,
-							mas->cur_xfer_mode);
-		ret = -EINVAL;
+		geni_se_select_mode(mas->base, mas->cur_xfer_mode);
+		ret = setup_fifo_params(spi_msg->spi, spi);
 	}
+
 	return ret;
 }
 
@@ -752,13 +748,12 @@
 static int spi_geni_prepare_transfer_hardware(struct spi_master *spi)
 {
 	struct spi_geni_master *mas = spi_master_get_devdata(spi);
-	int ret = 0;
+	int ret = 0, count = 0;
 	u32 max_speed = spi->cur_msg->spi->max_speed_hz;
 	struct se_geni_rsc *rsc = &mas->spi_rsc;
 
-	/* Adjust the AB/IB based on the max speed of the slave.*/
+	/* Adjust the IB based on the max speed of the slave. */
 	rsc->ib = max_speed * DEFAULT_BUS_WIDTH;
-	rsc->ab = max_speed * DEFAULT_BUS_WIDTH;
 	if (mas->shared_se) {
 		struct se_geni_rsc *rsc;
 		int ret = 0;
@@ -780,7 +775,12 @@
 	} else {
 		ret = 0;
 	}
-
+	if (mas->dis_autosuspend) {
+		count = atomic_read(&mas->dev->power.usage_count);
+		if (count <= 0)
+			GENI_SE_ERR(mas->ipc, false, NULL,
+				"resume usage count mismatch:%d", count);
+	}
 	if (unlikely(!mas->setup)) {
 		int proto = get_se_proto(mas->base);
 		unsigned int major;
@@ -869,6 +869,9 @@
 		mas->shared_se =
 			(geni_read_reg(mas->base, GENI_IF_FIFO_DISABLE_RO) &
 							FIFO_IF_DISABLE);
+		if (mas->dis_autosuspend)
+			GENI_SE_DBG(mas->ipc, false, mas->dev,
+					"Auto Suspend is disabled\n");
 	}
 exit_prepare_transfer_hardware:
 	return ret;
@@ -877,7 +880,7 @@
 static int spi_geni_unprepare_transfer_hardware(struct spi_master *spi)
 {
 	struct spi_geni_master *mas = spi_master_get_devdata(spi);
-
+	int count = 0;
 	if (mas->shared_se) {
 		struct se_geni_rsc *rsc;
 		int ret = 0;
@@ -890,8 +893,16 @@
 			"%s: Error %d pinctrl_select_state\n", __func__, ret);
 	}
 
-	pm_runtime_mark_last_busy(mas->dev);
-	pm_runtime_put_autosuspend(mas->dev);
+	if (mas->dis_autosuspend) {
+		pm_runtime_put_sync(mas->dev);
+		count = atomic_read(&mas->dev->power.usage_count);
+		if (count < 0)
+			GENI_SE_ERR(mas->ipc, false, NULL,
+				"suspend usage count mismatch:%d", count);
+	} else {
+		pm_runtime_mark_last_busy(mas->dev);
+		pm_runtime_put_autosuspend(mas->dev);
+	}
 	return 0;
 }
 
@@ -940,8 +951,6 @@
 		m_cmd = SPI_RX_ONLY;
 
 	spi_tx_cfg &= ~CS_TOGGLE;
-	if (xfer->cs_change)
-		spi_tx_cfg |= CS_TOGGLE;
 	if (!(mas->cur_word_len % MIN_WORD_LEN)) {
 		trans_len =
 			((xfer->len << 3) / mas->cur_word_len) & TRANS_LEN_MSK;
@@ -950,8 +959,12 @@
 
 		trans_len = (xfer->len / bytes_per_word) & TRANS_LEN_MSK;
 	}
-	if (!list_is_last(&xfer->transfer_list, &spi->cur_msg->transfers))
-		m_param |= FRAGMENTATION;
+
+	if (!xfer->cs_change) {
+		if (!list_is_last(&xfer->transfer_list,
+					&spi->cur_msg->transfers))
+			m_param |= FRAGMENTATION;
+	}
 
 	mas->cur_xfer = xfer;
 	if (m_cmd & SPI_TX_ONLY) {
@@ -963,25 +976,64 @@
 		geni_write_reg(trans_len, mas->base, SE_SPI_RX_TRANS_LEN);
 		mas->rx_rem_bytes = xfer->len;
 	}
+
+	if (trans_len > (mas->tx_fifo_depth * mas->tx_fifo_width)) {
+		if (mas->cur_xfer_mode != SE_DMA) {
+			mas->cur_xfer_mode = SE_DMA;
+			geni_se_select_mode(mas->base, mas->cur_xfer_mode);
+		}
+	} else {
+		if (mas->cur_xfer_mode != FIFO_MODE) {
+			mas->cur_xfer_mode = FIFO_MODE;
+			geni_se_select_mode(mas->base, mas->cur_xfer_mode);
+		}
+	}
+
 	geni_write_reg(spi_tx_cfg, mas->base, SE_SPI_TRANS_CFG);
 	geni_setup_m_cmd(mas->base, m_cmd, m_param);
 	GENI_SE_DBG(mas->ipc, false, mas->dev,
-		"%s: trans_len %d xferlen%d tx_cfg 0x%x cmd 0x%x\n",
-		__func__, trans_len, xfer->len, spi_tx_cfg, m_cmd);
-	if (m_cmd & SPI_TX_ONLY)
-		geni_write_reg(mas->tx_wm, mas->base, SE_GENI_TX_WATERMARK_REG);
+		"%s: trans_len %d xferlen%d tx_cfg 0x%x cmd 0x%x cs%d mode%d\n",
+		__func__, trans_len, xfer->len, spi_tx_cfg, m_cmd,
+		xfer->cs_change, mas->cur_xfer_mode);
+	if ((m_cmd & SPI_RX_ONLY) && (mas->cur_xfer_mode == SE_DMA)) {
+		int ret = 0;
+
+		ret =  geni_se_rx_dma_prep(mas->wrapper_dev, mas->base,
+				xfer->rx_buf, xfer->len, &xfer->rx_dma);
+		if (ret)
+			GENI_SE_ERR(mas->ipc, true, mas->dev,
+				"Failed to setup Rx dma %d\n", ret);
+	}
+	if (m_cmd & SPI_TX_ONLY) {
+		if (mas->cur_xfer_mode == FIFO_MODE) {
+			geni_write_reg(mas->tx_wm, mas->base,
+					SE_GENI_TX_WATERMARK_REG);
+		} else if (mas->cur_xfer_mode == SE_DMA) {
+			int ret = 0;
+
+			ret =  geni_se_tx_dma_prep(mas->wrapper_dev, mas->base,
+					(void *)xfer->tx_buf, xfer->len,
+							&xfer->tx_dma);
+			if (ret)
+				GENI_SE_ERR(mas->ipc, true, mas->dev,
+					"Failed to setup tx dma %d\n", ret);
+		}
+	}
+
 	/* Ensure all writes are done before the WM interrupt */
 	mb();
 }
 
-static void handle_fifo_timeout(struct spi_geni_master *mas)
+static void handle_fifo_timeout(struct spi_geni_master *mas,
+					struct spi_transfer *xfer)
 {
 	unsigned long timeout;
 
 	geni_se_dump_dbg_regs(&mas->spi_rsc, mas->base, mas->ipc);
 	reinit_completion(&mas->xfer_done);
 	geni_cancel_m_cmd(mas->base);
-	geni_write_reg(0, mas->base, SE_GENI_TX_WATERMARK_REG);
+	if (mas->cur_xfer_mode == FIFO_MODE)
+		geni_write_reg(0, mas->base, SE_GENI_TX_WATERMARK_REG);
 	/* Ensure cmd cancel is written */
 	mb();
 	timeout = wait_for_completion_timeout(&mas->xfer_done, HZ);
@@ -996,6 +1048,15 @@
 			dev_err(mas->dev,
 				"Failed to cancel/abort m_cmd\n");
 	}
+	if (mas->cur_xfer_mode == SE_DMA) {
+		if (xfer->tx_buf)
+			geni_se_tx_dma_unprep(mas->wrapper_dev,
+					xfer->tx_dma, xfer->len);
+		if (xfer->rx_buf)
+			geni_se_rx_dma_unprep(mas->wrapper_dev,
+					xfer->rx_dma, xfer->len);
+	}
+
 }
 
 static int spi_geni_transfer_one(struct spi_master *spi,
@@ -1011,7 +1072,8 @@
 		return -EINVAL;
 	}
 
-	if (mas->cur_xfer_mode == FIFO_MODE) {
+	if (mas->cur_xfer_mode != GSI_DMA) {
+		reinit_completion(&mas->xfer_done);
 		setup_fifo_xfer(xfer, mas, slv->mode, spi);
 		timeout = wait_for_completion_timeout(&mas->xfer_done,
 					msecs_to_jiffies(SPI_XFER_TIMEOUT_MS));
@@ -1025,7 +1087,22 @@
 			ret = -ETIMEDOUT;
 			goto err_fifo_geni_transfer_one;
 		}
+
+		if (mas->cur_xfer_mode == SE_DMA) {
+			if (xfer->tx_buf)
+				geni_se_tx_dma_unprep(mas->wrapper_dev,
+					xfer->tx_dma, xfer->len);
+			if (xfer->rx_buf)
+				geni_se_rx_dma_unprep(mas->wrapper_dev,
+					xfer->rx_dma, xfer->len);
+		}
 	} else {
+		mas->num_tx_eot = 0;
+		mas->num_rx_eot = 0;
+		mas->num_xfers = 0;
+		reinit_completion(&mas->tx_cb);
+		reinit_completion(&mas->rx_cb);
+
 		setup_gsi_xfer(xfer, mas, slv, spi);
 		if ((mas->num_xfers >= NUM_SPI_XFER) ||
 			(list_is_last(&xfer->transfer_list,
@@ -1069,7 +1146,7 @@
 	dmaengine_terminate_all(mas->tx);
 	return ret;
 err_fifo_geni_transfer_one:
-	handle_fifo_timeout(mas);
+	handle_fifo_timeout(mas, xfer);
 	return ret;
 }
 
@@ -1185,33 +1262,59 @@
 		goto exit_geni_spi_irq;
 	}
 	m_irq = geni_read_reg(mas->base, SE_GENI_M_IRQ_STATUS);
-	if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
-		geni_spi_handle_rx(mas);
+	if (mas->cur_xfer_mode == FIFO_MODE) {
+		if ((m_irq & M_RX_FIFO_WATERMARK_EN) ||
+						(m_irq & M_RX_FIFO_LAST_EN))
+			geni_spi_handle_rx(mas);
 
-	if ((m_irq & M_TX_FIFO_WATERMARK_EN))
-		geni_spi_handle_tx(mas);
+		if ((m_irq & M_TX_FIFO_WATERMARK_EN))
+			geni_spi_handle_tx(mas);
 
-	if ((m_irq & M_CMD_DONE_EN) || (m_irq & M_CMD_CANCEL_EN) ||
-		(m_irq & M_CMD_ABORT_EN)) {
-		complete(&mas->xfer_done);
-		/*
-		 * If this happens, then a CMD_DONE came before all the buffer
-		 * bytes were sent out. This is unusual, log this condition and
-		 * disable the WM interrupt to prevent the system from stalling
-		 * due an interrupt storm.
-		 * If this happens when all Rx bytes haven't been received, log
-		 * the condition.
-		 */
-		if (mas->tx_rem_bytes) {
-			geni_write_reg(0, mas->base, SE_GENI_TX_WATERMARK_REG);
-			GENI_SE_DBG(mas->ipc, false, mas->dev,
-				"%s:Premature Done.tx_rem%d bpw%d\n",
-				__func__, mas->tx_rem_bytes, mas->cur_word_len);
+		if ((m_irq & M_CMD_DONE_EN) || (m_irq & M_CMD_CANCEL_EN) ||
+			(m_irq & M_CMD_ABORT_EN)) {
+			complete(&mas->xfer_done);
+			/*
+			 * If this happens, then a CMD_DONE came before all the
+			 * buffer bytes were sent out. This is unusual, log this
+			 * condition and disable the WM interrupt to prevent the
+			 * system from stalling due an interrupt storm.
+			 * If this happens when all Rx bytes haven't been
+			 * received, log the condition.
+			 */
+			if (mas->tx_rem_bytes) {
+				geni_write_reg(0, mas->base,
+						SE_GENI_TX_WATERMARK_REG);
+				GENI_SE_DBG(mas->ipc, false, mas->dev,
+					"%s:Premature Done.tx_rem%d bpw%d\n",
+					__func__, mas->tx_rem_bytes,
+						mas->cur_word_len);
+			}
+			if (mas->rx_rem_bytes)
+				GENI_SE_DBG(mas->ipc, false, mas->dev,
+					"%s:Premature Done.rx_rem%d bpw%d\n",
+						__func__, mas->rx_rem_bytes,
+							mas->cur_word_len);
 		}
-		if (mas->rx_rem_bytes)
-			GENI_SE_DBG(mas->ipc, false, mas->dev,
-				"%s:Premature Done.rx_rem%d bpw%d\n",
-				__func__, mas->rx_rem_bytes, mas->cur_word_len);
+	} else if (mas->cur_xfer_mode == SE_DMA) {
+		u32 dma_tx_status = geni_read_reg(mas->base,
+							SE_DMA_TX_IRQ_STAT);
+		u32 dma_rx_status = geni_read_reg(mas->base,
+							SE_DMA_RX_IRQ_STAT);
+
+		if (dma_tx_status)
+			geni_write_reg(dma_tx_status, mas->base,
+						SE_DMA_TX_IRQ_CLR);
+		if (dma_rx_status)
+			geni_write_reg(dma_rx_status, mas->base,
+						SE_DMA_RX_IRQ_CLR);
+		if (dma_tx_status & TX_DMA_DONE)
+			mas->tx_rem_bytes = 0;
+		if (dma_rx_status & RX_DMA_DONE)
+			mas->rx_rem_bytes = 0;
+		if (!mas->tx_rem_bytes && !mas->rx_rem_bytes)
+			complete(&mas->xfer_done);
+		if ((m_irq & M_CMD_CANCEL_EN) || (m_irq & M_CMD_ABORT_EN))
+			complete(&mas->xfer_done);
 	}
 exit_geni_spi_irq:
 	geni_write_reg(m_irq, mas->base, SE_GENI_M_IRQ_CLEAR);
@@ -1264,6 +1367,7 @@
 		goto spi_geni_probe_err;
 	}
 
+	geni_mas->spi_rsc.ctrl_dev = geni_mas->dev;
 	rsc->geni_pinctrl = devm_pinctrl_get(&pdev->dev);
 	if (IS_ERR_OR_NULL(rsc->geni_pinctrl)) {
 		dev_err(&pdev->dev, "No pinctrl config specified!\n");
@@ -1325,7 +1429,9 @@
 	rt_pri = of_property_read_bool(pdev->dev.of_node, "qcom,rt");
 	if (rt_pri)
 		spi->rt = true;
-
+	geni_mas->dis_autosuspend =
+		of_property_read_bool(pdev->dev.of_node,
+				"qcom,disable-autosuspend");
 	geni_mas->phys_addr = res->start;
 	geni_mas->size = resource_size(res);
 	geni_mas->base = devm_ioremap(&pdev->dev, res->start,
@@ -1365,8 +1471,11 @@
 	init_completion(&geni_mas->tx_cb);
 	init_completion(&geni_mas->rx_cb);
 	pm_runtime_set_suspended(&pdev->dev);
-	pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTO_SUSPEND_DELAY);
-	pm_runtime_use_autosuspend(&pdev->dev);
+	if (!geni_mas->dis_autosuspend) {
+		pm_runtime_set_autosuspend_delay(&pdev->dev,
+					SPI_AUTO_SUSPEND_DELAY);
+		pm_runtime_use_autosuspend(&pdev->dev);
+	}
 	pm_runtime_enable(&pdev->dev);
 	ret = spi_register_master(spi);
 	if (ret) {
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index ce31b81..b8e004d 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -38,7 +38,7 @@
 
 	/* SSP register addresses */
 	void __iomem *ioaddr;
-	u32 ssdr_physical;
+	phys_addr_t ssdr_physical;
 
 	/* SSP masks*/
 	u32 dma_cr1;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 6db8063..c2e85e2 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -743,8 +743,14 @@
 	for (i = 0; i < sgs; i++) {
 
 		if (vmalloced_buf || kmap_buf) {
-			min = min_t(size_t,
-				    len, desc_len - offset_in_page(buf));
+			/*
+			 * Next scatterlist entry size is the minimum between
+			 * the desc_len and the remaining buffer length that
+			 * fits in a page.
+			 */
+			min = min_t(size_t, desc_len,
+				    min_t(size_t, len,
+					  PAGE_SIZE - offset_in_page(buf)));
 			if (vmalloced_buf)
 				vm_page = vmalloc_to_page(buf);
 			else
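
A worked illustration of the new bound (numbers invented, assuming 4 KiB pages): for a vmalloc'd or kmap'd buffer, desc_len is derived from the controller's maximum segment size and capped at PAGE_SIZE, say 2048. If buf starts 3072 bytes into a page and len is 8192, the old expression computed min(8192, 2048 - 3072), where the size_t subtraction underflows, so the scatterlist entry ended up as 8192 bytes starting mid-page, spilling past the single page that vmalloc_to_page() can back. The new expression gives min(2048, min(8192, 4096 - 3072)) = 1024 bytes, which respects both the page boundary and the controller's segment limit.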
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 6199523..0f0b7ba 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -1449,6 +1449,7 @@
 	.driver		= {
 		.name	= "spmi_pmic_arb",
 		.of_match_table = spmi_pmic_arb_match_table,
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
 	},
 };
 
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 7bd27a4..a17c483 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -33,6 +33,15 @@
 	  /sys/module/lowmemorykiller/parameters/adj and convert them
 	  to oom_score_adj values.
 
+config ANDROID_VSOC
+	tristate "Android Virtual SoC support"
+	default n
+	depends on PCI_MSI
+	---help---
+	  This option adds support for the Virtual SoC driver needed to boot
+	  a 'cuttlefish' Android image inside QEmu. The driver interacts with
+	  a QEmu ivshmem device. If built as a module, it will be called vsoc.
+
 source "drivers/staging/android/ion/Kconfig"
 
 source "drivers/staging/android/fiq_debugger/Kconfig"
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index 21b0ff4..93c5f5a 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -5,3 +5,4 @@
 
 obj-$(CONFIG_ASHMEM)			+= ashmem.o
 obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER)	+= lowmemorykiller.o
+obj-$(CONFIG_ANDROID_VSOC)		+= vsoc.o
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
index 64d8c87..edfb680 100644
--- a/drivers/staging/android/TODO
+++ b/drivers/staging/android/TODO
@@ -33,5 +33,14 @@
  - clean up and ABI check for security issues
  - move it to drivers/base/dma-buf
 
+vsoc.c, uapi/vsoc_shm.h
+ - The current driver uses the same wait queue for all of the futexes in a
+   region. This will cause false wakeups in regions with a large number of
+   waiting threads. We should eventually use multiple queues and select the
+   queue based on the futex offset within the region.
+ - Add debugfs support for examining the permissions of regions.
+ - Remove VSOC_WAIT_FOR_INCOMING_INTERRUPT ioctl. This functionality has been
+   superseded by the futex and is there for legacy reasons.
+
 Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
 Arve Hjønnevåg <arve@android.com> and Riley Andrews <riandrews@android.com>
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index c1103c7..6000707 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1003,7 +1003,6 @@
 	struct ion_device *dev = client->dev;
 	struct rb_node *n;
 
-	pr_debug("%s: %d\n", __func__, __LINE__);
 	down_write(&dev->lock);
 	rb_erase(&client->node, &dev->clients);
 	up_write(&dev->lock);
@@ -1213,9 +1212,6 @@
 	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
 	int i;
 
-	pr_debug("%s: syncing for device %s\n", __func__,
-		 dev ? dev_name(dev) : "null");
-
 	if (!ion_buffer_fault_user_mappings(buffer))
 		return;
 
@@ -1269,7 +1265,6 @@
 	mutex_lock(&buffer->lock);
 	list_add(&vma_list->list, &buffer->vmas);
 	mutex_unlock(&buffer->lock);
-	pr_debug("%s: adding %pK\n", __func__, vma);
 }
 
 static void ion_vm_close(struct vm_area_struct *vma)
@@ -1277,14 +1272,12 @@
 	struct ion_buffer *buffer = vma->vm_private_data;
 	struct ion_vma_list *vma_list, *tmp;
 
-	pr_debug("%s\n", __func__);
 	mutex_lock(&buffer->lock);
 	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
 		if (vma_list->vma != vma)
 			continue;
 		list_del(&vma_list->list);
 		kfree(vma_list);
-		pr_debug("%s: deleting %pK\n", __func__, vma);
 		break;
 	}
 	mutex_unlock(&buffer->lock);
@@ -1680,7 +1673,6 @@
 {
 	struct ion_client *client = file->private_data;
 
-	pr_debug("%s: %d\n", __func__, __LINE__);
 	ion_client_destroy(client);
 	return 0;
 }
@@ -1692,7 +1684,6 @@
 	struct ion_client *client;
 	char debug_name[64];
 
-	pr_debug("%s: %d\n", __func__, __LINE__);
 	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
 	client = ion_client_create(dev, debug_name);
 	if (IS_ERR(client))
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index 72f2b6a..d991b02 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -81,8 +81,6 @@
 	struct device *dev = heap->priv;
 	struct ion_cma_buffer_info *info;
 
-	dev_dbg(dev, "Request buffer allocation len %ld\n", len);
-
 	info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
 	if (!info)
 		return ION_CMA_ALLOCATE_FAILED;
@@ -123,7 +121,6 @@
 
 	/* keep this for memory release */
 	buffer->priv_virt = info;
-	dev_dbg(dev, "Allocate buffer %pK\n", buffer);
 	return 0;
 
 err:
@@ -137,7 +134,6 @@
 	struct ion_cma_buffer_info *info = buffer->priv_virt;
 	unsigned long attrs = 0;
 
-	dev_dbg(dev, "Release buffer %pK\n", buffer);
 	/* release memory */
 	if (info->is_cached)
 		attrs |= DMA_ATTR_FORCE_COHERENT;
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 3ac71ed..3b27252 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -756,8 +756,10 @@
 {
 	int i;
 	for (i = 0; i < num_orders; i++)
-		if (pools[i])
+		if (pools[i]) {
 			ion_page_pool_destroy(pools[i]);
+			pools[i] = NULL;
+		}
 }
 
 /**
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index a1602e4..027094f 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -63,6 +63,10 @@
 #define CREATE_TRACE_POINTS
 #include "trace/lowmemorykiller.h"
 
+/* to enable lowmemorykiller */
+static int enable_lmk = 1;
+module_param_named(enable_lmk, enable_lmk, int, 0644);
+
 static u32 lowmem_debug_level = 1;
 static short lowmem_adj[6] = {
 	0,
@@ -93,6 +97,9 @@
 static unsigned long lowmem_count(struct shrinker *s,
 				  struct shrink_control *sc)
 {
+	if (!enable_lmk)
+		return 0;
+
 	return global_node_page_state(NR_ACTIVE_ANON) +
 		global_node_page_state(NR_ACTIVE_FILE) +
 		global_node_page_state(NR_INACTIVE_ANON) +
diff --git a/drivers/staging/android/uapi/vsoc_shm.h b/drivers/staging/android/uapi/vsoc_shm.h
new file mode 100644
index 0000000..741b138
--- /dev/null
+++ b/drivers/staging/android/uapi/vsoc_shm.h
@@ -0,0 +1,303 @@
+/*
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_VSOC_SHM_H
+#define _UAPI_LINUX_VSOC_SHM_H
+
+#include <linux/types.h>
+
+/**
+ * A permission is a token that permits a receiver to read and/or write an area
+ * of memory within a Vsoc region.
+ *
+ * An fd_scoped permission grants both read and write access, and can be
+ * attached to a file description (see open(2)).
+ * Ownership of the area can then be shared by passing a file descriptor
+ * among processes.
+ *
+ * begin_offset and end_offset define the area of memory that is controlled by
+ * the permission. owner_offset points to a word, also in shared memory, that
+ * controls ownership of the area.
+ *
+ * ownership of the region expires when the associated file description is
+ * released.
+ *
+ * At most one permission can be attached to each file description.
+ *
+ * This is useful when implementing HALs like gralloc that scope and pass
+ * ownership of shared resources via file descriptors.
+ *
+ * The caller is responsible for doing any fencing.
+ *
+ * The calling process will normally identify a currently free area of
+ * memory. It will construct a proposed fd_scoped_permission_arg structure:
+ *
+ *   begin_offset and end_offset describe the area being claimed
+ *
+ *   owner_offset points to the location in shared memory that indicates the
+ *   owner of the area.
+ *
+ *   owned_value is the value that will be stored in owner_offset iff the
+ *   permission can be granted. It must be different than VSOC_REGION_FREE.
+ *
+ * Two fd_scoped_permission structures are compatible if they vary only by
+ * their owned_value fields.
+ *
+ * The driver ensures that, for any group of simultaneous callers proposing
+ * compatible fd_scoped_permissions, it will accept exactly one of the
+ * proposals. The other callers will get a failure with errno set to EAGAIN.
+ *
+ * A process receiving a file descriptor can identify the region being
+ * granted using the VSOC_GET_FD_SCOPED_PERMISSION ioctl.
+ */
+struct fd_scoped_permission {
+	__u32 begin_offset;
+	__u32 end_offset;
+	__u32 owner_offset;
+	__u32 owned_value;
+};
+
+/*
+ * This value represents a free area of memory. The driver expects to see this
+ * value at owner_offset when creating a permission, and refuses to create the
+ * permission otherwise; it writes this value back once the permission is no
+ * longer needed.
+ */
+#define VSOC_REGION_FREE ((__u32)0)
+
+/**
+ * ioctl argument for VSOC_CREATE_FD_SCOPED_PERMISSION
+ */
+struct fd_scoped_permission_arg {
+	struct fd_scoped_permission perm;
+	__s32 managed_region_fd;
+};
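
To make the fd-scoped permission flow concrete, here is a minimal user-space sketch. It is an illustration only: the device node paths and the offsets are invented, and it assumes the VSOC_CREATE_FD_SCOPED_PERMISSION ioctl defined further down in this header, invoked on the managing region's fd with the managed region's fd in the argument.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include "vsoc_shm.h"

/* Hypothetical example: claim one page of a managed region. */
int claim_area(void)
{
	struct fd_scoped_permission_arg arg;
	int manager_fd, managed_fd;

	manager_fd = open("/dev/vsoc_manager", O_RDWR);	/* invented path */
	managed_fd = open("/dev/vsoc_managed", O_RDWR);	/* invented path */
	if (manager_fd < 0 || managed_fd < 0)
		return -1;

	arg.perm.begin_offset = 0x1000;	/* page aligned, inside the data section */
	arg.perm.end_offset   = 0x2000;
	arg.perm.owner_offset = 0x0100;	/* word in the manager's data section */
	arg.perm.owned_value  = 42;	/* anything but VSOC_REGION_FREE */
	arg.managed_region_fd = managed_fd;

	/* A failure here can mean another compatible proposal won the race. */
	if (ioctl(manager_fd, VSOC_CREATE_FD_SCOPED_PERMISSION, &arg) < 0) {
		perror("VSOC_CREATE_FD_SCOPED_PERMISSION");
		return -1;
	}
	/* managed_fd now carries the permission; it can be passed to peers. */
	return managed_fd;
}

Ownership is released automatically once the file description behind managed_fd is finally closed.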
+
+#define VSOC_NODE_FREE ((__u32)0)
+
+/*
+ * Describes a signal table in shared memory. Each non-zero entry in the
+ * table indicates that the receiver should signal the futex at the given
+ * offset. Offsets are relative to the region, not the shared memory window.
+ *
+ * interrupt_signalled_offset is used to reliably signal interrupts across the
+ * vmm boundary. There are two roles: transmitter and receiver. For example,
+ * in the host_to_guest_signal_table the host is the transmitter and the
+ * guest is the receiver. The protocol is as follows:
+ *
+ * 1. The transmitter should convert the offset of the futex to an offset
+ *    in the signal table [0, (1 << num_nodes_lg2))
+ *    The transmitter can choose any appropriate hashing algorithm, including
+ *    hash = futex_offset & ((1 << num_nodes_lg2) - 1)
+ *
+ * 2. The transmitter should atomically compare and swap futex_offset with 0
+ *    at hash. There are 3 possible outcomes:
+ *      a. The swap fails because the futex_offset is already in the table.
+ *         The transmitter should stop.
+ *      b. Some other offset is in the table. This is a hash collision. The
+ *         transmitter should move to another table slot and try again. One
+ *         possible algorithm:
+ *         hash = (hash + 1) & ((1 << num_nodes_lg2) - 1)
+ *      c. The swap worked. Continue below.
+ *
+ * 3. The transmitter atomically swaps 1 with the value at the
+ *    interrupt_signalled_offset. There are two outcomes:
+ *      a. The prior value was 1. In this case an interrupt has already been
+ *         posted. The transmitter is done.
+ *      b. The prior value was 0, indicating that the receiver may be sleeping.
+ *         The transmitter will issue an interrupt.
+ *
+ * 4. On waking, the receiver immediately exchanges a 0 with the
+ *    interrupt_signalled_offset. If it receives a 0 then this is a spurious
+ *    interrupt. That may occasionally happen in the current protocol, but
+ *    should be rare.
+ *
+ * 5. The receiver scans the signal table by atomically exchanging 0 at each
+ *    location. If a non-zero offset is returned from the exchange the
+ *    receiver wakes all sleepers at the given offset:
+ *      futex((int*)(region_base + old_value), FUTEX_WAKE, MAX_INT);
+ *
+ * 6. The receiver thread then does a conditional wait, waking immediately
+ *    if the value at interrupt_signalled_offset is non-zero. This catches cases
+ *    where additional signals were posted while the table was being scanned.
+ *    On the guest the wait is handled via the VSOC_WAIT_FOR_INCOMING_INTERRUPT
+ *    ioctl.
+ */
+struct vsoc_signal_table_layout {
+	/* log_2(Number of signal table entries) */
+	__u32 num_nodes_lg2;
+	/*
+	 * Offset to the first signal table entry relative to the start of the
+	 * region
+	 */
+	__u32 futex_uaddr_table_offset;
+	/*
+	 * Offset to an atomic_t / atomic uint32_t. A non-zero value indicates
+	 * that one or more offsets are currently posted in the table. It also
+	 * provides semi-unique access to an entry in the table.
+	 */
+	__u32 interrupt_signalled_offset;
+};
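
The transmitter half of the numbered protocol above fits in a few lines. This is a sketch using C11 atomics; table, signalled, num_nodes_lg2 and ring_doorbell() are hypothetical stand-ins for pointers derived from futex_uaddr_table_offset and interrupt_signalled_offset and for the ivshmem DOORBELL write.

#include <stdatomic.h>
#include <stdint.h>

extern _Atomic uint32_t *table;		/* at futex_uaddr_table_offset */
extern _Atomic uint32_t *signalled;	/* at interrupt_signalled_offset */
extern uint32_t num_nodes_lg2;
extern void ring_doorbell(void);	/* hits the ivshmem DOORBELL register */

void signal_futex(uint32_t futex_offset)
{
	uint32_t mask = (1u << num_nodes_lg2) - 1;
	uint32_t hash = futex_offset & mask;
	uint32_t expected;

	for (;;) {
		expected = 0;	/* VSOC_NODE_FREE */
		if (atomic_compare_exchange_strong(&table[hash], &expected,
						   futex_offset))
			break;				/* step 2c: slot claimed */
		if (expected == futex_offset)
			return;				/* step 2a: already posted */
		hash = (hash + 1) & mask;		/* step 2b: collision, retry */
	}
	/* Step 3: interrupt only if the receiver may be sleeping. */
	if (atomic_exchange(signalled, 1) == 0)
		ring_doorbell();
}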
+
+#define VSOC_REGION_WHOLE ((__s32)0)
+#define VSOC_DEVICE_NAME_SZ 16
+
+/**
+ * Each HAL would (usually) talk to a single device region
+ * Multiple entities care about these regions:
+ * - The ivshmem_server will populate the regions in shared memory
+ * - The guest kernel will read the region, create minor device nodes, and
+ *   allow interested parties to register for FUTEX_WAKE events in the region
+ * - HALs will access via the minor device nodes published by the guest kernel
+ * - Host side processes will access the region via the ivshmem_server:
+ *   1. Pass the device name to the ivshmem_server over a UNIX socket
+ *   2. The ivshmem_server replies with:
+ *     - host->guest doorbell fd
+ *     - guest->host doorbell fd
+ *     - fd for the shared memory region
+ *     - region offset
+ *   3. Start a futex receiver thread on the doorbell fd pointed at the
+ *      signal_nodes
+ */
+struct vsoc_device_region {
+	__u16 current_version;
+	__u16 min_compatible_version;
+	__u32 region_begin_offset;
+	__u32 region_end_offset;
+	__u32 offset_of_region_data;
+	struct vsoc_signal_table_layout guest_to_host_signal_table;
+	struct vsoc_signal_table_layout host_to_guest_signal_table;
+	/* Name of the device. Must always be terminated with a '\0', so
+	 * the longest supported device name is 15 characters.
+	 */
+	char device_name[VSOC_DEVICE_NAME_SZ];
+	/* There are two ways that permissions to access regions are handled:
+	 *   - When managed_by is VSOC_REGION_WHOLE, any process that can
+	 *     open the device node for the region gains complete access to it.
+	 *   - Otherwise processes that open the region cannot access it
+	 *     directly. Access to a sub-region must be established by invoking
+	 *     the VSOC_CREATE_FD_SCOPED_PERMISSION ioctl on the region
+	 *     referenced in managed_by, providing a file instance
+	 *     (represented by an fd) opened on this region.
+	 */
+	__u32 managed_by;
+};
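
A guest HAL's first steps against one of these regions are small enough to sketch. The node name below is hypothetical (the driver creates one minor device per region), and the example assumes a region that is not managed by another region, so opening the node grants whole-region access; it uses the VSOC_DESCRIBE_REGION ioctl defined further down in this header.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "vsoc_shm.h"

/* Open a region node, read its descriptor and map the whole region. */
int open_region(const char *node)
{
	struct vsoc_device_region desc;
	void *base;
	int fd = open(node, O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, VSOC_DESCRIBE_REGION, &desc) < 0)
		goto fail;
	base = mmap(NULL, desc.region_end_offset - desc.region_begin_offset,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (base == MAP_FAILED)
		goto fail;
	printf("%s: region data starts at +0x%x\n", desc.device_name,
	       desc.offset_of_region_data);
	return fd;
fail:
	close(fd);
	return -1;
}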
+
+/*
+ * The vsoc layout descriptor.
+ * The first 4K should be reserved for the shm header and region descriptors.
+ * The regions should be page aligned.
+ */
+
+struct vsoc_shm_layout_descriptor {
+	__u16 major_version;
+	__u16 minor_version;
+
+	/* size of the shm. This may be redundant but nice to have */
+	__u32 size;
+
+	/* number of shared memory regions */
+	__u32 region_count;
+
+	/* The offset to the start of region descriptors */
+	__u32 vsoc_region_desc_offset;
+};
+
+/*
+ * This specifies the current version that should be stored in
+ * vsoc_shm_layout_descriptor.major_version and
+ * vsoc_shm_layout_descriptor.minor_version.
+ * It should be updated only if the vsoc_device_region and
+ * vsoc_shm_layout_descriptor structures have changed.
+ * Versioning within each region is transferred
+ * via the min_compatible_version and current_version fields in
+ * vsoc_device_region. The driver does not consult these fields: they are left
+ * for the HALs and host processes and will change independently of the layout
+ * version.
+ */
+#define CURRENT_VSOC_LAYOUT_MAJOR_VERSION 2
+#define CURRENT_VSOC_LAYOUT_MINOR_VERSION 0
+
+#define VSOC_CREATE_FD_SCOPED_PERMISSION \
+	_IOW(0xF5, 0, struct fd_scoped_permission)
+#define VSOC_GET_FD_SCOPED_PERMISSION _IOR(0xF5, 1, struct fd_scoped_permission)
+
+/*
+ * This is used to signal the host to scan the guest_to_host_signal_table
+ * for new futexes to wake. This sends an interrupt if one is not already
+ * in flight.
+ */
+#define VSOC_MAYBE_SEND_INTERRUPT_TO_HOST _IO(0xF5, 2)
+
+/*
+ * When this returns the guest will scan host_to_guest_signal_table to
+ * check for new futexes to wake.
+ */
+/* TODO(ghartman): Consider moving this to the bottom half */
+#define VSOC_WAIT_FOR_INCOMING_INTERRUPT _IO(0xF5, 3)
+
+/*
+ * Guest HALs will use this to retrieve the region description after
+ * opening their device node.
+ */
+#define VSOC_DESCRIBE_REGION _IOR(0xF5, 4, struct vsoc_device_region)
+
+/*
+ * Wake any threads that may be waiting for a host interrupt on this region.
+ * This is mostly used during shutdown.
+ */
+#define VSOC_SELF_INTERRUPT _IO(0xF5, 5)
+
+/*
+ * This is used to signal the host to scan the guest_to_host_signal_table
+ * for new futexes to wake. This sends an interrupt unconditionally.
+ */
+#define VSOC_SEND_INTERRUPT_TO_HOST _IO(0xF5, 6)
+
+enum wait_types {
+	VSOC_WAIT_UNDEFINED = 0,
+	VSOC_WAIT_IF_EQUAL = 1,
+	VSOC_WAIT_IF_EQUAL_TIMEOUT = 2
+};
+
+/*
+ * Wait for a condition to be true
+ *
+ * Note, this is sized and aligned so the 32 bit and 64 bit layouts are
+ * identical.
+ */
+struct vsoc_cond_wait {
+	/* Input: Offset of the 32 bit word to check */
+	__u32 offset;
+	/* Input: Value that will be compared with the offset */
+	__u32 value;
+	/* Input: Monotonic time to wake at, seconds part */
+	__u64 wake_time_sec;
+	/* Input: Monotonic time to wake at, nanoseconds part */
+	__u32 wake_time_nsec;
+	/* Input: Type of wait */
+	__u32 wait_type;
+	/* Output: Number of times the thread woke before returning. */
+	__u32 wakes;
+	/* Ensure that we're 8-byte aligned and 8 byte length for 32/64 bit
+	 * compatibility.
+	 */
+	__u32 reserved_1;
+};
+
+#define VSOC_COND_WAIT _IOWR(0xF5, 7, struct vsoc_cond_wait)
+
+/* Wake any local threads waiting at the offset given in arg */
+#define VSOC_COND_WAKE _IO(0xF5, 8)
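
Putting VSOC_COND_WAIT and VSOC_COND_WAKE together, a waiter/waker pair looks roughly like the sketch below. fd is assumed to be an open region node, and offset must be a 4-byte aligned offset of a 32-bit word inside the region, as the driver enforces.

#include <sys/ioctl.h>
#include "vsoc_shm.h"

/* Block while the word at 'offset' still holds 'old_value'. */
int wait_for_change(int fd, __u32 offset, __u32 old_value)
{
	struct vsoc_cond_wait w = {
		.offset = offset,
		.value = old_value,
		.wait_type = VSOC_WAIT_IF_EQUAL,	/* no timeout */
	};

	if (ioctl(fd, VSOC_COND_WAIT, &w) < 0)
		return -1;
	return w.wakes;	/* how many times the thread woke; handy in tests */
}

/* Wake every local thread sleeping on 'offset' in this region. */
int wake_waiters(int fd, __u32 offset)
{
	return ioctl(fd, VSOC_COND_WAKE, offset);
}

A peer that changes the word should call wake_waiters() (or, across the VM boundary, go through the signal tables) so the waiter re-checks the value.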
+
+#endif /* _UAPI_LINUX_VSOC_SHM_H */
diff --git a/drivers/staging/android/vsoc.c b/drivers/staging/android/vsoc.c
new file mode 100644
index 0000000..954ed2c
--- /dev/null
+++ b/drivers/staging/android/vsoc.c
@@ -0,0 +1,1165 @@
+/*
+ * drivers/staging/android/vsoc.c
+ *
+ * Android Virtual System on a Chip (VSoC) driver
+ *
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * Author: ghartman@google.com
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ *
+ * Based on drivers/char/kvm_ivshmem.c - driver for KVM Inter-VM shared memory
+ *         Copyright 2009 Cam Macdonell <cam@cs.ualberta.ca>
+ *
+ * Based on cirrusfb.c and 8139cp.c:
+ *   Copyright 1999-2001 Jeff Garzik
+ *   Copyright 2001-2004 Jeff Garzik
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/freezer.h>
+#include <linux/futex.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/cdev.h>
+#include <linux/file.h>
+#include "uapi/vsoc_shm.h"
+
+#define VSOC_DEV_NAME "vsoc"
+
+/*
+ * Description of the ivshmem-doorbell PCI device used by QEmu. These
+ * constants follow docs/specs/ivshmem-spec.txt, which can be found in
+ * the QEmu repository. This was last reconciled with the version that
+ * came out with 2.8
+ */
+
+/*
+ * These constants are determined KVM Inter-VM shared memory device
+ * register offsets
+ */
+enum {
+	INTR_MASK = 0x00,	/* Interrupt Mask */
+	INTR_STATUS = 0x04,	/* Interrupt Status */
+	IV_POSITION = 0x08,	/* VM ID */
+	DOORBELL = 0x0c,	/* Doorbell */
+};
+
+static const int REGISTER_BAR;  /* Equal to 0 */
+static const int MAX_REGISTER_BAR_LEN = 0x100;
+/*
+ * The MSI-x BAR is not used directly.
+ *
+ * static const int MSI_X_BAR = 1;
+ */
+static const int SHARED_MEMORY_BAR = 2;
+
+struct vsoc_region_data {
+	char name[VSOC_DEVICE_NAME_SZ + 1];
+	wait_queue_head_t interrupt_wait_queue;
+	/* TODO(b/73664181): Use multiple futex wait queues */
+	wait_queue_head_t futex_wait_queue;
+	/* Flag indicating that an interrupt has been signalled by the host. */
+	atomic_t *incoming_signalled;
+	/* Flag indicating the guest has signalled the host. */
+	atomic_t *outgoing_signalled;
+	bool irq_requested;
+	bool device_created;
+};
+
+struct vsoc_device {
+	/* Kernel virtual address of REGISTER_BAR. */
+	void __iomem *regs;
+	/* Physical address of SHARED_MEMORY_BAR. */
+	phys_addr_t shm_phys_start;
+	/* Kernel virtual address of SHARED_MEMORY_BAR. */
+	void __iomem *kernel_mapped_shm;
+	/* Size of the entire shared memory window in bytes. */
+	size_t shm_size;
+	/*
+	 * Pointer to the virtual address of the shared memory layout structure.
+	 * This is probably identical to kernel_mapped_shm, but saving this
+	 * here saves a lot of annoying casts.
+	 */
+	struct vsoc_shm_layout_descriptor *layout;
+	/*
+	 * Points to a table of region descriptors in the kernel's virtual
+	 * address space. Calculated from
+	 * vsoc_shm_layout_descriptor.vsoc_region_desc_offset
+	 */
+	struct vsoc_device_region *regions;
+	/* Head of a list of permissions that have been granted. */
+	struct list_head permissions;
+	struct pci_dev *dev;
+	/* Per-region (and therefore per-interrupt) information. */
+	struct vsoc_region_data *regions_data;
+	/*
+	 * Table of msi-x entries. This has to be separated from struct
+	 * vsoc_region_data because the kernel deals with them as an array.
+	 */
+	struct msix_entry *msix_entries;
+	/* Mutex that protects the permission list */
+	struct mutex mtx;
+	/* Major number assigned by the kernel */
+	int major;
+	/* Character device assigned by the kernel */
+	struct cdev cdev;
+	/* Device class assigned by the kernel */
+	struct class *class;
+	/*
+	 * Flags that indicate what we've initialized. These are used to do an
+	 * orderly cleanup of the device.
+	 */
+	bool enabled_device;
+	bool requested_regions;
+	bool cdev_added;
+	bool class_added;
+	bool msix_enabled;
+};
+
+static struct vsoc_device vsoc_dev;
+
+/*
+ * TODO(ghartman): Add a /sys filesystem entry that summarizes the permissions.
+ */
+
+struct fd_scoped_permission_node {
+	struct fd_scoped_permission permission;
+	struct list_head list;
+};
+
+struct vsoc_private_data {
+	struct fd_scoped_permission_node *fd_scoped_permission_node;
+};
+
+static long vsoc_ioctl(struct file *, unsigned int, unsigned long);
+static int vsoc_mmap(struct file *, struct vm_area_struct *);
+static int vsoc_open(struct inode *, struct file *);
+static int vsoc_release(struct inode *, struct file *);
+static ssize_t vsoc_read(struct file *, char __user *, size_t, loff_t *);
+static ssize_t vsoc_write(struct file *, const char __user *, size_t, loff_t *);
+static loff_t vsoc_lseek(struct file *filp, loff_t offset, int origin);
+static int do_create_fd_scoped_permission(
+	struct vsoc_device_region *region_p,
+	struct fd_scoped_permission_node *np,
+	struct fd_scoped_permission_arg __user *arg);
+static void do_destroy_fd_scoped_permission(
+	struct vsoc_device_region *owner_region_p,
+	struct fd_scoped_permission *perm);
+static long do_vsoc_describe_region(struct file *,
+				    struct vsoc_device_region __user *);
+static ssize_t vsoc_get_area(struct file *filp, __u32 *perm_off);
+
+/**
+ * Validate arguments on entry points to the driver.
+ */
+inline int vsoc_validate_inode(struct inode *inode)
+{
+	if (iminor(inode) >= vsoc_dev.layout->region_count) {
+		dev_err(&vsoc_dev.dev->dev,
+			"describe_region: invalid region %d\n", iminor(inode));
+		return -ENODEV;
+	}
+	return 0;
+}
+
+inline int vsoc_validate_filep(struct file *filp)
+{
+	int ret = vsoc_validate_inode(file_inode(filp));
+
+	if (ret)
+		return ret;
+	if (!filp->private_data) {
+		dev_err(&vsoc_dev.dev->dev,
+			"No private data on fd, region %d\n",
+			iminor(file_inode(filp)));
+		return -EBADFD;
+	}
+	return 0;
+}
+
+/* Converts from shared memory offset to virtual address */
+static inline void *shm_off_to_virtual_addr(__u32 offset)
+{
+	return (void __force *)vsoc_dev.kernel_mapped_shm + offset;
+}
+
+/* Converts from shared memory offset to physical address */
+static inline phys_addr_t shm_off_to_phys_addr(__u32 offset)
+{
+	return vsoc_dev.shm_phys_start + offset;
+}
+
+/**
+ * Convenience functions to obtain the region from the inode or file.
+ * Dangerous to call before validating the inode/file.
+ */
+static inline struct vsoc_device_region *vsoc_region_from_inode(
+	struct inode *inode)
+{
+	return &vsoc_dev.regions[iminor(inode)];
+}
+
+static inline struct vsoc_device_region *vsoc_region_from_filep(
+	struct file *inode)
+{
+	return vsoc_region_from_inode(file_inode(inode));
+}
+
+static inline uint32_t vsoc_device_region_size(struct vsoc_device_region *r)
+{
+	return r->region_end_offset - r->region_begin_offset;
+}
+
+static const struct file_operations vsoc_ops = {
+	.owner = THIS_MODULE,
+	.open = vsoc_open,
+	.mmap = vsoc_mmap,
+	.read = vsoc_read,
+	.unlocked_ioctl = vsoc_ioctl,
+	.compat_ioctl = vsoc_ioctl,
+	.write = vsoc_write,
+	.llseek = vsoc_lseek,
+	.release = vsoc_release,
+};
+
+static struct pci_device_id vsoc_id_table[] = {
+	{0x1af4, 0x1110, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+	{0},
+};
+
+MODULE_DEVICE_TABLE(pci, vsoc_id_table);
+
+static void vsoc_remove_device(struct pci_dev *pdev);
+static int vsoc_probe_device(struct pci_dev *pdev,
+			     const struct pci_device_id *ent);
+
+static struct pci_driver vsoc_pci_driver = {
+	.name = "vsoc",
+	.id_table = vsoc_id_table,
+	.probe = vsoc_probe_device,
+	.remove = vsoc_remove_device,
+};
+
+static int do_create_fd_scoped_permission(
+	struct vsoc_device_region *region_p,
+	struct fd_scoped_permission_node *np,
+	struct fd_scoped_permission_arg __user *arg)
+{
+	struct file *managed_filp;
+	s32 managed_fd;
+	atomic_t *owner_ptr = NULL;
+	struct vsoc_device_region *managed_region_p;
+
+	if (copy_from_user(&np->permission, &arg->perm,
+			   sizeof(np->permission)) ||
+	    copy_from_user(&managed_fd,
+			   &arg->managed_region_fd, sizeof(managed_fd))) {
+		return -EFAULT;
+	}
+	managed_filp = fdget(managed_fd).file;
+	/* Check that it's a valid fd. */
+	if (!managed_filp || vsoc_validate_filep(managed_filp))
+		return -EPERM;
+	/* EEXIST if the given fd already has a permission. */
+	if (((struct vsoc_private_data *)managed_filp->private_data)->
+	    fd_scoped_permission_node)
+		return -EEXIST;
+	managed_region_p = vsoc_region_from_filep(managed_filp);
+	/* Check that the provided region is managed by this one */
+	if (&vsoc_dev.regions[managed_region_p->managed_by] != region_p)
+		return -EPERM;
+	/* The area must be well formed and have non-zero size */
+	if (np->permission.begin_offset >= np->permission.end_offset)
+		return -EINVAL;
+	/* The area must fit in the memory window */
+	if (np->permission.end_offset >
+	    vsoc_device_region_size(managed_region_p))
+		return -ERANGE;
+	/* The area must be in the region data section */
+	if (np->permission.begin_offset <
+	    managed_region_p->offset_of_region_data)
+		return -ERANGE;
+	/* The area must be page aligned */
+	if (!PAGE_ALIGNED(np->permission.begin_offset) ||
+	    !PAGE_ALIGNED(np->permission.end_offset))
+		return -EINVAL;
+	/* Owner offset must be naturally aligned in the window */
+	if (np->permission.owner_offset &
+	    (sizeof(np->permission.owner_offset) - 1))
+		return -EINVAL;
+	/* The owner flag must reside in the owner memory */
+	if (np->permission.owner_offset + sizeof(np->permission.owner_offset) >
+	    vsoc_device_region_size(region_p))
+		return -ERANGE;
+	/* The owner flag must reside in the data section */
+	if (np->permission.owner_offset < region_p->offset_of_region_data)
+		return -EINVAL;
+	/* The owner value must change to claim the memory */
+	if (np->permission.owned_value == VSOC_REGION_FREE)
+		return -EINVAL;
+	owner_ptr =
+	    (atomic_t *)shm_off_to_virtual_addr(region_p->region_begin_offset +
+						np->permission.owner_offset);
+	/* We've already verified that this is in the shared memory window, so
+	 * it should be safe to write to this address.
+	 */
+	if (atomic_cmpxchg(owner_ptr,
+			   VSOC_REGION_FREE,
+			   np->permission.owned_value) != VSOC_REGION_FREE) {
+		return -EBUSY;
+	}
+	((struct vsoc_private_data *)managed_filp->private_data)->
+	    fd_scoped_permission_node = np;
+	/* The file offset needs to be adjusted if the calling
+	 * process did any read/write operations on the fd
+	 * before creating the permission.
+	 */
+	if (managed_filp->f_pos) {
+		if (managed_filp->f_pos > np->permission.end_offset) {
+			/* If the offset is beyond the permission end, set it
+			 * to the end.
+			 */
+			managed_filp->f_pos = np->permission.end_offset;
+		} else {
+			/* If the offset is within the permission interval
+			 * keep it there otherwise reset it to zero.
+			 */
+			if (managed_filp->f_pos < np->permission.begin_offset) {
+				managed_filp->f_pos = 0;
+			} else {
+				managed_filp->f_pos -=
+				    np->permission.begin_offset;
+			}
+		}
+	}
+	return 0;
+}
+
+static void do_destroy_fd_scoped_permission_node(
+	struct vsoc_device_region *owner_region_p,
+	struct fd_scoped_permission_node *node)
+{
+	if (node) {
+		do_destroy_fd_scoped_permission(owner_region_p,
+						&node->permission);
+		mutex_lock(&vsoc_dev.mtx);
+		list_del(&node->list);
+		mutex_unlock(&vsoc_dev.mtx);
+		kfree(node);
+	}
+}
+
+static void do_destroy_fd_scoped_permission(
+		struct vsoc_device_region *owner_region_p,
+		struct fd_scoped_permission *perm)
+{
+	atomic_t *owner_ptr = NULL;
+	int prev = 0;
+
+	if (!perm)
+		return;
+	owner_ptr = (atomic_t *)shm_off_to_virtual_addr(
+		owner_region_p->region_begin_offset + perm->owner_offset);
+	prev = atomic_xchg(owner_ptr, VSOC_REGION_FREE);
+	if (prev != perm->owned_value)
+		dev_err(&vsoc_dev.dev->dev,
+			"%x-%x: owner (%s) %x: expected to be %x was %x",
+			perm->begin_offset, perm->end_offset,
+			owner_region_p->device_name, perm->owner_offset,
+			perm->owned_value, prev);
+}
+
+static long do_vsoc_describe_region(struct file *filp,
+				    struct vsoc_device_region __user *dest)
+{
+	struct vsoc_device_region *region_p;
+	int retval = vsoc_validate_filep(filp);
+
+	if (retval)
+		return retval;
+	region_p = vsoc_region_from_filep(filp);
+	if (copy_to_user(dest, region_p, sizeof(*region_p)))
+		return -EFAULT;
+	return 0;
+}
+
+/**
+ * Implements the inner logic of cond_wait. Copies to and from userspace are
+ * done in the helper function below.
+ */
+static int handle_vsoc_cond_wait(struct file *filp, struct vsoc_cond_wait *arg)
+{
+	DEFINE_WAIT(wait);
+	u32 region_number = iminor(file_inode(filp));
+	struct vsoc_region_data *data = vsoc_dev.regions_data + region_number;
+	struct hrtimer_sleeper timeout, *to = NULL;
+	int ret = 0;
+	struct vsoc_device_region *region_p = vsoc_region_from_filep(filp);
+	atomic_t *address = NULL;
+	struct timespec ts;
+
+	/* Ensure that the offset is aligned */
+	if (arg->offset & (sizeof(uint32_t) - 1))
+		return -EADDRNOTAVAIL;
+	/* Ensure that the offset is within shared memory */
+	if (((uint64_t)arg->offset) + region_p->region_begin_offset +
+	    sizeof(uint32_t) > region_p->region_end_offset)
+		return -E2BIG;
+	address = shm_off_to_virtual_addr(region_p->region_begin_offset +
+					  arg->offset);
+
+	/* Ensure that the type of wait is valid */
+	switch (arg->wait_type) {
+	case VSOC_WAIT_IF_EQUAL:
+		break;
+	case VSOC_WAIT_IF_EQUAL_TIMEOUT:
+		to = &timeout;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (to) {
+		/* Copy the user-supplied timespec into the kernel structure.
+		 * We do things this way to flatten differences between 32 bit
+		 * and 64 bit timespecs.
+		 */
+		ts.tv_sec = arg->wake_time_sec;
+		ts.tv_nsec = arg->wake_time_nsec;
+
+		if (!timespec_valid(&ts))
+			return -EINVAL;
+		hrtimer_init_on_stack(&to->timer, CLOCK_MONOTONIC,
+				      HRTIMER_MODE_ABS);
+		hrtimer_set_expires_range_ns(&to->timer, timespec_to_ktime(ts),
+					     current->timer_slack_ns);
+
+		hrtimer_init_sleeper(to, current);
+	}
+
+	while (1) {
+		prepare_to_wait(&data->futex_wait_queue, &wait,
+				TASK_INTERRUPTIBLE);
+		/*
+		 * Check the sentinel value after prepare_to_wait. If the value
+		 * changes after this check the writer will call signal,
+		 * changing the task state from INTERRUPTIBLE to RUNNING. That
+		 * will ensure that schedule() will eventually schedule this
+		 * task.
+		 */
+		if (atomic_read(address) != arg->value) {
+			ret = 0;
+			break;
+		}
+		if (to) {
+			hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
+			if (likely(to->task))
+				freezable_schedule();
+			hrtimer_cancel(&to->timer);
+			if (!to->task) {
+				ret = -ETIMEDOUT;
+				break;
+			}
+		} else {
+			freezable_schedule();
+		}
+		/* Count the number of times that we woke up. This is useful
+		 * for unit testing.
+		 */
+		++arg->wakes;
+		if (signal_pending(current)) {
+			ret = -EINTR;
+			break;
+		}
+	}
+	finish_wait(&data->futex_wait_queue, &wait);
+	if (to)
+		destroy_hrtimer_on_stack(&to->timer);
+	return ret;
+}
+
+/**
+ * Handles the details of copying from/to userspace to ensure that the copies
+ * happen on all of the return paths of cond_wait.
+ */
+static int do_vsoc_cond_wait(struct file *filp,
+			     struct vsoc_cond_wait __user *untrusted_in)
+{
+	struct vsoc_cond_wait arg;
+	int rval = 0;
+
+	if (copy_from_user(&arg, untrusted_in, sizeof(arg)))
+		return -EFAULT;
+	/* wakes is an out parameter. Initialize it to something sensible. */
+	arg.wakes = 0;
+	rval = handle_vsoc_cond_wait(filp, &arg);
+	if (copy_to_user(untrusted_in, &arg, sizeof(arg)))
+		return -EFAULT;
+	return rval;
+}
+
+static int do_vsoc_cond_wake(struct file *filp, uint32_t offset)
+{
+	struct vsoc_device_region *region_p = vsoc_region_from_filep(filp);
+	u32 region_number = iminor(file_inode(filp));
+	struct vsoc_region_data *data = vsoc_dev.regions_data + region_number;
+	/* Ensure that the offset is aligned */
+	if (offset & (sizeof(uint32_t) - 1))
+		return -EADDRNOTAVAIL;
+	/* Ensure that the offset is within shared memory */
+	if (((uint64_t)offset) + region_p->region_begin_offset +
+	    sizeof(uint32_t) > region_p->region_end_offset)
+		return -E2BIG;
+	/*
+	 * TODO(b/73664181): Use multiple futex wait queues.
+	 * We need to wake every sleeper when the condition changes. Typically
+	 * only a single thread will be waiting on the condition, but there
+	 * are exceptions. The worst case is about 10 threads.
+	 */
+	wake_up_interruptible_all(&data->futex_wait_queue);
+	return 0;
+}
+
+static long vsoc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	int rv = 0;
+	struct vsoc_device_region *region_p;
+	u32 reg_num;
+	struct vsoc_region_data *reg_data;
+	int retval = vsoc_validate_filep(filp);
+
+	if (retval)
+		return retval;
+	region_p = vsoc_region_from_filep(filp);
+	reg_num = iminor(file_inode(filp));
+	reg_data = vsoc_dev.regions_data + reg_num;
+	switch (cmd) {
+	case VSOC_CREATE_FD_SCOPED_PERMISSION:
+		{
+			struct fd_scoped_permission_node *node = NULL;
+
+			node = kzalloc(sizeof(*node), GFP_KERNEL);
+			/* We can't allocate memory for the permission */
+			if (!node)
+				return -ENOMEM;
+			INIT_LIST_HEAD(&node->list);
+			rv = do_create_fd_scoped_permission(
+				region_p,
+				node,
+				(struct fd_scoped_permission_arg __user *)arg);
+			if (!rv) {
+				mutex_lock(&vsoc_dev.mtx);
+				list_add(&node->list, &vsoc_dev.permissions);
+				mutex_unlock(&vsoc_dev.mtx);
+			} else {
+				kfree(node);
+				return rv;
+			}
+		}
+		break;
+
+	case VSOC_GET_FD_SCOPED_PERMISSION:
+		{
+			struct fd_scoped_permission_node *node =
+			    ((struct vsoc_private_data *)filp->private_data)->
+			    fd_scoped_permission_node;
+			if (!node)
+				return -ENOENT;
+			if (copy_to_user
+			    ((struct fd_scoped_permission __user *)arg,
+			     &node->permission, sizeof(node->permission)))
+				return -EFAULT;
+		}
+		break;
+
+	case VSOC_MAYBE_SEND_INTERRUPT_TO_HOST:
+		if (!atomic_xchg(
+			    reg_data->outgoing_signalled,
+			    1)) {
+			writel(reg_num, vsoc_dev.regs + DOORBELL);
+			return 0;
+		} else {
+			return -EBUSY;
+		}
+		break;
+
+	case VSOC_SEND_INTERRUPT_TO_HOST:
+		writel(reg_num, vsoc_dev.regs + DOORBELL);
+		return 0;
+
+	case VSOC_WAIT_FOR_INCOMING_INTERRUPT:
+		wait_event_interruptible(
+			reg_data->interrupt_wait_queue,
+			(atomic_read(reg_data->incoming_signalled) != 0));
+		break;
+
+	case VSOC_DESCRIBE_REGION:
+		return do_vsoc_describe_region(
+			filp,
+			(struct vsoc_device_region __user *)arg);
+
+	case VSOC_SELF_INTERRUPT:
+		atomic_set(reg_data->incoming_signalled, 1);
+		wake_up_interruptible(&reg_data->interrupt_wait_queue);
+		break;
+
+	case VSOC_COND_WAIT:
+		return do_vsoc_cond_wait(filp,
+					 (struct vsoc_cond_wait __user *)arg);
+	case VSOC_COND_WAKE:
+		return do_vsoc_cond_wake(filp, arg);
+
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static ssize_t vsoc_read(struct file *filp, char __user *buffer, size_t len,
+			 loff_t *poffset)
+{
+	__u32 area_off;
+	const void *area_p;
+	ssize_t area_len;
+	int retval = vsoc_validate_filep(filp);
+
+	if (retval)
+		return retval;
+	area_len = vsoc_get_area(filp, &area_off);
+	area_p = shm_off_to_virtual_addr(area_off);
+	area_p += *poffset;
+	area_len -= *poffset;
+	if (area_len <= 0)
+		return 0;
+	if (area_len < len)
+		len = area_len;
+	if (copy_to_user(buffer, area_p, len))
+		return -EFAULT;
+	*poffset += len;
+	return len;
+}
+
+static loff_t vsoc_lseek(struct file *filp, loff_t offset, int origin)
+{
+	ssize_t area_len = 0;
+	int retval = vsoc_validate_filep(filp);
+
+	if (retval)
+		return retval;
+	area_len = vsoc_get_area(filp, NULL);
+	switch (origin) {
+	case SEEK_SET:
+		break;
+
+	case SEEK_CUR:
+		if (offset > 0 && offset + filp->f_pos < 0)
+			return -EOVERFLOW;
+		offset += filp->f_pos;
+		break;
+
+	case SEEK_END:
+		if (offset > 0 && offset + area_len < 0)
+			return -EOVERFLOW;
+		offset += area_len;
+		break;
+
+	case SEEK_DATA:
+		if (offset >= area_len)
+			return -EINVAL;
+		if (offset < 0)
+			offset = 0;
+		break;
+
+	case SEEK_HOLE:
+		/* Next hole is always the end of the region, unless offset is
+		 * beyond that
+		 */
+		if (offset < area_len)
+			offset = area_len;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if (offset < 0 || offset > area_len)
+		return -EINVAL;
+	filp->f_pos = offset;
+
+	return offset;
+}
+
+static ssize_t vsoc_write(struct file *filp, const char __user *buffer,
+			  size_t len, loff_t *poffset)
+{
+	__u32 area_off;
+	void *area_p;
+	ssize_t area_len;
+	int retval = vsoc_validate_filep(filp);
+
+	if (retval)
+		return retval;
+	area_len = vsoc_get_area(filp, &area_off);
+	area_p = shm_off_to_virtual_addr(area_off);
+	area_p += *poffset;
+	area_len -= *poffset;
+	if (area_len <= 0)
+		return 0;
+	if (area_len < len)
+		len = area_len;
+	if (copy_from_user(area_p, buffer, len))
+		return -EFAULT;
+	*poffset += len;
+	return len;
+}
+
+static irqreturn_t vsoc_interrupt(int irq, void *region_data_v)
+{
+	struct vsoc_region_data *region_data =
+	    (struct vsoc_region_data *)region_data_v;
+	int reg_num = region_data - vsoc_dev.regions_data;
+
+	if (unlikely(!region_data))
+		return IRQ_NONE;
+
+	if (unlikely(reg_num < 0 ||
+		     reg_num >= vsoc_dev.layout->region_count)) {
+		dev_err(&vsoc_dev.dev->dev,
+			"invalid irq @%p reg_num=0x%04x\n",
+			region_data, reg_num);
+		return IRQ_NONE;
+	}
+	if (unlikely(vsoc_dev.regions_data + reg_num != region_data)) {
+		dev_err(&vsoc_dev.dev->dev,
+			"irq not aligned @%p reg_num=0x%04x\n",
+			region_data, reg_num);
+		return IRQ_NONE;
+	}
+	wake_up_interruptible(&region_data->interrupt_wait_queue);
+	return IRQ_HANDLED;
+}
+
+static int vsoc_probe_device(struct pci_dev *pdev,
+			     const struct pci_device_id *ent)
+{
+	int result;
+	int i;
+	resource_size_t reg_size;
+	dev_t devt;
+
+	vsoc_dev.dev = pdev;
+	result = pci_enable_device(pdev);
+	if (result) {
+		dev_err(&pdev->dev,
+			"pci_enable_device failed %s: error %d\n",
+			pci_name(pdev), result);
+		return result;
+	}
+	vsoc_dev.enabled_device = true;
+	result = pci_request_regions(pdev, "vsoc");
+	if (result < 0) {
+		dev_err(&pdev->dev, "pci_request_regions failed\n");
+		vsoc_remove_device(pdev);
+		return -EBUSY;
+	}
+	vsoc_dev.requested_regions = true;
+	/* Set up the control registers in BAR 0 */
+	reg_size = pci_resource_len(pdev, REGISTER_BAR);
+	if (reg_size > MAX_REGISTER_BAR_LEN)
+		vsoc_dev.regs =
+		    pci_iomap(pdev, REGISTER_BAR, MAX_REGISTER_BAR_LEN);
+	else
+		vsoc_dev.regs = pci_iomap(pdev, REGISTER_BAR, reg_size);
+
+	if (!vsoc_dev.regs) {
+		dev_err(&pdev->dev,
+			"cannot map registers of size %zu\n",
+		       (size_t)reg_size);
+		vsoc_remove_device(pdev);
+		return -EBUSY;
+	}
+
+	/* Map the shared memory in BAR 2 */
+	vsoc_dev.shm_phys_start = pci_resource_start(pdev, SHARED_MEMORY_BAR);
+	vsoc_dev.shm_size = pci_resource_len(pdev, SHARED_MEMORY_BAR);
+
+	dev_info(&pdev->dev, "shared memory @ DMA %pa size=0x%zx\n",
+		 &vsoc_dev.shm_phys_start, vsoc_dev.shm_size);
+	vsoc_dev.kernel_mapped_shm = pci_iomap_wc(pdev, SHARED_MEMORY_BAR, 0);
+	if (!vsoc_dev.kernel_mapped_shm) {
+		dev_err(&vsoc_dev.dev->dev, "cannot iomap region\n");
+		vsoc_remove_device(pdev);
+		return -EBUSY;
+	}
+
+	vsoc_dev.layout = (struct vsoc_shm_layout_descriptor __force *)
+				vsoc_dev.kernel_mapped_shm;
+	dev_info(&pdev->dev, "major_version: %d\n",
+		 vsoc_dev.layout->major_version);
+	dev_info(&pdev->dev, "minor_version: %d\n",
+		 vsoc_dev.layout->minor_version);
+	dev_info(&pdev->dev, "size: 0x%x\n", vsoc_dev.layout->size);
+	dev_info(&pdev->dev, "regions: %d\n", vsoc_dev.layout->region_count);
+	if (vsoc_dev.layout->major_version !=
+	    CURRENT_VSOC_LAYOUT_MAJOR_VERSION) {
+		dev_err(&vsoc_dev.dev->dev,
+			"driver supports only major_version %d\n",
+			CURRENT_VSOC_LAYOUT_MAJOR_VERSION);
+		vsoc_remove_device(pdev);
+		return -EBUSY;
+	}
+	result = alloc_chrdev_region(&devt, 0, vsoc_dev.layout->region_count,
+				     VSOC_DEV_NAME);
+	if (result) {
+		dev_err(&vsoc_dev.dev->dev, "alloc_chrdev_region failed\n");
+		vsoc_remove_device(pdev);
+		return -EBUSY;
+	}
+	vsoc_dev.major = MAJOR(devt);
+	cdev_init(&vsoc_dev.cdev, &vsoc_ops);
+	vsoc_dev.cdev.owner = THIS_MODULE;
+	result = cdev_add(&vsoc_dev.cdev, devt, vsoc_dev.layout->region_count);
+	if (result) {
+		dev_err(&vsoc_dev.dev->dev, "cdev_add error\n");
+		vsoc_remove_device(pdev);
+		return -EBUSY;
+	}
+	vsoc_dev.cdev_added = true;
+	vsoc_dev.class = class_create(THIS_MODULE, VSOC_DEV_NAME);
+	if (IS_ERR(vsoc_dev.class)) {
+		dev_err(&vsoc_dev.dev->dev, "class_create failed\n");
+		vsoc_remove_device(pdev);
+		return PTR_ERR(vsoc_dev.class);
+	}
+	vsoc_dev.class_added = true;
+	vsoc_dev.regions = (struct vsoc_device_region __force *)
+		((void *)vsoc_dev.layout +
+		 vsoc_dev.layout->vsoc_region_desc_offset);
+	vsoc_dev.msix_entries = kcalloc(
+			vsoc_dev.layout->region_count,
+			sizeof(vsoc_dev.msix_entries[0]), GFP_KERNEL);
+	if (!vsoc_dev.msix_entries) {
+		dev_err(&vsoc_dev.dev->dev,
+			"unable to allocate msix_entries\n");
+		vsoc_remove_device(pdev);
+		return -ENOSPC;
+	}
+	vsoc_dev.regions_data = kcalloc(
+			vsoc_dev.layout->region_count,
+			sizeof(vsoc_dev.regions_data[0]), GFP_KERNEL);
+	if (!vsoc_dev.regions_data) {
+		dev_err(&vsoc_dev.dev->dev,
+			"unable to allocate regions' data\n");
+		vsoc_remove_device(pdev);
+		return -ENOSPC;
+	}
+	for (i = 0; i < vsoc_dev.layout->region_count; ++i)
+		vsoc_dev.msix_entries[i].entry = i;
+
+	result = pci_enable_msix_exact(vsoc_dev.dev, vsoc_dev.msix_entries,
+				       vsoc_dev.layout->region_count);
+	if (result) {
+		dev_info(&pdev->dev, "pci_enable_msix failed: %d\n", result);
+		vsoc_remove_device(pdev);
+		return -ENOSPC;
+	}
+	/* Check that all regions are well formed */
+	for (i = 0; i < vsoc_dev.layout->region_count; ++i) {
+		const struct vsoc_device_region *region = vsoc_dev.regions + i;
+
+		if (!PAGE_ALIGNED(region->region_begin_offset) ||
+		    !PAGE_ALIGNED(region->region_end_offset)) {
+			dev_err(&vsoc_dev.dev->dev,
+				"region %d not aligned (%x:%x)", i,
+				region->region_begin_offset,
+				region->region_end_offset);
+			vsoc_remove_device(pdev);
+			return -EFAULT;
+		}
+		if (region->region_begin_offset >= region->region_end_offset ||
+		    region->region_end_offset > vsoc_dev.shm_size) {
+			dev_err(&vsoc_dev.dev->dev,
+				"region %d offsets are wrong: %x %x %zx",
+				i, region->region_begin_offset,
+				region->region_end_offset, vsoc_dev.shm_size);
+			vsoc_remove_device(pdev);
+			return -EFAULT;
+		}
+		if (region->managed_by >= vsoc_dev.layout->region_count) {
+			dev_err(&vsoc_dev.dev->dev,
+				"region %d has invalid owner: %u",
+				i, region->managed_by);
+			vsoc_remove_device(pdev);
+			return -EFAULT;
+		}
+	}
+	vsoc_dev.msix_enabled = true;
+	for (i = 0; i < vsoc_dev.layout->region_count; ++i) {
+		const struct vsoc_device_region *region = vsoc_dev.regions + i;
+		size_t name_sz = sizeof(vsoc_dev.regions_data[i].name) - 1;
+		const struct vsoc_signal_table_layout *h_to_g_signal_table =
+			&region->host_to_guest_signal_table;
+		const struct vsoc_signal_table_layout *g_to_h_signal_table =
+			&region->guest_to_host_signal_table;
+
+		vsoc_dev.regions_data[i].name[name_sz] = '\0';
+		memcpy(vsoc_dev.regions_data[i].name, region->device_name,
+		       name_sz);
+		dev_info(&pdev->dev, "region %d name=%s\n",
+			 i, vsoc_dev.regions_data[i].name);
+		init_waitqueue_head(
+				&vsoc_dev.regions_data[i].interrupt_wait_queue);
+		init_waitqueue_head(&vsoc_dev.regions_data[i].futex_wait_queue);
+		vsoc_dev.regions_data[i].incoming_signalled =
+			shm_off_to_virtual_addr(region->region_begin_offset) +
+			h_to_g_signal_table->interrupt_signalled_offset;
+		vsoc_dev.regions_data[i].outgoing_signalled =
+			shm_off_to_virtual_addr(region->region_begin_offset) +
+			g_to_h_signal_table->interrupt_signalled_offset;
+		result = request_irq(
+				vsoc_dev.msix_entries[i].vector,
+				vsoc_interrupt, 0,
+				vsoc_dev.regions_data[i].name,
+				vsoc_dev.regions_data + i);
+		if (result) {
+			dev_info(&pdev->dev,
+				 "request_irq failed irq=%d vector=%d\n",
+				i, vsoc_dev.msix_entries[i].vector);
+			vsoc_remove_device(pdev);
+			return -ENOSPC;
+		}
+		vsoc_dev.regions_data[i].irq_requested = true;
+		if (!device_create(vsoc_dev.class, NULL,
+				   MKDEV(vsoc_dev.major, i),
+				   NULL, vsoc_dev.regions_data[i].name)) {
+			dev_err(&vsoc_dev.dev->dev, "device_create failed\n");
+			vsoc_remove_device(pdev);
+			return -EBUSY;
+		}
+		vsoc_dev.regions_data[i].device_created = true;
+	}
+	return 0;
+}
+
+/*
+ * This should undo all of the allocations in the probe function in reverse
+ * order.
+ *
+ * Notes:
+ *
+ *   The device may have been partially initialized, so double check
+ *   that the allocations happened.
+ *
+ *   This function may be called multiple times, so mark resources as freed
+ *   as they are deallocated.
+ */
+static void vsoc_remove_device(struct pci_dev *pdev)
+{
+	int i;
+	/*
+	 * pdev is the first thing to be set on probe and the last thing
+	 * to be cleared here. If it's NULL then there is no cleanup.
+	 */
+	if (!pdev || !vsoc_dev.dev)
+		return;
+	dev_info(&pdev->dev, "remove_device\n");
+	if (vsoc_dev.regions_data) {
+		for (i = 0; i < vsoc_dev.layout->region_count; ++i) {
+			if (vsoc_dev.regions_data[i].device_created) {
+				device_destroy(vsoc_dev.class,
+					       MKDEV(vsoc_dev.major, i));
+				vsoc_dev.regions_data[i].device_created = false;
+			}
+			if (vsoc_dev.regions_data[i].irq_requested)
+				free_irq(vsoc_dev.msix_entries[i].vector, NULL);
+			vsoc_dev.regions_data[i].irq_requested = false;
+		}
+		kfree(vsoc_dev.regions_data);
+		vsoc_dev.regions_data = NULL;
+	}
+	if (vsoc_dev.msix_enabled) {
+		pci_disable_msix(pdev);
+		vsoc_dev.msix_enabled = false;
+	}
+	kfree(vsoc_dev.msix_entries);
+	vsoc_dev.msix_entries = NULL;
+	vsoc_dev.regions = NULL;
+	if (vsoc_dev.class_added) {
+		class_destroy(vsoc_dev.class);
+		vsoc_dev.class_added = false;
+	}
+	if (vsoc_dev.cdev_added) {
+		cdev_del(&vsoc_dev.cdev);
+		vsoc_dev.cdev_added = false;
+	}
+	if (vsoc_dev.major && vsoc_dev.layout) {
+		unregister_chrdev_region(MKDEV(vsoc_dev.major, 0),
+					 vsoc_dev.layout->region_count);
+		vsoc_dev.major = 0;
+	}
+	vsoc_dev.layout = NULL;
+	if (vsoc_dev.kernel_mapped_shm) {
+		pci_iounmap(pdev, vsoc_dev.kernel_mapped_shm);
+		vsoc_dev.kernel_mapped_shm = NULL;
+	}
+	if (vsoc_dev.regs) {
+		pci_iounmap(pdev, vsoc_dev.regs);
+		vsoc_dev.regs = NULL;
+	}
+	if (vsoc_dev.requested_regions) {
+		pci_release_regions(pdev);
+		vsoc_dev.requested_regions = false;
+	}
+	if (vsoc_dev.enabled_device) {
+		pci_disable_device(pdev);
+		vsoc_dev.enabled_device = false;
+	}
+	/* Do this last: it indicates that the device is not initialized. */
+	vsoc_dev.dev = NULL;
+}
+
+static void __exit vsoc_cleanup_module(void)
+{
+	vsoc_remove_device(vsoc_dev.dev);
+	pci_unregister_driver(&vsoc_pci_driver);
+}
+
+static int __init vsoc_init_module(void)
+{
+	int err = -ENOMEM;
+
+	INIT_LIST_HEAD(&vsoc_dev.permissions);
+	mutex_init(&vsoc_dev.mtx);
+
+	err = pci_register_driver(&vsoc_pci_driver);
+	if (err < 0)
+		return err;
+	return 0;
+}
+
+static int vsoc_open(struct inode *inode, struct file *filp)
+{
+	/* Can't use vsoc_validate_filep because filp is still incomplete */
+	int ret = vsoc_validate_inode(inode);
+
+	if (ret)
+		return ret;
+	filp->private_data =
+		kzalloc(sizeof(struct vsoc_private_data), GFP_KERNEL);
+	if (!filp->private_data)
+		return -ENOMEM;
+	return 0;
+}
+
+static int vsoc_release(struct inode *inode, struct file *filp)
+{
+	struct vsoc_private_data *private_data = NULL;
+	struct fd_scoped_permission_node *node = NULL;
+	struct vsoc_device_region *owner_region_p = NULL;
+	int retval = vsoc_validate_filep(filp);
+
+	if (retval)
+		return retval;
+	private_data = (struct vsoc_private_data *)filp->private_data;
+	if (!private_data)
+		return 0;
+
+	node = private_data->fd_scoped_permission_node;
+	if (node) {
+		owner_region_p = vsoc_region_from_inode(inode);
+		if (owner_region_p->managed_by != VSOC_REGION_WHOLE) {
+			owner_region_p =
+			    &vsoc_dev.regions[owner_region_p->managed_by];
+		}
+		do_destroy_fd_scoped_permission_node(owner_region_p, node);
+		private_data->fd_scoped_permission_node = NULL;
+	}
+	kfree(private_data);
+	filp->private_data = NULL;
+
+	return 0;
+}
+
+/*
+ * Returns the device-relative offset and length of the area specified by the
+ * fd-scoped permission. If no fd-scoped permission is set, a default
+ * permission covering the entire region is assumed, unless the region is
+ * owned by another region, in which case the default is a zero-size permission.
+ */
+static ssize_t vsoc_get_area(struct file *filp, __u32 *area_offset)
+{
+	__u32 off = 0;
+	ssize_t length = 0;
+	struct vsoc_device_region *region_p;
+	struct fd_scoped_permission *perm;
+
+	region_p = vsoc_region_from_filep(filp);
+	off = region_p->region_begin_offset;
+	perm = &((struct vsoc_private_data *)filp->private_data)->
+		fd_scoped_permission_node->permission;
+	if (perm) {
+		off += perm->begin_offset;
+		length = perm->end_offset - perm->begin_offset;
+	} else if (region_p->managed_by == VSOC_REGION_WHOLE) {
+		/* No permission set and the region is not owned by another,
+		 * default to full region access.
+		 */
+		length = vsoc_device_region_size(region_p);
+	} else {
+		/* return zero length, access is denied. */
+		length = 0;
+	}
+	if (area_offset)
+		*area_offset = off;
+	return length;
+}
+
+static int vsoc_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	unsigned long len = vma->vm_end - vma->vm_start;
+	__u32 area_off;
+	phys_addr_t mem_off;
+	ssize_t area_len;
+	int retval = vsoc_validate_filep(filp);
+
+	if (retval)
+		return retval;
+	area_len = vsoc_get_area(filp, &area_off);
+	/* Add the requested offset */
+	area_off += (vma->vm_pgoff << PAGE_SHIFT);
+	area_len -= (vma->vm_pgoff << PAGE_SHIFT);
+	if (area_len < len)
+		return -EINVAL;
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	mem_off = shm_off_to_phys_addr(area_off);
+	if (io_remap_pfn_range(vma, vma->vm_start, mem_off >> PAGE_SHIFT,
+			       len, vma->vm_page_prot))
+		return -EAGAIN;
+	return 0;
+}
+
+module_init(vsoc_init_module);
+module_exit(vsoc_cleanup_module);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Greg Hartman <ghartman@google.com>");
+MODULE_DESCRIPTION("VSoC interpretation of QEmu's ivshmem device");
+MODULE_VERSION("1.0");
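
For context, the ioctl switch in vsoc_ioctl() above implies a fairly simple usage pattern from a guest process. The sketch below is illustrative only and not part of this patch; the device node name and the uapi header path are assumptions, while the VSOC_* ioctl names and the mmap window semantics come from the code above.

```c
/* Hypothetical guest-side user of a vsoc region device (sketch only). */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include <linux/vsoc_shm.h>	/* assumed location of the VSOC_* definitions */

int main(void)
{
	/* One character device is created per region; the name is an example. */
	int fd = open("/dev/vsoc_example_region", O_RDWR);
	void *shm;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Ring the host doorbell for this region, then block until the host
	 * signals this region back.
	 */
	ioctl(fd, VSOC_MAYBE_SEND_INTERRUPT_TO_HOST);
	ioctl(fd, VSOC_WAIT_FOR_INCOMING_INTERRUPT);

	/* Map the window reported by vsoc_get_area(); with no fd-scoped
	 * permission this defaults to the whole region.
	 */
	shm = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (shm != MAP_FAILED)
		munmap(shm, 4096);

	close(fd);
	return 0;
}
```
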
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index a574885..18c5312 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -1284,6 +1284,8 @@
 		ack |= NISTC_INTA_ACK_AI_START;
 	if (a_status & NISTC_AI_STATUS1_STOP)
 		ack |= NISTC_INTA_ACK_AI_STOP;
+	if (a_status & NISTC_AI_STATUS1_OVER)
+		ack |= NISTC_INTA_ACK_AI_ERR;
 	if (ack)
 		ni_stc_writew(dev, ack, NISTC_INTA_ACK_REG);
 }
diff --git a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
index eaeb3c5..cb95c3e 100644
--- a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
+++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
@@ -75,6 +75,8 @@
 
 	for (np = of_find_matching_node(NULL, its_device_id); np;
 	     np = of_find_matching_node(np, its_device_id)) {
+		if (!of_device_is_available(np))
+			continue;
 		if (!of_property_read_bool(np, "msi-controller"))
 			continue;
 
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index f6fc4dd..722c33f 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -253,7 +253,7 @@
 	struct sptlrpc_flavor    cl_flvr_mgc;   /* fixed flavor of mgc->mgs */
 
 	/* the grant values are protected by loi_list_lock below */
-	unsigned long		 cl_dirty_pages;	/* all _dirty_ in pahges */
+	unsigned long		 cl_dirty_pages;	/* all _dirty_ in pages */
 	unsigned long		 cl_dirty_max_pages;	/* allowed w/o rpc */
 	unsigned long		 cl_dirty_transit;	/* dirty synchronous */
 	unsigned long		 cl_avail_grant;	/* bytes of credit for ost */
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index cd19ce8..9e63171 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -2928,7 +2928,7 @@
 	if (lsm && !lmm) {
 		int i;
 
-		for (i = 1; i < lsm->lsm_md_stripe_count; i++) {
+		for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
 			/*
 			 * For migrating inode, the master stripe and master
 			 * object will be the same, so do not need iput, see
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index 4bbe219..1a8c9f5 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -1542,7 +1542,7 @@
 	if (rc < 0)
 		return 0;
 
-	if (cli->cl_dirty_pages <= cli->cl_dirty_max_pages &&
+	if (cli->cl_dirty_pages < cli->cl_dirty_max_pages &&
 	    atomic_long_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) {
 		osc_consume_write_grant(cli, &oap->oap_brw_page);
 		if (transient) {
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 457eeb5..5fe9593 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -1705,6 +1705,8 @@
 
 		priv->rx_urb[16] = usb_alloc_urb(0, GFP_KERNEL);
 		priv->oldaddr = kmalloc(16, GFP_KERNEL);
+		if (!priv->oldaddr)
+			return -ENOMEM;
 		oldaddr = priv->oldaddr;
 		align = ((long)oldaddr) & 3;
 		if (align) {
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index 170de1c..f815f9d 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -169,7 +169,7 @@
 				     hw->ident_sta_fw.variant) >
 	    HFA384x_FIRMWARE_VERSION(1, 5, 0)) {
 		if (msg->scantype.data != P80211ENUM_scantype_active)
-			word = cpu_to_le16(msg->maxchanneltime.data);
+			word = msg->maxchanneltime.data;
 		else
 			word = 0;
 
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 1259654..ae24a68 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -58,4 +58,4 @@
 obj-$(CONFIG_MTK_THERMAL)	+= mtk_thermal.o
 obj-$(CONFIG_GENERIC_ADC_THERMAL)	+= thermal-generic-adc.o
 obj-$(CONFIG_THERMAL_QPNP_ADC_TM)	+= qpnp-adc-tm.o
-obj-$(CONFIG_THERMAL_TSENS)	+= msm-tsens.o tsens2xxx.o tsens-dbg.o tsens-mtc.o tsens1xxx.o
+obj-$(CONFIG_THERMAL_TSENS)	+= msm-tsens.o tsens2xxx.o tsens-dbg.o tsens-mtc.o tsens1xxx.o tsens_calib.o
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 2c4a63a..02f93f4 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -224,7 +224,7 @@
 static int cpufreq_cooling_pm_notify(struct notifier_block *nb,
 				unsigned long mode, void *_unused)
 {
-	struct cpufreq_cooling_device *cpufreq_dev;
+	struct cpufreq_cooling_device *cpufreq_dev, *next;
 	unsigned int cpu;
 
 	switch (mode) {
@@ -236,8 +236,8 @@
 	case PM_POST_HIBERNATION:
 	case PM_POST_RESTORE:
 	case PM_POST_SUSPEND:
-		mutex_lock(&cooling_list_lock);
-		list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
+		list_for_each_entry_safe(cpufreq_dev, next, &cpufreq_dev_list,
+						node) {
 			mutex_lock(&core_isolate_lock);
 			if (cpufreq_dev->cpufreq_state ==
 				cpufreq_dev->max_level) {
@@ -259,7 +259,6 @@
 			}
 			mutex_unlock(&core_isolate_lock);
 		}
-		mutex_unlock(&cooling_list_lock);
 
 		atomic_set(&in_suspend, 0);
 		break;
diff --git a/drivers/thermal/gov_low_limits.c b/drivers/thermal/gov_low_limits.c
index 278869c..d02ea26 100644
--- a/drivers/thermal/gov_low_limits.c
+++ b/drivers/thermal/gov_low_limits.c
@@ -62,19 +62,30 @@
 		dev_dbg(&instance->cdev->device, "old_target=%d, target=%d\n",
 					old_target, (int)instance->target);
 
-		if (old_target == instance->target)
+		if (instance->initialized && old_target == instance->target)
 			continue;
 
-		if (old_target == THERMAL_NO_TARGET &&
+		if (!instance->initialized) {
+			if (instance->target != THERMAL_NO_TARGET) {
+				trace_thermal_zone_trip(tz, trip, trip_type,
+							true);
+				tz->passive += 1;
+			}
+		} else {
+			if (old_target == THERMAL_NO_TARGET &&
 				instance->target != THERMAL_NO_TARGET) {
-			trace_thermal_zone_trip(tz, trip, trip_type, true);
-			tz->passive += 1;
-		} else if (old_target != THERMAL_NO_TARGET &&
+				trace_thermal_zone_trip(tz, trip, trip_type,
+							true);
+				tz->passive += 1;
+			} else if (old_target != THERMAL_NO_TARGET &&
 				instance->target == THERMAL_NO_TARGET) {
-			trace_thermal_zone_trip(tz, trip, trip_type, false);
-			tz->passive -= 1;
+				trace_thermal_zone_trip(tz, trip, trip_type,
+							false);
+				tz->passive -= 1;
+			}
 		}
 
+		instance->initialized = true;
 		instance->cdev->updated = false; /* cdev needs update */
 	}
 
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index 06912f0..b7cb49a 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -587,6 +587,9 @@
 	regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
 	regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
 
+	data->irq_enabled = true;
+	data->mode = THERMAL_DEVICE_ENABLED;
+
 	ret = devm_request_threaded_irq(&pdev->dev, data->irq,
 			imx_thermal_alarm_irq, imx_thermal_alarm_irq_thread,
 			0, "imx_thermal", data);
@@ -598,9 +601,6 @@
 		return ret;
 	}
 
-	data->irq_enabled = true;
-	data->mode = THERMAL_DEVICE_ENABLED;
-
 	return 0;
 }
 
diff --git a/drivers/thermal/msm-tsens.c b/drivers/thermal/msm-tsens.c
index c137d3d..a4c89e0 100644
--- a/drivers/thermal/msm-tsens.c
+++ b/drivers/thermal/msm-tsens.c
@@ -90,6 +90,9 @@
 	{	.compatible = "qcom,msm8937-tsens",
 		.data = &data_tsens14xx,
 	},
+	{	.compatible = "qcom,msm8909-tsens",
+		.data = &data_tsens1xxx_8909,
+	},
 	{}
 };
 MODULE_DEVICE_TABLE(of, tsens_table);
@@ -217,6 +220,10 @@
 
 static int tsens_tm_remove(struct platform_device *pdev)
 {
+	struct tsens_device *tmdev = platform_get_drvdata(pdev);
+
+	if (tmdev)
+		list_del(&tmdev->list);
 	platform_set_drvdata(pdev, NULL);
 
 	return 0;
@@ -275,6 +282,7 @@
 		.name = "msm-tsens",
 		.owner = THIS_MODULE,
 		.of_match_table = tsens_table,
+		.suppress_bind_attrs = true,
 	},
 };
 
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index b4d3116..3055f9a 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -523,6 +523,7 @@
 	struct thermal_instance *instance;
 	struct power_allocator_params *params = tz->governor_data;
 
+	mutex_lock(&tz->lock);
 	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
 		if ((instance->trip != params->trip_max_desired_temperature) ||
 		    (!cdev_is_power_actor(instance->cdev)))
@@ -534,6 +535,7 @@
 		mutex_unlock(&instance->cdev->lock);
 		thermal_cdev_update(instance->cdev);
 	}
+	mutex_unlock(&tz->lock);
 }
 
 /**
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index ad1186d..a45810b 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -185,6 +185,7 @@
  * @regulator: pointer to the TMU regulator structure.
  * @reg_conf: pointer to structure to register with core thermal.
  * @ntrip: number of supported trip points.
+ * @enabled: current status of TMU device
  * @tmu_initialize: SoC specific TMU initialization method
  * @tmu_control: SoC specific TMU control method
  * @tmu_read: SoC specific TMU temperature read method
@@ -205,6 +206,7 @@
 	struct regulator *regulator;
 	struct thermal_zone_device *tzd;
 	unsigned int ntrip;
+	bool enabled;
 
 	int (*tmu_initialize)(struct platform_device *pdev);
 	void (*tmu_control)(struct platform_device *pdev, bool on);
@@ -398,6 +400,7 @@
 	mutex_lock(&data->lock);
 	clk_enable(data->clk);
 	data->tmu_control(pdev, on);
+	data->enabled = on;
 	clk_disable(data->clk);
 	mutex_unlock(&data->lock);
 }
@@ -889,19 +892,24 @@
 static int exynos_get_temp(void *p, int *temp)
 {
 	struct exynos_tmu_data *data = p;
+	int value, ret = 0;
 
-	if (!data || !data->tmu_read)
+	if (!data || !data->tmu_read || !data->enabled)
 		return -EINVAL;
 
 	mutex_lock(&data->lock);
 	clk_enable(data->clk);
 
-	*temp = code_to_temp(data, data->tmu_read(data)) * MCELSIUS;
+	value = data->tmu_read(data);
+	if (value < 0)
+		ret = value;
+	else
+		*temp = code_to_temp(data, value) * MCELSIUS;
 
 	clk_disable(data->clk);
 	mutex_unlock(&data->lock);
 
-	return 0;
+	return ret;
 }
 
 #ifdef CONFIG_THERMAL_EMULATION
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index 4bbb47a..4d75f6b 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -190,16 +190,26 @@
 		if (instance->initialized && old_target == instance->target)
 			continue;
 
-		/* Activate a passive thermal instance */
-		if (old_target == THERMAL_NO_TARGET &&
-			instance->target != THERMAL_NO_TARGET) {
-			update_passive_instance(tz, trip_type, 1);
-			trace_thermal_zone_trip(tz, trip, trip_type, true);
-		/* Deactivate a passive thermal instance */
-		} else if (old_target != THERMAL_NO_TARGET &&
-			instance->target == THERMAL_NO_TARGET) {
-			update_passive_instance(tz, trip_type, -1);
-			trace_thermal_zone_trip(tz, trip, trip_type, false);
+		if (!instance->initialized) {
+			if (instance->target != THERMAL_NO_TARGET) {
+				trace_thermal_zone_trip(tz, trip, trip_type,
+							true);
+				update_passive_instance(tz, trip_type, 1);
+			}
+		} else {
+			/* Activate a passive thermal instance */
+			if (old_target == THERMAL_NO_TARGET &&
+				instance->target != THERMAL_NO_TARGET) {
+				trace_thermal_zone_trip(tz, trip, trip_type,
+							true);
+				update_passive_instance(tz, trip_type, 1);
+			/* Deactivate a passive thermal instance */
+			} else if (old_target != THERMAL_NO_TARGET &&
+				instance->target == THERMAL_NO_TARGET) {
+				trace_thermal_zone_trip(tz, trip, trip_type,
+							false);
+				update_passive_instance(tz, trip_type, -1);
+			}
 		}
 
 		instance->initialized = true;
diff --git a/drivers/thermal/tsens.h b/drivers/thermal/tsens.h
index 730d124..c8e6233 100644
--- a/drivers/thermal/tsens.h
+++ b/drivers/thermal/tsens.h
@@ -23,7 +23,8 @@
 
 #define DEBUG_SIZE				10
 #define TSENS_MAX_SENSORS			16
-#define TSENS_1x_MAX_SENSORS			11
+#define TSENS_NUM_SENSORS_8937			11
+#define TSENS_NUM_SENSORS_8909			5
 #define TSENS_CONTROLLER_ID(n)			(n)
 #define TSENS_CTRL_ADDR(n)			(n)
 #define TSENS_TM_SN_STATUS(n)			((n) + 0xa0)
@@ -32,6 +33,9 @@
 #define ONE_PT_CALIB2		0x2
 #define TWO_PT_CALIB		0x3
 
+#define SLOPE_FACTOR		1000
+#define SLOPE_DEFAULT		3200
+
 enum tsens_dbg_type {
 	TSENS_DBG_POLL,
 	TSENS_DBG_LOG_TEMP_READS,
@@ -143,6 +147,7 @@
 	struct device			*dev;
 	struct platform_device		*pdev;
 	struct list_head		list;
+	bool				prev_reading_avail;
 	struct regmap			*map;
 	struct regmap_field		*status_field;
 	void __iomem			*tsens_srot_addr;
@@ -153,12 +158,15 @@
 	spinlock_t			tsens_crit_lock;
 	spinlock_t			tsens_upp_low_lock;
 	const struct tsens_data		*ctrl_data;
+	struct tsens_mtc_sysfs  mtcsys;
 	struct tsens_sensor		sensor[0];
-	struct tsens_mtc_sysfs	mtcsys;
 };
 
 extern const struct tsens_data data_tsens2xxx, data_tsens23xx, data_tsens24xx;
-extern const struct tsens_data data_tsens14xx;
+extern const struct tsens_data data_tsens14xx, data_tsens1xxx_8909;
 extern struct list_head tsens_device_list;
 
+extern int calibrate_8937(struct tsens_device *tmdev);
+extern int calibrate_8909(struct tsens_device *tmdev);
+
 #endif /* __QCOM_TSENS_H__ */
diff --git a/drivers/thermal/tsens1xxx.c b/drivers/thermal/tsens1xxx.c
index d63fcd1..d698aed 100644
--- a/drivers/thermal/tsens1xxx.c
+++ b/drivers/thermal/tsens1xxx.c
@@ -33,6 +33,7 @@
 #define TSENS_UPPER_THRESHOLD_SHIFT	10
 
 #define TSENS_S0_STATUS_ADDR(n)		((n) + 0x30)
+#define TSENS_S0_TRDY_ADDR(n)		((n) + 0x5c)
 #define TSENS_SN_ADDR_OFFSET		0x4
 #define TSENS_SN_STATUS_TEMP_MASK	0x3ff
 #define TSENS_SN_STATUS_LOWER_STATUS	BIT(11)
@@ -55,102 +56,6 @@
 #define TSENS_THRESHOLD_MIN_CODE	0x0
 #define TSENS_SCALE_MILLIDEG		1000
 
-/* eeprom layout data for 8937 */
-#define BASE0_MASK	0x000000ff
-#define BASE1_MASK	0xff000000
-#define BASE1_SHIFT	24
-
-#define S0_P1_MASK		0x000001f8
-#define S1_P1_MASK		0x001f8000
-#define S2_P1_MASK_0_4		0xf8000000
-#define S2_P1_MASK_5		0x00000001
-#define S3_P1_MASK		0x00001f80
-#define S4_P1_MASK		0x01f80000
-#define S5_P1_MASK		0x00003f00
-#define S6_P1_MASK		0x03f00000
-#define S7_P1_MASK		0x0000003f
-#define S8_P1_MASK		0x0003f000
-#define S9_P1_MASK		0x0000003f
-#define S10_P1_MASK		0x0003f000
-
-#define S0_P2_MASK		0x00007e00
-#define S1_P2_MASK		0x07e00000
-#define S2_P2_MASK		0x0000007e
-#define S3_P2_MASK		0x0007e000
-#define S4_P2_MASK		0x7e000000
-#define S5_P2_MASK		0x000fc000
-#define S6_P2_MASK		0xfc000000
-#define S7_P2_MASK		0x00000fc0
-#define S8_P2_MASK		0x00fc0000
-#define S9_P2_MASK		0x00000fc0
-#define S10_P2_MASK		0x00fc0000
-
-#define S0_P1_SHIFT     3
-#define S1_P1_SHIFT     15
-#define S2_P1_SHIFT_0_4 27
-#define S2_P1_SHIFT_5   5
-#define S3_P1_SHIFT     7
-#define S4_P1_SHIFT     19
-#define S5_P1_SHIFT     8
-#define S6_P1_SHIFT     20
-#define S8_P1_SHIFT     12
-#define S10_P1_SHIFT    12
-
-#define S0_P2_SHIFT     9
-#define S1_P2_SHIFT     21
-#define S2_P2_SHIFT     1
-#define S3_P2_SHIFT     13
-#define S4_P2_SHIFT     25
-#define S5_P2_SHIFT     14
-#define S6_P2_SHIFT     26
-#define S7_P2_SHIFT     6
-#define S8_P2_SHIFT     18
-#define S9_P2_SHIFT     6
-#define S10_P2_SHIFT    18
-
-#define CAL_SEL_MASK	0x00000007
-
-#define CAL_DEGC_PT1		30
-#define CAL_DEGC_PT2		120
-#define SLOPE_FACTOR		1000
-#define SLOPE_DEFAULT		3200
-
-/*
- * Use this function on devices where slope and offset calculations
- * depend on calibration data read from qfprom. On others the slope
- * and offset values are derived from tz->tzp->slope and tz->tzp->offset
- * resp.
- */
-static void compute_intercept_slope(struct tsens_device *tmdev, u32 *p1,
-			     u32 *p2, u32 mode)
-{
-	int i;
-	int num, den;
-
-	for (i = 0; i < TSENS_1x_MAX_SENSORS; i++) {
-		pr_debug(
-			"sensor%d - data_point1:%#x data_point2:%#x\n",
-			i, p1[i], p2[i]);
-
-		tmdev->sensor[i].slope = SLOPE_DEFAULT;
-		if (mode == TWO_PT_CALIB) {
-			/*
-			 * slope (m) = adc_code2 - adc_code1 (y2 - y1)/
-			 *	temp_120_degc - temp_30_degc (x2 - x1)
-			 */
-			num = p2[i] - p1[i];
-			num *= SLOPE_FACTOR;
-			den = CAL_DEGC_PT2 - CAL_DEGC_PT1;
-			tmdev->sensor[i].slope = num / den;
-		}
-
-		tmdev->sensor[i].offset = (p1[i] * SLOPE_FACTOR) -
-				(CAL_DEGC_PT1 *
-				tmdev->sensor[i].slope);
-		pr_debug("offset:%d\n", tmdev->sensor[i].offset);
-	}
-}
-
 static int code_to_degc(u32 adc_code, const struct tsens_sensor *sensor)
 {
 	int degc, num, den;
@@ -184,72 +89,6 @@
 	return code;
 }
 
-static int calibrate_8937(struct tsens_device *tmdev)
-{
-	int base0 = 0, base1 = 0, i;
-	u32 p1[TSENS_1x_MAX_SENSORS], p2[TSENS_1x_MAX_SENSORS];
-	int mode = 0, tmp = 0;
-	u32 qfprom_cdata[5] = {0, 0, 0, 0, 0};
-
-	qfprom_cdata[0] = readl_relaxed(tmdev->tsens_calib_addr + 0x1D8);
-	qfprom_cdata[1] = readl_relaxed(tmdev->tsens_calib_addr + 0x1DC);
-	qfprom_cdata[2] = readl_relaxed(tmdev->tsens_calib_addr + 0x210);
-	qfprom_cdata[3] = readl_relaxed(tmdev->tsens_calib_addr + 0x214);
-	qfprom_cdata[4] = readl_relaxed(tmdev->tsens_calib_addr + 0x230);
-
-	mode = (qfprom_cdata[2] & CAL_SEL_MASK);
-	pr_debug("calibration mode is %d\n", mode);
-
-	switch (mode) {
-	case TWO_PT_CALIB:
-		base1 = (qfprom_cdata[1] & BASE1_MASK) >> BASE1_SHIFT;
-		p2[0] = (qfprom_cdata[2] & S0_P2_MASK) >> S0_P2_SHIFT;
-		p2[1] = (qfprom_cdata[2] & S1_P2_MASK) >> S1_P2_SHIFT;
-		p2[2] = (qfprom_cdata[3] & S2_P2_MASK) >> S2_P2_SHIFT;
-		p2[3] = (qfprom_cdata[3] & S3_P2_MASK) >> S3_P2_SHIFT;
-		p2[4] = (qfprom_cdata[3] & S4_P2_MASK) >> S4_P2_SHIFT;
-		p2[5] = (qfprom_cdata[0] & S5_P2_MASK) >> S5_P2_SHIFT;
-		p2[6] = (qfprom_cdata[0] & S6_P2_MASK) >> S6_P2_SHIFT;
-		p2[7] = (qfprom_cdata[1] & S7_P2_MASK) >> S7_P2_SHIFT;
-		p2[8] = (qfprom_cdata[1] & S8_P2_MASK) >> S8_P2_SHIFT;
-		p2[9] = (qfprom_cdata[4] & S9_P2_MASK) >> S9_P2_SHIFT;
-		p2[10] = (qfprom_cdata[4] & S10_P2_MASK) >> S10_P2_SHIFT;
-
-		for (i = 0; i < TSENS_1x_MAX_SENSORS; i++)
-			p2[i] = ((base1 + p2[i]) << 2);
-		/* Fall through */
-	case ONE_PT_CALIB2:
-		base0 = (qfprom_cdata[0] & BASE0_MASK);
-		p1[0] = (qfprom_cdata[2] & S0_P1_MASK) >> S0_P1_SHIFT;
-		p1[1] = (qfprom_cdata[2] & S1_P1_MASK) >> S1_P1_SHIFT;
-		p1[2] = (qfprom_cdata[2] & S2_P1_MASK_0_4) >> S2_P1_SHIFT_0_4;
-		tmp = (qfprom_cdata[3] & S2_P1_MASK_5) << S2_P1_SHIFT_5;
-		p1[2] |= tmp;
-		p1[3] = (qfprom_cdata[3] & S3_P1_MASK) >> S3_P1_SHIFT;
-		p1[4] = (qfprom_cdata[3] & S4_P1_MASK) >> S4_P1_SHIFT;
-		p1[5] = (qfprom_cdata[0] & S5_P1_MASK) >> S5_P1_SHIFT;
-		p1[6] = (qfprom_cdata[0] & S6_P1_MASK) >> S6_P1_SHIFT;
-		p1[7] = (qfprom_cdata[1] & S7_P1_MASK);
-		p1[8] = (qfprom_cdata[1] & S8_P1_MASK) >> S8_P1_SHIFT;
-		p1[9] = (qfprom_cdata[4] & S9_P1_MASK);
-		p1[10] = (qfprom_cdata[4] & S10_P1_MASK) >> S10_P1_SHIFT;
-
-		for (i = 0; i < TSENS_1x_MAX_SENSORS; i++)
-			p1[i] = (((base0) + p1[i]) << 2);
-		break;
-	default:
-		for (i = 0; i < TSENS_1x_MAX_SENSORS; i++) {
-			p1[i] = 500;
-			p2[i] = 780;
-		}
-		break;
-	}
-
-	compute_intercept_slope(tmdev, p1, p2, mode);
-
-	return 0;
-}
-
 static int tsens1xxx_get_temp(struct tsens_sensor *sensor, int *temp)
 {
 	struct tsens_device *tmdev = NULL;
@@ -265,8 +104,21 @@
 
 	tmdev = sensor->tmdev;
 
-	trdy_addr = TSENS_TRDY_ADDR(tmdev->tsens_tm_addr);
-	sensor_addr = TSENS_SN_STATUS_ADDR(tmdev->tsens_tm_addr);
+	if ((tmdev->ctrl_data->ver_major == 1) &&
+			(tmdev->ctrl_data->ver_minor == 1)) {
+		trdy_addr = TSENS_S0_TRDY_ADDR(tmdev->tsens_tm_addr);
+		sensor_addr = TSENS_S0_STATUS_ADDR(tmdev->tsens_tm_addr);
+
+		if (!(tmdev->prev_reading_avail)) {
+			while (!((readl_relaxed(trdy_addr)) & TSENS_TRDY_MASK))
+				usleep_range(TSENS_TRDY_RDY_MIN_TIME,
+						TSENS_TRDY_RDY_MAX_TIME);
+			tmdev->prev_reading_avail = true;
+		}
+	} else {
+		trdy_addr = TSENS_TRDY_ADDR(tmdev->tsens_tm_addr);
+		sensor_addr = TSENS_SN_STATUS_ADDR(tmdev->tsens_tm_addr);
+	}
 
 	code = readl_relaxed(sensor_addr +
 			(sensor->hw_id << TSENS_STATUS_ADDR_OFFSET));
@@ -486,11 +338,17 @@
 	void __iomem *sensor_status_ctrl_addr;
 	u32 rc = 0, addr_offset;
 
-	sensor_status_addr = TSENS_SN_STATUS_ADDR(tm->tsens_tm_addr);
+
+	if ((tm->ctrl_data->ver_major == 1) &&
+			(tm->ctrl_data->ver_minor == 1))
+		sensor_status_addr = TSENS_S0_STATUS_ADDR(tm->tsens_tm_addr);
+	else
+		sensor_status_addr = TSENS_SN_STATUS_ADDR(tm->tsens_tm_addr);
+
 	sensor_status_ctrl_addr =
 		TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR(tm->tsens_tm_addr);
 
-	for (i = 0; i < TSENS_1x_MAX_SENSORS; i++) {
+	for (i = 0; i < tm->ctrl_data->num_sensors; i++) {
 		bool upper_thr = false, lower_thr = false;
 
 		if (IS_ERR(tm->sensor[i].tzd))
@@ -517,8 +375,8 @@
 			th_temp = code_to_degc((threshold &
 					TSENS_UPPER_THRESHOLD_MASK) >>
 					TSENS_UPPER_THRESHOLD_SHIFT,
-					tm->sensor);
-			if (th_temp > temp) {
+					(tm->sensor + i));
+			if (th_temp > (temp/TSENS_SCALE_MILLIDEG)) {
 				pr_debug("Re-arm high threshold\n");
 				rc = tsens_tz_activate_trip_type(
 						&tm->sensor[i],
@@ -539,8 +397,8 @@
 					tm->tsens_tm_addr + addr_offset));
 			th_temp = code_to_degc((threshold &
 					TSENS_LOWER_THRESHOLD_MASK),
-					tm->sensor);
-			if (th_temp < temp) {
+					(tm->sensor + i));
+			if (th_temp < (temp/TSENS_SCALE_MILLIDEG)) {
 				pr_debug("Re-arm Low threshold\n");
 				rc = tsens_tz_activate_trip_type(
 						&tm->sensor[i],
@@ -581,7 +439,12 @@
 	void __iomem *srot_addr;
 	unsigned int srot_val, sensor_en;
 
-	srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_srot_addr + 0x4);
+	if ((tmdev->ctrl_data->ver_major == 1) &&
+			(tmdev->ctrl_data->ver_minor == 1))
+		srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_srot_addr);
+	else
+		srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_srot_addr + 0x4);
+
 	srot_val = readl_relaxed(srot_addr);
 	srot_val = TSENS_CTRL_SENSOR_EN_MASK(srot_val);
 
@@ -595,7 +458,12 @@
 	void __iomem *srot_addr;
 	unsigned int srot_val;
 
-	srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_srot_addr + 0x4);
+	if ((tmdev->ctrl_data->ver_major == 1) &&
+			(tmdev->ctrl_data->ver_minor == 1))
+		srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_srot_addr);
+	else
+		srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_srot_addr + 0x4);
+
 	srot_val = readl_relaxed(srot_addr);
 	if (!(srot_val & TSENS_EN)) {
 		pr_err("TSENS device is not enabled\n");
@@ -665,9 +533,27 @@
 };
 
 const struct tsens_data data_tsens14xx = {
+	.num_sensors = TSENS_NUM_SENSORS_8937,
 	.ops = &ops_tsens1xxx,
 	.valid_status_check = true,
 	.mtc = true,
 	.ver_major = 1,
 	.ver_minor = 4,
 };
+
+static const struct tsens_ops ops_tsens1xxx_8909 = {
+	.hw_init		= tsens1xxx_hw_init,
+	.get_temp		= tsens1xxx_get_temp,
+	.set_trips		= tsens1xxx_set_trip_temp,
+	.interrupts_reg	= tsens1xxx_register_interrupts,
+	.sensor_en		= tsens1xxx_hw_sensor_en,
+	.calibrate		= calibrate_8909,
+	.dbg			= tsens2xxx_dbg,
+};
+
+const struct tsens_data data_tsens1xxx_8909 = {
+	.num_sensors = TSENS_NUM_SENSORS_8909,
+	.ops = &ops_tsens1xxx_8909,
+	.ver_major = 1,
+	.ver_minor = 1,
+};
diff --git a/drivers/thermal/tsens_calib.c b/drivers/thermal/tsens_calib.c
new file mode 100644
index 0000000..04dd5c5
--- /dev/null
+++ b/drivers/thermal/tsens_calib.c
@@ -0,0 +1,285 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include "tsens.h"
+
+/* eeprom layout data for 8937 */
+#define BASE0_MASK_8937				0x000000ff
+#define BASE1_MASK_8937				0xff000000
+#define BASE1_SHIFT_8937			24
+
+#define S0_P1_MASK_8937				0x000001f8
+#define S1_P1_MASK_8937				0x001f8000
+#define S2_P1_MASK_0_4_8937			0xf8000000
+#define S2_P1_MASK_5_8937			0x00000001
+#define S3_P1_MASK_8937				0x00001f80
+#define S4_P1_MASK_8937				0x01f80000
+#define S5_P1_MASK_8937				0x00003f00
+#define S6_P1_MASK_8937				0x03f00000
+#define S7_P1_MASK_8937				0x0000003f
+#define S8_P1_MASK_8937				0x0003f000
+#define S9_P1_MASK_8937				0x0000003f
+#define S10_P1_MASK_8937			0x0003f000
+
+#define S0_P2_MASK_8937				0x00007e00
+#define S1_P2_MASK_8937				0x07e00000
+#define S2_P2_MASK_8937				0x0000007e
+#define S3_P2_MASK_8937				0x0007e000
+#define S4_P2_MASK_8937				0x7e000000
+#define S5_P2_MASK_8937				0x000fc000
+#define S6_P2_MASK_8937				0xfc000000
+#define S7_P2_MASK_8937				0x00000fc0
+#define S8_P2_MASK_8937				0x00fc0000
+#define S9_P2_MASK_8937				0x00000fc0
+#define S10_P2_MASK_8937			0x00fc0000
+
+#define S0_P1_SHIFT_8937			3
+#define S1_P1_SHIFT_8937			15
+#define S2_P1_SHIFT_0_4_8937			27
+#define S2_P1_SHIFT_5_8937			5
+#define S3_P1_SHIFT_8937			7
+#define S4_P1_SHIFT_8937			19
+#define S5_P1_SHIFT_8937			8
+#define S6_P1_SHIFT_8937			20
+#define S8_P1_SHIFT_8937			12
+#define S10_P1_SHIFT_8937			12
+
+#define S0_P2_SHIFT_8937			9
+#define S1_P2_SHIFT_8937			21
+#define S2_P2_SHIFT_8937			1
+#define S3_P2_SHIFT_8937			13
+#define S4_P2_SHIFT_8937			25
+#define S5_P2_SHIFT_8937			14
+#define S6_P2_SHIFT_8937			26
+#define S7_P2_SHIFT_8937			6
+#define S8_P2_SHIFT_8937			18
+#define S9_P2_SHIFT_8937			6
+#define S10_P2_SHIFT_8937			18
+
+#define CAL_SEL_MASK_8937			0x00000007
+
+/* eeprom layout for 8909 */
+#define TSENS_EEPROM(n)				((n) + 0xa0)
+#define BASE0_MASK_8909				0x000000ff
+#define BASE1_MASK_8909				0x0000ff00
+
+#define S0_P1_MASK_8909				0x0000003f
+#define S1_P1_MASK_8909				0x0003f000
+#define S2_P1_MASK_8909				0x3f000000
+#define S3_P1_MASK_8909				0x000003f0
+#define S4_P1_MASK_8909				0x003f0000
+
+#define S0_P2_MASK_8909				0x00000fc0
+#define S1_P2_MASK_8909				0x00fc0000
+#define S2_P2_MASK_0_1_8909				0xc0000000
+#define S2_P2_MASK_2_5_8909				0x0000000f
+#define S3_P2_MASK_8909				0x0000fc00
+#define S4_P2_MASK_8909				0x0fc00000
+
+#define TSENS_CAL_SEL_8909				0x00070000
+#define CAL_SEL_SHIFT_8909				16
+#define BASE1_SHIFT_8909				8
+
+#define S1_P1_SHIFT_8909				12
+#define S2_P1_SHIFT_8909				24
+#define S3_P1_SHIFT_8909				4
+#define S4_P1_SHIFT_8909				16
+
+#define S0_P2_SHIFT_8909				6
+#define S1_P2_SHIFT_8909				18
+#define S2_P2_SHIFT_0_1_8909				30
+#define S2_P2_SHIFT_2_5_8909				2
+#define S3_P2_SHIFT_8909				10
+#define S4_P2_SHIFT_8909				22
+
+#define CAL_DEGC_PT1				30
+#define CAL_DEGC_PT2				120
+/*
+ * Use this function on devices where slope and offset calculations
+ * depend on calibration data read from qfprom. On others the slope
+ * and offset values are derived from tz->tzp->slope and tz->tzp->offset
+ * respectively.
+ */
+static void compute_intercept_slope(struct tsens_device *tmdev, u32 *p1,
+	u32 *p2, u32 mode)
+{
+	int i;
+	int num, den;
+
+	for (i = 0; i < tmdev->ctrl_data->num_sensors; i++) {
+		pr_debug(
+			"sensor%d - data_point1:%#x data_point2:%#x\n",
+			i, p1[i], p2[i]);
+
+		tmdev->sensor[i].slope = SLOPE_DEFAULT;
+		if (mode == TWO_PT_CALIB) {
+			/*
+			 * slope (m) = adc_code2 - adc_code1 (y2 - y1)/
+			 *	temp_120_degc - temp_30_degc (x2 - x1)
+			 */
+			num = p2[i] - p1[i];
+			num *= SLOPE_FACTOR;
+			den = CAL_DEGC_PT2 - CAL_DEGC_PT1;
+			tmdev->sensor[i].slope = num / den;
+		}
+
+		tmdev->sensor[i].offset = (p1[i] * SLOPE_FACTOR) -
+			(CAL_DEGC_PT1 *
+			tmdev->sensor[i].slope);
+		pr_debug("offset:%d\n", tmdev->sensor[i].offset);
+	}
+}
+
+int calibrate_8937(struct tsens_device *tmdev)
+{
+	int base0 = 0, base1 = 0, i;
+	u32 p1[TSENS_NUM_SENSORS_8937], p2[TSENS_NUM_SENSORS_8937];
+	int mode = 0, tmp = 0;
+	u32 qfprom_cdata[5] = {0, 0, 0, 0, 0};
+
+	qfprom_cdata[0] = readl_relaxed(tmdev->tsens_calib_addr + 0x1D8);
+	qfprom_cdata[1] = readl_relaxed(tmdev->tsens_calib_addr + 0x1DC);
+	qfprom_cdata[2] = readl_relaxed(tmdev->tsens_calib_addr + 0x210);
+	qfprom_cdata[3] = readl_relaxed(tmdev->tsens_calib_addr + 0x214);
+	qfprom_cdata[4] = readl_relaxed(tmdev->tsens_calib_addr + 0x230);
+
+	mode = (qfprom_cdata[2] & CAL_SEL_MASK_8937);
+	pr_debug("calibration mode is %d\n", mode);
+
+	switch (mode) {
+	case TWO_PT_CALIB:
+		base1 = (qfprom_cdata[1] &
+				BASE1_MASK_8937) >> BASE1_SHIFT_8937;
+		p2[0] = (qfprom_cdata[2] &
+				S0_P2_MASK_8937) >> S0_P2_SHIFT_8937;
+		p2[1] = (qfprom_cdata[2] &
+				S1_P2_MASK_8937) >> S1_P2_SHIFT_8937;
+		p2[2] = (qfprom_cdata[3] &
+				S2_P2_MASK_8937) >> S2_P2_SHIFT_8937;
+		p2[3] = (qfprom_cdata[3] &
+				S3_P2_MASK_8937) >> S3_P2_SHIFT_8937;
+		p2[4] = (qfprom_cdata[3] &
+				S4_P2_MASK_8937) >> S4_P2_SHIFT_8937;
+		p2[5] = (qfprom_cdata[0] &
+				S5_P2_MASK_8937) >> S5_P2_SHIFT_8937;
+		p2[6] = (qfprom_cdata[0] &
+				S6_P2_MASK_8937) >> S6_P2_SHIFT_8937;
+		p2[7] = (qfprom_cdata[1] &
+				S7_P2_MASK_8937) >> S7_P2_SHIFT_8937;
+		p2[8] = (qfprom_cdata[1] &
+				S8_P2_MASK_8937) >> S8_P2_SHIFT_8937;
+		p2[9] = (qfprom_cdata[4] &
+				S9_P2_MASK_8937) >> S9_P2_SHIFT_8937;
+		p2[10] = (qfprom_cdata[4] &
+				S10_P2_MASK_8937) >> S10_P2_SHIFT_8937;
+
+		for (i = 0; i < TSENS_NUM_SENSORS_8937; i++)
+			p2[i] = ((base1 + p2[i]) << 2);
+		/* Fall through */
+	case ONE_PT_CALIB2:
+		base0 = (qfprom_cdata[0] & BASE0_MASK_8937);
+		p1[0] = (qfprom_cdata[2] &
+				S0_P1_MASK_8937) >> S0_P1_SHIFT_8937;
+		p1[1] = (qfprom_cdata[2] &
+				S1_P1_MASK_8937) >> S1_P1_SHIFT_8937;
+		p1[2] = (qfprom_cdata[2] &
+				S2_P1_MASK_0_4_8937) >> S2_P1_SHIFT_0_4_8937;
+		tmp = (qfprom_cdata[3] &
+				S2_P1_MASK_5_8937) << S2_P1_SHIFT_5_8937;
+		p1[2] |= tmp;
+		p1[3] = (qfprom_cdata[3] &
+				S3_P1_MASK_8937) >> S3_P1_SHIFT_8937;
+		p1[4] = (qfprom_cdata[3] &
+				S4_P1_MASK_8937) >> S4_P1_SHIFT_8937;
+		p1[5] = (qfprom_cdata[0] &
+				S5_P1_MASK_8937) >> S5_P1_SHIFT_8937;
+		p1[6] = (qfprom_cdata[0] &
+				S6_P1_MASK_8937) >> S6_P1_SHIFT_8937;
+		p1[7] = (qfprom_cdata[1] & S7_P1_MASK_8937);
+		p1[8] = (qfprom_cdata[1] &
+				S8_P1_MASK_8937) >> S8_P1_SHIFT_8937;
+		p1[9] = (qfprom_cdata[4] & S9_P1_MASK_8937);
+		p1[10] = (qfprom_cdata[4] &
+				S10_P1_MASK_8937) >> S10_P1_SHIFT_8937;
+
+		for (i = 0; i < TSENS_NUM_SENSORS_8937; i++)
+			p1[i] = (((base0) + p1[i]) << 2);
+		break;
+	default:
+		for (i = 0; i < TSENS_NUM_SENSORS_8937; i++) {
+			p1[i] = 500;
+			p2[i] = 780;
+		}
+		break;
+	}
+
+	compute_intercept_slope(tmdev, p1, p2, mode);
+
+	return 0;
+}
+
+int calibrate_8909(struct tsens_device *tmdev)
+{
+	int i, base0 = 0, base1 = 0;
+	u32 p1[TSENS_NUM_SENSORS_8909], p2[TSENS_NUM_SENSORS_8909];
+	int mode = 0, temp = 0;
+	uint32_t calib_data[3] = {0, 0, 0};
+
+	calib_data[0] = readl_relaxed(
+		TSENS_EEPROM(tmdev->tsens_calib_addr));
+	calib_data[1] = readl_relaxed(
+		(TSENS_EEPROM(tmdev->tsens_calib_addr) + 0x4));
+	calib_data[2] = readl_relaxed(
+		(TSENS_EEPROM(tmdev->tsens_calib_addr) + 0x3c));
+	mode = (calib_data[2] & TSENS_CAL_SEL_8909) >> CAL_SEL_SHIFT_8909;
+
+	pr_debug("calib mode is %d\n", mode);
+
+	switch (mode) {
+	case TWO_PT_CALIB:
+		base1 = (calib_data[2] & BASE1_MASK_8909) >> BASE1_SHIFT_8909;
+		p2[0] = (calib_data[0] & S0_P2_MASK_8909) >> S0_P2_SHIFT_8909;
+		p2[1] = (calib_data[0] & S1_P2_MASK_8909) >> S1_P2_SHIFT_8909;
+		p2[2] = (calib_data[0] &
+				S2_P2_MASK_0_1_8909) >> S2_P2_SHIFT_0_1_8909;
+		temp  = (calib_data[1] &
+				S2_P2_MASK_2_5_8909) << S2_P2_SHIFT_2_5_8909;
+		p2[2] |= temp;
+		p2[3] = (calib_data[1] & S3_P2_MASK_8909) >> S3_P2_SHIFT_8909;
+		p2[4] = (calib_data[1] & S4_P2_MASK_8909) >> S4_P2_SHIFT_8909;
+
+		for (i = 0; i < TSENS_NUM_SENSORS_8909; i++)
+			p2[i] = ((base1 + p2[i]) << 2);
+	/* Fall through */
+	case ONE_PT_CALIB2:
+		base0 = (calib_data[2] & BASE0_MASK_8909);
+		p1[0] = (calib_data[0] & S0_P1_MASK_8909);
+		p1[1] = (calib_data[0] & S1_P1_MASK_8909) >> S1_P1_SHIFT_8909;
+		p1[2] = (calib_data[0] & S2_P1_MASK_8909) >> S2_P1_SHIFT_8909;
+		p1[3] = (calib_data[1] & S3_P1_MASK_8909) >> S3_P1_SHIFT_8909;
+		p1[4] = (calib_data[1] & S4_P1_MASK_8909) >> S4_P1_SHIFT_8909;
+		for (i = 0; i < TSENS_NUM_SENSORS_8909; i++)
+			p1[i] = (((base0) + p1[i]) << 2);
+		break;
+	default:
+		for (i = 0; i < TSENS_NUM_SENSORS_8909; i++) {
+			p1[i] = 500;
+			p2[i] = 780;
+		}
+		break;
+	}
+
+	compute_intercept_slope(tmdev, p1, p2, mode);
+	return 0;
+}
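
As a sanity check on the calibration math, the standalone sketch below reproduces what compute_intercept_slope() stores for one sensor and then applies the inverse relation that the tsens1xxx code_to_degc() path is expected to use. It is illustrative only: the codes 500 and 780 (the same values the driver falls back to) are treated here as if they came from a two-point calibration, and the sample raw reading of 640 is made up.

```c
#include <stdio.h>

#define SLOPE_FACTOR	1000
#define CAL_DEGC_PT1	30
#define CAL_DEGC_PT2	120

int main(void)
{
	int p1 = 500, p2 = 780;		/* example ADC codes at 30C and 120C */
	/* slope = (y2 - y1) / (x2 - x1), scaled by SLOPE_FACTOR */
	int slope = (p2 - p1) * SLOPE_FACTOR / (CAL_DEGC_PT2 - CAL_DEGC_PT1);
	/* offset anchors the line at the 30 degC calibration point */
	int offset = p1 * SLOPE_FACTOR - CAL_DEGC_PT1 * slope;
	int code = 640;			/* sample raw sensor reading */
	/* inverse mapping from raw code back to degrees Celsius */
	int degc = (code * SLOPE_FACTOR - offset) / slope;

	printf("slope=%d offset=%d code=%d -> %d degC\n",
	       slope, offset, code, degc);
	return 0;
}
```
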
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index a8c2041..cba6bc6 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -628,6 +628,7 @@
 					    * we just disable hotplug, the
 					    * pci-tunnels stay alive.
 					    */
+	.thaw_noirq = nhi_resume_noirq,
 	.restore_noirq = nhi_resume_noirq,
 };
 
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 54cab59..9e9016e 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -137,6 +137,9 @@
 	struct mutex mutex;
 
 	/* Link layer */
+	int mode;
+#define DLCI_MODE_ABM		0	/* Normal Asynchronous Balanced Mode */
+#define DLCI_MODE_ADM		1	/* Asynchronous Disconnected Mode */
 	spinlock_t lock;	/* Protects the internal state */
 	struct timer_list t1;	/* Retransmit timer for SABM and UA */
 	int retries;
@@ -1380,7 +1383,13 @@
 	ctrl->data = data;
 	ctrl->len = clen;
 	gsm->pending_cmd = ctrl;
-	gsm->cretries = gsm->n2;
+
+	/* If DLCI0 is in ADM mode, skip retries; it won't respond */
+	if (gsm->dlci[0]->mode == DLCI_MODE_ADM)
+		gsm->cretries = 1;
+	else
+		gsm->cretries = gsm->n2;
+
 	mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
 	gsm_control_transmit(gsm, ctrl);
 	spin_unlock_irqrestore(&gsm->control_lock, flags);
@@ -1467,6 +1476,10 @@
  *	in which case an opening port goes back to closed and a closing port
  *	is simply put into closed state (any further frames from the other
  *	end will get a DM response)
+ *
+ *	Some control dlci can stay in ADM mode with other dlci working just
+ *	fine. In that case we can just keep the control dlci open after the
+ *	DLCI_OPENING retries time out.
  */
 
 static void gsm_dlci_t1(unsigned long data)
@@ -1480,8 +1493,16 @@
 		if (dlci->retries) {
 			gsm_command(dlci->gsm, dlci->addr, SABM|PF);
 			mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
-		} else
+		} else if (!dlci->addr && gsm->control == (DM | PF)) {
+			if (debug & 8)
+				pr_info("DLCI %d opening in ADM mode.\n",
+					dlci->addr);
+			dlci->mode = DLCI_MODE_ADM;
+			gsm_dlci_open(dlci);
+		} else {
 			gsm_dlci_close(dlci);
+		}
+
 		break;
 	case DLCI_CLOSING:
 		dlci->retries--;
@@ -1499,8 +1520,8 @@
  *	@dlci: DLCI to open
  *
  *	Commence opening a DLCI from the Linux side. We issue SABM messages
- *	to the modem which should then reply with a UA, at which point we
- *	will move into open state. Opening is done asynchronously with retry
+ *	to the modem which should then reply with a UA or ADM, at which point
+ *	we will move into open state. Opening is done asynchronously with retry
  *	running off timers and the responses.
  */
 
@@ -2854,11 +2875,22 @@
 static int gsm_carrier_raised(struct tty_port *port)
 {
 	struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
+	struct gsm_mux *gsm = dlci->gsm;
+
 	/* Not yet open so no carrier info */
 	if (dlci->state != DLCI_OPEN)
 		return 0;
 	if (debug & 2)
 		return 1;
+
+	/*
+	 * In basic mode with the control channel in ADM mode, the other end
+	 * may not respond to CMD_MSC at all, leaving modem_rx empty.
+	 */
+	if (gsm->encoding == 0 && gsm->dlci[0]->mode == DLCI_MODE_ADM &&
+	    !dlci->modem_rx)
+		return 1;
+
 	return dlci->modem_rx & TIOCM_CD;
 }
 
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index faf50df..1c70541 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -2182,6 +2182,12 @@
 				}
 				if (tty_hung_up_p(file))
 					break;
+				/*
+				 * Abort readers for ttys which never actually
+				 * get hung up.  See __tty_hangup().
+				 */
+				if (test_bit(TTY_HUPPING, &tty->flags))
+					break;
 				if (!timeout)
 					break;
 				if (file->f_flags & O_NONBLOCK) {
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index da31159..e8b34f1 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -613,6 +613,10 @@
 	up->lsr_saved_flags = 0;
 	up->msr_saved_flags = 0;
 
+	/* Disable DMA for console UART */
+	if (uart_console(port))
+		up->dma = NULL;
+
 	if (up->dma) {
 		ret = serial8250_request_dma(up);
 		if (ret) {
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index f6e4373..5d9038a 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1815,7 +1815,8 @@
 
 	status = serial_port_in(port, UART_LSR);
 
-	if (status & (UART_LSR_DR | UART_LSR_BI)) {
+	if (status & (UART_LSR_DR | UART_LSR_BI) &&
+	    iir & UART_IIR_RDI) {
 		if (!up->dma || handle_rx_dma(up, iir))
 			status = serial8250_rx_chars(up, status);
 	}
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index 5ac06fc..fec48de 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -596,6 +596,11 @@
 	if (dev_id < 0)
 		dev_id = 0;
 
+	if (dev_id >= ARRAY_SIZE(arc_uart_ports)) {
+		dev_err(&pdev->dev, "serial%d out of range\n", dev_id);
+		return -EINVAL;
+	}
+
 	uart = &arc_uart_ports[dev_id];
 	port = &uart->port;
 
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index 3b31fd8..b9a4625 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -172,7 +172,7 @@
  */
 int __init setup_earlycon(char *buf)
 {
-	const struct earlycon_id *match;
+	const struct earlycon_id **p_match;
 
 	if (!buf || !buf[0])
 		return -EINVAL;
@@ -180,7 +180,9 @@
 	if (early_con.flags & CON_ENABLED)
 		return -EALREADY;
 
-	for (match = __earlycon_table; match < __earlycon_table_end; match++) {
+	for (p_match = __earlycon_table; p_match < __earlycon_table_end;
+	     p_match++) {
+		const struct earlycon_id *match = *p_match;
 		size_t len = strlen(match->name);
 
 		if (strncmp(buf, match->name, len))
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 76103f2..937f5e1 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -1902,6 +1902,10 @@
 		dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
 		return ret;
 	}
+	if (ret >= ARRAY_SIZE(lpuart_ports)) {
+		dev_err(&pdev->dev, "serial%d out of range\n", ret);
+		return -EINVAL;
+	}
 	sport->port.line = ret;
 	sport->lpuart32 = of_device_is_compatible(np, "fsl,ls1021a-lpuart");
 
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index f575a33..b24edf6 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -2080,6 +2080,12 @@
 	else if (ret < 0)
 		return ret;
 
+	if (sport->port.line >= ARRAY_SIZE(imx_ports)) {
+		dev_err(&pdev->dev, "serial%d out of range\n",
+			sport->port.line);
+		return -EINVAL;
+	}
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(base))
@@ -2145,7 +2151,9 @@
 		 * and DCD (when they are outputs) or enables the respective
 		 * irqs. So set this bit early, i.e. before requesting irqs.
 		 */
-		writel(UFCR_DCEDTE, sport->port.membase + UFCR);
+		reg = readl(sport->port.membase + UFCR);
+		if (!(reg & UFCR_DCEDTE))
+			writel(reg | UFCR_DCEDTE, sport->port.membase + UFCR);
 
 		/*
 		 * Disable UCR3_RI and UCR3_DCD irqs. They are also not
@@ -2156,7 +2164,15 @@
 		       sport->port.membase + UCR3);
 
 	} else {
-		writel(0, sport->port.membase + UFCR);
+		unsigned long ucr3 = UCR3_DSR;
+
+		reg = readl(sport->port.membase + UFCR);
+		if (reg & UFCR_DCEDTE)
+			writel(reg & ~UFCR_DCEDTE, sport->port.membase + UFCR);
+
+		if (!is_imx1_uart(sport))
+			ucr3 |= IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP;
+		writel(ucr3, sport->port.membase + UCR3);
 	}
 
 	clk_disable_unprepare(sport->clk_ipg);
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index f76b3db..9025707 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -617,7 +617,6 @@
 static void msm_geni_serial_complete_rx_eot(struct uart_port *uport)
 {
 	int poll_done = 0, tries = 0;
-	u32 geni_status = 0;
 	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
 
 	do {
@@ -626,11 +625,11 @@
 		tries++;
 	} while (!poll_done && tries < 5);
 
-	geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS);
-
 	if (!poll_done)
-		IPC_LOG_MSG(port->ipc_log_misc, "%s: RX_EOT, GENI:0x%x\n",
-							__func__, geni_status);
+		IPC_LOG_MSG(port->ipc_log_misc,
+		"%s: RX_EOT, GENI:0x%x, DMA_DEBUG:0x%x\n", __func__,
+		geni_read_reg_nolog(uport->membase, SE_GENI_STATUS),
+		geni_read_reg_nolog(uport->membase, SE_DMA_DEBUG_REG0));
 	else
 		geni_write_reg_nolog(RX_EOT, uport->membase, SE_DMA_RX_IRQ_CLR);
 }
@@ -1131,7 +1130,9 @@
 	 * cancel control bit.
 	 */
 	mb();
-	msm_geni_serial_complete_rx_eot(uport);
+	if (!uart_console(uport))
+		msm_geni_serial_complete_rx_eot(uport);
+
 	done = msm_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG,
 					S_GENI_CMD_CANCEL, false);
 	if (done) {
@@ -1660,7 +1661,7 @@
 		msm_port->rx_buf = devm_kzalloc(uport->dev, DMA_RX_BUF_SIZE,
 								GFP_KERNEL);
 		if (!msm_port->rx_buf) {
-			kfree(msm_port->rx_fifo);
+			devm_kfree(uport->dev, msm_port->rx_fifo);
 			msm_port->rx_fifo = NULL;
 			ret = -ENOMEM;
 			goto exit_portsetup;
@@ -1856,6 +1857,7 @@
 	unsigned long ser_clk_cfg = 0;
 	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
 	unsigned long clk_rate;
+	unsigned long flags;
 
 	if (!uart_console(uport)) {
 		int ret = msm_geni_serial_power_on(uport);
@@ -1867,7 +1869,13 @@
 			return;
 		}
 	}
+	/* Take the spinlock, else stop_rx races with the ISR due to Cancel
+	 * and FSM_RESET. There is also a potential race with the ISR's
+	 * dma_map/unmap operations.
+	 */
+	spin_lock_irqsave(&uport->lock, flags);
 	msm_geni_serial_stop_rx(uport);
+	spin_unlock_irqrestore(&uport->lock, flags);
 	/* baud rate */
 	baud = uart_get_baud_rate(uport, termios, old, 300, 4000000);
 	port->cur_baud = baud;
@@ -2419,6 +2427,8 @@
 	if (ret)
 		goto exit_geni_serial_probe;
 
+	dev_port->serial_rsc.ctrl_dev = &pdev->dev;
+
 	if (of_property_read_u32(pdev->dev.of_node, "qcom,wakeup-byte",
 					&wake_char)) {
 		dev_dbg(&pdev->dev, "No Wakeup byte specified\n");
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index efaac5e..69fa4dc 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -395,10 +395,22 @@
 
 static inline void msm_wait_for_xmitr(struct uart_port *port)
 {
+	u32 count = 500000;
+
 	while (!(msm_read(port, UART_SR) & UART_SR_TX_EMPTY)) {
 		if (msm_read(port, UART_ISR) & UART_ISR_TX_READY)
 			break;
 		udelay(1);
+
+		/* In the worst case we are stuck in this loop waiting for
+		 * TX ready; use a 500ms timeout so that we only lose some
+		 * log output to the uart instead of hanging here.
+		 */
+		if (count-- == 0) {
+			msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
+			printk_deferred("uart may lose data, resetting TX!\n");
+			break;
+		}
 	}
 	msm_write(port, UART_CR_CMD_RESET_TX_READY, UART_CR);
 }
@@ -1858,10 +1870,35 @@
 
 	return 0;
 }
+
+static int msm_serial_freeze(struct device *dev)
+{
+	struct uart_port *port = dev_get_drvdata(dev);
+	struct msm_port *msm_port = UART_TO_MSM(port);
+	int ret;
+
+	ret = msm_serial_suspend(dev);
+	if (ret)
+		return ret;
+
+	/*
+	 * Set the rate as recommended to avoid issues where the clock
+	 * driver skips reconfiguring the clock hardware during
+	 * hibernation resume.
+	 */
+	return clk_set_rate(msm_port->clk, 19200000);
+}
 #endif
 
 static const struct dev_pm_ops msm_serial_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(msm_serial_suspend, msm_serial_resume)
+#ifdef CONFIG_PM_SLEEP
+	.suspend = msm_serial_suspend,
+	.resume = msm_serial_resume,
+	.freeze = msm_serial_freeze,
+	.thaw = msm_serial_resume,
+	.poweroff = msm_serial_suspend,
+	.restore = msm_serial_resume,
+#endif
 };
 
 static struct platform_driver msm_platform_driver = {
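Dropping SET_SYSTEM_SLEEP_PM_OPS() in favour of explicit callbacks, as above, is what lets hibernation take a different path from ordinary suspend. A minimal sketch of that shape, with placeholder callbacks rather than this driver's:

#include <linux/pm.h>

static int ex_suspend(struct device *dev) { return 0; }	/* ordinary system suspend */
static int ex_resume(struct device *dev)  { return 0; }	/* ordinary system resume */
static int ex_freeze(struct device *dev)  { return 0; }	/* before the hibernation image is written */
static int ex_restore(struct device *dev) { return 0; }	/* after booting back into the image */

static const struct dev_pm_ops ex_pm_ops = {
#ifdef CONFIG_PM_SLEEP
	.suspend  = ex_suspend,
	.resume   = ex_resume,
	.freeze   = ex_freeze,		/* may need extra clock/register fixups */
	.thaw     = ex_resume,
	.poweroff = ex_suspend,
	.restore  = ex_restore,		/* hardware state is unknown at this point */
#endif
};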
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 07390f8..1d9d778 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1664,6 +1664,10 @@
 		s->port.line = pdev->id < 0 ? 0 : pdev->id;
 	else if (ret < 0)
 		return ret;
+	if (s->port.line >= ARRAY_SIZE(auart_port)) {
+		dev_err(&pdev->dev, "serial%d out of range\n", s->port.line);
+		return -EINVAL;
+	}
 
 	if (of_id) {
 		pdev->id_entry = of_id->data;
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index d65f92b..f2ab6d8a 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1813,6 +1813,10 @@
 
 	dbg("s3c24xx_serial_probe(%p) %d\n", pdev, index);
 
+	if (index >= ARRAY_SIZE(s3c24xx_serial_ports)) {
+		dev_err(&pdev->dev, "serial%d out of range\n", index);
+		return -EINVAL;
+	}
 	ourport = &s3c24xx_serial_ports[index];
 
 	ourport->drv_data = s3c24xx_get_driver_data(pdev);
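The mxs-auart and samsung probe fixes above share one pattern: validate the alias/DT-derived index against the static port table before using it. A minimal sketch, with an illustrative table name and size:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/serial_core.h>

#define EX_NR_PORTS 4
static struct uart_port ex_ports[EX_NR_PORTS];	/* assumed static port table */

static int ex_validate_index(struct device *dev, int index)
{
	/* Reject out-of-range ids before ex_ports[index] is ever touched. */
	if (index < 0 || index >= ARRAY_SIZE(ex_ports)) {
		dev_err(dev, "serial%d out of range\n", index);
		return -EINVAL;
	}
	return 0;
}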
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index fcf803f..b9c7a90 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -884,14 +884,28 @@
 
 	clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(clk)) {
-		if (PTR_ERR(clk) == -EPROBE_DEFER) {
-			ret = -EPROBE_DEFER;
+		ret = PTR_ERR(clk);
+		if (ret == -EPROBE_DEFER)
 			goto err_out;
-		}
+		uartclk = 0;
+	} else {
+		ret = clk_prepare_enable(clk);
+		if (ret)
+			goto err_out;
+
+		ret = devm_add_action_or_reset(&pdev->dev,
+				(void(*)(void *))clk_disable_unprepare,
+				clk);
+		if (ret)
+			goto err_out;
+
+		uartclk = clk_get_rate(clk);
+	}
+
+	if (!uartclk) {
 		dev_notice(&pdev->dev, "Using default clock frequency\n");
 		uartclk = s->chip->freq_std;
-	} else
-		uartclk = clk_get_rate(clk);
+	}
 
 	/* Check input frequency */
 	if ((uartclk < s->chip->freq_min) || (uartclk > s->chip->freq_max)) {
@@ -983,7 +997,7 @@
 	uart_unregister_driver(&s->uart);
 err_out:
 	if (!IS_ERR(s->regulator))
-		return regulator_disable(s->regulator);
+		regulator_disable(s->regulator);
 
 	return ret;
 }
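The sccnxp change registers clk_disable_unprepare() as a devm action so the clock is released automatically on probe failure and on unbind; it does so by casting the function pointer. A slightly more conventional sketch uses a tiny wrapper (names here are illustrative):

#include <linux/clk.h>
#include <linux/device.h>

static void ex_clk_disable(void *data)
{
	clk_disable_unprepare(data);
}

static int ex_clk_enable_managed(struct device *dev, struct clk *clk)
{
	int ret = clk_prepare_enable(clk);

	if (ret)
		return ret;

	/* Undo clk_prepare_enable() automatically when the device goes away. */
	return devm_add_action_or_reset(dev, ex_clk_disable, clk);
}

If registering the action fails, devm_add_action_or_reset() runs it immediately, so the caller never has to disable the clock by hand on that error path.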
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index a55c94d..107f0d1 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1545,7 +1545,16 @@
 	if (s->chan_rx)
 		sci_rx_dma_release(s, false);
 }
-#else
+
+static void sci_flush_buffer(struct uart_port *port)
+{
+	/*
+	 * In uart_flush_buffer(), the xmit circular buffer has just been
+	 * cleared, so we have to reset tx_dma_len accordingly.
+	 */
+	to_sci_port(port)->tx_dma_len = 0;
+}
+#else /* !CONFIG_SERIAL_SH_SCI_DMA */
 static inline void sci_request_dma(struct uart_port *port)
 {
 }
@@ -1553,7 +1562,9 @@
 static inline void sci_free_dma(struct uart_port *port)
 {
 }
-#endif
+
+#define sci_flush_buffer	NULL
+#endif /* !CONFIG_SERIAL_SH_SCI_DMA */
 
 static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
 {
@@ -2551,6 +2562,7 @@
 	.break_ctl	= sci_break_ctl,
 	.startup	= sci_startup,
 	.shutdown	= sci_shutdown,
+	.flush_buffer	= sci_flush_buffer,
 	.set_termios	= sci_set_termios,
 	.pm		= sci_pm,
 	.type		= sci_type,
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index dd4c02f..7497f1d 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -1106,7 +1106,7 @@
 	struct uart_port *port;
 
 	/* Try the given port id if failed use default method */
-	if (cdns_uart_port[id].mapbase != 0) {
+	if (id < CDNS_UART_NR_PORTS && cdns_uart_port[id].mapbase != 0) {
 		/* Find the next unused port */
 		for (id = 0; id < CDNS_UART_NR_PORTS; id++)
 			if (cdns_uart_port[id].mapbase == 0)
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index fb9bada..789c814 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -709,6 +709,14 @@
 		return;
 	}
 
+	/*
+	 * Some console devices aren't actually hung up for technical and
+	 * historical reasons, which can lead to indefinite interruptible
+	 * sleep in n_tty_read().  The following explicitly tells
+	 * n_tty_read() to abort readers.
+	 */
+	set_bit(TTY_HUPPING, &tty->flags);
+
 	/* inuse_filps is protected by the single tty lock,
 	   this really needs to change if we want to flush the
 	   workqueue with the lock held */
@@ -763,6 +771,7 @@
 	 * from the ldisc side, which is now guaranteed.
 	 */
 	set_bit(TTY_HUPPED, &tty->flags);
+	clear_bit(TTY_HUPPING, &tty->flags);
 	tty_unlock(tty);
 
 	if (f)
@@ -3161,7 +3170,10 @@
 
 	kref_init(&tty->kref);
 	tty->magic = TTY_MAGIC;
-	tty_ldisc_init(tty);
+	if (tty_ldisc_init(tty)) {
+		kfree(tty);
+		return NULL;
+	}
 	tty->session = NULL;
 	tty->pgrp = NULL;
 	mutex_init(&tty->legacy_mutex);
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 3a9e2a2..4ab518d 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -175,12 +175,11 @@
 			return ERR_CAST(ldops);
 	}
 
-	ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL);
-	if (ld == NULL) {
-		put_ldops(ldops);
-		return ERR_PTR(-ENOMEM);
-	}
-
+	/*
+	 * There is no way to handle allocation failure of only 16 bytes.
+	 * Let's simplify error handling and save more memory.
+	 */
+	ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL | __GFP_NOFAIL);
 	ld->ops = ldops;
 	ld->tty = tty;
 
@@ -753,12 +752,13 @@
  *	the tty structure is not completely set up when this call is made.
  */
 
-void tty_ldisc_init(struct tty_struct *tty)
+int tty_ldisc_init(struct tty_struct *tty)
 {
 	struct tty_ldisc *ld = tty_ldisc_get(tty, N_TTY);
 	if (IS_ERR(ld))
-		panic("n_tty: init_tty");
+		return PTR_ERR(ld);
 	tty->ldisc = ld;
+	return 0;
 }
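Together with the tty_io.c hunk above, this change turns a panic on ldisc allocation failure into an error that the caller unwinds. The general shape of that pattern, with placeholder names:

#include <linux/slab.h>
#include <linux/errno.h>

struct ex_obj {
	void *ldisc;			/* stand-in for the line-discipline pointer */
};

static int ex_obj_init(struct ex_obj *obj)
{
	obj->ldisc = kmalloc(16, GFP_KERNEL);
	if (!obj->ldisc)
		return -ENOMEM;		/* report the failure instead of panicking */
	return 0;
}

static struct ex_obj *ex_obj_alloc(void)
{
	struct ex_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	if (ex_obj_init(obj)) {		/* propagate init failure to the caller */
		kfree(obj);
		return NULL;
	}
	return obj;
}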
 
 /**
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 68c7bb0..9e1ac58 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1354,6 +1354,11 @@
 		case 3:
 			vc->vc_italic = 1;
 			break;
+		case 21:
+			/*
+			 * No console drivers support double underline, so
+			 * convert it to a single underline.
+			 */
 		case 4:
 			vc->vc_underline = 1;
 			break;
@@ -1389,7 +1394,6 @@
 			vc->vc_disp_ctrl = 1;
 			vc->vc_toggle_meta = 1;
 			break;
-		case 21:
 		case 22:
 			vc->vc_intensity = 1;
 			break;
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index fba021f..208bc52 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -279,7 +279,7 @@
 		map = kzalloc(sizeof(*map), GFP_KERNEL);
 		if (!map) {
 			ret = -ENOMEM;
-			goto err_map_kobj;
+			goto err_map;
 		}
 		kobject_init(&map->kobj, &map_attr_type);
 		map->mem = mem;
@@ -289,7 +289,7 @@
 			goto err_map_kobj;
 		ret = kobject_uevent(&map->kobj, KOBJ_ADD);
 		if (ret)
-			goto err_map;
+			goto err_map_kobj;
 	}
 
 	for (pi = 0; pi < MAX_UIO_PORT_REGIONS; pi++) {
@@ -308,7 +308,7 @@
 		portio = kzalloc(sizeof(*portio), GFP_KERNEL);
 		if (!portio) {
 			ret = -ENOMEM;
-			goto err_portio_kobj;
+			goto err_portio;
 		}
 		kobject_init(&portio->kobj, &portio_attr_type);
 		portio->port = port;
@@ -319,7 +319,7 @@
 			goto err_portio_kobj;
 		ret = kobject_uevent(&portio->kobj, KOBJ_ADD);
 		if (ret)
-			goto err_portio;
+			goto err_portio_kobj;
 	}
 
 	return 0;
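The uio fix above re-points two gotos so each failure path releases exactly what has already been set up (a bare kfree() before kobject_init(), the kobject-aware label after it). The underlying unwinding idiom, with hypothetical acquire/release helpers:

struct ex_ctx;				/* opaque context, illustrative only */
int ex_get_a(struct ex_ctx *ctx);	/* hypothetical acquire helpers */
int ex_get_b(struct ex_ctx *ctx);
int ex_get_c(struct ex_ctx *ctx);
void ex_put_a(struct ex_ctx *ctx);	/* hypothetical release helpers */
void ex_put_b(struct ex_ctx *ctx);

static int ex_setup(struct ex_ctx *ctx)
{
	int ret;

	ret = ex_get_a(ctx);
	if (ret)
		return ret;		/* nothing acquired yet */

	ret = ex_get_b(ctx);
	if (ret)
		goto err_put_a;		/* only A is held here */

	ret = ex_get_c(ctx);
	if (ret)
		goto err_put_b;		/* A and B are held here */

	return 0;

err_put_b:
	ex_put_b(ctx);
err_put_a:
	ex_put_a(ctx);
	return ret;
}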
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 6e0d614..64c6af2c 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -839,7 +839,7 @@
 {
 	ci_hdrc_gadget_destroy(ci);
 	ci_hdrc_host_destroy(ci);
-	if (ci->is_otg)
+	if (ci->is_otg && ci->roles[CI_ROLE_GADGET])
 		ci_hdrc_otg_destroy(ci);
 }
 
@@ -939,27 +939,35 @@
 	/* initialize role(s) before the interrupt is requested */
 	if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_HOST) {
 		ret = ci_hdrc_host_init(ci);
-		if (ret)
-			dev_info(dev, "doesn't support host\n");
+		if (ret) {
+			if (ret == -ENXIO)
+				dev_info(dev, "doesn't support host\n");
+			else
+				goto deinit_phy;
+		}
 	}
 
 	if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_PERIPHERAL) {
 		ret = ci_hdrc_gadget_init(ci);
-		if (ret)
-			dev_info(dev, "doesn't support gadget\n");
+		if (ret) {
+			if (ret == -ENXIO)
+				dev_info(dev, "doesn't support gadget\n");
+			else
+				goto deinit_host;
+		}
 	}
 
 	if (!ci->roles[CI_ROLE_HOST] && !ci->roles[CI_ROLE_GADGET]) {
 		dev_err(dev, "no supported roles\n");
 		ret = -ENODEV;
-		goto deinit_phy;
+		goto deinit_gadget;
 	}
 
 	if (ci->is_otg && ci->roles[CI_ROLE_GADGET]) {
 		ret = ci_hdrc_otg_init(ci);
 		if (ret) {
 			dev_err(dev, "init otg fails, ret = %d\n", ret);
-			goto stop;
+			goto deinit_gadget;
 		}
 	}
 
@@ -1024,7 +1032,12 @@
 
 	ci_extcon_unregister(ci);
 stop:
-	ci_role_destroy(ci);
+	if (ci->is_otg && ci->roles[CI_ROLE_GADGET])
+		ci_hdrc_otg_destroy(ci);
+deinit_gadget:
+	ci_hdrc_gadget_destroy(ci);
+deinit_host:
+	ci_hdrc_host_destroy(ci);
 deinit_phy:
 	ci_usb_phy_exit(ci);
 
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 34d23cc..fe22ac7 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -174,6 +174,7 @@
 		wb = &acm->wb[wbn];
 		if (!wb->use) {
 			wb->use = 1;
+			wb->len = 0;
 			return wbn;
 		}
 		wbn = (wbn + 1) % ACM_NW;
@@ -731,16 +732,18 @@
 static void acm_tty_flush_chars(struct tty_struct *tty)
 {
 	struct acm *acm = tty->driver_data;
-	struct acm_wb *cur = acm->putbuffer;
+	struct acm_wb *cur;
 	int err;
 	unsigned long flags;
 
+	spin_lock_irqsave(&acm->write_lock, flags);
+
+	cur = acm->putbuffer;
 	if (!cur) /* nothing to do */
-		return;
+		goto out;
 
 	acm->putbuffer = NULL;
 	err = usb_autopm_get_interface_async(acm->control);
-	spin_lock_irqsave(&acm->write_lock, flags);
 	if (err < 0) {
 		cur->use = 0;
 		acm->putbuffer = cur;
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 0dce6ab..876679a 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -186,7 +186,9 @@
 static const unsigned short high_speed_maxpacket_maxes[4] = {
 	[USB_ENDPOINT_XFER_CONTROL] = 64,
 	[USB_ENDPOINT_XFER_ISOC] = 1024,
-	[USB_ENDPOINT_XFER_BULK] = 512,
+
+	/* Bulk should be 512, but some devices use 1024: we will warn below */
+	[USB_ENDPOINT_XFER_BULK] = 1024,
 	[USB_ENDPOINT_XFER_INT] = 1024,
 };
 static const unsigned short super_speed_maxpacket_maxes[4] = {
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index ee33c0d..5532246 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1495,9 +1495,10 @@
 	 * Some buses would like to keep their devices in suspend
 	 * state after system resume.  Their resume happen when
 	 * a remote wakeup is detected or interface driver start
-	 * I/O.
+	 * I/O. However, when the system is restoring from hibernation,
+	 * make sure all the devices are resumed.
 	 */
-	if (udev->bus->skip_resume)
+	if (udev->bus->skip_resume && msg.event != PM_EVENT_RESTORE)
 		return 0;
 
 	/* For all calls, take the device back to full power and
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 0f10ff2..8df85f6 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -240,8 +240,13 @@
 	if (!udev->parent)
 		rc = hcd_bus_suspend(udev, msg);
 
-	/* Non-root devices don't need to do anything for FREEZE or PRETHAW */
-	else if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW)
+	/*
+	 * Non-root USB2 devices don't need to do anything for FREEZE
+	 * or PRETHAW. USB3 devices don't support global suspend and
+	 * need to be selectively suspended.
+	 */
+	else if ((msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW)
+		 && (udev->speed < USB_SPEED_SUPER))
 		rc = 0;
 	else
 		rc = usb_port_suspend(udev, msg);
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index e4b39a7..6a4ea98 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2430,6 +2430,7 @@
 
 	spin_lock_irqsave (&hcd_root_hub_lock, flags);
 	if (hcd->rh_registered) {
+		pm_wakeup_event(&hcd->self.root_hub->dev, 0);
 		set_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
 		queue_work(pm_wq, &hcd->wakeup_work);
 	}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index a9117ee..a9b3bbd 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -659,12 +659,17 @@
 		unsigned int portnum)
 {
 	struct usb_hub *hub;
+	struct usb_port *port_dev;
 
 	if (!hdev)
 		return;
 
 	hub = usb_hub_to_struct_hub(hdev);
 	if (hub) {
+		port_dev = hub->ports[portnum - 1];
+		if (port_dev && port_dev->child)
+			pm_wakeup_event(&port_dev->child->dev, 0);
+
 		set_bit(portnum, hub->wakeup_bits);
 		kick_hub_wq(hub);
 	}
@@ -3428,8 +3433,11 @@
 
 	/* Skip the initial Clear-Suspend step for a remote wakeup */
 	status = hub_port_status(hub, port1, &portstatus, &portchange);
-	if (status == 0 && !port_is_suspended(hub, portstatus))
+	if (status == 0 && !port_is_suspended(hub, portstatus)) {
+		if (portchange & USB_PORT_STAT_C_SUSPEND)
+			pm_wakeup_event(&udev->dev, 0);
 		goto SuspendCleared;
+	}
 
 	/* see 7.1.7.7; affects power usage, but not budgeting */
 	if (hub_is_superspeed(hub->hdev))
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 4f1c6f8..40ce175 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -45,6 +45,9 @@
 	{ USB_DEVICE(0x03f0, 0x0701), .driver_info =
 			USB_QUIRK_STRING_FETCH_255 },
 
+	/* HP v222w 16GB Mini USB Drive */
+	{ USB_DEVICE(0x03f0, 0x3f40), .driver_info = USB_QUIRK_DELAY_INIT },
+
 	/* Creative SB Audigy 2 NX */
 	{ USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
 
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 2a21a04..0f45a2f 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -209,7 +209,7 @@
 	unsigned char           dir_in;
 	unsigned char           index;
 	unsigned char           mc;
-	unsigned char           interval;
+	u16                     interval;
 
 	unsigned int            halted:1;
 	unsigned int            periodic:1;
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index cfdd5c3..09921ef 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -2642,12 +2642,6 @@
 	dwc2_writel(dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
 	       DXEPCTL_USBACTEP, hsotg->regs + DIEPCTL0);
 
-	dwc2_hsotg_enqueue_setup(hsotg);
-
-	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
-		dwc2_readl(hsotg->regs + DIEPCTL0),
-		dwc2_readl(hsotg->regs + DOEPCTL0));
-
 	/* clear global NAKs */
 	val = DCTL_CGOUTNAK | DCTL_CGNPINNAK;
 	if (!is_usb_reset)
@@ -2658,6 +2652,12 @@
 	mdelay(3);
 
 	hsotg->lx_state = DWC2_L0;
+
+	dwc2_hsotg_enqueue_setup(hsotg);
+
+	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
+		dwc2_readl(hsotg->regs + DIEPCTL0),
+		dwc2_readl(hsotg->regs + DOEPCTL0));
 }
 
 static void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg)
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index dfc0566..0a0cf15 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -2268,10 +2268,22 @@
  */
 static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
 {
-	u32 hcfg, hfir, otgctl;
+	u32 hcfg, hfir, otgctl, usbcfg;
 
 	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
 
+	/* Set HS/FS Timeout Calibration to 7 (max available value).
+	 * The number of PHY clocks that the application programs in
+	 * this field is added to the high/full speed interpacket timeout
+	 * duration in the core to account for any additional delays
+	 * introduced by the PHY. This can be required, because the delay
+	 * introduced by the PHY in generating the linestate condition
+	 * can vary from one PHY to another.
+	 */
+	usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+	usbcfg |= GUSBCFG_TOUTCAL(7);
+	dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
+
 	/* Restart the Phy Clock */
 	dwc2_writel(0, hsotg->regs + PCGCTL);
 
@@ -3220,7 +3232,6 @@
 		dwc2_core_init(hsotg, false);
 		dwc2_enable_global_interrupts(hsotg);
 		spin_lock_irqsave(&hsotg->lock, flags);
-		dwc2_hsotg_disconnect(hsotg);
 		dwc2_hsotg_core_init_disconnected(hsotg, false);
 		spin_unlock_irqrestore(&hsotg->lock, flags);
 		dwc2_hsotg_core_connect(hsotg);
@@ -3238,8 +3249,12 @@
 		if (count > 250)
 			dev_err(hsotg->dev,
 				"Connection id status change timed out\n");
-		hsotg->op_state = OTG_STATE_A_HOST;
 
+		spin_lock_irqsave(&hsotg->lock, flags);
+		dwc2_hsotg_disconnect(hsotg);
+		spin_unlock_irqrestore(&hsotg->lock, flags);
+
+		hsotg->op_state = OTG_STATE_A_HOST;
 		/* Initialize the Core for Host mode */
 		dwc2_core_init(hsotg, false);
 		dwc2_enable_global_interrupts(hsotg);
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index ef3f542..93d8e14 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -910,6 +910,21 @@
 	}
 
 	/*
+	 * Workaround for STAR 9001285599 which affects dwc3 core version 3.20a
+	 * only. If the PM TIMER ECN is enabled through GUCTL2[19], the link
+	 * compliance test (TD7.21) may fail. If the ECN is not enabled
+	 * (GUCTL2[19] = 0), the controller uses the old timer value (5us),
+	 * which is still fine for the link compliance test. Hence, do not
+	 * enable the PM TIMER ECN in v3.20a by default; keep GUCTL2[19] = 0
+	 * instead.
+	 */
+	if (dwc->revision == DWC3_REVISION_320A) {
+		reg = dwc3_readl(dwc->regs, DWC3_GUCTL2);
+		reg &= ~DWC3_GUCTL2_LC_TIMER;
+		dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
+	}
+
+	/*
 	 * Enable hardware control of sending remote wakeup in HS when
 	 * the device is in the L1 state.
 	 */
@@ -1567,6 +1582,19 @@
 	return 0;
 }
 
+static int dwc3_pm_restore(struct device *dev)
+{
+	/*
+	 * Set the core as runtime active to prevent the runtime
+	 * PM ops being called before the PM restore is completed.
+	 */
+	pm_runtime_disable(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
+	return 0;
+}
+
 static int dwc3_resume(struct device *dev)
 {
 	struct dwc3	*dwc = dev_get_drvdata(dev);
@@ -1591,7 +1619,12 @@
 #endif /* CONFIG_PM_SLEEP */
 
 static const struct dev_pm_ops dwc3_dev_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume)
+	.suspend	= dwc3_suspend,
+	.resume		= dwc3_resume,
+	.freeze		= dwc3_suspend,
+	.thaw		= dwc3_pm_restore,
+	.poweroff	= dwc3_suspend,
+	.restore	= dwc3_pm_restore,
 	SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume,
 			dwc3_runtime_idle)
 };
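The GUCTL2 quirk above is a revision-gated read-modify-write of a controller register. A generic sketch of that pattern; the offset, bit and revision constant below are placeholders, not dwc3's:

#include <linux/io.h>
#include <linux/bitops.h>

#define EX_CTRL_REG		0x1c	/* assumed control register offset */
#define EX_CTRL_FEATURE_EN	BIT(19)	/* assumed feature-enable bit */
#define EX_REV_AFFECTED		0x320a	/* assumed affected core revision */

static void ex_apply_quirk(void __iomem *regs, u32 revision)
{
	u32 reg;

	if (revision != EX_REV_AFFECTED)
		return;

	/* Clear the feature bit only on the affected revision. */
	reg = readl(regs + EX_CTRL_REG);
	reg &= ~EX_CTRL_FEATURE_EN;
	writel(reg, regs + EX_CTRL_REG);
}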
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index e5fe7a4..0963aa3 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -269,6 +269,8 @@
 #define DWC3_GUSB3PIPECTL_ELASTIC_BUF_MODE	(1 << 0)
 
 /* Global TX Fifo Size Register */
+#define DWC31_GTXFIFOSIZ_TXFRAMNUM	BIT(15)		/* DWC_usb31 only */
+#define DWC31_GTXFIFOSIZ_TXFDEF(n)	((n) & 0x7fff)	/* DWC_usb31 only */
 #define DWC3_GTXFIFOSIZ_TXFDEF(n)	((n) & 0xffff)
 #define DWC3_GTXFIFOSIZ_TXFSTADDR(n)	((n) & 0xffff0000)
 
@@ -333,6 +335,7 @@
 #define DWC3_GUCTL2_RST_ACTBITLATER		(1 << 14)
 #define DWC3_GUCTL2_HP_TIMER(n)			((n) << 21)
 #define DWC3_GUCTL2_HP_TIMER_MASK		DWC3_GUCTL2_HP_TIMER(0x1f)
+#define DWC3_GUCTL2_LC_TIMER			(1 << 19)
 
 /* Device Configuration Register */
 #define DWC3_DCFG_DEVADDR(addr)	((addr) << 3)
diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c
index 7266470..12ee23f 100644
--- a/drivers/usb/dwc3/dwc3-keystone.c
+++ b/drivers/usb/dwc3/dwc3-keystone.c
@@ -107,6 +107,10 @@
 		return PTR_ERR(kdwc->usbss);
 
 	kdwc->clk = devm_clk_get(kdwc->dev, "usb");
+	if (IS_ERR(kdwc->clk)) {
+		dev_err(kdwc->dev, "unable to get usb clock\n");
+		return PTR_ERR(kdwc->clk);
+	}
 
 	error = clk_prepare_enable(kdwc->clk);
 	if (error < 0) {
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 1cab2ca..2c30660 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -73,6 +73,8 @@
 
 /* XHCI registers */
 #define USB3_HCSPARAMS1		(0x4)
+#define USB3_HCCPARAMS2		(0x1c)
+#define HCC_CTC(p)		((p) & (1 << 3))
 #define USB3_PORTSC		(0x420)
 
 /**
@@ -255,6 +257,7 @@
 	struct notifier_block	host_restart_nb;
 
 	struct notifier_block	host_nb;
+	bool			xhci_ss_compliance_enable;
 
 	atomic_t                in_p3;
 	unsigned int		lpm_to_suspend_delay;
@@ -2093,6 +2096,19 @@
 	dwc3_core_init(dwc);
 	/* Re-configure event buffers */
 	dwc3_event_buffers_setup(dwc);
+
+	/* Get initial P3 status and enable IN_P3 event */
+	val = dwc3_msm_read_reg_field(mdwc->base,
+		DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
+	atomic_set(&mdwc->in_p3, val == DWC3_LINK_STATE_U3);
+	dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG,
+				PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);
+
+	if (mdwc->otg_state == OTG_STATE_A_HOST) {
+		dev_dbg(mdwc->dev, "%s: set the core in host mode\n",
+							__func__);
+		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
+	}
 }
 
 static int dwc3_msm_prepare_suspend(struct dwc3_msm *mdwc)
@@ -2246,7 +2262,7 @@
 	}
 }
 
-static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
+static int dwc3_msm_suspend(struct dwc3_msm *mdwc, bool hibernation)
 {
 	int ret;
 	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
@@ -2363,8 +2379,8 @@
 	clk_disable_unprepare(mdwc->xo_clk);
 
 	/* Perform controller power collapse */
-	if (!mdwc->in_host_mode && (!mdwc->in_device_mode ||
-					mdwc->in_restart)) {
+	if ((!mdwc->in_host_mode && (!mdwc->in_device_mode ||
+					mdwc->in_restart)) || hibernation) {
 		mdwc->lpm_flags |= MDWC3_POWER_COLLAPSE;
 		dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
 		dwc3_msm_config_gdsc(mdwc, 0);
@@ -2520,8 +2536,6 @@
 
 	/* Recover from controller power collapse */
 	if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
-		u32 tmp;
-
 		if (mdwc->iommu_map) {
 			ret = arm_iommu_attach_device(mdwc->dev,
 					mdwc->iommu_map);
@@ -2536,13 +2550,6 @@
 
 		dwc3_msm_power_collapse_por(mdwc);
 
-		/* Get initial P3 status and enable IN_P3 event */
-		tmp = dwc3_msm_read_reg_field(mdwc->base,
-			DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
-		atomic_set(&mdwc->in_p3, tmp == DWC3_LINK_STATE_U3);
-		dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG,
-					PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);
-
 		mdwc->lpm_flags &= ~MDWC3_POWER_COLLAPSE;
 	}
 
@@ -2821,8 +2828,11 @@
 	int ret;
 
 	mdwc->dwc3_gdsc = devm_regulator_get(mdwc->dev, "USB3_GDSC");
-	if (IS_ERR(mdwc->dwc3_gdsc))
+	if (IS_ERR(mdwc->dwc3_gdsc)) {
+		if (PTR_ERR(mdwc->dwc3_gdsc) == -EPROBE_DEFER)
+			return PTR_ERR(mdwc->dwc3_gdsc);
 		mdwc->dwc3_gdsc = NULL;
+	}
 
 	mdwc->xo_clk = devm_clk_get(mdwc->dev, "xo");
 	if (IS_ERR(mdwc->xo_clk)) {
@@ -3279,6 +3289,35 @@
 static DEVICE_ATTR_RW(usb_compliance_mode);
 
 
+static ssize_t xhci_link_compliance_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+	if (mdwc->xhci_ss_compliance_enable)
+		return snprintf(buf, PAGE_SIZE, "y\n");
+	else
+		return snprintf(buf, PAGE_SIZE, "n\n");
+}
+
+static ssize_t xhci_link_compliance_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+	bool value;
+	int ret;
+
+	ret = strtobool(buf, &value);
+	if (!ret) {
+		mdwc->xhci_ss_compliance_enable = value;
+		return count;
+	}
+
+	return ret;
+}
+
+static DEVICE_ATTR_RW(xhci_link_compliance);
+
 static int dwc3_msm_probe(struct platform_device *pdev)
 {
 	struct device_node *node = pdev->dev.of_node, *dwc3_node;
@@ -3636,6 +3675,7 @@
 	device_create_file(&pdev->dev, &dev_attr_mode);
 	device_create_file(&pdev->dev, &dev_attr_speed);
 	device_create_file(&pdev->dev, &dev_attr_usb_compliance_mode);
+	device_create_file(&pdev->dev, &dev_attr_xhci_link_compliance);
 
 	host_mode = usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST;
 	if (!dwc->is_drd && host_mode) {
@@ -3674,6 +3714,7 @@
 	int ret_pm;
 
 	device_remove_file(&pdev->dev, &dev_attr_mode);
+	device_remove_file(&pdev->dev, &dev_attr_xhci_link_compliance);
 	if (mdwc->usb_psy)
 		power_supply_put(mdwc->usb_psy);
 
@@ -3939,6 +3980,25 @@
 		}
 
 		/*
+		 * If the Compliance Transition Capability(CTC) flag of
+		 * HCCPARAMS2 register is set and xhci_link_compliance sysfs
+		 * param has been enabled by the user for the SuperSpeed host
+		 * controller, then write 10 (Link in Compliance Mode State)
+		 * onto the Port Link State(PLS) field of the PORTSC register
+		 * for 3.0 host controller which is at an offset of USB3_PORTSC
+		 * + 0x10 from the DWC3 base address. Also, disable the runtime
+		 * PM of 3.0 root hub (root hub of shared_hcd of xhci device)
+		 */
+		if (HCC_CTC(dwc3_msm_read_reg(mdwc->base, USB3_HCCPARAMS2))
+				&& mdwc->xhci_ss_compliance_enable
+				&& dwc->maximum_speed == USB_SPEED_SUPER) {
+			dwc3_msm_write_reg(mdwc->base, USB3_PORTSC + 0x10,
+					0x10340);
+			pm_runtime_disable(&hcd_to_xhci(platform_get_drvdata(
+				dwc->xhci))->shared_hcd->self.root_hub->dev);
+		}
+
+		/*
 		 * In some cases it is observed that USB PHY is not going into
 		 * suspend with host mode suspend functionality. Hence disable
 		 * XHCI's runtime PM here if disable_host_mode_pm is set.
@@ -4400,7 +4460,39 @@
 		return -EBUSY;
 	}
 
-	ret = dwc3_msm_suspend(mdwc);
+	ret = dwc3_msm_suspend(mdwc, false);
+	if (!ret)
+		atomic_set(&mdwc->pm_suspended, 1);
+
+	return ret;
+}
+
+static int dwc3_msm_pm_freeze(struct device *dev)
+{
+	int ret = 0;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+	dev_dbg(dev, "dwc3-msm PM freeze\n");
+	dbg_event(0xFF, "PM Freeze", 0);
+
+	flush_workqueue(mdwc->dwc3_wq);
+
+	/* Resume the core to make sure we can power collapse it */
+	ret = dwc3_msm_resume(mdwc);
+
+	/*
+	 * The PHYs also need to be power collapsed, so call notify_disconnect
+	 * before suspend to make sure that happens.
+	 */
+	usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
+	mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
+	if (mdwc->ss_phy) {
+		usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER);
+		mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
+	}
+
+	ret = dwc3_msm_suspend(mdwc, true);
 	if (!ret)
 		atomic_set(&mdwc->pm_suspended, 1);
 
@@ -4424,6 +4516,35 @@
 
 	return 0;
 }
+
+static int dwc3_msm_pm_restore(struct device *dev)
+{
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+	dev_dbg(dev, "dwc3-msm PM restore\n");
+	dbg_event(0xFF, "PM Restore", 0);
+
+	atomic_set(&mdwc->pm_suspended, 0);
+
+	dwc3_msm_resume(mdwc);
+	pm_runtime_disable(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
+	/* Restore PHY flags if hibernated in host mode */
+	if (mdwc->otg_state == OTG_STATE_A_HOST) {
+		usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
+		mdwc->hs_phy->flags |= PHY_HOST_MODE;
+		if (mdwc->ss_phy) {
+			usb_phy_notify_connect(mdwc->ss_phy, USB_SPEED_SUPER);
+			mdwc->ss_phy->flags |= PHY_HOST_MODE;
+		}
+	}
+
+	return 0;
+}
 #endif
 
 #ifdef CONFIG_PM
@@ -4446,7 +4567,7 @@
 	dev_dbg(dev, "DWC3-msm runtime suspend\n");
 	dbg_event(0xFF, "RT Sus", 0);
 
-	return dwc3_msm_suspend(mdwc);
+	return dwc3_msm_suspend(mdwc, false);
 }
 
 static int dwc3_msm_runtime_resume(struct device *dev)
@@ -4462,7 +4583,12 @@
 #endif
 
 static const struct dev_pm_ops dwc3_msm_dev_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(dwc3_msm_pm_suspend, dwc3_msm_pm_resume)
+	.suspend	= dwc3_msm_pm_suspend,
+	.resume		= dwc3_msm_pm_resume,
+	.freeze		= dwc3_msm_pm_freeze,
+	.restore	= dwc3_msm_pm_restore,
+	.thaw		= dwc3_msm_pm_restore,
+	.poweroff	= dwc3_msm_pm_suspend,
 	SET_RUNTIME_PM_OPS(dwc3_msm_runtime_suspend, dwc3_msm_runtime_resume,
 				dwc3_msm_runtime_idle)
 };
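xhci_link_compliance above is a plain boolean device attribute. A self-contained sketch of that sysfs pattern; the attribute and field names are illustrative, not the driver's:

#include <linux/device.h>
#include <linux/string.h>
#include <linux/sysfs.h>

struct ex_drvdata {
	bool feature_enable;
};

static ssize_t feature_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ex_drvdata *d = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%c\n", d->feature_enable ? 'y' : 'n');
}

static ssize_t feature_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ex_drvdata *d = dev_get_drvdata(dev);
	bool value;
	int ret;

	ret = strtobool(buf, &value);	/* accepts y/n/Y/N/1/0 */
	if (ret)
		return ret;

	d->feature_enable = value;
	return count;
}
static DEVICE_ATTR_RW(feature_enable);

DEVICE_ATTR_RW() derives the attribute from the _show/_store pair, which is what lets the probe and remove paths use device_create_file()/device_remove_file() as above.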
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 35b6351..f221cb4 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -598,9 +598,25 @@
 	return 0;
 }
 
+static void dwc3_omap_complete(struct device *dev)
+{
+	struct dwc3_omap	*omap = dev_get_drvdata(dev);
+
+	if (extcon_get_state(omap->edev, EXTCON_USB))
+		dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID);
+	else
+		dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_OFF);
+
+	if (extcon_get_state(omap->edev, EXTCON_USB_HOST))
+		dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND);
+	else
+		dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_FLOAT);
+}
+
 static const struct dev_pm_ops dwc3_omap_dev_pm_ops = {
 
 	SET_SYSTEM_SLEEP_PM_OPS(dwc3_omap_suspend, dwc3_omap_resume)
+	.complete = dwc3_omap_complete,
 };
 
 #define DEV_PM_OPS	(&dwc3_omap_dev_pm_ops)
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 427291a..d6493ab 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -173,7 +173,7 @@
 	ret = platform_device_add_resources(dwc3, res, ARRAY_SIZE(res));
 	if (ret) {
 		dev_err(dev, "couldn't add resources to dwc3 device\n");
-		return ret;
+		goto err;
 	}
 
 	dwc3->dev.parent = dev;
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 902d36e..d9baac6 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -43,6 +43,8 @@
 static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep);
 static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
 		struct dwc3_ep *dep, struct dwc3_request *req);
+static int dwc3_ep0_delegate_req(struct dwc3 *dwc,
+		struct usb_ctrlrequest *ctrl);
 
 static const char *dwc3_ep0_state_string(enum dwc3_ep0_state state)
 {
@@ -358,12 +360,14 @@
 static void dwc3_ep0_status_cmpl(struct usb_ep *ep, struct usb_request *req)
 {
 }
+
 /*
  * ch 9.4.5
  */
 static int dwc3_ep0_handle_status(struct dwc3 *dwc,
 		struct usb_ctrlrequest *ctrl)
 {
+	int ret;
 	struct dwc3_ep		*dep;
 	u32			recip;
 	u32			reg;
@@ -397,7 +401,8 @@
 		 * Function Remote Wake Capable	D0
 		 * Function Remote Wakeup	D1
 		 */
-		break;
+		ret = dwc3_ep0_delegate_req(dwc, ctrl);
+		return ret;
 
 	case USB_RECIP_ENDPOINT:
 		dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
@@ -524,6 +529,9 @@
 			if (wIndex & USB_INTRF_FUNC_SUSPEND_RW)
 				/* XXX enable remote wakeup */
 				;
+			ret = dwc3_ep0_delegate_req(dwc, ctrl);
+			if (ret)
+				return ret;
 			break;
 		default:
 			return -EINVAL;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 73caa97..e6c89a3 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -414,7 +414,7 @@
 		dwc3_trace(trace_dwc3_gadget, "Command Timed Out");
 		dev_err(dwc->dev, "%s command timeout for %s\n",
 			dwc3_gadget_ep_cmd_string(cmd), dep->name);
-		if (cmd != DWC3_DEPCMD_ENDTRANSFER) {
+		if (DWC3_DEPCMD_CMD(cmd) != DWC3_DEPCMD_ENDTRANSFER) {
 			dwc->ep_cmd_timeout_cnt++;
 			dwc3_notify_event(dwc,
 				DWC3_CONTROLLER_RESTART_USB_SESSION, 0);
@@ -1307,7 +1307,7 @@
 
 	if (req->request.status == -EINPROGRESS) {
 		ret = -EBUSY;
-		dev_err_ratelimited(dwc->dev, "%s: %p request already in queue",
+		dev_err_ratelimited(dwc->dev, "%s: %pK request already in queue",
 					dep->name, req);
 		return ret;
 	}
@@ -1971,7 +1971,7 @@
 	if (dwc3_gadget_is_suspended(dwc)) {
 		pr_debug("USB bus is suspended. Scheduling wakeup and returning -EAGAIN.\n");
 		dwc3_gadget_wakeup(&dwc->gadget);
-		return -EAGAIN;
+		return -EACCES;
 	}
 
 	if (dwc->revision < DWC3_REVISION_220A) {
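The DEPCMD comparison fix works because the command word carries flag/parameter bits above the opcode field, so a direct equality test can never match once any flag is set. A generic sketch of masking before comparing; the field layout below is illustrative:

#include <linux/bitops.h>
#include <linux/types.h>

#define EX_CMD_MASK		GENMASK(3, 0)	/* assumed opcode field */
#define EX_CMD(x)		((x) & EX_CMD_MASK)
#define EX_CMD_ENDTRANSFER	0x8		/* assumed opcode value */

static inline bool ex_is_endtransfer(u32 cmd)
{
	/* Compare only the opcode; ignore flag/parameter bits. */
	return EX_CMD(cmd) == EX_CMD_ENDTRANSFER;
}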
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index d3e0ca5..90cbb61 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -568,6 +568,7 @@
 config USB_CONFIGFS_F_GSI
 	bool "USB GSI function"
 	select USB_F_GSI
+	select USB_U_ETHER
 	depends on USB_CONFIGFS
 	help
 	  Generic function driver to support h/w acceleration to IPA over GSI.
diff --git a/drivers/usb/gadget/ci13xxx_msm.c b/drivers/usb/gadget/ci13xxx_msm.c
index d4c243c..42936f1 100644
--- a/drivers/usb/gadget/ci13xxx_msm.c
+++ b/drivers/usb/gadget/ci13xxx_msm.c
@@ -70,18 +70,11 @@
 	struct usb_phy *phy = udc->transceiver;
 
 	if (phy && (phy->flags & ENABLE_DP_MANUAL_PULLUP)) {
-		u32 temp;
-
 		usb_phy_io_write(phy,
 				ULPI_MISC_A_VBUSVLDEXT |
 				ULPI_MISC_A_VBUSVLDEXTSEL,
 				ULPI_CLR(ULPI_MISC_A));
 
-		/* Notify LINK of VBUS LOW */
-		temp = readl_relaxed(USB_USBCMD);
-		temp &= ~USBCMD_SESS_VLD_CTRL;
-		writel_relaxed(temp, USB_USBCMD);
-
 		/*
 		 * Add memory barrier as it is must to complete
 		 * above USB PHY and Link register writes before
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 939c219..703fb24 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -3951,7 +3951,7 @@
 	usb_del_gadget_udc(&udc->gadget);
 remove_trans:
 	if (udc->transceiver)
-		otg_set_peripheral(udc->transceiver->otg, &udc->gadget);
+		otg_set_peripheral(udc->transceiver->otg, NULL);
 
 	err("error = %i", retval);
 put_transceiver:
@@ -3989,7 +3989,7 @@
 	usb_del_gadget_udc(&udc->gadget);
 
 	if (udc->transceiver) {
-		otg_set_peripheral(udc->transceiver->otg, &udc->gadget);
+		otg_set_peripheral(udc->transceiver->otg, NULL);
 		usb_put_phy(udc->transceiver);
 	}
 	destroy_eps(udc);
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index ab2c623..6f8a04e 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -490,11 +490,17 @@
 
 	spin_lock_irqsave(&func->config->cdev->lock, flags);
 	ret = usb_func_wakeup_int(func);
-	if (ret == -EAGAIN) {
+	if (ret == -EACCES) {
 		DBG(func->config->cdev,
 			"Function wakeup for %s could not complete due to suspend state. Delayed until after bus resume.\n",
 			func->name ? func->name : "");
 		ret = 0;
+		func->func_wakeup_pending = 1;
+	} else if (ret == -EAGAIN) {
+		DBG(func->config->cdev,
+			"Function wakeup for %s sent.\n",
+			func->name ? func->name : "");
+		ret = 0;
 	} else if (ret < 0 && ret != -ENOTSUPP) {
 		ERROR(func->config->cdev,
 			"Failed to wake function %s from suspend state. ret=%d. Canceling USB request.\n",
@@ -524,7 +530,12 @@
 	gadget = func->config->cdev->gadget;
 	if (func->func_is_suspended && func->func_wakeup_allowed) {
 		ret = usb_gadget_func_wakeup(gadget, func->intf_id);
-		if (ret == -EAGAIN) {
+		if (ret == -EACCES) {
+			pr_debug("bus suspended func wakeup for %s delayed until bus resume.\n",
+				func->name ? func->name : "");
+			func->func_wakeup_pending = 1;
+			ret = -EAGAIN;
+		} else if (ret == -EAGAIN) {
 			pr_debug("bus suspended func wakeup for %s delayed until bus resume.\n",
 				func->name ? func->name : "");
 		} else if (ret < 0 && ret != -ENOTSUPP) {
@@ -1563,7 +1574,7 @@
 	return res;
 }
 
-static void fill_ext_compat(struct usb_configuration *c, u8 *buf)
+static int fill_ext_compat(struct usb_configuration *c, u8 *buf)
 {
 	int i, count;
 
@@ -1590,10 +1601,12 @@
 				buf += 23;
 			}
 			count += 24;
-			if (count >= 4096)
-				return;
+			if (count + 24 >= USB_COMP_EP0_OS_DESC_BUFSIZ)
+				return count;
 		}
 	}
+
+	return count;
 }
 
 static int count_ext_prop(struct usb_configuration *c, int interface)
@@ -1638,25 +1651,20 @@
 	struct usb_os_desc *d;
 	struct usb_os_desc_ext_prop *ext_prop;
 	int j, count, n, ret;
-	u8 *start = buf;
 
 	f = c->interface[interface];
+	count = 10; /* header length */
 	for (j = 0; j < f->os_desc_n; ++j) {
 		if (interface != f->os_desc_table[j].if_id)
 			continue;
 		d = f->os_desc_table[j].os_desc;
 		if (d)
 			list_for_each_entry(ext_prop, &d->ext_prop, entry) {
-				/* 4kB minus header length */
-				n = buf - start;
-				if (n >= 4086)
-					return 0;
-
-				count = ext_prop->data_len +
+				n = ext_prop->data_len +
 					ext_prop->name_len + 14;
-				if (count > 4086 - n)
-					return -EINVAL;
-				usb_ext_prop_put_size(buf, count);
+				if (count + n >= USB_COMP_EP0_OS_DESC_BUFSIZ)
+					return count;
+				usb_ext_prop_put_size(buf, n);
 				usb_ext_prop_put_type(buf, ext_prop->type);
 				ret = usb_ext_prop_put_name(buf, ext_prop->name,
 							    ext_prop->name_len);
@@ -1682,11 +1690,12 @@
 				default:
 					return -EINVAL;
 				}
-				buf += count;
+				buf += n;
+				count += n;
 			}
 	}
 
-	return 0;
+	return count;
 }
 
 /*
@@ -1980,6 +1989,7 @@
 			req->complete = composite_setup_complete;
 			buf = req->buf;
 			os_desc_cfg = cdev->os_desc_config;
+			w_length = min_t(u16, w_length, USB_COMP_EP0_OS_DESC_BUFSIZ);
 			memset(buf, 0, w_length);
 			buf[5] = 0x01;
 			switch (ctrl->bRequestType & USB_RECIP_MASK) {
@@ -2003,8 +2013,8 @@
 					count += 16; /* header */
 					put_unaligned_le32(count, buf);
 					buf += 16;
-					fill_ext_compat(os_desc_cfg, buf);
-					value = w_length;
+					value = fill_ext_compat(os_desc_cfg, buf);
+					value = min_t(u16, w_length, value);
 				}
 				break;
 			case USB_RECIP_INTERFACE:
@@ -2033,8 +2043,7 @@
 							      interface, buf);
 					if (value < 0)
 						return value;
-
-					value = w_length;
+					value = min_t(u16, w_length, value);
 				}
 				break;
 			}
@@ -2336,8 +2345,8 @@
 		goto end;
 	}
 
-	/* OS feature descriptor length <= 4kB */
-	cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL);
+	cdev->os_desc_req->buf = kmalloc(USB_COMP_EP0_OS_DESC_BUFSIZ,
+					 GFP_KERNEL);
 	if (!cdev->os_desc_req->buf) {
 		ret = -ENOMEM;
 		usb_ep_free_request(ep0, cdev->os_desc_req);
@@ -2473,22 +2482,24 @@
 	spin_lock_irqsave(&cdev->lock, flags);
 	if (cdev->config) {
 		list_for_each_entry(f, &cdev->config->functions, list) {
-			ret = usb_func_wakeup_int(f);
-			if (ret) {
-				if (ret == -EAGAIN) {
-					ERROR(f->config->cdev,
-						"Function wakeup for %s could not complete due to suspend state.\n",
-						f->name ? f->name : "");
-					break;
-				} else if (ret != -ENOTSUPP) {
-					ERROR(f->config->cdev,
-						"Failed to wake function %s from suspend state. ret=%d. Canceling USB request.\n",
-						f->name ? f->name : "",
-						ret);
+			if (f->func_wakeup_pending) {
+				ret = usb_func_wakeup_int(f);
+				if (ret) {
+					if (ret == -EAGAIN) {
+						ERROR(f->config->cdev,
+							"Function wakeup for %s could not complete due to suspend state.\n",
+							f->name ? f->name : "");
+					} else if (ret != -ENOTSUPP) {
+						ERROR(f->config->cdev,
+							"Failed to wake function %s from suspend state. ret=%d. Canceling USB request.\n",
+							f->name ? f->name : "",
+							ret);
+					}
 				}
+				f->func_wakeup_pending = 0;
 			}
 
-			if (f->resume)
+			if (gadget->speed != USB_SPEED_SUPER && f->resume)
 				f->resume(f);
 		}
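The composite.c rework makes the OS-descriptor fill helpers report how many bytes they actually wrote and then clamps the ep0 reply to both that count and the host-requested w_length. A minimal sketch of that bounded-fill-plus-clamp pattern; the buffer and record sizes are illustrative:

#include <linux/kernel.h>
#include <linux/string.h>

#define EX_BUFSIZ	4096	/* assumed ep0 reply buffer size */
#define EX_REC_LEN	24	/* assumed fixed record length */

/* Fill as many whole records as fit and return the number of bytes used. */
static int ex_fill_records(u8 *buf, int nrecords)
{
	int i, count = 0;

	for (i = 0; i < nrecords; i++) {
		if (count + EX_REC_LEN > EX_BUFSIZ)
			break;
		memset(buf + count, 0, EX_REC_LEN);	/* stand-in for real record data */
		count += EX_REC_LEN;
	}
	return count;
}

/* Reply with no more than what was filled and no more than the host asked for. */
static u16 ex_reply_len(u16 w_length, int filled)
{
	return min_t(u16, w_length, filled);
}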
 
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index f779fdc30..14c18f3 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1257,9 +1257,9 @@
 
 		cfg = container_of(c, struct config_usb_cfg, c);
 
-		list_for_each_entry_safe(f, tmp, &c->functions, list) {
+		list_for_each_entry_safe_reverse(f, tmp, &c->functions, list) {
 
-			list_move_tail(&f->list, &cfg->func_list);
+			list_move(&f->list, &cfg->func_list);
 			if (f->unbind) {
 				dev_dbg(&gi->cdev.gadget->dev,
 					"unbind function '%s'/%pK\n",
diff --git a/drivers/usb/gadget/function/f_cdev.c b/drivers/usb/gadget/function/f_cdev.c
index 4d4f039..a7166e2 100644
--- a/drivers/usb/gadget/function/f_cdev.c
+++ b/drivers/usb/gadget/function/f_cdev.c
@@ -1256,6 +1256,7 @@
 		ret = -EFAULT;
 	} else {
 		req->length = xfer_size;
+		req->zero = 1;
 		ret = usb_ep_queue(in, req, GFP_KERNEL);
 		if (ret) {
 			pr_err("EP QUEUE failed:%d\n", ret);
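The configfs.c change above tears functions down in the reverse of bind order and re-inserts each one at the head of the pool list, so a later re-bind sees the original ordering again. A small sketch of that list idiom, with illustrative struct and list names:

#include <linux/list.h>

struct ex_func {
	struct list_head list;
};

static void ex_unbind_all(struct list_head *bound, struct list_head *pool)
{
	struct ex_func *f, *tmp;

	/* Walk in reverse so teardown mirrors bind order... */
	list_for_each_entry_safe_reverse(f, tmp, bound, list) {
		/* ...and moving to the head rebuilds the original order in 'pool'. */
		list_move(&f->list, pool);
	}
}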
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 8eed1b3..6fe4fc5 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -997,7 +997,6 @@
 	struct ffs_epfile *epfile = file->private_data;
 	struct usb_request *req;
 	struct ffs_ep *ep;
-	struct ffs_data *ffs = epfile->ffs;
 	char *data = NULL;
 	ssize_t ret, data_len = -EINVAL;
 	int halt;
@@ -1096,8 +1095,8 @@
 			data_len = usb_ep_align_maybe(gadget, ep->ep, data_len);
 		spin_unlock_irq(&epfile->ffs->eps_lock);
 
-		extra_buf_alloc = ffs->gadget->extra_buf_alloc;
-		if (io_data->read)
+		extra_buf_alloc = gadget->extra_buf_alloc;
+		if (!io_data->read)
 			data = kmalloc(data_len + extra_buf_alloc,
 					GFP_KERNEL);
 		else
@@ -3734,7 +3733,7 @@
 
 	ffs_log("exit");
 
-	return 0;
+	return USB_GADGET_DELAYED_STATUS;
 }
 
 static bool ffs_func_req_match(struct usb_function *f,
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index 5ffbf12..a038839 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -14,12 +14,18 @@
 #include "f_gsi.h"
 #include "rndis.h"
 
+struct usb_gsi_debugfs {
+	struct dentry *debugfs_root;
+};
+
+static struct usb_gsi_debugfs debugfs;
+
 static bool qti_packet_debug;
 module_param(qti_packet_debug, bool, 0644);
 MODULE_PARM_DESC(qti_packet_debug, "Print QTI Packet's Raw Data");
 
 static struct workqueue_struct *ipa_usb_wq;
-static struct f_gsi *__gsi[IPA_USB_MAX_TETH_PROT_SIZE];
+static struct f_gsi *__gsi[USB_PROT_MAX];
 static void *ipc_log_ctxt;
 
 #define NUM_LOG_PAGES 15
@@ -56,6 +62,15 @@
 static struct gsi_ctrl_pkt *gsi_ctrl_pkt_alloc(unsigned int len, gfp_t flags);
 static void gsi_ctrl_pkt_free(struct gsi_ctrl_pkt *pkt);
 
+static inline bool is_ext_prot_ether(int prot_id)
+{
+	if (prot_id == USB_PROT_RMNET_ETHER ||
+				prot_id == USB_PROT_DPL_ETHER)
+		return true;
+
+	return false;
+}
+
 static inline bool usb_gsi_remote_wakeup_allowed(struct usb_function *f)
 {
 	bool remote_wakeup_allowed;
@@ -196,6 +211,208 @@
 	return ret;
 }
 
+static void gsi_rw_timer_func(unsigned long arg)
+{
+	struct f_gsi *gsi = (struct f_gsi *)arg;
+
+	if (!atomic_read(&gsi->connected)) {
+		log_event_dbg("%s: gsi not connected.. bail-out\n", __func__);
+		gsi->debugfs_rw_timer_enable = 0;
+		return;
+	}
+
+	log_event_dbg("%s: calling gsi_wakeup_host\n", __func__);
+	gsi_wakeup_host(gsi);
+
+	if (gsi->debugfs_rw_timer_enable) {
+		log_event_dbg("%s: re-arm the timer\n", __func__);
+		mod_timer(&gsi->gsi_rw_timer,
+			jiffies + msecs_to_jiffies(gsi->gsi_rw_timer_interval));
+	}
+}
+
+static struct f_gsi *get_connected_gsi(void)
+{
+	struct f_gsi *connected_gsi;
+	bool gsi_connected = false;
+	unsigned int i;
+
+	for (i = 0; i < USB_PROT_MAX; i++) {
+		connected_gsi = __gsi[i];
+		if (connected_gsi && atomic_read(&connected_gsi->connected)) {
+			gsi_connected = true;
+			break;
+		}
+	}
+
+	if (!gsi_connected)
+		connected_gsi = NULL;
+
+	return connected_gsi;
+}
+
+#define DEFAULT_RW_TIMER_INTERVAL 500 /* in ms */
+static ssize_t usb_gsi_rw_write(struct file *file,
+			const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	struct f_gsi *gsi;
+	u8 input;
+	int ret;
+
+	gsi = get_connected_gsi();
+	if (!gsi) {
+		log_event_dbg("%s: gsi not connected\n", __func__);
+		goto err;
+	}
+
+	if (ubuf == NULL) {
+		log_event_dbg("%s: buffer is NULL.\n", __func__);
+		goto err;
+	}
+
+	ret = kstrtou8_from_user(ubuf, count, 0, &input);
+	if (ret) {
+		log_event_err("%s: Invalid value. err:%d\n", __func__, ret);
+		goto err;
+	}
+
+	if (gsi->debugfs_rw_timer_enable == !!input) {
+		if (!!input)
+			log_event_dbg("%s: RW already enabled\n", __func__);
+		else
+			log_event_dbg("%s: RW already disabled\n", __func__);
+		goto err;
+	}
+
+	gsi->debugfs_rw_timer_enable = !!input;
+
+	if (gsi->debugfs_rw_timer_enable) {
+		mod_timer(&gsi->gsi_rw_timer, jiffies +
+			  msecs_to_jiffies(gsi->gsi_rw_timer_interval));
+		log_event_dbg("%s: timer initialized\n", __func__);
+	} else {
+		del_timer_sync(&gsi->gsi_rw_timer);
+		log_event_dbg("%s: timer deleted\n", __func__);
+	}
+
+err:
+	return count;
+}
+
+static int usb_gsi_rw_show(struct seq_file *s, void *unused)
+{
+	struct f_gsi *gsi;
+
+	gsi = get_connected_gsi();
+	if (!gsi) {
+		log_event_dbg("%s: gsi not connected\n", __func__);
+		return 0;
+	}
+
+	seq_printf(s, "%d\n", gsi->debugfs_rw_timer_enable);
+
+	return 0;
+}
+
+static int usb_gsi_rw_open(struct inode *inode, struct file *f)
+{
+	return single_open(f, usb_gsi_rw_show, inode->i_private);
+}
+
+static const struct file_operations fops_usb_gsi_rw = {
+	.open = usb_gsi_rw_open,
+	.read = seq_read,
+	.write = usb_gsi_rw_write,
+	.owner = THIS_MODULE,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static ssize_t usb_gsi_rw_timer_write(struct file *file,
+			const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	struct f_gsi *gsi;
+	u16 timer_val;
+	int ret;
+
+	gsi = get_connected_gsi();
+	if (!gsi) {
+		log_event_dbg("%s: gsi not connected\n", __func__);
+		goto err;
+	}
+
+	if (ubuf == NULL) {
+		log_event_dbg("%s: buffer is NULL.\n", __func__);
+		goto err;
+	}
+
+	ret = kstrtou16_from_user(ubuf, count, 0, &timer_val);
+	if (ret) {
+		log_event_err("%s: Invalid value. err:%d\n", __func__, ret);
+		goto err;
+	}
+
+	if (timer_val == 0 || timer_val > 10000) {
+		log_event_err("%s: value must be between 1 and 10000.\n", __func__);
+		goto err;
+	}
+
+	gsi->gsi_rw_timer_interval = timer_val;
+err:
+	return count;
+}
+
+static int usb_gsi_rw_timer_show(struct seq_file *s, void *unused)
+{
+	struct f_gsi *gsi;
+
+	gsi = get_connected_gsi();
+	if (!gsi) {
+		log_event_dbg("%s: gsi not connected\n", __func__);
+		return 0;
+	}
+
+	seq_printf(s, "%ums\n", gsi->gsi_rw_timer_interval);
+
+	return 0;
+}
+
+static int usb_gsi_rw_timer_open(struct inode *inode, struct file *f)
+{
+	return single_open(f, usb_gsi_rw_timer_show, inode->i_private);
+}
+
+static const struct file_operations fops_usb_gsi_rw_timer = {
+	.open = usb_gsi_rw_timer_open,
+	.read = seq_read,
+	.write = usb_gsi_rw_timer_write,
+	.owner = THIS_MODULE,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int usb_gsi_debugfs_init(void)
+{
+	debugfs.debugfs_root = debugfs_create_dir("usb_gsi", NULL);
+	if (!debugfs.debugfs_root)
+		return -ENOMEM;
+
+	debugfs_create_file("remote_wakeup_enable", 0600,
+					debugfs.debugfs_root,
+					__gsi, &fops_usb_gsi_rw);
+	debugfs_create_file("remote_wakeup_interval", 0600,
+					debugfs.debugfs_root,
+					__gsi,
+					&fops_usb_gsi_rw_timer);
+	return 0;
+}
+
+static void usb_gsi_debugfs_exit(void)
+{
+	debugfs_remove_recursive(debugfs.debugfs_root);
+}
+
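The remote-wakeup debugfs files above use the standard single_open()/seq_file pattern plus a write hook. A condensed, self-contained sketch of that pattern (directory, file and variable names are illustrative); note that a file opened with single_open() is paired with single_release():

#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static struct dentry *ex_root;
static u8 ex_enable;

static int ex_show(struct seq_file *s, void *unused)
{
	seq_printf(s, "%u\n", ex_enable);
	return 0;
}

static int ex_open(struct inode *inode, struct file *file)
{
	return single_open(file, ex_show, inode->i_private);
}

static ssize_t ex_write(struct file *file, const char __user *ubuf,
			size_t count, loff_t *ppos)
{
	int ret = kstrtou8_from_user(ubuf, count, 0, &ex_enable);

	return ret ? ret : count;
}

static const struct file_operations ex_fops = {
	.owner		= THIS_MODULE,
	.open		= ex_open,
	.read		= seq_read,
	.write		= ex_write,
	.llseek		= seq_lseek,
	.release	= single_release,	/* pairs with single_open() */
};

static int ex_debugfs_init(void)
{
	ex_root = debugfs_create_dir("ex_usb", NULL);
	if (!ex_root)
		return -ENOMEM;
	debugfs_create_file("enable", 0600, ex_root, NULL, &ex_fops);
	return 0;
}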
 /*
  * Callback for when the network interface is up
  * and userspace is ready to answer DHCP requests, or remote wakeup
@@ -226,7 +443,7 @@
 		log_event_err("%s: Set net_ready_trigger", __func__);
 		gsi->d_port.net_ready_trigger = true;
 
-		if (gsi->prot_id == IPA_USB_ECM) {
+		if (gsi->prot_id == USB_PROT_ECM_IPA) {
 			cpkt_notify_connect = gsi_ctrl_pkt_alloc(0, GFP_ATOMIC);
 			if (IS_ERR(cpkt_notify_connect)) {
 				spin_unlock_irqrestore(&gsi->d_port.lock,
@@ -260,7 +477,7 @@
 		 * Do not post EVT_CONNECTED for RNDIS.
 		 * Data path for RNDIS is enabled on EVT_HOST_READY.
 		 */
-		if (gsi->prot_id != IPA_USB_RNDIS) {
+		if (gsi->prot_id != USB_PROT_RNDIS_IPA) {
 			post_event(&gsi->d_port, EVT_CONNECTED);
 			queue_work(gsi->d_port.ipa_usb_wq,
 					&gsi->d_port.usb_ipa_w);
@@ -311,7 +528,7 @@
 
 	log_event_dbg("%s: USB GSI IN OPS Completed", __func__);
 	in_params->client =
-		(gsi->prot_id != IPA_USB_DIAG) ? IPA_CLIENT_USB_CONS :
+		(gsi->prot_id != USB_PROT_DIAG_IPA) ? IPA_CLIENT_USB_CONS :
 						IPA_CLIENT_USB_DPL_CONS;
 	in_params->ipa_ep_cfg.mode.mode = IPA_BASIC;
 	in_params->teth_prot = gsi->prot_id;
@@ -395,7 +612,7 @@
 	conn_params->usb_to_ipa_xferrscidx =
 			d_port->out_xfer_rsc_index;
 	conn_params->usb_to_ipa_xferrscidx_valid =
-			(gsi->prot_id != IPA_USB_DIAG) ? true : false;
+			(gsi->prot_id != USB_PROT_DIAG_IPA) ? true : false;
 	conn_params->ipa_to_usb_xferrscidx_valid = true;
 	conn_params->teth_prot = gsi->prot_id;
 	conn_params->teth_prot_params.max_xfer_size_bytes_to_dev = 23700;
@@ -440,7 +657,7 @@
 	d_port->in_request.db_reg_phs_addr_msb =
 		ipa_in_channel_out_params.db_reg_phs_addr_msb;
 
-	if (gsi->prot_id != IPA_USB_DIAG) {
+	if (gsi->prot_id != USB_PROT_DIAG_IPA) {
 		d_port->out_channel_handle =
 			ipa_out_channel_out_params.clnt_hdl;
 		d_port->out_request.db_reg_phs_addr_lsb =
@@ -690,6 +907,11 @@
 			log_event_dbg("%s: ST_CON_IN_PROG_EVT_HOST_READY",
 					 __func__);
 		} else if (event == EVT_CONNECTED) {
+			if (peek_event(d_port) == EVT_SUSPEND) {
+				log_event_dbg("%s: ST_CON_IN_PROG_EVT_SUSPEND",
+					 __func__);
+				break;
+			}
 			ipa_data_path_enable(d_port);
 			d_port->sm_state = STATE_CONNECTED;
 			log_event_dbg("%s: ST_CON_IN_PROG_EVT_CON %d",
@@ -1159,7 +1381,8 @@
 
 	switch (cmd) {
 	case QTI_CTRL_MODEM_OFFLINE:
-		if (gsi->prot_id == IPA_USB_DIAG) {
+		if (gsi->prot_id == USB_PROT_DIAG_IPA ||
+				gsi->prot_id == USB_PROT_DPL_ETHER) {
 			log_event_dbg("%s:Modem Offline not handled", __func__);
 			goto exit_ioctl;
 		}
@@ -1177,7 +1400,8 @@
 		gsi_ctrl_send_notification(gsi);
 		break;
 	case QTI_CTRL_MODEM_ONLINE:
-		if (gsi->prot_id == IPA_USB_DIAG) {
+		if (gsi->prot_id == USB_PROT_DIAG_IPA ||
+				gsi->prot_id == USB_PROT_DPL_ETHER) {
 			log_event_dbg("%s:Modem Online not handled", __func__);
 			goto exit_ioctl;
 		}
@@ -1186,7 +1410,8 @@
 		break;
 	case QTI_CTRL_GET_LINE_STATE:
 		val = atomic_read(&gsi->connected);
-		if (gsi->prot_id == IPA_USB_RMNET)
+		if (gsi->prot_id == USB_PROT_RMNET_IPA ||
+				gsi->prot_id == USB_PROT_RMNET_ETHER)
 			val = gsi->rmnet_dtr_status;
 
 		ret = copy_to_user((void __user *)arg, &val, sizeof(val));
@@ -1207,32 +1432,43 @@
 			break;
 		}
 
-		if (gsi->prot_id == IPA_USB_DIAG &&
+		if ((gsi->prot_id == USB_PROT_DIAG_IPA ||
+				gsi->prot_id == USB_PROT_DPL_ETHER) &&
 				(gsi->d_port.in_channel_handle == -EINVAL)) {
 			ret = -EAGAIN;
 			break;
 		}
 
-		if (gsi->d_port.in_channel_handle == -EINVAL &&
-			gsi->d_port.out_channel_handle == -EINVAL) {
-			ret = -EAGAIN;
-			break;
+		if (gsi->prot_id != USB_PROT_GPS_CTRL) {
+			if (gsi->d_port.in_channel_handle == -EINVAL &&
+				gsi->d_port.out_channel_handle == -EINVAL) {
+				ret = -EAGAIN;
+				break;
+			}
+			info.ph_ep_info.ep_type = GSI_MBIM_DATA_EP_TYPE_HSUSB;
+			info.ph_ep_info.peripheral_iface_id = gsi->data_id;
+		} else {
+			info.ph_ep_info.ep_type = GSI_MBIM_DATA_EP_TYPE_HSUSB;
+			info.ph_ep_info.peripheral_iface_id = gsi->ctrl_id;
 		}
 
-		info.ph_ep_info.ep_type = GSI_MBIM_DATA_EP_TYPE_HSUSB;
-		info.ph_ep_info.peripheral_iface_id = gsi->data_id;
-		info.ipa_ep_pair.cons_pipe_num =
-		(gsi->prot_id == IPA_USB_DIAG) ? -1 :
-				gsi->d_port.out_channel_handle;
-		info.ipa_ep_pair.prod_pipe_num = gsi->d_port.in_channel_handle;
-
 		log_event_dbg("%s: prot id :%d ep_type:%d intf:%d",
 				__func__, gsi->prot_id, info.ph_ep_info.ep_type,
 				info.ph_ep_info.peripheral_iface_id);
+		if (gsi->prot_id != USB_PROT_GPS_CTRL) {
+			info.ipa_ep_pair.cons_pipe_num =
+			(gsi->prot_id == USB_PROT_DIAG_IPA ||
+				gsi->prot_id == USB_PROT_DPL_ETHER) ? -1 :
+					gsi->d_port.out_channel_handle;
+			info.ipa_ep_pair.prod_pipe_num =
+					gsi->d_port.in_channel_handle;
 
-		log_event_dbg("%s: ipa_cons_idx:%d ipa_prod_idx:%d",
-				__func__, info.ipa_ep_pair.cons_pipe_num,
-				info.ipa_ep_pair.prod_pipe_num);
+			log_event_dbg("%s: ipa_cons_idx:%d ipa_prod_idx:%d",
+					__func__,
+					info.ipa_ep_pair.cons_pipe_num,
+					info.ipa_ep_pair.prod_pipe_num);
+		}
 
 		ret = copy_to_user((void __user *)arg, &info,
 			sizeof(info));
@@ -1328,8 +1564,8 @@
 static int gsi_function_ctrl_port_init(struct f_gsi *gsi)
 {
 	int ret;
+	char *cdev_name = NULL;
 	int sz = GSI_CTRL_NAME_LEN;
-	bool ctrl_dev_create = true;
 
 	INIT_LIST_HEAD(&gsi->c_port.cpkt_req_q);
 	INIT_LIST_HEAD(&gsi->c_port.cpkt_resp_q);
@@ -1338,17 +1574,33 @@
 
 	init_waitqueue_head(&gsi->c_port.read_wq);
 
-	if (gsi->prot_id == IPA_USB_RMNET)
-		strlcat(gsi->c_port.name, GSI_RMNET_CTRL_NAME, sz);
-	else if (gsi->prot_id == IPA_USB_MBIM)
-		strlcat(gsi->c_port.name, GSI_MBIM_CTRL_NAME, sz);
-	else if (gsi->prot_id == IPA_USB_DIAG)
-		strlcat(gsi->c_port.name, GSI_DPL_CTRL_NAME, sz);
-	else
-		ctrl_dev_create = false;
+	switch (gsi->prot_id) {
+	case USB_PROT_RMNET_IPA:
+		cdev_name = GSI_RMNET_CTRL_NAME;
+		break;
+	case USB_PROT_RMNET_ETHER:
+		cdev_name = ETHER_RMNET_CTRL_NAME;
+		break;
+	case USB_PROT_MBIM_IPA:
+		cdev_name = GSI_MBIM_CTRL_NAME;
+		break;
+	case USB_PROT_DIAG_IPA:
+		cdev_name = GSI_DPL_CTRL_NAME;
+		break;
+	case USB_PROT_DPL_ETHER:
+		cdev_name = ETHER_DPL_CTRL_NAME;
+		break;
+	case USB_PROT_GPS_CTRL:
+		cdev_name = GSI_GPS_CTRL_NAME;
+		break;
+	default:
+		break;
+	}
 
-	if (!ctrl_dev_create)
+	if (!cdev_name)
 		return 0;
+	else
+		strlcat(gsi->c_port.name, cdev_name, sz);
 
 	gsi->c_port.ctrl_device.name = gsi->c_port.name;
 	gsi->c_port.ctrl_device.fops = &gsi_ctrl_dev_fops;
@@ -1419,6 +1671,12 @@
 	} else {
 		log_event_dbg("%s: posting HOST_READY\n", __func__);
 		post_event(d_port, EVT_HOST_READY);
+		/*
+		 * If the host supports flow control via RNDIS_MSG_INIT, then
+		 * set this flag to true. The flag is used later to enable
+		 * flow control on the resume path.
+		 */
+		gsi->host_supports_flow_control = true;
 	}
 
 	queue_work(gsi->d_port.ipa_usb_wq, &gsi->d_port.usb_ipa_w);
@@ -1512,7 +1770,7 @@
 		event->wValue = cpu_to_le16(0);
 		event->wLength = cpu_to_le16(0);
 
-		if (gsi->prot_id == IPA_USB_RNDIS) {
+		if (gsi->prot_id == USB_PROT_RNDIS_IPA) {
 			data = req->buf;
 			data[0] = cpu_to_le32(1);
 			data[1] = cpu_to_le32(0);
@@ -1737,7 +1995,7 @@
 		/* read the request; process it later */
 		value = w_length;
 		req->context = gsi;
-		if (gsi->prot_id == IPA_USB_RNDIS)
+		if (gsi->prot_id == USB_PROT_RNDIS_IPA)
 			req->complete = gsi_rndis_command_complete;
 		else
 			req->complete = gsi_ctrl_cmd_complete;
@@ -1749,7 +2007,7 @@
 		if (w_value || w_index != id)
 			goto invalid;
 
-		if (gsi->prot_id == IPA_USB_RNDIS) {
+		if (gsi->prot_id == USB_PROT_RNDIS_IPA) {
 			/* return the result */
 			buf = rndis_get_next_response(gsi->params, &n);
 			if (buf) {
@@ -1785,7 +2043,8 @@
 	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
 			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
 		line_state = (w_value & GSI_CTRL_DTR ? true : false);
-		if (gsi->prot_id == IPA_USB_RMNET)
+		if (gsi->prot_id == USB_PROT_RMNET_IPA ||
+				gsi->prot_id == USB_PROT_RMNET_ETHER)
 			gsi->rmnet_dtr_status = line_state;
 		log_event_dbg("%s: USB_CDC_REQ_SET_CONTROL_LINE_STATE DTR:%d\n",
 						__func__, line_state);
@@ -1882,9 +2141,10 @@
 	struct f_gsi *gsi = func_to_gsi(f);
 
 	/* RNDIS, RMNET and DPL only support alt 0*/
-	if (intf == gsi->ctrl_id || gsi->prot_id == IPA_USB_RNDIS ||
-			gsi->prot_id == IPA_USB_RMNET ||
-			gsi->prot_id == IPA_USB_DIAG)
+	if (intf == gsi->ctrl_id || gsi->prot_id == USB_PROT_RNDIS_IPA ||
+			gsi->prot_id == USB_PROT_RMNET_IPA ||
+			gsi->prot_id == USB_PROT_DIAG_IPA ||
+			is_ext_prot_ether(gsi->prot_id))
 		return 0;
 	else if (intf == gsi->data_id)
 		return gsi->data_interface_up;
@@ -2003,7 +2263,8 @@
 	log_event_dbg("intf=%u, alt=%u", intf, alt);
 
 	/* Control interface has only altsetting 0 */
-	if (intf == gsi->ctrl_id || gsi->prot_id == IPA_USB_RMNET) {
+	if (intf == gsi->ctrl_id || gsi->prot_id == USB_PROT_RMNET_IPA ||
+				gsi->prot_id == USB_PROT_RMNET_ETHER) {
 		if (alt != 0)
 			goto fail;
 
@@ -2037,10 +2298,11 @@
 	if (intf == gsi->data_id) {
 		gsi->d_port.net_ready_trigger = false;
 		/* for rndis and rmnet alt is always 0 update alt accordingly */
-		if (gsi->prot_id == IPA_USB_RNDIS ||
-				gsi->prot_id == IPA_USB_RMNET ||
-				gsi->prot_id == IPA_USB_DIAG)
-				alt = 1;
+		if (gsi->prot_id == USB_PROT_RNDIS_IPA ||
+				gsi->prot_id == USB_PROT_RMNET_IPA ||
+				gsi->prot_id == USB_PROT_DIAG_IPA ||
+				is_ext_prot_ether(gsi->prot_id))
+			alt = 1;
 
 		if (alt > 1)
 			goto notify_ep_disable;
@@ -2067,8 +2329,9 @@
 			}
 
 			/* Configure EPs for GSI */
-			if (gsi->d_port.in_ep) {
-				if (gsi->prot_id == IPA_USB_DIAG)
+			if (gsi->d_port.in_ep &&
+				gsi->prot_id <= USB_PROT_DIAG_IPA) {
+				if (gsi->prot_id == USB_PROT_DIAG_IPA)
 					gsi->d_port.in_ep->ep_intr_num = 3;
 				else
 					gsi->d_port.in_ep->ep_intr_num = 2;
@@ -2077,7 +2340,8 @@
 						GSI_EP_OP_CONFIG);
 			}
 
-			if (gsi->d_port.out_ep) {
+			if (gsi->d_port.out_ep &&
+				gsi->prot_id <= USB_PROT_DIAG_IPA) {
 				gsi->d_port.out_ep->ep_intr_num = 1;
 				usb_gsi_ep_op(gsi->d_port.out_ep,
 					&gsi->d_port.out_request,
@@ -2086,7 +2350,17 @@
 
 			gsi->d_port.gadget = cdev->gadget;
 
-			if (gsi->prot_id == IPA_USB_RNDIS) {
+			if (is_ext_prot_ether(gsi->prot_id)) {
+				net = gether_connect(&gsi->d_port.gether_port);
+				if (IS_ERR(net)) {
+					pr_err("%s:gether_connect err:%ld\n",
+					__func__, PTR_ERR(net));
+					goto notify_ep_disable;
+				}
+				gsi->d_port.gether_port.cdc_filter = 0;
+			}
+
+			if (gsi->prot_id == USB_PROT_RNDIS_IPA) {
 				gsi_rndis_open(gsi);
 				net = gsi_rndis_get_netdev("rndis0");
 				if (IS_ERR(net))
@@ -2098,7 +2372,7 @@
 						&gsi->d_port.cdc_filter);
 			}
 
-			if (gsi->prot_id == IPA_USB_ECM)
+			if (gsi->prot_id == USB_PROT_ECM_IPA)
 				gsi->d_port.cdc_filter = DEFAULT_FILTER;
 
 			/*
@@ -2106,7 +2380,7 @@
 			 * handler which is invoked when the host sends the
 			 * GEN_CURRENT_PACKET_FILTER message.
 			 */
-			if (gsi->prot_id != IPA_USB_RNDIS)
+			if (gsi->prot_id != USB_PROT_RNDIS_IPA)
 				post_event(&gsi->d_port,
 						EVT_CONNECT_IN_PROGRESS);
 			queue_work(gsi->d_port.ipa_usb_wq,
@@ -2127,7 +2401,8 @@
 	atomic_set(&gsi->connected, 1);
 
 	/* send 0 len pkt to qti to notify state change */
-	if (gsi->prot_id == IPA_USB_DIAG)
+	if (gsi->prot_id == USB_PROT_DIAG_IPA ||
+				gsi->prot_id == USB_PROT_DPL_ETHER)
 		gsi_ctrl_send_cpkt_tomodem(gsi, NULL, 0);
 
 	return 0;
@@ -2145,10 +2420,11 @@
 
 	atomic_set(&gsi->connected, 0);
 
-	if (gsi->prot_id == IPA_USB_RNDIS)
+	if (gsi->prot_id == USB_PROT_RNDIS_IPA)
 		rndis_uninit(gsi->params);
 
-	if (gsi->prot_id == IPA_USB_RMNET)
+	if (gsi->prot_id == USB_PROT_RMNET_IPA ||
+				gsi->prot_id == USB_PROT_RMNET_ETHER)
 		gsi->rmnet_dtr_status = false;
 
 	/* Disable Control Path */
@@ -2170,7 +2446,15 @@
 
 	gsi->data_interface_up = false;
 
+	gsi->host_supports_flow_control = false;
+
 	log_event_dbg("%s deactivated", gsi->function.name);
+
+	if (is_ext_prot_ether(gsi->prot_id)) {
+		gether_disconnect(&gsi->d_port.gether_port);
+		return;
+	}
+
 	ipa_disconnect_handler(&gsi->d_port);
 	post_event(&gsi->d_port, EVT_DISCONNECTED);
 	queue_work(gsi->d_port.ipa_usb_wq, &gsi->d_port.usb_ipa_w);
@@ -2181,12 +2465,25 @@
 	bool block_db;
 	struct f_gsi *gsi = func_to_gsi(f);
 
-	/* Check if function is already suspended in gsi_func_suspend() */
-	if (f->func_is_suspended) {
+	/* Check if function is already suspended in gsi_func_suspend(),
+	 * or func_suspend would have bailed out earlier if func_remote_wakeup
+	 * wasn't enabled.
+	 */
+	if (f->func_is_suspended && (gsi->d_port.sm_state == STATE_SUSPENDED ||
+			gsi->d_port.sm_state == STATE_SUSPEND_IN_PROGRESS)) {
 		log_event_dbg("%s: func already suspended, return\n", __func__);
 		return;
 	}
 
+	/*
+	 * GPS doesn't use any data interface, hence bail out as there is no
+	 * GSI specific handling needed.
+	 */
+	if (gsi->prot_id == USB_PROT_GPS_CTRL) {
+		log_event_dbg("%s: suspend done\n", __func__);
+		return;
+	}
+
 	block_db = true;
 	usb_gsi_ep_op(gsi->d_port.in_ep, (void *)&block_db,
 			GSI_EP_OP_SET_CLR_BLOCK_DBL);
@@ -2216,13 +2513,20 @@
 	/* Check any pending cpkt, and queue immediately on resume */
 	gsi_ctrl_send_notification(gsi);
 
+	if (gsi->prot_id == USB_PROT_GPS_CTRL) {
+		log_event_dbg("%s: resume done\n", __func__);
+		return;
+	}
+
 	/*
 	 * Linux host does not send RNDIS_MSG_INIT or non-zero
 	 * RNDIS_MESSAGE_PACKET_FILTER after performing bus resume.
+	 * Check whether the host supports flow control or not. If yes,
 	 * Trigger state machine explicitly on resume.
 	 */
-	if (gsi->prot_id == IPA_USB_RNDIS &&
-			!usb_gsi_remote_wakeup_allowed(f))
+	if (gsi->prot_id == USB_PROT_RNDIS_IPA &&
+			!usb_gsi_remote_wakeup_allowed(f) &&
+			gsi->host_supports_flow_control)
 		rndis_flow_control(gsi->params, false);
 
 	post_event(&gsi->d_port, EVT_RESUMED);
@@ -2231,6 +2535,14 @@
 	log_event_dbg("%s: completed", __func__);
 }
 
+static int gsi_get_status(struct usb_function *f)
+{
+	unsigned int remote_wakeup_en_status = f->func_wakeup_allowed ? 1 : 0;
+
+	return (remote_wakeup_en_status << FUNC_WAKEUP_ENABLE_SHIFT) |
+		(1 << FUNC_WAKEUP_CAPABLE_SHIFT);
+}
+
 static int gsi_func_suspend(struct usb_function *f, u8 options)
 {
 	bool func_wakeup_allowed;
@@ -2327,7 +2639,7 @@
 		info->data_nop_desc->bInterfaceNumber = gsi->data_id;
 
 	/* allocate instance-specific endpoints */
-	if (info->fs_in_desc) {
+	if (info->fs_in_desc && gsi->prot_id <= USB_PROT_DIAG_IPA) {
 		ep = usb_ep_autoconfig_by_name(cdev->gadget,
 				info->fs_in_desc, info->in_epname);
 		if (!ep)
@@ -2335,9 +2647,17 @@
 		gsi->d_port.in_ep = ep;
 		msm_ep_config(gsi->d_port.in_ep, NULL);
 		ep->driver_data = cdev;	/* claim */
+	} else {
+		if (info->fs_in_desc) {
+			ep = usb_ep_autoconfig(cdev->gadget, info->fs_in_desc);
+			if (!ep)
+				goto fail;
+			gsi->d_port.in_ep = ep;
+			ep->driver_data = cdev; /* claim */
+		}
 	}
 
-	if (info->fs_out_desc) {
+	if (info->fs_out_desc && gsi->prot_id <= USB_PROT_DIAG_IPA) {
 		ep = usb_ep_autoconfig_by_name(cdev->gadget,
 				info->fs_out_desc, info->out_epname);
 		if (!ep)
@@ -2345,6 +2665,14 @@
 		gsi->d_port.out_ep = ep;
 		msm_ep_config(gsi->d_port.out_ep, NULL);
 		ep->driver_data = cdev;	/* claim */
+	} else {
+		if (info->fs_out_desc) {
+			ep = usb_ep_autoconfig(cdev->gadget, info->fs_out_desc);
+			if (!ep)
+				goto fail;
+			gsi->d_port.out_ep = ep;
+			ep->driver_data = cdev; /* claim */
+		}
 	}
 
 	if (info->fs_notify_desc) {
@@ -2475,14 +2803,17 @@
 	struct gsi_function_bind_info info = {0};
 	struct f_gsi *gsi = func_to_gsi(f);
 	struct rndis_params *params;
+	struct net_device *net;
+	char *name = NULL;
 	int status;
 	__u8  class;
 	__u8  subclass;
 	__u8  proto;
 
 
-	if (gsi->prot_id == IPA_USB_RMNET ||
-		gsi->prot_id == IPA_USB_DIAG)
+	if (gsi->prot_id == USB_PROT_RMNET_IPA ||
+		gsi->prot_id == USB_PROT_DIAG_IPA ||
+		is_ext_prot_ether(gsi->prot_id))
 		gsi->ctrl_id = -ENODEV;
 	else {
 		status = gsi->ctrl_id = usb_interface_id(c, f);
@@ -2490,12 +2821,14 @@
 			goto fail;
 	}
 
-	status = gsi->data_id = usb_interface_id(c, f);
-	if (status < 0)
-		goto fail;
+	if (gsi->prot_id != USB_PROT_GPS_CTRL) {
+		status = gsi->data_id = usb_interface_id(c, f);
+		if (status < 0)
+			goto fail;
+	}
 
 	switch (gsi->prot_id) {
-	case IPA_USB_RNDIS:
+	case USB_PROT_RNDIS_IPA:
 		info.string_defs = rndis_gsi_string_defs;
 		info.ctrl_desc = &rndis_gsi_control_intf;
 		info.ctrl_str_idx = 0;
@@ -2641,7 +2974,7 @@
 		info.ctrl_desc->bInterfaceProtocol = proto;
 
 		break;
-	case IPA_USB_MBIM:
+	case USB_PROT_MBIM_IPA:
 		info.string_defs = mbim_gsi_string_defs;
 		info.ctrl_desc = &mbim_gsi_control_intf;
 		info.ctrl_str_idx = 0;
@@ -2688,7 +3021,8 @@
 				c->bConfigurationValue + '0';
 		}
 		break;
-	case IPA_USB_RMNET:
+	case USB_PROT_RMNET_IPA:
+	case USB_PROT_RMNET_ETHER:
 		info.string_defs = rmnet_gsi_string_defs;
 		info.data_desc = &rmnet_gsi_interface_desc;
 		info.data_str_idx = 0;
@@ -2713,8 +3047,9 @@
 		info.out_req_buf_len = GSI_OUT_RMNET_BUF_LEN;
 		info.out_req_num_buf = GSI_NUM_OUT_BUFFERS;
 		info.notify_buf_len = sizeof(struct usb_cdc_notification);
+		name = "usb_rmnet";
 		break;
-	case IPA_USB_ECM:
+	case USB_PROT_ECM_IPA:
 		info.string_defs = ecm_gsi_string_defs;
 		info.ctrl_desc = &ecm_gsi_control_intf;
 		info.ctrl_str_idx = 0;
@@ -2763,7 +3098,8 @@
 		gsi->d_port.ipa_init_params.host_ethaddr[5]);
 		info.string_defs[1].s = gsi->ethaddr;
 		break;
-	case IPA_USB_DIAG:
+	case USB_PROT_DIAG_IPA:
+	case USB_PROT_DPL_ETHER:
 		info.string_defs = qdss_gsi_string_defs;
 		info.data_desc = &qdss_gsi_data_intf_desc;
 		info.data_str_idx = 0;
@@ -2778,6 +3114,19 @@
 		info.in_req_buf_len = 16384;
 		info.in_req_num_buf = GSI_NUM_IN_BUFFERS;
 		info.notify_buf_len = sizeof(struct usb_cdc_notification);
+		name = "dpl_usb";
+		break;
+	case USB_PROT_GPS_CTRL:
+		info.string_defs = gps_string_defs;
+		info.ctrl_str_idx = 0;
+		info.ctrl_desc = &gps_interface_desc;
+		info.fs_notify_desc = &gps_fs_notify_desc;
+		info.hs_notify_desc = &gps_hs_notify_desc;
+		info.ss_notify_desc = &gps_ss_notify_desc;
+		info.fs_desc_hdr = gps_fs_function;
+		info.hs_desc_hdr = gps_hs_function;
+		info.ss_desc_hdr = gps_ss_function;
+		info.notify_buf_len = sizeof(struct usb_cdc_notification);
 		break;
 	default:
 		log_event_err("%s: Invalid prot id %d", __func__,
@@ -2789,6 +3138,32 @@
 	if (status)
 		goto dereg_rndis;
 
+	if (gsi->prot_id == USB_PROT_GPS_CTRL)
+		goto skip_ipa_init;
+
+	if (is_ext_prot_ether(gsi->prot_id)) {
+		if (!name)
+			return -EINVAL;
+
+		gsi->d_port.gether_port.in_ep = gsi->d_port.in_ep;
+		gsi->d_port.gether_port.out_ep = gsi->d_port.out_ep;
+		net = gether_setup_name_default(name);
+		if (IS_ERR(net)) {
+			pr_err("%s: gether_setup failed\n", __func__);
+			return PTR_ERR(net);
+		}
+		gsi->d_port.gether_port.ioport = netdev_priv(net);
+		gether_set_gadget(net, c->cdev->gadget);
+		status = gether_register_netdev(net);
+		if (status < 0) {
+			pr_err("%s: gether_register_netdev failed\n",
+				__func__);
+			free_netdev(net);
+			return status;
+		}
+		goto skip_ipa_init;
+	}
+
 	status = ipa_register_ipa_ready_cb(ipa_ready_callback, gsi);
 	if (!status) {
 		log_event_info("%s: ipa is not ready", __func__);
@@ -2814,6 +3189,7 @@
 
 	gsi->d_port.sm_state = STATE_INITIALIZED;
 
+skip_ipa_init:
 	DBG(cdev, "%s: %s speed IN/%s OUT/%s NOTIFY/%s\n",
 			f->name,
 			gadget_is_superspeed(c->cdev->gadget) ? "super" :
@@ -2836,6 +3212,12 @@
 {
 	struct f_gsi *gsi = func_to_gsi(f);
 
+	if (is_ext_prot_ether(gsi->prot_id)) {
+		gether_cleanup(gsi->d_port.gether_port.ioport);
+		gsi->d_port.gether_port.ioport = NULL;
+		goto skip_ipa_dinit;
+	}
+
 	/*
 	 * Use drain_workqueue to accomplish below conditions:
 	 * 1. Make sure that any running work completed
@@ -2847,12 +3229,13 @@
 	drain_workqueue(gsi->d_port.ipa_usb_wq);
 	ipa_usb_deinit_teth_prot(gsi->prot_id);
 
-	if (gsi->prot_id == IPA_USB_RNDIS) {
+skip_ipa_dinit:
+	if (gsi->prot_id == USB_PROT_RNDIS_IPA) {
 		gsi->d_port.sm_state = STATE_UNINITIALIZED;
 		rndis_deregister(gsi->params);
 	}
 
-	if (gsi->prot_id == IPA_USB_MBIM)
+	if (gsi->prot_id == USB_PROT_MBIM_IPA)
 		mbim_gsi_ext_config_desc.function.subCompatibleID[0] = 0;
 
 	if (gadget_is_superspeed(c->cdev->gadget)) {
@@ -2883,33 +3266,38 @@
 static int gsi_bind_config(struct f_gsi *gsi)
 {
 	int status = 0;
-	enum ipa_usb_teth_prot prot_id = gsi->prot_id;
 
-	log_event_dbg("%s: prot id %d", __func__, prot_id);
+	log_event_dbg("%s: prot id %d", __func__, gsi->prot_id);
 
-	switch (prot_id) {
-	case IPA_USB_RNDIS:
+	switch (gsi->prot_id) {
+	case USB_PROT_RNDIS_IPA:
 		gsi->function.name = "rndis";
 		gsi->function.strings = rndis_gsi_strings;
 		break;
-	case IPA_USB_ECM:
+	case USB_PROT_ECM_IPA:
 		gsi->function.name = "cdc_ethernet";
 		gsi->function.strings = ecm_gsi_strings;
 		break;
-	case IPA_USB_RMNET:
+	case USB_PROT_RMNET_IPA:
+	case USB_PROT_RMNET_ETHER:
 		gsi->function.name = "rmnet";
 		gsi->function.strings = rmnet_gsi_strings;
 		break;
-	case IPA_USB_MBIM:
+	case USB_PROT_MBIM_IPA:
 		gsi->function.name = "mbim";
 		gsi->function.strings = mbim_gsi_strings;
 		break;
-	case IPA_USB_DIAG:
+	case USB_PROT_DIAG_IPA:
+	case USB_PROT_DPL_ETHER:
 		gsi->function.name = "dpl";
 		gsi->function.strings = qdss_gsi_strings;
 		break;
+	case USB_PROT_GPS_CTRL:
+		gsi->function.name = "gps";
+		gsi->function.strings = gps_strings;
+		break;
 	default:
-		log_event_err("%s: invalid prot id %d", __func__, prot_id);
+		log_event_err("%s: invalid prot id %d", __func__, gsi->prot_id);
 		return -EINVAL;
 	}
 
@@ -2922,6 +3310,7 @@
 	gsi->function.disable = gsi_disable;
 	gsi->function.free_func = gsi_free_func;
 	gsi->function.suspend = gsi_suspend;
+	gsi->function.get_status = gsi_get_status;
 	gsi->function.func_suspend = gsi_func_suspend;
 	gsi->function.resume = gsi_resume;
 
@@ -2950,6 +3339,9 @@
 
 	gsi->d_port.ipa_usb_wq = ipa_usb_wq;
 
+	gsi->gsi_rw_timer_interval = DEFAULT_RW_TIMER_INTERVAL;
+	setup_timer(&gsi->gsi_rw_timer, gsi_rw_timer_func, (unsigned long) gsi);
+
 	return gsi;
 }
 
@@ -3164,7 +3556,7 @@
 static int gsi_set_inst_name(struct usb_function_instance *fi,
 	const char *name)
 {
-	int prot_id, name_len, ret = 0;
+	int name_len, prot_id, ret = 0;
 	struct gsi_opts *opts;
 	struct f_gsi *gsi;
 
@@ -3181,7 +3573,7 @@
 		return -EINVAL;
 	}
 
-	if (prot_id == IPA_USB_RNDIS)
+	if (prot_id == USB_PROT_RNDIS_IPA)
 		config_group_init_type_name(&opts->func_inst.group, "",
 					    &gsi_func_rndis_type);
 
@@ -3249,7 +3641,7 @@
 		return -ENOMEM;
 	}
 
-	for (i = 0; i < IPA_USB_MAX_TETH_PROT_SIZE; i++) {
+	for (i = 0; i < USB_PROT_MAX; i++) {
 		__gsi[i] = gsi_function_init();
 		if (IS_ERR(__gsi[i]))
 			return PTR_ERR(__gsi[i]);
@@ -3259,6 +3651,7 @@
 	if (!ipc_log_ctxt)
 		pr_err("%s: Err allocating ipc_log_ctxt\n", __func__);
 
+	usb_gsi_debugfs_init();
 	return usb_function_register(&gsiusb_func);
 }
 module_init(fgsi_init);
@@ -3272,9 +3665,10 @@
 	if (ipc_log_ctxt)
 		ipc_log_context_destroy(ipc_log_ctxt);
 
-	for (i = 0; i < IPA_USB_MAX_TETH_PROT_SIZE; i++)
+	for (i = 0; i < USB_PROT_MAX; i++)
 		kfree(__gsi[i]);
 
+	usb_gsi_debugfs_exit();
 	usb_function_unregister(&gsiusb_func);
 }
 module_exit(fgsi_exit);
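The gsi_get_status() callback added above builds the function-status word returned for a USB 3.x GET_STATUS(function) request. FUNC_WAKEUP_CAPABLE_SHIFT and FUNC_WAKEUP_ENABLE_SHIFT are defined in the gadget headers, not in these hunks; the standalone sketch below assumes the spec layout (bit 0 = Function Remote Wake Capable, bit 1 = Function Remote Wakeup enabled) purely for illustration.

#include <stdio.h>

/* Assumed bit positions, mirroring the USB 3.x function status layout. */
#define FUNC_WAKEUP_CAPABLE_SHIFT	0
#define FUNC_WAKEUP_ENABLE_SHIFT	1

/* Same composition as gsi_get_status(): always report wakeup-capable,
 * with the enable bit reflecting whether remote wakeup was allowed.
 */
static int get_status(int func_wakeup_allowed)
{
	unsigned int remote_wakeup_en_status = func_wakeup_allowed ? 1 : 0;

	return (remote_wakeup_en_status << FUNC_WAKEUP_ENABLE_SHIFT) |
		(1 << FUNC_WAKEUP_CAPABLE_SHIFT);
}

int main(void)
{
	printf("wakeup disabled: 0x%x\n", get_status(0));	/* 0x1 */
	printf("wakeup enabled:  0x%x\n", get_status(1));	/* 0x3 */
	return 0;
}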
diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h
index c6e64fd..c9beb91 100644
--- a/drivers/usb/gadget/function/f_gsi.h
+++ b/drivers/usb/gadget/function/f_gsi.h
@@ -26,10 +26,17 @@
 #include <linux/debugfs.h>
 #include <linux/ipa_usb.h>
 #include <linux/ipc_logging.h>
+#include <linux/timer.h>
+
+#include "u_ether.h"
 
 #define GSI_RMNET_CTRL_NAME "rmnet_ctrl"
 #define GSI_MBIM_CTRL_NAME "android_mbim"
 #define GSI_DPL_CTRL_NAME "dpl_ctrl"
+#define ETHER_RMNET_CTRL_NAME "rmnet_ctrl0"
+#define ETHER_DPL_CTRL_NAME "dpl_ctrl0"
+#define GSI_GPS_CTRL_NAME "gps"
+
 #define GSI_CTRL_NAME_LEN (sizeof(GSI_MBIM_CTRL_NAME)+2)
 #define GSI_MAX_CTRL_PKT_SIZE 4096
 #define GSI_CTRL_DTR (1 << 0)
@@ -114,6 +121,22 @@
 	RNDIS_ID_MAX,
 };
 
+enum usb_prot_id {
+	/* accelerated: redefined from ipa_usb.h, do not change order */
+	USB_PROT_RNDIS_IPA,
+	USB_PROT_ECM_IPA,
+	USB_PROT_RMNET_IPA,
+	USB_PROT_MBIM_IPA,
+	USB_PROT_DIAG_IPA,
+
+	/* non-accelerated */
+	USB_PROT_RMNET_ETHER,
+	USB_PROT_DPL_ETHER,
+	USB_PROT_GPS_CTRL,
+
+	USB_PROT_MAX,
+};
+
 #define MAXQUEUELEN 128
 struct event_queue {
 	u8 event[MAXQUEUELEN];
@@ -228,6 +251,7 @@
 	enum connection_state sm_state;
 	struct event_queue evt_q;
 	wait_queue_head_t wait_for_ipa_ready;
+	struct gether gether_port;
 
 	/* Track these for debugfs */
 	struct ipa_usb_xdci_chan_params ipa_in_channel_params;
@@ -237,7 +261,7 @@
 
 struct f_gsi {
 	struct usb_function function;
-	enum ipa_usb_teth_prot prot_id;
+	enum usb_prot_id prot_id;
 	int ctrl_id;
 	int data_id;
 	u32 vendorID;
@@ -254,6 +278,13 @@
 	struct gsi_data_port d_port;
 	struct gsi_ctrl_port c_port;
 	bool rmnet_dtr_status;
+
+	/* To test remote wakeup using debugfs */
+	struct timer_list gsi_rw_timer;
+	u8 debugfs_rw_timer_enable;
+	u16 gsi_rw_timer_interval;
+
+	bool host_supports_flow_control;
 };
 
 static inline struct f_gsi *func_to_gsi(struct usb_function *f)
@@ -285,21 +316,27 @@
 			    func_inst.group);
 }
 
-static enum ipa_usb_teth_prot name_to_prot_id(const char *name)
+static int name_to_prot_id(const char *name)
 {
 	if (!name)
 		goto error;
 
 	if (!strncasecmp(name, "rndis", MAX_INST_NAME_LEN))
-		return IPA_USB_RNDIS;
+		return USB_PROT_RNDIS_IPA;
 	if (!strncasecmp(name, "ecm", MAX_INST_NAME_LEN))
-		return IPA_USB_ECM;
+		return USB_PROT_ECM_IPA;
 	if (!strncasecmp(name, "rmnet", MAX_INST_NAME_LEN))
-		return IPA_USB_RMNET;
+		return USB_PROT_RMNET_IPA;
 	if (!strncasecmp(name, "mbim", MAX_INST_NAME_LEN))
-		return IPA_USB_MBIM;
+		return USB_PROT_MBIM_IPA;
 	if (!strncasecmp(name, "dpl", MAX_INST_NAME_LEN))
-		return IPA_USB_DIAG;
+		return USB_PROT_DIAG_IPA;
+	if (!strncasecmp(name, "rmnet.ether", MAX_INST_NAME_LEN))
+		return USB_PROT_RMNET_ETHER;
+	if (!strncasecmp(name, "dpl.ether", MAX_INST_NAME_LEN))
+		return USB_PROT_DPL_ETHER;
+	if (!strncasecmp(name, "gps", MAX_INST_NAME_LEN))
+		return USB_PROT_GPS_CTRL;
 
 error:
 	return -EINVAL;
@@ -309,6 +346,7 @@
 
 #define LOG2_STATUS_INTERVAL_MSEC 5
 #define MAX_NOTIFY_SIZE sizeof(struct usb_cdc_notification)
+#define GPS_MAX_NOTIFY_SIZE 64
 
 /* rmnet device descriptors */
 
@@ -1399,4 +1437,91 @@
 	&qdss_gsi_string_table,
 	NULL,
 };
+
+/* gps device descriptor */
+static struct usb_interface_descriptor gps_interface_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceProtocol =	USB_CLASS_VENDOR_SPEC,
+	/* .iInterface = DYNAMIC */
+};
+
+/* Full speed support */
+static struct usb_endpoint_descriptor gps_fs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(GPS_MAX_NOTIFY_SIZE),
+	.bInterval =		1 << LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_descriptor_header *gps_fs_function[] = {
+	(struct usb_descriptor_header *) &gps_interface_desc,
+	(struct usb_descriptor_header *) &gps_fs_notify_desc,
+	NULL,
+};
+
+/* High speed support */
+static struct usb_endpoint_descriptor gps_hs_notify_desc  = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(GPS_MAX_NOTIFY_SIZE),
+	.bInterval =		LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_descriptor_header *gps_hs_function[] = {
+	(struct usb_descriptor_header *) &gps_interface_desc,
+	(struct usb_descriptor_header *) &gps_hs_notify_desc,
+	NULL,
+};
+
+/* Super speed support */
+static struct usb_endpoint_descriptor gps_ss_notify_desc  = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(GPS_MAX_NOTIFY_SIZE),
+	.bInterval =		LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor gps_ss_notify_comp_desc = {
+	.bLength =		sizeof(gps_ss_notify_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 3 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+	.wBytesPerInterval =	cpu_to_le16(GPS_MAX_NOTIFY_SIZE),
+};
+
+static struct usb_descriptor_header *gps_ss_function[] = {
+	(struct usb_descriptor_header *) &gps_interface_desc,
+	(struct usb_descriptor_header *) &gps_ss_notify_desc,
+	(struct usb_descriptor_header *) &gps_ss_notify_comp_desc,
+	NULL,
+};
+
+/* String descriptors */
+
+static struct usb_string gps_string_defs[] = {
+	[0].s = "GPS",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings gps_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		gps_string_defs,
+};
+
+static struct usb_gadget_strings *gps_strings[] = {
+	&gps_string_table,
+	NULL,
+};
 #endif
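Several f_gsi.c hunks above gate the new Ethernet-backed (non-accelerated) paths on is_ext_prot_ether(). That helper is not part of the hunks shown here; given the enum usb_prot_id layout above and the gether_connect()/gether_disconnect() call sites it guards, a minimal sketch of its likely shape is:

static inline bool is_ext_prot_ether(int prot_id)
{
	/* Only the non-accelerated RMNET/DPL variants go through u_ether */
	return prot_id == USB_PROT_RMNET_ETHER ||
		prot_id == USB_PROT_DPL_ETHER;
}

Treat this as an inferred reading aid, not the exact definition used by the driver.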
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 1979156..4b4ce0e 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -413,7 +413,8 @@
 		if (err) {
 			ERROR(midi, "%s: couldn't enqueue request: %d\n",
 				    midi->out_ep->name, err);
-			free_ep_req(midi->out_ep, req);
+			if (req->buf != NULL)
+				free_ep_req(midi->out_ep, req);
 			return err;
 		}
 	}
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
index 651776d..79ef286 100644
--- a/drivers/usb/gadget/function/f_mtp.c
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -137,6 +137,9 @@
 	} perf[MAX_ITERATION];
 	unsigned int dbg_read_index;
 	unsigned int dbg_write_index;
+	unsigned int mtp_rx_req_len;
+	unsigned int mtp_tx_req_len;
+	unsigned int mtp_tx_reqs;
 	struct mutex  read_mutex;
 };
 
@@ -531,16 +534,16 @@
 
 retry_tx_alloc:
 	/* now allocate requests for our endpoints */
-	for (i = 0; i < mtp_tx_reqs; i++) {
+	for (i = 0; i < dev->mtp_tx_reqs; i++) {
 		req = mtp_request_new(dev->ep_in,
-				mtp_tx_req_len + extra_buf_alloc);
+				dev->mtp_tx_req_len + extra_buf_alloc);
 		if (!req) {
-			if (mtp_tx_req_len <= MTP_BULK_BUFFER_SIZE)
+			if (dev->mtp_tx_req_len <= MTP_BULK_BUFFER_SIZE)
 				goto fail;
 			while ((req = mtp_req_get(dev, &dev->tx_idle)))
 				mtp_request_free(req, dev->ep_in);
-			mtp_tx_req_len = MTP_BULK_BUFFER_SIZE;
-			mtp_tx_reqs = MTP_TX_REQ_MAX;
+			dev->mtp_tx_req_len = MTP_BULK_BUFFER_SIZE;
+			dev->mtp_tx_reqs = MTP_TX_REQ_MAX;
 			goto retry_tx_alloc;
 		}
 		req->complete = mtp_complete_in;
@@ -553,18 +556,18 @@
 	 * operational speed.  Hence assuming super speed max
 	 * packet size.
 	 */
-	if (mtp_rx_req_len % 1024)
-		mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
+	if (dev->mtp_rx_req_len % 1024)
+		dev->mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
 
 retry_rx_alloc:
 	for (i = 0; i < RX_REQ_MAX; i++) {
-		req = mtp_request_new(dev->ep_out, mtp_rx_req_len);
+		req = mtp_request_new(dev->ep_out, dev->mtp_rx_req_len);
 		if (!req) {
-			if (mtp_rx_req_len <= MTP_BULK_BUFFER_SIZE)
+			if (dev->mtp_rx_req_len <= MTP_BULK_BUFFER_SIZE)
 				goto fail;
 			for (--i; i >= 0; i--)
 				mtp_request_free(dev->rx_req[i], dev->ep_out);
-			mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
+			dev->mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
 			goto retry_rx_alloc;
 		}
 		req->complete = mtp_complete_out;
@@ -609,11 +612,21 @@
 	}
 
 	len = ALIGN(count, dev->ep_out->maxpacket);
-	if (len > mtp_rx_req_len)
+	if (len > dev->mtp_rx_req_len)
 		return -EINVAL;
 
 	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_OFFLINE) {
+		spin_unlock_irq(&dev->lock);
+		return -ENODEV;
+	}
+
 	if (dev->ep_out->desc) {
+		if (!cdev) {
+			spin_unlock_irq(&dev->lock);
+			return -ENODEV;
+		}
+
 		len = usb_ep_align_maybe(cdev->gadget, dev->ep_out, count);
 		if (len > MTP_BULK_BUFFER_SIZE) {
 			spin_unlock_irq(&dev->lock);
@@ -750,8 +763,8 @@
 			break;
 		}
 
-		if (count > mtp_tx_req_len)
-			xfer = mtp_tx_req_len;
+		if (count > dev->mtp_tx_req_len)
+			xfer = dev->mtp_tx_req_len;
 		else
 			xfer = count;
 		if (xfer && copy_from_user(req->buf, buf, xfer)) {
@@ -847,8 +860,8 @@
 			break;
 		}
 
-		if (count > mtp_tx_req_len)
-			xfer = mtp_tx_req_len;
+		if (count > dev->mtp_tx_req_len)
+			xfer = dev->mtp_tx_req_len;
 		else
 			xfer = count;
 
@@ -944,7 +957,7 @@
 			cur_buf = (cur_buf + 1) % RX_REQ_MAX;
 
 			/* some h/w expects size to be aligned to ep's MTU */
-			read_req->length = mtp_rx_req_len;
+			read_req->length = dev->mtp_rx_req_len;
 
 			dev->rx_done = 0;
 			mutex_unlock(&dev->read_mutex);
@@ -1421,6 +1434,9 @@
 		mtp_tx_req_len = 16384;
 	}
 
+	dev->mtp_rx_req_len = mtp_rx_req_len;
+	dev->mtp_tx_req_len = mtp_tx_req_len;
+	dev->mtp_tx_reqs = mtp_tx_reqs;
 	/* allocate interface ID(s) */
 	id = usb_interface_id(c, f);
 	if (id < 0)
@@ -1498,7 +1514,10 @@
 	while ((req = mtp_req_get(dev, &dev->intr_idle)))
 		mtp_request_free(req, dev->ep_intr);
 	mutex_unlock(&dev->read_mutex);
+	spin_lock_irq(&dev->lock);
 	dev->state = STATE_OFFLINE;
+	dev->cdev = NULL;
+	spin_unlock_irq(&dev->lock);
 	kfree(f->os_desc_table);
 	f->os_desc_n = 0;
 	fi_mtp->func_inst.f = NULL;
@@ -1554,7 +1573,9 @@
 	struct usb_composite_dev	*cdev = dev->cdev;
 
 	DBG(cdev, "mtp_function_disable\n");
+	spin_lock_irq(&dev->lock);
 	dev->state = STATE_OFFLINE;
+	spin_unlock_irq(&dev->lock);
 	usb_ep_disable(dev->ep_in);
 	usb_ep_disable(dev->ep_out);
 	usb_ep_disable(dev->ep_intr);
@@ -1581,7 +1602,7 @@
 		seq_printf(s, "vfs write: bytes:%ld\t\t time:%d\n",
 				dev->perf[i].vfs_wbytes,
 				dev->perf[i].vfs_wtime);
-		if (dev->perf[i].vfs_wbytes == mtp_rx_req_len) {
+		if (dev->perf[i].vfs_wbytes == dev->mtp_rx_req_len) {
 			sum += dev->perf[i].vfs_wtime;
 			if (min > dev->perf[i].vfs_wtime)
 				min = dev->perf[i].vfs_wtime;
@@ -1603,7 +1624,7 @@
 		seq_printf(s, "vfs read: bytes:%ld\t\t time:%d\n",
 				dev->perf[i].vfs_rbytes,
 				dev->perf[i].vfs_rtime);
-		if (dev->perf[i].vfs_rbytes == mtp_tx_req_len) {
+		if (dev->perf[i].vfs_rbytes == dev->mtp_tx_req_len) {
 			sum += dev->perf[i].vfs_rtime;
 			if (min > dev->perf[i].vfs_rtime)
 				min = dev->perf[i].vfs_rtime;
diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c
index 0ce2c407..312ae24 100644
--- a/drivers/usb/gadget/function/f_qdss.c
+++ b/drivers/usb/gadget/function/f_qdss.c
@@ -1205,7 +1205,7 @@
 	return &usb_qdss->port.function;
 }
 
-DECLARE_USB_FUNCTION_INIT(qdss, qdss_alloc_inst, qdss_alloc);
+DECLARE_USB_FUNCTION(qdss, qdss_alloc_inst, qdss_alloc);
 static int __init usb_qdss_init(void)
 {
 	int ret;
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index bc4f9f7..24bc2d4 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -386,10 +386,31 @@
 					struct sk_buff *skb)
 {
 	struct sk_buff *skb2;
+	struct rndis_packet_msg_type *header = NULL;
+	struct f_rndis *rndis = func_to_rndis(&port->func);
 
 	if (!skb)
 		return NULL;
 
+	if (rndis->port.multi_pkt_xfer) {
+		if (port->header) {
+			header = port->header;
+			header->MessageType = cpu_to_le32(RNDIS_MSG_PACKET);
+			header->MessageLength = cpu_to_le32(skb->len +
+							sizeof(*header));
+			header->DataOffset = cpu_to_le32(36);
+			header->DataLength = cpu_to_le32(skb->len);
+			pr_debug("MessageLength:%d DataLength:%d\n",
+						header->MessageLength,
+						header->DataLength);
+			return skb;
+		}
+
+		dev_kfree_skb(skb);
+		pr_err("RNDIS header is NULL.\n");
+		return NULL;
+	}
+
 	skb2 = skb_realloc_headroom(skb, sizeof(struct rndis_packet_msg_type));
 	rndis_add_hdr(skb2);
 
@@ -767,7 +788,7 @@
 	 */
 	if (!rndis_opts->bound) {
 		mutex_lock(&rndis_opts->lock);
-		rndis_opts->net = gether_setup_default();
+		rndis_opts->net = gether_setup_name_default("rndis");
 		if (IS_ERR(rndis_opts->net)) {
 			status = PTR_ERR(rndis_opts->net);
 			mutex_unlock(&rndis_opts->lock);
@@ -818,6 +839,27 @@
 	rndis_data_intf.bInterfaceNumber = status;
 	rndis_union_desc.bSlaveInterface0 = status;
 
+	if (rndis_opts->wceis) {
+		/* "Wireless" RNDIS; auto-detected by Windows */
+		rndis_iad_descriptor.bFunctionClass =
+						USB_CLASS_WIRELESS_CONTROLLER;
+		rndis_iad_descriptor.bFunctionSubClass = 0x01;
+		rndis_iad_descriptor.bFunctionProtocol = 0x03;
+		rndis_control_intf.bInterfaceClass =
+						USB_CLASS_WIRELESS_CONTROLLER;
+		rndis_control_intf.bInterfaceSubClass =	 0x01;
+		rndis_control_intf.bInterfaceProtocol =	 0x03;
+	} else {
+		rndis_iad_descriptor.bFunctionClass = USB_CLASS_COMM;
+		rndis_iad_descriptor.bFunctionSubClass =
+						USB_CDC_SUBCLASS_ETHERNET;
+		rndis_iad_descriptor.bFunctionProtocol = USB_CDC_PROTO_NONE;
+		rndis_control_intf.bInterfaceClass = USB_CLASS_COMM;
+		rndis_control_intf.bInterfaceSubClass =	USB_CDC_SUBCLASS_ACM;
+		rndis_control_intf.bInterfaceProtocol =
+						USB_CDC_ACM_PROTO_VENDOR;
+	}
+
 	status = -ENODEV;
 
 	/* allocate instance-specific endpoints */
@@ -950,11 +992,15 @@
 /* f_rndis_opts_ifname */
 USB_ETHERNET_CONFIGFS_ITEM_ATTR_IFNAME(rndis);
 
+/* f_rndis_opts_wceis */
+USB_ETHERNET_CONFIGFS_ITEM_ATTR_WCEIS(rndis);
+
 static struct configfs_attribute *rndis_attrs[] = {
 	&rndis_opts_attr_dev_addr,
 	&rndis_opts_attr_host_addr,
 	&rndis_opts_attr_qmult,
 	&rndis_opts_attr_ifname,
+	&rndis_opts_attr_wceis,
 	NULL,
 };
 
@@ -1008,6 +1054,9 @@
 	}
 	opts->rndis_interf_group = rndis_interf_group;
 
+	/* Enable "Wireless" RNDIS by default */
+	opts->wceis = true;
+
 	return &opts->func_inst;
 }
 
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index c330814..626e020 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -1156,6 +1156,8 @@
 		dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
 		return ret;
 	}
+	iad_desc.bFirstInterface = ret;
+
 	std_ac_if_desc.bInterfaceNumber = ret;
 	iad_desc.bFirstInterface = ret;
 	agdev->ac_intf = ret;
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 9e38809..c75e790 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -246,6 +246,7 @@
 }
 
 static void rx_complete(struct usb_ep *ep, struct usb_request *req);
+static void tx_complete(struct usb_ep *ep, struct usb_request *req);
 
 static int
 rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
@@ -307,7 +308,6 @@
 
 	req->buf = skb->data;
 	req->length = size;
-	req->complete = rx_complete;
 	req->context = skb;
 
 	retval = usb_ep_queue(out, req, gfp_flags);
@@ -410,6 +410,7 @@
 {
 	unsigned		i;
 	struct usb_request	*req;
+	bool			usb_in;
 
 	if (!n)
 		return -ENOMEM;
@@ -420,10 +421,22 @@
 		if (i-- == 0)
 			goto extra;
 	}
+
+	if (ep->desc->bEndpointAddress & USB_DIR_IN)
+		usb_in = true;
+	else
+		usb_in = false;
+
 	while (i--) {
 		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 		if (!req)
 			return list_empty(list) ? -ENOMEM : 0;
+		/* update completion handler */
+		if (usb_in)
+			req->complete = tx_complete;
+		else
+			req->complete = rx_complete;
+
 		list_add(&req->list, list);
 	}
 	return 0;
@@ -575,7 +588,7 @@
 
 static void tx_complete(struct usb_ep *ep, struct usb_request *req)
 {
-	struct sk_buff	*skb = req->context;
+	struct sk_buff	*skb;
 	struct eth_dev	*dev = ep->driver_data;
 	struct net_device *net = dev->net;
 	struct usb_request *new_req;
@@ -683,6 +696,7 @@
 			spin_unlock(&dev->req_lock);
 		}
 	} else {
+		skb = req->context;
 		/* Is aggregation already enabled and buffers allocated ? */
 		if (dev->port_usb->multi_pkt_xfer && dev->tx_req_bufsize) {
 			req->buf = kzalloc(dev->tx_req_bufsize
@@ -725,7 +739,7 @@
 	list_for_each(act, &dev->tx_reqs) {
 		req = container_of(act, struct usb_request, list);
 		if (!req->buf)
-			req->buf = kmalloc(dev->tx_req_bufsize
+			req->buf = kzalloc(dev->tx_req_bufsize
 				+ dev->gadget->extra_buf_alloc, GFP_ATOMIC);
 
 		if (!req->buf)
@@ -804,29 +818,6 @@
 	}
 
 	dev->tx_pkts_rcvd++;
-	/*
-	 * no buffer copies needed, unless the network stack did it
-	 * or the hardware can't use skb buffers.
-	 * or there's not enough space for extra headers we need
-	 */
-	spin_lock_irqsave(&dev->lock, flags);
-	if (dev->wrap && dev->port_usb)
-		skb = dev->wrap(dev->port_usb, skb);
-	spin_unlock_irqrestore(&dev->lock, flags);
-
-	if (!skb) {
-		if (dev->port_usb && dev->port_usb->supports_multi_frame) {
-			/*
-			 * Multi frame CDC protocols may store the frame for
-			 * later which is not a dropped frame.
-			 */
-		} else {
-			dev->net->stats.tx_dropped++;
-		}
-
-		/* no error code for dropped packets */
-		return NETDEV_TX_OK;
-	}
 
 	/* Allocate memory for tx_reqs to support multi packet transfer */
 	spin_lock_irqsave(&dev->req_lock, flags);
@@ -861,13 +852,45 @@
 		dev->tx_throttle++;
 		netif_stop_queue(net);
 	}
-
-	dev->tx_skb_hold_count++;
 	spin_unlock_irqrestore(&dev->req_lock, flags);
 
+	/* no buffer copies needed, unless the network stack did it
+	 * or the hardware can't use skb buffers.
+	 * or there's not enough space for extra headers we need
+	 */
+	spin_lock_irqsave(&dev->lock, flags);
+	if (dev->wrap) {
+		if (dev->port_usb)
+			skb = dev->wrap(dev->port_usb, skb);
+		if (!skb) {
+			spin_unlock_irqrestore(&dev->lock, flags);
+			/* Multi frame CDC protocols may store the frame for
+			 * later which is not a dropped frame.
+			 */
+			if (dev->port_usb &&
+					dev->port_usb->supports_multi_frame)
+				goto multiframe;
+			goto drop;
+		}
+	}
+
+	dev->tx_skb_hold_count++;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
 	if (multi_pkt_xfer) {
+		pr_debug("req->length:%d header_len:%u\n"
+				"skb->len:%d skb->data_len:%d\n",
+				req->length, dev->header_len,
+				skb->len, skb->data_len);
+		/* Add RNDIS Header */
+		memcpy(req->buf + req->length, dev->port_usb->header,
+						dev->header_len);
+		/* Increment req length by header size */
+		req->length += dev->header_len;
+		/* Copy received IP data from SKB */
 		memcpy(req->buf + req->length, skb->data, skb->len);
-		req->length = req->length + skb->len;
+		/* Increment req length by skb data length */
+		req->length += skb->len;
 		length = req->length;
 		dev_kfree_skb_any(skb);
 
@@ -927,8 +950,6 @@
 		req->context = skb;
 	}
 
-	req->complete = tx_complete;
-
 	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
 	if (is_fixed && length == fixed_in_len &&
 	    (length % in->maxpacket) == 0)
@@ -977,11 +998,13 @@
 			dev_kfree_skb_any(skb);
 		else
 			req->length = 0;
+drop:
 		dev->net->stats.tx_dropped++;
+multiframe:
 		spin_lock_irqsave(&dev->req_lock, flags);
 		if (list_empty(&dev->tx_reqs))
 			netif_start_queue(net);
-		list_add(&req->list, &dev->tx_reqs);
+		list_add_tail(&req->list, &dev->tx_reqs);
 		spin_unlock_irqrestore(&dev->req_lock, flags);
 	}
 success:
@@ -1443,6 +1466,8 @@
 	else
 		INFO(dev, "MAC %pM\n", dev->dev_mac);
 
+	uether_debugfs_init(dev);
+
 	return status;
 }
 EXPORT_SYMBOL_GPL(gether_register_netdev);
@@ -1594,6 +1619,11 @@
 	if (!dev)
 		return ERR_PTR(-EINVAL);
 
+	/* size of rndis_packet_msg_type is 44 */
+	link->header = kzalloc(44, GFP_ATOMIC);
+	if (!link->header)
+		return ERR_PTR(-ENOMEM);
+
 	if (link->in_ep) {
 		link->in_ep->driver_data = dev;
 		result = usb_ep_enable(link->in_ep);
@@ -1657,8 +1687,12 @@
 	}
 fail0:
 	/* caller is responsible for cleanup on error */
-	if (result < 0)
+	if (result < 0) {
+		kfree(link->header);
+
 		return ERR_PTR(result);
+	}
+
 	return dev->net;
 }
 EXPORT_SYMBOL_GPL(gether_connect);
@@ -1711,6 +1745,9 @@
 			spin_lock(&dev->req_lock);
 		}
 		spin_unlock(&dev->req_lock);
+		/* Free rndis header buffer memory */
+		kfree(link->header);
+		link->header = NULL;
 		link->in_ep->desc = NULL;
 	}
 
diff --git a/drivers/usb/gadget/function/u_ether.h b/drivers/usb/gadget/function/u_ether.h
index 3ff09d1..a10503a 100644
--- a/drivers/usb/gadget/function/u_ether.h
+++ b/drivers/usb/gadget/function/u_ether.h
@@ -87,6 +87,7 @@
 	/* called on network open/close */
 	void				(*open)(struct gether *);
 	void				(*close)(struct gether *);
+	struct rndis_packet_msg_type	*header;
 };
 
 #define	DEFAULT_FILTER	(USB_CDC_PACKET_TYPE_BROADCAST \
diff --git a/drivers/usb/gadget/function/u_ether_configfs.h b/drivers/usb/gadget/function/u_ether_configfs.h
index 0468459..1dd2ff4 100644
--- a/drivers/usb/gadget/function/u_ether_configfs.h
+++ b/drivers/usb/gadget/function/u_ether_configfs.h
@@ -188,4 +188,50 @@
 									\
 	CONFIGFS_ATTR_RO(_f_##_opts_, ifname)
 
+#define USB_ETHERNET_CONFIGFS_ITEM_ATTR_WCEIS(_f_)			\
+	static ssize_t _f_##_opts_wceis_show(struct config_item *item,	\
+					     char *page)		\
+	{								\
+		struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item);	\
+		bool wceis;						\
+									\
+		if (opts->bound == false) {				\
+			pr_err("Gadget function is not bound yet.\n");	\
+			return -ENODEV;					\
+		}							\
+									\
+		mutex_lock(&opts->lock);				\
+		wceis = opts->wceis;					\
+		mutex_unlock(&opts->lock);				\
+		return snprintf(page, PAGE_SIZE, "%d", wceis);		\
+	}								\
+									\
+	static ssize_t _f_##_opts_wceis_store(struct config_item *item, \
+					      const char *page, size_t len)\
+	{								\
+		struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item);	\
+		bool wceis;						\
+		int ret;						\
+									\
+		if (opts->bound == false) {				\
+			pr_err("Gadget function is not bound yet.\n");	\
+			return -ENODEV;					\
+		}							\
+									\
+		mutex_lock(&opts->lock);				\
+									\
+		ret = kstrtobool(page, &wceis);				\
+		if (ret)						\
+			goto out;					\
+									\
+		opts->wceis = wceis;					\
+		ret = len;						\
+out:									\
+		mutex_unlock(&opts->lock);				\
+									\
+		return ret;						\
+	}								\
+									\
+	CONFIGFS_ATTR(_f_##_opts_, wceis)
+
 #endif /* __U_ETHER_CONFIGFS_H */
diff --git a/drivers/usb/gadget/function/u_rndis.h b/drivers/usb/gadget/function/u_rndis.h
index 4e2ad04..f829a5e 100644
--- a/drivers/usb/gadget/function/u_rndis.h
+++ b/drivers/usb/gadget/function/u_rndis.h
@@ -38,6 +38,9 @@
 	 */
 	struct mutex			lock;
 	int				refcnt;
+
+	/* "Wireless" RNDIS; auto-detected by Windows */
+	bool	wceis;
 };
 
 void rndis_borrow_net(struct usb_function_instance *f, struct net_device *net);
diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h
index 7d53a47..2f03334 100644
--- a/drivers/usb/gadget/u_f.h
+++ b/drivers/usb/gadget/u_f.h
@@ -64,7 +64,9 @@
 /* Frees a usb_request previously allocated by alloc_ep_req() */
 static inline void free_ep_req(struct usb_ep *ep, struct usb_request *req)
 {
+	WARN_ON(req->buf == NULL);
 	kfree(req->buf);
+	req->buf = NULL;
 	usb_ep_free_request(ep, req);
 }
 
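The u_f.h change makes free_ep_req() clear req->buf after freeing it, which is what the earlier f_midi.c error path relies on when it tests req->buf before calling the helper. A minimal userspace sketch of that pattern, using plain malloc()/free() stand-ins rather than the gadget API:

#include <assert.h>
#include <stdlib.h>

struct fake_request {
	void *buf;
};

/* Mirrors the patched helper: free the buffer once and mark it gone. */
static void free_req_buf(struct fake_request *req)
{
	assert(req->buf != NULL);	/* analogous to the WARN_ON() */
	free(req->buf);
	req->buf = NULL;
}

int main(void)
{
	struct fake_request req = { .buf = malloc(64) };

	free_req_buf(&req);
	/* A later error path can now check req.buf instead of double-freeing. */
	if (req.buf != NULL)
		free_req_buf(&req);
	return 0;
}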
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 6959071..0c71938 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -139,10 +139,8 @@
 		goto out;
 
 	ret = ep->ops->disable(ep);
-	if (ret) {
-		ret = ret;
+	if (ret)
 		goto out;
-	}
 
 	ep->enabled = false;
 
@@ -250,6 +248,9 @@
  * arranges to poll once per interval, and the gadget driver usually will
  * have queued some data to transfer at that time.
  *
+ * Note that @req's ->complete() callback must never be called from
+ * within usb_ep_queue() as that can create deadlock situations.
+ *
  * Returns zero, or a negative error code.  Endpoints that are not enabled
  * report errors; errors will also be
  * reported when the usb peripheral is disconnected.
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index aac0ce8..8991a40 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -1310,7 +1310,7 @@
 {
 	struct fsl_ep *ep = get_ep_by_pipe(udc, pipe);
 
-	if (ep->name)
+	if (ep->ep.name)
 		nuke(ep, -ESHUTDOWN);
 }
 
@@ -1698,7 +1698,7 @@
 		curr_ep = get_ep_by_pipe(udc, i);
 
 		/* If the ep is configured */
-		if (curr_ep->name == NULL) {
+		if (!curr_ep->ep.name) {
 			WARNING("Invalid EP?");
 			continue;
 		}
diff --git a/drivers/usb/gadget/udc/goku_udc.h b/drivers/usb/gadget/udc/goku_udc.h
index 86d2ada..64eb0f2 100644
--- a/drivers/usb/gadget/udc/goku_udc.h
+++ b/drivers/usb/gadget/udc/goku_udc.h
@@ -28,7 +28,7 @@
 #	define INT_EP1DATASET		0x00040
 #	define INT_EP2DATASET		0x00080
 #	define INT_EP3DATASET		0x00100
-#define INT_EPnNAK(n)		(0x00100 < (n))		/* 0 < n < 4 */
+#define INT_EPnNAK(n)		(0x00100 << (n))	/* 0 < n < 4 */
 #	define INT_EP1NAK		0x00200
 #	define INT_EP2NAK		0x00400
 #	define INT_EP3NAK		0x00800
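The goku_udc.h fix above replaces a comparison with the intended shift; the old macro evaluated to 0 or 1 instead of a per-endpoint NAK interrupt bit. A quick standalone check that the corrected macro reproduces the hand-written defines:

#include <assert.h>

#define INT_EPnNAK(n)	(0x00100 << (n))	/* corrected definition */
#define INT_EP1NAK	0x00200
#define INT_EP2NAK	0x00400
#define INT_EP3NAK	0x00800

int main(void)
{
	/* With the old "(0x00100 < (n))" form these were all 0 for n = 1..3. */
	assert(INT_EPnNAK(1) == INT_EP1NAK);
	assert(INT_EPnNAK(2) == INT_EP2NAK);
	assert(INT_EPnNAK(3) == INT_EP3NAK);
	return 0;
}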
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 2c39571..216ddee 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -446,7 +446,8 @@
 	struct usb_hcd *hcd = ohci_to_hcd(ohci);
 
 	/* Accept arbitrarily long scatter-gather lists */
-	hcd->self.sg_tablesize = ~0;
+	if (!(hcd->driver->flags & HCD_LOCAL_MEM))
+		hcd->self.sg_tablesize = ~0;
 
 	if (distrust_firmware)
 		ohci->flags |= OHCI_QUIRK_HUB_POWER;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 588e053..040feda 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -975,6 +975,8 @@
 	if (dev->out_ctx)
 		xhci_free_container_ctx(xhci, dev->out_ctx);
 
+	if (dev->udev && dev->udev->slot_id)
+		dev->udev->slot_id = 0;
 	kfree(xhci->devs[slot_id]);
 	xhci->devs[slot_id] = NULL;
 }
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index a1dedf0..75afa8f 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -439,6 +439,39 @@
 	return -EBUSY;
 }
 
+static int xhci_plat_pm_freeze(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	if (!xhci)
+		return 0;
+
+	dev_dbg(dev, "xhci-plat freeze\n");
+
+	return xhci_suspend(xhci, false);
+}
+
+static int xhci_plat_pm_restore(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	int ret;
+
+	if (!xhci)
+		return 0;
+
+	dev_dbg(dev, "xhci-plat restore\n");
+
+	ret = xhci_resume(xhci, true);
+	pm_runtime_disable(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+	pm_runtime_mark_last_busy(dev);
+
+	return ret;
+}
+
 static int xhci_plat_runtime_suspend(struct device *dev)
 {
 	struct usb_hcd *hcd = dev_get_drvdata(dev);
@@ -470,7 +503,9 @@
 }
 
 static const struct dev_pm_ops xhci_plat_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(NULL, NULL)
+	.freeze		= xhci_plat_pm_freeze,
+	.restore	= xhci_plat_pm_restore,
+	.thaw		= xhci_plat_pm_restore,
 	SET_RUNTIME_PM_OPS(xhci_plat_runtime_suspend, xhci_plat_runtime_resume,
 			   xhci_plat_runtime_idle)
 };
@@ -489,7 +524,6 @@
 static struct platform_driver usb_xhci_driver = {
 	.probe	= xhci_plat_probe,
 	.remove	= xhci_plat_remove,
-	.shutdown	= usb_hcd_platform_shutdown,
 	.driver	= {
 		.name = "xhci-hcd",
 		.pm = DEV_PM_OPS,
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index fe2bbfb..ccad0be 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -947,7 +947,7 @@
 	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
 	u32			command;
 
-	if (!hcd->state)
+	if (!hcd->state || xhci->suspended)
 		return 0;
 
 	if (hcd->state != HC_STATE_SUSPENDED ||
@@ -1017,6 +1017,7 @@
 	/* step 5: remove core well power */
 	/* synchronize irq when using MSI-X */
 	xhci_msix_sync_irqs(xhci);
+	xhci->suspended = true;
 
 	return rc;
 }
@@ -1036,7 +1037,7 @@
 	int			retval = 0;
 	bool			comp_timer_running = false;
 
-	if (!hcd->state)
+	if (!hcd->state || !xhci->suspended)
 		return 0;
 
 	/* Wait a bit if either of the roothubs need to settle from the
@@ -1173,6 +1174,7 @@
 
 	/* Re-enable port polling. */
 	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
+	xhci->suspended = false;
 	set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
 	usb_hcd_poll_rh_status(xhci->shared_hcd);
 	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
@@ -3659,6 +3661,7 @@
 		del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
 	}
 
+	virt_dev->udev = NULL;
 	spin_lock_irqsave(&xhci->lock, flags);
 	/* Don't disable the slot if the host controller is dead. */
 	state = readl(&xhci->op_regs->status);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 4c1f556..a5153ca 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1697,6 +1697,7 @@
 	/* Compliance Mode Recovery Data */
 	struct timer_list	comp_mode_recovery_timer;
 	u32			port_status_u0;
+	bool			suspended;
 /* Compliance Mode Timer Triggered every 2 seconds */
 #define COMP_MODE_RCVRY_MSECS 2000
 
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 2d9a806..579aa9a 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1774,6 +1774,7 @@
 	int		vbus;
 	u8		devctl;
 
+	pm_runtime_get_sync(dev);
 	spin_lock_irqsave(&musb->lock, flags);
 	val = musb->a_wait_bcon;
 	vbus = musb_platform_get_vbus_status(musb);
@@ -1787,6 +1788,7 @@
 			vbus = 0;
 	}
 	spin_unlock_irqrestore(&musb->lock, flags);
+	pm_runtime_put_sync(dev);
 
 	return sprintf(buf, "Vbus %s, timeout %lu msec\n",
 			vbus ? "on" : "off", val);
@@ -2483,10 +2485,11 @@
 	musb_generic_disable(musb);
 	spin_unlock_irqrestore(&musb->lock, flags);
 	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+	musb_platform_exit(musb);
+
 	pm_runtime_dont_use_autosuspend(musb->controller);
 	pm_runtime_put_sync(musb->controller);
 	pm_runtime_disable(musb->controller);
-	musb_platform_exit(musb);
 	musb_phy_callback = NULL;
 	if (musb->dma_controller)
 		musb_dma_controller_destroy(musb->dma_controller);
@@ -2710,7 +2713,8 @@
 	if ((devctl & mask) != (musb->context.devctl & mask))
 		musb->port1_status = 0;
 
-	musb_start(musb);
+	musb_enable_interrupts(musb);
+	musb_platform_enable(musb);
 
 	spin_lock_irqsave(&musb->lock, flags);
 	error = musb_run_resume_work(musb);
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index a55173c..f1219f6 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -442,7 +442,6 @@
 	req = next_request(musb_ep);
 	request = &req->request;
 
-	trace_musb_req_tx(req);
 	csr = musb_readw(epio, MUSB_TXCSR);
 	musb_dbg(musb, "<== %s, txcsr %04x", musb_ep->end_point.name, csr);
 
@@ -481,6 +480,8 @@
 		u8	is_dma = 0;
 		bool	short_packet = false;
 
+		trace_musb_req_tx(req);
+
 		if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
 			is_dma = 1;
 			csr |= MUSB_TXCSR_P_WZC_BITS;
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 844a309..e85b9c2 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -114,15 +114,19 @@
 		}
 
 		is_in = epnum & USB_DIR_IN;
-		if (is_in) {
-			epnum &= 0x0f;
-			ep = &musb->endpoints[epnum].ep_in;
-		} else {
-			ep = &musb->endpoints[epnum].ep_out;
+		epnum &= 0x0f;
+		if (epnum >= MUSB_C_NUM_EPS) {
+			handled = -EINVAL;
+			break;
 		}
+
+		if (is_in)
+			ep = &musb->endpoints[epnum].ep_in;
+		else
+			ep = &musb->endpoints[epnum].ep_out;
 		regs = musb->endpoints[epnum].regs;
 
-		if (epnum >= MUSB_C_NUM_EPS || !ep->desc) {
+		if (!ep->desc) {
 			handled = -EINVAL;
 			break;
 		}
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 4303389..e2bc915 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1023,7 +1023,9 @@
 			/* set tx_reinit and schedule the next qh */
 			ep->tx_reinit = 1;
 		}
-		musb_start_urb(musb, is_in, next_qh);
+
+		if (next_qh)
+			musb_start_urb(musb, is_in, next_qh);
 	}
 }
 
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index d21823b..6685f05 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -753,6 +753,7 @@
 {
 	int i;
 	union power_supply_propval val;
+	bool pps_found = false;
 	u32 first_pdo = pd->received_pdos[0];
 
 	if (PD_SRC_PDO_TYPE(first_pdo) != PD_SRC_PDO_TYPE_FIXED) {
@@ -768,10 +769,8 @@
 	power_supply_set_property(pd->usb_psy,
 			POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED, &val);
 
-	if (pd->spec_rev == USBPD_REV_30 && !rev3_sink_only) {
-		bool pps_found = false;
-
-		/* downgrade to 2.0 if no PPS */
+	/* Check for PPS APDOs */
+	if (pd->spec_rev == USBPD_REV_30) {
 		for (i = 1; i < PD_MAX_DATA_OBJ; i++) {
 			if ((PD_SRC_PDO_TYPE(pd->received_pdos[i]) ==
 					PD_SRC_PDO_TYPE_AUGMENTED) &&
@@ -780,10 +779,18 @@
 				break;
 			}
 		}
-		if (!pps_found)
+
+		/* downgrade to 2.0 if no PPS */
+		if (!pps_found && !rev3_sink_only)
 			pd->spec_rev = USBPD_REV_20;
 	}
 
+	val.intval = pps_found ?
+			POWER_SUPPLY_PD_PPS_ACTIVE :
+			POWER_SUPPLY_PD_ACTIVE;
+	power_supply_set_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_PD_ACTIVE, &val);
+
 	/* Select the first PDO (vSafe5V) immediately. */
 	pd_select_pdo(pd, 1, 0, 0);
 
@@ -2140,7 +2147,7 @@
 				usbpd_dbg(&pd->dev, "Src CapsCounter exceeded, disabling PD\n");
 				usbpd_set_state(pd, PE_SRC_DISABLED);
 
-				val.intval = 0;
+				val.intval = POWER_SUPPLY_PD_INACTIVE;
 				power_supply_set_property(pd->usb_psy,
 						POWER_SUPPLY_PROP_PD_ACTIVE,
 						&val);
@@ -2160,7 +2167,7 @@
 		pd->current_state = PE_SRC_SEND_CAPABILITIES_WAIT;
 		kick_sm(pd, SENDER_RESPONSE_TIME);
 
-		val.intval = 1;
+		val.intval = POWER_SUPPLY_PD_ACTIVE;
 		power_supply_set_property(pd->usb_psy,
 				POWER_SUPPLY_PROP_PD_ACTIVE, &val);
 		break;
@@ -2352,10 +2359,6 @@
 			pd->src_cap_id++;
 
 			usbpd_set_state(pd, PE_SNK_EVALUATE_CAPABILITY);
-
-			val.intval = 1;
-			power_supply_set_property(pd->usb_psy,
-					POWER_SUPPLY_PROP_PD_ACTIVE, &val);
 		} else if (pd->hard_reset_count < 3) {
 			usbpd_set_state(pd, PE_SNK_HARD_RESET);
 		} else {
@@ -2366,7 +2369,7 @@
 					POWER_SUPPLY_PROP_PD_IN_HARD_RESET,
 					&val);
 
-			val.intval = 0;
+			val.intval = POWER_SUPPLY_PD_INACTIVE;
 			power_supply_set_property(pd->usb_psy,
 					POWER_SUPPLY_PROP_PD_ACTIVE, &val);
 		}
@@ -3577,7 +3580,7 @@
 {
 	struct usbpd *pd = dev_get_drvdata(dev);
 	int pos = PD_RDO_OBJ_POS(pd->rdo);
-	int type = PD_SRC_PDO_TYPE(pd->received_pdos[pos]);
+	int type = PD_SRC_PDO_TYPE(pd->received_pdos[pos - 1]);
 	int len;
 
 	len = scnprintf(buf, PAGE_SIZE, "Request Data Object\n"
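The last policy_engine.c hunk fixes an off-by-one when mapping the RDO's object position back to the stored source capabilities: object positions in a Request Data Object are 1-based, while received_pdos[] is 0-based, so indexing with pos read one entry past the PDO actually requested. A small standalone illustration (PD_RDO_OBJ_POS here is a simplified stand-in for the driver's macro, assumed to extract bits 30:28):

#include <stdint.h>
#include <stdio.h>

#define PD_RDO_OBJ_POS(rdo)	(((rdo) >> 28) & 0x7)	/* assumed layout */

int main(void)
{
	/* received_pdos[0] ~ a 5 V / 3 A fixed PDO (illustrative value) */
	uint32_t received_pdos[7] = { 0x0001912c };
	uint32_t rdo = 1u << 28;	/* request selecting object position 1 */

	int pos = PD_RDO_OBJ_POS(rdo);
	/* positions are 1-based, the array is 0-based: index with pos - 1 */
	printf("selected PDO = 0x%08x\n", received_pdos[pos - 1]);
	return 0;
}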
diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c
index e28173b..6dfca9c 100644
--- a/drivers/usb/phy/phy-msm-qusb-v2.c
+++ b/drivers/usb/phy/phy-msm-qusb-v2.c
@@ -71,7 +71,6 @@
 #define SQ_CTRL1_CHIRP_DISABLE		0x20
 #define SQ_CTRL2_CHIRP_DISABLE		0x80
 
-#define PORT_TUNE1_OVERRIDE_VAL		0xc5
 #define DEBUG_CTRL1_OVERRIDE_VAL	0x09
 
 /* PERIPH_SS_PHY_REFGEN_NORTH_BG_CTRL register bits */
@@ -149,10 +148,12 @@
 	/* override TUNEX registers value */
 	struct dentry		*root;
 	u8			tune[5];
+	u8                      bias_ctrl2;
 
 	struct hrtimer		timer;
 	int			soc_min_rev;
 	bool			host_chirp_erratum;
+	bool			override_bias_ctrl2;
 };
 
 #ifdef CONFIG_NVMEM
@@ -472,6 +473,7 @@
 static void qusb_phy_host_init(struct usb_phy *phy)
 {
 	u8 reg;
+	int p_index;
 	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
 
 	dev_dbg(phy->dev, "%s\n", __func__);
@@ -479,21 +481,51 @@
 	qusb_phy_write_seq(qphy->base, qphy->qusb_phy_host_init_seq,
 			qphy->host_init_seq_len, 0);
 
-	/* If soc revision is mentioned and host_chirp_erratum flag is set
-	 * then override TUNE1 and DEBUG_CTRL1
-	 */
-	if (qphy->soc_min_rev && qphy->host_chirp_erratum) {
-		writel_relaxed(PORT_TUNE1_OVERRIDE_VAL,
-				qphy->base + qphy->phy_reg[PORT_TUNE1]);
-		writel_relaxed(DEBUG_CTRL1_OVERRIDE_VAL,
-				qphy->base + qphy->phy_reg[DEBUG_CTRL1]);
+	if (qphy->efuse_reg) {
+		if (!qphy->tune_val)
+			qusb_phy_get_tune1_param(qphy);
+	} else {
+		/* For non-fused chips we need to write the TUNE1 param as
+		 * specified in DT, otherwise we will end up writing 0
+		 * to TUNE1.
+		 */
+		qphy->tune_val = readb_relaxed(qphy->base +
+					qphy->phy_reg[PORT_TUNE1]);
 	}
 
-	if (qphy->refgen_north_bg_reg)
+	/* If soc revision is mentioned and host_chirp_erratum flag is set
+	 * then override TUNE1 and DEBUG_CTRL1 while honouring efuse values
+	 */
+	if (qphy->soc_min_rev && qphy->host_chirp_erratum) {
+		writel_relaxed(qphy->tune_val | BIT(7),
+			qphy->base + qphy->phy_reg[PORT_TUNE1]);
+		pr_debug("%s(): Programming TUNE1 parameter as:%x\n",
+			__func__, readb_relaxed(qphy->base +
+					qphy->phy_reg[PORT_TUNE1]));
+		writel_relaxed(DEBUG_CTRL1_OVERRIDE_VAL,
+			qphy->base + qphy->phy_reg[DEBUG_CTRL1]);
+	} else {
+		writel_relaxed(qphy->tune_val,
+			qphy->base + qphy->phy_reg[PORT_TUNE1]);
+	}
+
+	/* if debugfs based tunex params are set, use that value. */
+	for (p_index = 0; p_index < 5; p_index++) {
+		if (qphy->tune[p_index])
+			writel_relaxed(qphy->tune[p_index],
+				qphy->base + qphy->phy_reg[PORT_TUNE1] +
+							(4 * p_index));
+	}
+
+	if (qphy->refgen_north_bg_reg && qphy->override_bias_ctrl2)
 		if (readl_relaxed(qphy->refgen_north_bg_reg) & BANDGAP_BYPASS)
 			writel_relaxed(BIAS_CTRL_2_OVERRIDE_VAL,
 				qphy->base + qphy->phy_reg[BIAS_CTRL_2]);
 
+	if (qphy->bias_ctrl2)
+		writel_relaxed(qphy->bias_ctrl2,
+				qphy->base + qphy->phy_reg[BIAS_CTRL_2]);
+
 	/* Ensure above write is completed before turning ON ref clk */
 	wmb();
 
@@ -580,11 +612,15 @@
 							(4 * p_index));
 	}
 
-	if (qphy->refgen_north_bg_reg)
+	if (qphy->refgen_north_bg_reg && qphy->override_bias_ctrl2)
 		if (readl_relaxed(qphy->refgen_north_bg_reg) & BANDGAP_BYPASS)
 			writel_relaxed(BIAS_CTRL_2_OVERRIDE_VAL,
 				qphy->base + qphy->phy_reg[BIAS_CTRL_2]);
 
+	if (qphy->bias_ctrl2)
+		writel_relaxed(qphy->bias_ctrl2,
+				qphy->base + qphy->phy_reg[BIAS_CTRL_2]);
+
 	/* ensure above writes are completed before re-enabling PHY */
 	wmb();
 
@@ -946,11 +982,21 @@
 			dev_err(qphy->phy.dev,
 				"can't create debugfs entry for %s\n", name);
 			debugfs_remove_recursive(qphy->root);
-			ret = ENOMEM;
+			ret = -ENOMEM;
 			goto create_err;
 		}
 	}
 
+	file = debugfs_create_x8("bias_ctrl2", 0644, qphy->root,
+						&qphy->bias_ctrl2);
+	if (IS_ERR_OR_NULL(file)) {
+		dev_err(qphy->phy.dev,
+			"can't create debugfs entry for bias_ctrl2\n");
+		debugfs_remove_recursive(qphy->root);
+		ret = -ENOMEM;
+		goto create_err;
+	}
+
 create_err:
 	return ret;
 }
@@ -1190,6 +1236,9 @@
 	qphy->host_chirp_erratum = of_property_read_bool(dev->of_node,
 					"qcom,host-chirp-erratum");
 
+	qphy->override_bias_ctrl2 = of_property_read_bool(dev->of_node,
+					"qcom,override-bias-ctrl2");
+
 	ret = of_property_read_u32_array(dev->of_node, "qcom,vdd-voltage-level",
 					 (u32 *) qphy->vdd_levels,
 					 ARRAY_SIZE(qphy->vdd_levels));
diff --git a/drivers/usb/phy/phy-msm-qusb.c b/drivers/usb/phy/phy-msm-qusb.c
index 9c33c6e..47db12b 100644
--- a/drivers/usb/phy/phy-msm-qusb.c
+++ b/drivers/usb/phy/phy-msm-qusb.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -706,7 +706,9 @@
 				qphy->base + QUSB2PHY_PORT_INTR_CTRL);
 
 			/* Disable PHY */
-			writel_relaxed(POWER_DOWN,
+			writel_relaxed(POWER_DOWN |
+				readl_relaxed(qphy->base +
+					QUSB2PHY_PORT_POWERDOWN),
 				qphy->base + QUSB2PHY_PORT_POWERDOWN);
 			/* Make sure that above write is completed */
 			wmb();
diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c
index ad3bbf7..1795d24 100644
--- a/drivers/usb/phy/phy-msm-ssusb-qmp.c
+++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c
@@ -25,10 +25,10 @@
 #include <linux/clk.h>
 #include <linux/reset.h>
 
-enum core_ldo_levels {
-	CORE_LEVEL_NONE = 0,
-	CORE_LEVEL_MIN,
-	CORE_LEVEL_MAX,
+enum ldo_levels {
+	VOLTAGE_LEVEL_NONE = 0,
+	VOLTAGE_LEVEL_MIN,
+	VOLTAGE_LEVEL_MAX,
 };
 
 #define INIT_MAX_TIME_USEC			1000
@@ -38,6 +38,8 @@
 #define USB_SSPHY_1P2_VOL_MAX		1200000 /* uV */
 #define USB_SSPHY_HPM_LOAD		23000	/* uA */
 
+#define USB_SSPHY_LOAD_DEFAULT		-1
+
 /* USB3PHY_PCIE_USB3_PCS_PCS_STATUS bit */
 #define PHYSTATUS				BIT(6)
 
@@ -119,6 +121,9 @@
 	int			vdd_levels[3]; /* none, low, high */
 	struct regulator	*core_ldo;
 	int			core_voltage_levels[3];
+	struct regulator	*fpc_redrive_ldo;
+	int			redrive_voltage_levels[3];
+	int			redrive_load;
 	struct clk		*ref_clk_src;
 	struct clk		*ref_clk;
 	struct clk		*aux_clk;
@@ -226,6 +231,33 @@
 	}
 }
 
+static int msm_ldo_enable(struct msm_ssphy_qmp *phy,
+		struct regulator *ldo, int *voltage_levels, int load)
+{
+	int ret = 0;
+
+	dev_dbg(phy->phy.dev,
+		"ldo: min_vol:%duV max_vol:%duV\n",
+		voltage_levels[VOLTAGE_LEVEL_MIN],
+		voltage_levels[VOLTAGE_LEVEL_MAX]);
+
+	if (load > 0) {
+		ret = regulator_set_load(ldo, load);
+		if (ret < 0)
+			return ret;
+	}
+
+	ret = regulator_set_voltage(ldo,
+			voltage_levels[VOLTAGE_LEVEL_MIN],
+			voltage_levels[VOLTAGE_LEVEL_MAX]);
+	if (ret)
+		return ret;
+
+	ret = regulator_enable(ldo);
+
+	return ret;
+}
+
 static int msm_ssusb_qmp_ldo_enable(struct msm_ssphy_qmp *phy, int on)
 {
 	int min, rc = 0;
@@ -245,74 +277,65 @@
 	if (!on)
 		goto disable_regulators;
 
-	rc = regulator_set_voltage(phy->vdd, phy->vdd_levels[min],
-				    phy->vdd_levels[2]);
-	if (rc) {
-		dev_err(phy->phy.dev, "unable to set voltage for ssusb vdd\n");
-		return rc;
+	if (phy->fpc_redrive_ldo) {
+		rc = msm_ldo_enable(phy, phy->fpc_redrive_ldo,
+				phy->redrive_voltage_levels,
+				phy->redrive_load);
+		if (rc < 0) {
+			dev_err(phy->phy.dev,
+				"enable phy->fpc_redrive_ldo failed\n");
+			return rc;
+		}
+
+		dev_dbg(phy->phy.dev,
+			"fpc redrive ldo: min_vol:%duV max_vol:%duV\n",
+			phy->redrive_voltage_levels[VOLTAGE_LEVEL_MIN],
+			phy->redrive_voltage_levels[VOLTAGE_LEVEL_MAX]);
 	}
 
-	dev_dbg(phy->phy.dev, "min_vol:%d max_vol:%d\n",
-		phy->vdd_levels[min], phy->vdd_levels[2]);
-
-	rc = regulator_enable(phy->vdd);
-	if (rc) {
-		dev_err(phy->phy.dev,
-			"regulator_enable(phy->vdd) failed, ret=%d",
-			rc);
-		goto unconfig_vdd;
-	}
-
-	rc = regulator_set_load(phy->core_ldo, USB_SSPHY_HPM_LOAD);
+	rc = msm_ldo_enable(phy, phy->vdd, phy->vdd_levels,
+			USB_SSPHY_LOAD_DEFAULT);
 	if (rc < 0) {
-		dev_err(phy->phy.dev, "Unable to set HPM of core_ldo\n");
+		dev_err(phy->phy.dev, "enable phy->vdd failed\n");
+		goto disable_fpc_redrive;
+	}
+
+	dev_dbg(phy->phy.dev,
+		"vdd ldo: min_vol:%duV max_vol:%duV\n",
+		phy->vdd_levels[VOLTAGE_LEVEL_MIN],
+		phy->vdd_levels[VOLTAGE_LEVEL_MAX]);
+
+	rc = msm_ldo_enable(phy, phy->core_ldo, phy->core_voltage_levels,
+			USB_SSPHY_HPM_LOAD);
+	if (rc < 0) {
+		dev_err(phy->phy.dev, "enable phy->core_ldo failed\n");
 		goto disable_vdd;
 	}
 
-	rc = regulator_set_voltage(phy->core_ldo,
-			phy->core_voltage_levels[CORE_LEVEL_MIN],
-			phy->core_voltage_levels[CORE_LEVEL_MAX]);
-	if (rc) {
-		dev_err(phy->phy.dev, "unable to set voltage for core_ldo\n");
-		goto put_core_ldo_lpm;
-	}
-
-	rc = regulator_enable(phy->core_ldo);
-	if (rc) {
-		dev_err(phy->phy.dev, "Unable to enable core_ldo\n");
-		goto unset_core_ldo;
-	}
+	dev_dbg(phy->phy.dev,
+		"core ldo: min_vol:%duV max_vol:%duV\n",
+		phy->core_voltage_levels[VOLTAGE_LEVEL_MIN],
+		phy->core_voltage_levels[VOLTAGE_LEVEL_MAX]);
 
 	return 0;
 
 disable_regulators:
 	rc = regulator_disable(phy->core_ldo);
 	if (rc)
-		dev_err(phy->phy.dev, "Unable to disable core_ldo\n");
-
-unset_core_ldo:
-	rc = regulator_set_voltage(phy->core_ldo,
-			phy->core_voltage_levels[CORE_LEVEL_NONE],
-			phy->core_voltage_levels[CORE_LEVEL_MAX]);
-	if (rc)
-		dev_err(phy->phy.dev, "unable to set voltage for core_ldo\n");
-
-put_core_ldo_lpm:
-	rc = regulator_set_load(phy->core_ldo, 0);
-	if (rc < 0)
-		dev_err(phy->phy.dev, "Unable to set LPM of core_ldo\n");
+		dev_err(phy->phy.dev, "disable phy->core_ldo failed\n");
 
 disable_vdd:
 	rc = regulator_disable(phy->vdd);
 	if (rc)
-		dev_err(phy->phy.dev, "regulator_disable(phy->vdd) failed, ret=%d",
-			rc);
+		dev_err(phy->phy.dev, "disable phy->vdd failed\n");
 
-unconfig_vdd:
-	rc = regulator_set_voltage(phy->vdd, phy->vdd_levels[min],
-				    phy->vdd_levels[2]);
-	if (rc)
-		dev_err(phy->phy.dev, "unable to set voltage for ssusb vdd\n");
+disable_fpc_redrive:
+	if (phy->fpc_redrive_ldo) {
+		rc = regulator_disable(phy->fpc_redrive_ldo);
+		if (rc)
+			dev_err(phy->phy.dev,
+				"disable phy->fpc_redrive_ldo failed\n");
+	}
 
 	return rc < 0 ? rc : 0;
 }
@@ -634,6 +657,9 @@
 	if (suspend) {
 		if (phy->cable_connected)
 			msm_ssusb_qmp_enable_autonomous(phy, 1);
+		else
+			writel_relaxed(0x00,
+			phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
 
 		/* Make sure above write completed with PHY */
 		wmb();
@@ -944,9 +970,9 @@
 	}
 
 	/* Set default core voltage values */
-	phy->core_voltage_levels[CORE_LEVEL_NONE] = 0;
-	phy->core_voltage_levels[CORE_LEVEL_MIN] = USB_SSPHY_1P2_VOL_MIN;
-	phy->core_voltage_levels[CORE_LEVEL_MAX] = USB_SSPHY_1P2_VOL_MAX;
+	phy->core_voltage_levels[VOLTAGE_LEVEL_NONE] = 0;
+	phy->core_voltage_levels[VOLTAGE_LEVEL_MIN] = USB_SSPHY_1P2_VOL_MIN;
+	phy->core_voltage_levels[VOLTAGE_LEVEL_MAX] = USB_SSPHY_1P2_VOL_MAX;
 
 	if (of_get_property(dev->of_node, "qcom,core-voltage-level", &len) &&
 		len == sizeof(phy->core_voltage_levels)) {
@@ -990,6 +1016,39 @@
 		goto err;
 	}
 
+	phy->fpc_redrive_ldo = devm_regulator_get_optional(dev, "fpc-redrive");
+	if (IS_ERR(phy->fpc_redrive_ldo)) {
+		phy->fpc_redrive_ldo = NULL;
+		dev_dbg(dev, "no FPC re-drive ldo regulator\n");
+	} else {
+		if (of_get_property(dev->of_node,
+				"qcom,redrive-voltage-level", &len) &&
+				len == sizeof(phy->redrive_voltage_levels)) {
+			ret = of_property_read_u32_array(dev->of_node,
+					"qcom,redrive-voltage-level",
+					(u32 *) phy->redrive_voltage_levels,
+					len / sizeof(u32));
+			if (ret) {
+				dev_err(dev,
+					"err qcom,redrive-voltage-level\n");
+				goto err;
+			}
+		} else {
+			ret = -EINVAL;
+			dev_err(dev, "err inputs for redrive-voltage-level\n");
+			goto err;
+		}
+
+		ret = of_property_read_u32(dev->of_node, "qcom,redrive-load",
+				&phy->redrive_load);
+		if (ret) {
+			dev_err(&pdev->dev, "unable to read redrive load\n");
+			goto err;
+		}
+
+		dev_dbg(dev, "Get FPC re-drive ldo regulator\n");
+	}
+
 	platform_set_drvdata(pdev, phy);
 
 	if (of_property_read_bool(dev->of_node, "qcom,vbus-valid-override"))
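Note on the refactor above: msm_ldo_enable() folds the previously open-coded sequence into one helper that, in order, votes an optional load, programs the voltage window, and only then enables the regulator, so the fpc_redrive, vdd and core LDOs all follow the same path and the error unwinding stays symmetric. The snippet below is a minimal userspace sketch of that ordering only; the regulator functions here are local stand-ins written for illustration, not the kernel regulator API.

#include <stdio.h>

/* Stand-ins for the kernel regulator calls used by the patch. */
struct regulator { const char *name; };

static int regulator_set_load(struct regulator *r, int load_uA)
{
	printf("%s: set load %d uA\n", r->name, load_uA);
	return 0;
}

static int regulator_set_voltage(struct regulator *r, int min_uV, int max_uV)
{
	printf("%s: set voltage %d..%d uV\n", r->name, min_uV, max_uV);
	return 0;
}

static int regulator_enable(struct regulator *r)
{
	printf("%s: enabled\n", r->name);
	return 0;
}

/*
 * Mirrors the ordering in msm_ldo_enable(): optional load vote first,
 * then the voltage window, then the enable itself.  levels[] is indexed
 * none/min/max like the driver's VOLTAGE_LEVEL_* enum.
 */
static int ldo_enable(struct regulator *r, const int levels[3], int load)
{
	int ret;

	if (load > 0) {
		ret = regulator_set_load(r, load);
		if (ret < 0)
			return ret;
	}

	ret = regulator_set_voltage(r, levels[1], levels[2]);
	if (ret)
		return ret;

	return regulator_enable(r);
}

int main(void)
{
	struct regulator core = { "core_ldo" };
	const int core_levels[3] = { 0, 1200000, 1200000 };

	return ldo_enable(&core, core_levels, 23000);
}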
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 39086bf..b8f4907 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -213,6 +213,7 @@
 	bool enable_streaming;
 	bool enable_axi_prefetch;
 	bool vbus_low_as_hostmode;
+	bool phy_id_high_as_peripheral;
 };
 
 #define SDP_CHECK_DELAY_MS 10000 /* in ms */
@@ -257,11 +258,26 @@
 MODULE_PARM_DESC(lpm_disconnect_thresh,
 	"Delay before entering LPM on USB disconnect");
 
+static bool floated_charger_enable;
+module_param(floated_charger_enable, bool, 0644);
+MODULE_PARM_DESC(floated_charger_enable,
+	"Whether to enable floated charger");
+
 /* by default debugging is enabled */
 static unsigned int enable_dbg_log = 1;
 module_param(enable_dbg_log, uint, 0644);
 MODULE_PARM_DESC(enable_dbg_log, "Debug buffer events");
 
+/* Max current to be drawn for DCP charger */
+static int dcp_max_current = IDEV_CHG_MAX;
+module_param(dcp_max_current, int, 0644);
+MODULE_PARM_DESC(dcp_max_current, "max current drawn for DCP charger");
+
+static bool chg_detection_for_float_charger;
+module_param(chg_detection_for_float_charger, bool, 0644);
+MODULE_PARM_DESC(chg_detection_for_float_charger,
+	"Whether to do PHY based charger detection for float chargers");
+
 static struct msm_otg *the_msm_otg;
 static bool debug_bus_voting_enabled;
 
@@ -1324,6 +1340,7 @@
 	struct msm_otg_platform_data *pdata = motg->pdata;
 	int cnt;
 	bool host_bus_suspend, device_bus_suspend, sm_work_busy;
+	bool host_pc_charger;
 	u32 cmd_val;
 	u32 portsc, config2;
 	u32 func_ctrl;
@@ -1351,6 +1368,9 @@
 	if (host_bus_suspend)
 		msm_otg_perf_vote_update(motg, false);
 
+	host_pc_charger = (motg->chg_type == USB_SDP_CHARGER) ||
+				(motg->chg_type == USB_CDP_CHARGER);
+
 	/* !BSV, but its handling is in progress by otg sm_work */
 	sm_work_busy = !test_bit(B_SESS_VLD, &motg->inputs) &&
 			phy->otg->state == OTG_STATE_B_PERIPHERAL;
@@ -1377,8 +1397,8 @@
 	 * Don't abort suspend in case of dcp detected by PMIC
 	 */
 
-	if ((test_bit(B_SESS_VLD, &motg->inputs) && !device_bus_suspend) ||
-							sm_work_busy) {
+	if ((test_bit(B_SESS_VLD, &motg->inputs) && !device_bus_suspend &&
+				host_pc_charger) || sm_work_busy) {
 		msm_otg_dbg_log_event(phy, "LPM ENTER ABORTED",
 						motg->inputs, 0);
 		enable_irq(motg->irq);
@@ -1815,8 +1835,11 @@
 	union power_supply_propval pval = {0};
 
 	if (!psy) {
-		dev_err(motg->phy.dev, "no usb power supply registered\n");
-		return -ENODEV;
+		psy = power_supply_get_by_name("usb");
+		if (!psy) {
+			dev_err(motg->phy.dev, "Could not get usb power_supply\n");
+			return -ENODEV;
+		}
 	}
 
 	power_supply_get_property(psy, POWER_SUPPLY_PROP_REAL_TYPE, &pval);
@@ -1824,7 +1847,44 @@
 	return pval.intval;
 }
 
-static void msm_otg_notify_chg_current(struct msm_otg *motg, unsigned int mA)
+static int msm_otg_notify_chg_type(struct msm_otg *motg)
+{
+	static int charger_type;
+	union power_supply_propval propval;
+	int ret = 0;
+	/*
+	 * TODO
+	 * Unify OTG driver charger types and power supply charger types
+	 */
+	if (charger_type == motg->chg_type)
+		return 0;
+
+	if (motg->chg_type == USB_SDP_CHARGER)
+		charger_type = POWER_SUPPLY_TYPE_USB;
+	else if (motg->chg_type == USB_CDP_CHARGER)
+		charger_type = POWER_SUPPLY_TYPE_USB_CDP;
+	else if (motg->chg_type == USB_DCP_CHARGER ||
+			motg->chg_type == USB_NONCOMPLIANT_CHARGER)
+		charger_type = POWER_SUPPLY_TYPE_USB_DCP;
+	else if (motg->chg_type == USB_FLOATED_CHARGER)
+		charger_type = POWER_SUPPLY_TYPE_USB_FLOAT;
+	else
+		charger_type = POWER_SUPPLY_TYPE_UNKNOWN;
+
+	pr_debug("Trying to set usb power supply type %d\n", charger_type);
+
+	propval.intval = charger_type;
+	ret = power_supply_set_property(psy, POWER_SUPPLY_PROP_REAL_TYPE,
+								&propval);
+	if (ret)
+		dev_dbg(motg->phy.dev, "power supply error when setting property\n");
+
+	msm_otg_dbg_log_event(&motg->phy, "SET USB PWR SUPPLY TYPE",
+			motg->chg_type, charger_type);
+	return ret;
+}
+
+static void msm_otg_notify_charger(struct msm_otg *motg, unsigned int mA)
 {
 	struct usb_gadget *g = motg->phy.otg->gadget;
 	union power_supply_propval pval = {0};
@@ -1833,6 +1893,16 @@
 	if (g && g->is_a_peripheral)
 		return;
 
+	dev_dbg(motg->phy.dev, "Requested curr from USB = %u\n", mA);
+
+	psy_type = get_psy_type(motg);
+	if (psy_type == -ENODEV)
+		return;
+
+	if (msm_otg_notify_chg_type(motg))
+		dev_dbg(motg->phy.dev, "Failed notifying %d charger type to PMIC\n",
+							motg->chg_type);
+
 	psy_type = get_psy_type(motg);
 	if (psy_type == POWER_SUPPLY_TYPE_USB_FLOAT) {
 		if (!mA)
@@ -1842,9 +1912,7 @@
 		goto set_prop;
 	}
 
-	dev_dbg(motg->phy.dev, "Requested curr from USB = %u\n", mA);
-
-	if (motg->cur_power == mA || psy_type != POWER_SUPPLY_TYPE_USB)
+	if (motg->cur_power == mA)
 		return;
 
 	dev_info(motg->phy.dev, "Avail curr from USB = %u\n", mA);
@@ -1863,15 +1931,12 @@
 	motg->cur_power = mA;
 }
 
-static void msm_otg_notify_chg_current_work(struct work_struct *w)
+static void msm_otg_notify_charger_work(struct work_struct *w)
 {
 	struct msm_otg *motg = container_of(w,
-				struct msm_otg, notify_chg_current_work);
-	/*
-	 * Gadget driver uses set_power method to notify about the
-	 * available current based on suspend/configured states.
-	 */
-	msm_otg_notify_chg_current(motg, motg->notify_current_mA);
+				struct msm_otg, notify_charger_work);
+
+	msm_otg_notify_charger(motg, motg->notify_current_mA);
 }
 
 static int msm_otg_set_power(struct usb_phy *phy, unsigned int mA)
@@ -1879,7 +1944,14 @@
 	struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
 
 	motg->notify_current_mA = mA;
-	schedule_work(&motg->notify_chg_current_work);
+	/*
+	 * Gadget driver uses set_power method to notify about the
+	 * available current based on suspend/configured states.
+	 */
+	if (motg->chg_type == USB_SDP_CHARGER ||
+	    get_psy_type(motg) == POWER_SUPPLY_TYPE_USB ||
+	    get_psy_type(motg) == POWER_SUPPLY_TYPE_USB_FLOAT)
+		queue_work(motg->otg_wq, &motg->notify_charger_work);
 
 	return 0;
 }
@@ -2280,6 +2352,301 @@
 		return true;
 }
 
+static bool msm_chg_check_secondary_det(struct msm_otg *motg)
+{
+	struct usb_phy *phy = &motg->phy;
+	u32 chg_det;
+
+	chg_det = ulpi_read(phy, 0x87);
+
+	return (chg_det & 1);
+}
+
+static void msm_chg_enable_secondary_det(struct msm_otg *motg)
+{
+	struct usb_phy *phy = &motg->phy;
+
+	/*
+	 * Configure DM as current source, DP as current sink
+	 * and enable battery charging comparators.
+	 */
+	ulpi_write(phy, 0x8, 0x85);
+	ulpi_write(phy, 0x2, 0x85);
+	ulpi_write(phy, 0x1, 0x85);
+}
+
+static bool msm_chg_check_primary_det(struct msm_otg *motg)
+{
+	struct usb_phy *phy = &motg->phy;
+	u32 chg_det;
+	bool ret = false;
+
+	chg_det = ulpi_read(phy, 0x87);
+	ret = chg_det & 1;
+	/* Turn off VDP_SRC */
+	ulpi_write(phy, 0x3, 0x86);
+	msleep(20);
+
+	return ret;
+}
+
+static void msm_chg_enable_primary_det(struct msm_otg *motg)
+{
+	struct usb_phy *phy = &motg->phy;
+
+	/*
+	 * Configure DP as current source, DM as current sink
+	 * and enable battery charging comparators.
+	 */
+	ulpi_write(phy, 0x2, 0x85);
+	ulpi_write(phy, 0x1, 0x85);
+}
+
+static bool msm_chg_check_dcd(struct msm_otg *motg)
+{
+	struct usb_phy *phy = &motg->phy;
+	u32 line_state;
+
+	line_state = ulpi_read(phy, 0x87);
+
+	return line_state & 2;
+}
+
+static void msm_chg_disable_dcd(struct msm_otg *motg)
+{
+	struct usb_phy *phy = &motg->phy;
+
+	ulpi_write(phy, 0x10, 0x86);
+	/*
+	 * Disable the Rdm_down after
+	 * the DCD is completed.
+	 */
+	ulpi_write(phy, 0x04, 0x0C);
+}
+
+static void msm_chg_enable_dcd(struct msm_otg *motg)
+{
+	struct usb_phy *phy = &motg->phy;
+
+	/*
+	 * Idp_src and Rdm_down are de-coupled
+	 * on Femto PHY. If Idp_src alone is
+	 * enabled, DCD timeout is observed with
+	 * wall charger. But a genuine DCD timeout
+	 * may be incorrectly interpreted. Also
+	 * BC1.2 compliance testers expect Rdm_down
+	 * to be enabled during DCD. Enable Rdm_down
+	 * explicitly before enabling the DCD.
+	 */
+	ulpi_write(phy, 0x04, 0x0B);
+	ulpi_write(phy, 0x10, 0x85);
+}
+
+static void msm_chg_block_on(struct msm_otg *motg)
+{
+	struct usb_phy *phy = &motg->phy;
+	u32 func_ctrl;
+
+	/* put the controller in non-driving mode */
+	func_ctrl = ulpi_read(phy, ULPI_FUNC_CTRL);
+	func_ctrl &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
+	func_ctrl |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
+	ulpi_write(phy, func_ctrl, ULPI_FUNC_CTRL);
+
+	/* disable DP and DM pull down resistors */
+	ulpi_write(phy, 0x6, 0xC);
+	/* Clear charger detecting control bits */
+	ulpi_write(phy, 0x1F, 0x86);
+	/* Clear alt interrupt latch and enable bits */
+	ulpi_write(phy, 0x1F, 0x92);
+	ulpi_write(phy, 0x1F, 0x95);
+	udelay(100);
+}
+
+static void msm_chg_block_off(struct msm_otg *motg)
+{
+	struct usb_phy *phy = &motg->phy;
+	u32 func_ctrl;
+
+	/* Clear charger detecting control bits */
+	ulpi_write(phy, 0x3F, 0x86);
+	/* Clear alt interrupt latch and enable bits */
+	ulpi_write(phy, 0x1F, 0x92);
+	ulpi_write(phy, 0x1F, 0x95);
+	/* re-enable DP and DM pull down resistors */
+	ulpi_write(phy, 0x6, 0xB);
+
+	/* put the controller in normal mode */
+	func_ctrl = ulpi_read(phy, ULPI_FUNC_CTRL);
+	func_ctrl &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
+	func_ctrl |= ULPI_FUNC_CTRL_OPMODE_NORMAL;
+	ulpi_write(phy, func_ctrl, ULPI_FUNC_CTRL);
+}
+
+#define MSM_CHG_DCD_TIMEOUT		(750 * HZ/1000) /* 750 msec */
+#define MSM_CHG_DCD_POLL_TIME		(50 * HZ/1000) /* 50 msec */
+#define MSM_CHG_PRIMARY_DET_TIME	(50 * HZ/1000) /* TVDPSRC_ON */
+#define MSM_CHG_SECONDARY_DET_TIME	(50 * HZ/1000) /* TVDMSRC_ON */
+
+static void msm_chg_detect_work(struct work_struct *w)
+{
+	struct msm_otg *motg = container_of(w, struct msm_otg, chg_work.work);
+	struct usb_phy *phy = &motg->phy;
+	bool is_dcd = false, tmout, vout, queue_sm_work = false;
+	static bool dcd;
+	u32 line_state, dm_vlgc;
+	unsigned long delay = 0;
+
+	dev_dbg(phy->dev, "chg detection work\n");
+	msm_otg_dbg_log_event(phy, "CHG DETECTION WORK",
+			motg->chg_state, get_pm_runtime_counter(phy->dev));
+
+	switch (motg->chg_state) {
+	case USB_CHG_STATE_UNDEFINED:
+		pm_runtime_get_sync(phy->dev);
+		msm_chg_block_on(motg);
+	case USB_CHG_STATE_IN_PROGRESS:
+		if (!motg->vbus_state) {
+			motg->chg_state = USB_CHG_STATE_UNDEFINED;
+			motg->chg_type = USB_INVALID_CHARGER;
+			msm_chg_block_off(motg);
+			pm_runtime_put_sync(phy->dev);
+			return;
+		}
+
+		msm_chg_enable_dcd(motg);
+		motg->chg_state = USB_CHG_STATE_WAIT_FOR_DCD;
+		motg->dcd_time = 0;
+		delay = MSM_CHG_DCD_POLL_TIME;
+		break;
+	case USB_CHG_STATE_WAIT_FOR_DCD:
+		if (!motg->vbus_state) {
+			motg->chg_state = USB_CHG_STATE_IN_PROGRESS;
+			break;
+		}
+
+		is_dcd = msm_chg_check_dcd(motg);
+		motg->dcd_time += MSM_CHG_DCD_POLL_TIME;
+		tmout = motg->dcd_time >= MSM_CHG_DCD_TIMEOUT;
+		if (is_dcd || tmout) {
+			if (is_dcd)
+				dcd = true;
+			else
+				dcd = false;
+			msm_chg_disable_dcd(motg);
+			msm_chg_enable_primary_det(motg);
+			delay = MSM_CHG_PRIMARY_DET_TIME;
+			motg->chg_state = USB_CHG_STATE_DCD_DONE;
+		} else {
+			delay = MSM_CHG_DCD_POLL_TIME;
+		}
+		break;
+	case USB_CHG_STATE_DCD_DONE:
+		if (!motg->vbus_state) {
+			motg->chg_state = USB_CHG_STATE_IN_PROGRESS;
+			break;
+		}
+
+		vout = msm_chg_check_primary_det(motg);
+		line_state = readl_relaxed(USB_PORTSC) & PORTSC_LS;
+		dm_vlgc = line_state & PORTSC_LS_DM;
+		if (vout && !dm_vlgc) { /* VDAT_REF < DM < VLGC */
+			if (line_state) { /* DP > VLGC */
+				motg->chg_type = USB_NONCOMPLIANT_CHARGER;
+				motg->chg_state = USB_CHG_STATE_DETECTED;
+			} else {
+				msm_chg_enable_secondary_det(motg);
+				delay = MSM_CHG_SECONDARY_DET_TIME;
+				motg->chg_state = USB_CHG_STATE_PRIMARY_DONE;
+			}
+		} else { /* DM < VDAT_REF || DM > VLGC */
+			if (line_state) /* DP > VLGC or/and DM > VLGC */
+				motg->chg_type = USB_NONCOMPLIANT_CHARGER;
+			else if (!dcd && floated_charger_enable)
+				motg->chg_type = USB_FLOATED_CHARGER;
+			else
+				motg->chg_type = USB_SDP_CHARGER;
+
+			motg->chg_state = USB_CHG_STATE_DETECTED;
+		}
+		break;
+	case USB_CHG_STATE_PRIMARY_DONE:
+		if (!motg->vbus_state) {
+			motg->chg_state = USB_CHG_STATE_IN_PROGRESS;
+			break;
+		}
+
+		vout = msm_chg_check_secondary_det(motg);
+		if (vout)
+			motg->chg_type = USB_DCP_CHARGER;
+		else
+			motg->chg_type = USB_CDP_CHARGER;
+		motg->chg_state = USB_CHG_STATE_SECONDARY_DONE;
+		/* fall through */
+	case USB_CHG_STATE_SECONDARY_DONE:
+		motg->chg_state = USB_CHG_STATE_DETECTED;
+	case USB_CHG_STATE_DETECTED:
+		if (!motg->vbus_state) {
+			motg->chg_state = USB_CHG_STATE_IN_PROGRESS;
+			break;
+		}
+
+		msm_chg_block_off(motg);
+
+		/* Enable VDP_SRC in case of DCP charger */
+		if (motg->chg_type == USB_DCP_CHARGER) {
+			ulpi_write(phy, 0x2, 0x85);
+			msm_otg_notify_charger(motg, dcp_max_current);
+		} else if (motg->chg_type == USB_NONCOMPLIANT_CHARGER)
+			msm_otg_notify_charger(motg, dcp_max_current);
+		else if (motg->chg_type == USB_FLOATED_CHARGER ||
+					motg->chg_type == USB_CDP_CHARGER)
+			msm_otg_notify_charger(motg, IDEV_CHG_MAX);
+
+		msm_otg_dbg_log_event(phy, "CHG WORK PUT: CHG_TYPE",
+			motg->chg_type, get_pm_runtime_counter(phy->dev));
+		/* to match _get at the start of chg_det_work */
+		pm_runtime_mark_last_busy(phy->dev);
+		pm_runtime_put_autosuspend(phy->dev);
+		motg->chg_state = USB_CHG_STATE_QUEUE_SM_WORK;
+		break;
+	case USB_CHG_STATE_QUEUE_SM_WORK:
+		if (!motg->vbus_state) {
+			pm_runtime_get_sync(phy->dev);
+			/* Turn off VDP_SRC if charger is DCP type */
+			if (motg->chg_type == USB_DCP_CHARGER)
+				ulpi_write(phy, 0x2, 0x86);
+
+			motg->chg_state = USB_CHG_STATE_UNDEFINED;
+			if (motg->chg_type == USB_SDP_CHARGER ||
+			    motg->chg_type == USB_CDP_CHARGER)
+				queue_sm_work = true;
+
+			motg->chg_type = USB_INVALID_CHARGER;
+			msm_otg_notify_charger(motg, 0);
+			motg->cur_power = 0;
+			msm_chg_block_off(motg);
+			pm_runtime_mark_last_busy(phy->dev);
+			pm_runtime_put_autosuspend(phy->dev);
+			if (queue_sm_work)
+				queue_work(motg->otg_wq, &motg->sm_work);
+			else
+				return;
+		}
+
+		if (motg->chg_type == USB_CDP_CHARGER ||
+		    motg->chg_type == USB_SDP_CHARGER)
+			queue_work(motg->otg_wq, &motg->sm_work);
+
+		return;
+	default:
+		return;
+	}
+
+	msm_otg_dbg_log_event(phy, "CHG WORK: QUEUE", motg->chg_type, delay);
+	queue_delayed_work(motg->otg_wq, &motg->chg_work, delay);
+}
+
 /*
  * We support OTG, Peripheral only and Host only configurations. In case
  * of OTG, mode switch (host-->peripheral/peripheral-->host) can happen
@@ -2325,10 +2692,17 @@
 				else
 					clear_bit(ID, &motg->inputs);
 			} else if (motg->phy_irq) {
-				if (msm_otg_read_phy_id_state(motg))
+				if (msm_otg_read_phy_id_state(motg)) {
 					set_bit(ID, &motg->inputs);
-				else
+					if (pdata->phy_id_high_as_peripheral)
+						set_bit(B_SESS_VLD,
+								&motg->inputs);
+				} else {
 					clear_bit(ID, &motg->inputs);
+					if (pdata->phy_id_high_as_peripheral)
+						clear_bit(B_SESS_VLD,
+								&motg->inputs);
+				}
 			}
 		}
 		break;
@@ -2374,32 +2748,31 @@
 static void msm_otg_sm_work(struct work_struct *w)
 {
 	struct msm_otg *motg = container_of(w, struct msm_otg, sm_work);
+	struct usb_phy *phy = &motg->phy;
 	struct usb_otg *otg = motg->phy.otg;
 	struct device *dev = otg->usb_phy->dev;
 	bool work = 0;
 	int ret;
 
 	pr_debug("%s work\n", usb_otg_state_string(otg->state));
-	msm_otg_dbg_log_event(&motg->phy, "SM WORK:",
-			otg->state, motg->inputs);
+	msm_otg_dbg_log_event(phy, "SM WORK:", otg->state, motg->inputs);
 
 	/* Just resume h/w if reqd, pm_count is handled based on state/inputs */
 	if (motg->resume_pending) {
-		pm_runtime_get_sync(otg->usb_phy->dev);
+		pm_runtime_get_sync(dev);
 		if (atomic_read(&motg->in_lpm)) {
 			dev_err(dev, "SM WORK: USB is in LPM\n");
-			msm_otg_dbg_log_event(&motg->phy,
-					"SM WORK: USB IS IN LPM",
+			msm_otg_dbg_log_event(phy, "SM WORK: USB IS IN LPM",
 					otg->state, motg->inputs);
 			msm_otg_resume(motg);
 		}
 		motg->resume_pending = false;
-		pm_runtime_put_noidle(otg->usb_phy->dev);
+		pm_runtime_put_noidle(dev);
 	}
 
 	switch (otg->state) {
 	case OTG_STATE_UNDEFINED:
-		pm_runtime_get_sync(otg->usb_phy->dev);
+		pm_runtime_get_sync(dev);
 		msm_otg_reset(otg->usb_phy);
 		/* Add child device only after block reset */
 		ret = of_platform_populate(motg->pdev->dev.of_node, NULL, NULL,
@@ -2411,21 +2784,20 @@
 		otg->state = OTG_STATE_B_IDLE;
 		if (!test_bit(B_SESS_VLD, &motg->inputs) &&
 				test_bit(ID, &motg->inputs)) {
-			msm_otg_dbg_log_event(&motg->phy,
-				"PM RUNTIME: UNDEF PUT",
-				get_pm_runtime_counter(otg->usb_phy->dev), 0);
-			pm_runtime_put_sync(otg->usb_phy->dev);
+			msm_otg_dbg_log_event(phy, "PM RUNTIME: UNDEF PUT",
+				get_pm_runtime_counter(dev), 0);
+			pm_runtime_put_sync(dev);
 			break;
 		}
-		pm_runtime_put(otg->usb_phy->dev);
+		pm_runtime_put(dev);
 		/* FALL THROUGH */
 	case OTG_STATE_B_IDLE:
 		if (!test_bit(ID, &motg->inputs) && otg->host) {
 			pr_debug("!id\n");
-			msm_otg_dbg_log_event(&motg->phy, "!ID",
+			msm_otg_dbg_log_event(phy, "!ID",
 					motg->inputs, otg->state);
 			if (!otg->host) {
-				msm_otg_dbg_log_event(&motg->phy,
+				msm_otg_dbg_log_event(phy,
 					"SM WORK: Host Not Set",
 					otg->state, motg->inputs);
 				break;
@@ -2435,10 +2807,10 @@
 			otg->state = OTG_STATE_A_HOST;
 		} else if (test_bit(B_SESS_VLD, &motg->inputs)) {
 			pr_debug("b_sess_vld\n");
-			msm_otg_dbg_log_event(&motg->phy, "B_SESS_VLD",
+			msm_otg_dbg_log_event(phy, "B_SESS_VLD",
 					motg->inputs, otg->state);
 			if (!otg->gadget) {
-				msm_otg_dbg_log_event(&motg->phy,
+				msm_otg_dbg_log_event(phy,
 					"SM WORK: Gadget Not Set",
 					otg->state, motg->inputs);
 				break;
@@ -2446,25 +2818,24 @@
 
 			if (get_psy_type(motg) == POWER_SUPPLY_TYPE_USB_FLOAT)
 				queue_delayed_work(motg->otg_wq,
-					&motg->sdp_check,
-					msecs_to_jiffies(SDP_CHECK_DELAY_MS));
+				  &motg->sdp_check,
+				  msecs_to_jiffies(SDP_CHECK_DELAY_MS));
 
 			pm_runtime_get_sync(otg->usb_phy->dev);
 			msm_otg_start_peripheral(otg, 1);
 			otg->state = OTG_STATE_B_PERIPHERAL;
 		} else {
 			pr_debug("Cable disconnected\n");
-			msm_otg_dbg_log_event(&motg->phy, "RT: Cable DISC",
-				get_pm_runtime_counter(otg->usb_phy->dev), 0);
-
-			msm_otg_notify_chg_current(motg, 0);
+			msm_otg_dbg_log_event(phy, "RT: Cable DISC",
+				get_pm_runtime_counter(dev), 0);
+			msm_otg_notify_charger(motg, 0);
 		}
 		break;
 	case OTG_STATE_B_PERIPHERAL:
 		if (!test_bit(B_SESS_VLD, &motg->inputs)) {
 			cancel_delayed_work_sync(&motg->sdp_check);
 			msm_otg_start_peripheral(otg, 0);
-			msm_otg_dbg_log_event(&motg->phy, "RT PM: B_PERI A PUT",
+			msm_otg_dbg_log_event(phy, "RT PM: B_PERI A PUT",
 				get_pm_runtime_counter(dev), 0);
 			/* _put for _get done on cable connect in B_IDLE */
 			pm_runtime_mark_last_busy(dev);
@@ -2474,8 +2845,7 @@
 			work = 1;
 		} else if (test_bit(A_BUS_SUSPEND, &motg->inputs)) {
 			pr_debug("a_bus_suspend\n");
-			msm_otg_dbg_log_event(&motg->phy,
-				"BUS_SUSPEND: PM RT PUT",
+			msm_otg_dbg_log_event(phy, "BUS_SUSPEND: PM RT PUT",
 				get_pm_runtime_counter(dev), 0);
 			otg->state = OTG_STATE_B_SUSPEND;
 			/* _get on connect in B_IDLE or host resume in B_SUSP */
@@ -2493,8 +2863,7 @@
 		} else if (!test_bit(A_BUS_SUSPEND, &motg->inputs)) {
 			pr_debug("!a_bus_suspend\n");
 			otg->state = OTG_STATE_B_PERIPHERAL;
-			msm_otg_dbg_log_event(&motg->phy,
-				"BUS_RESUME: PM RT GET",
+			msm_otg_dbg_log_event(phy, "BUS_RESUME: PM RT GET",
 				get_pm_runtime_counter(dev), 0);
 			pm_runtime_get_sync(dev);
 		}
@@ -2611,7 +2980,26 @@
 		else
 			set_bit(ID, &motg->inputs);
 	}
-	msm_otg_kick_sm_work(motg);
+
+	/*
+	 * Enable PHY based charger detection in 2 cases:
+	 * 1. PMI not capable of doing charger detection and provides VBUS
+	 *    notification with UNKNOWN psy type.
+	 * 2. Data lines have been cut off from PMI, in which case it provides
+	 *    VBUS notification with FLOAT psy type and we want to do PHY based
+	 *    charger detection by setting 'chg_detection_for_float_charger'.
+	 */
+	if (test_bit(B_SESS_VLD, &motg->inputs) && !motg->chg_detection) {
+		if ((get_psy_type(motg) == POWER_SUPPLY_TYPE_UNKNOWN) ||
+		    (get_psy_type(motg) == POWER_SUPPLY_TYPE_USB_FLOAT &&
+		     chg_detection_for_float_charger))
+			motg->chg_detection = true;
+	}
+
+	if (motg->chg_detection)
+		queue_delayed_work(motg->otg_wq, &motg->chg_work, 0);
+	else
+		msm_otg_kick_sm_work(motg);
 }
 
 static void msm_id_status_w(struct work_struct *w)
@@ -2637,6 +3025,8 @@
 			gpio_direction_input(motg->pdata->switch_sel_gpio);
 		if (!test_and_set_bit(ID, &motg->inputs)) {
 			pr_debug("ID set\n");
+			if (motg->pdata->phy_id_high_as_peripheral)
+				set_bit(B_SESS_VLD, &motg->inputs);
 			msm_otg_dbg_log_event(&motg->phy, "ID SET",
 					motg->inputs, motg->phy.otg->state);
 			work = 1;
@@ -2646,6 +3036,8 @@
 			gpio_direction_output(motg->pdata->switch_sel_gpio, 1);
 		if (test_and_clear_bit(ID, &motg->inputs)) {
 			pr_debug("ID clear\n");
+			if (motg->pdata->phy_id_high_as_peripheral)
+				clear_bit(B_SESS_VLD, &motg->inputs);
 			msm_otg_dbg_log_event(&motg->phy, "ID CLEAR",
 					motg->inputs, motg->phy.otg->state);
 			work = 1;
@@ -3424,6 +3816,10 @@
 
 	pdata->vbus_low_as_hostmode = of_property_read_bool(node,
 					"qcom,vbus-low-as-hostmode");
+
+	pdata->phy_id_high_as_peripheral = of_property_read_bool(node,
+					"qcom,phy-id-high-as-peripheral");
+
 	return pdata;
 }
 
@@ -3865,11 +4261,11 @@
 	motg->id_state = USB_ID_FLOAT;
 	set_bit(ID, &motg->inputs);
 	INIT_WORK(&motg->sm_work, msm_otg_sm_work);
+	INIT_DELAYED_WORK(&motg->chg_work, msm_chg_detect_work);
 	INIT_DELAYED_WORK(&motg->id_status_work, msm_id_status_w);
 	INIT_DELAYED_WORK(&motg->perf_vote_work, msm_otg_perf_vote_work);
 	INIT_DELAYED_WORK(&motg->sdp_check, check_for_sdp_connection);
-	INIT_WORK(&motg->notify_chg_current_work,
-			 msm_otg_notify_chg_current_work);
+	INIT_WORK(&motg->notify_charger_work, msm_otg_notify_charger_work);
 	motg->otg_wq = alloc_ordered_workqueue("k_otg", 0);
 	if (!motg->otg_wq) {
 		pr_err("%s: Unable to create workqueue otg_wq\n",
@@ -4079,12 +4475,8 @@
 	}
 
 	psy = power_supply_get_by_name("usb");
-	if (!psy) {
-		dev_dbg(&pdev->dev, "Could not get usb power_supply\n");
-		ret = -EPROBE_DEFER;
-		goto otg_remove_devices;
-	}
-
+	if (!psy)
+		dev_warn(&pdev->dev, "Could not get usb power_supply\n");
 
 	ret = msm_otg_extcon_register(motg);
 	if (ret)
@@ -4139,7 +4531,6 @@
 put_psy:
 	if (psy)
 		power_supply_put(psy);
-otg_remove_devices:
 	if (pdev->dev.of_node)
 		msm_otg_setup_devices(pdev, motg->pdata->mode, false);
 remove_cdev:
@@ -4220,12 +4611,13 @@
 	if (psy)
 		power_supply_put(psy);
 	msm_otg_debugfs_cleanup();
+	cancel_delayed_work_sync(&motg->chg_work);
 	cancel_delayed_work_sync(&motg->sdp_check);
 	cancel_delayed_work_sync(&motg->id_status_work);
 	cancel_delayed_work_sync(&motg->perf_vote_work);
 	msm_otg_perf_vote_update(motg, false);
 	cancel_work_sync(&motg->sm_work);
-	cancel_work_sync(&motg->notify_chg_current_work);
+	cancel_work_sync(&motg->notify_charger_work);
 	destroy_workqueue(motg->otg_wq);
 
 	pm_runtime_resume(&pdev->dev);
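The msm_chg_detect_work() state machine added above follows the usual BC1.2 flow: data-contact detect (DCD), then primary detection on D-, then secondary detection on D+ to split DCP from CDP, with the floated-charger case keyed off a DCD timeout. The classification step can be summarized by the self-contained sketch below; the boolean inputs stand in for the ULPI and PORTSC reads and the enum/helper names are illustrative only, not the driver's.

#include <stdbool.h>
#include <stdio.h>

enum chg_type { CHG_SDP, CHG_CDP, CHG_DCP, CHG_FLOAT, CHG_NONCOMPLIANT };

/*
 * dcd            - data contacts detected before the DCD timeout
 * primary_vout   - primary detection comparator (D- above VDAT_REF)
 * dp_high        - D+ above VLGC after primary detection
 * dm_high        - D- above VLGC after primary detection
 * secondary_vout - secondary detection comparator (D+ above VDAT_REF)
 * allow_float    - mirrors the floated_charger_enable module parameter
 */
static enum chg_type classify(bool dcd, bool primary_vout, bool dp_high,
			      bool dm_high, bool secondary_vout,
			      bool allow_float)
{
	if (primary_vout && !dm_high) {
		if (dp_high)		/* D+ above VLGC: proprietary charger */
			return CHG_NONCOMPLIANT;
		/* charging downstream port: secondary det splits DCP from CDP */
		return secondary_vout ? CHG_DCP : CHG_CDP;
	}

	if (dp_high || dm_high)
		return CHG_NONCOMPLIANT;

	if (!dcd && allow_float)	/* DCD timed out with floating data lines */
		return CHG_FLOAT;

	return CHG_SDP;
}

int main(void)
{
	/* contacts found, D- pulled up by the port, D+ low, secondary high -> DCP */
	printf("detected type: %d\n",
	       classify(true, true, false, false, true, true));
	return 0;
}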
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 584ae8c..77c3ebe 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -62,6 +62,7 @@
 		- Fundamental Software dongle.
 		- Google USB serial devices
 		- HP4x calculators
+		- Libtransistor USB console
 		- a number of Motorola phones
 		- Motorola Tetra devices
 		- Novatel Wireless GPS receivers
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 3178d8a..90d7c6e 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -152,6 +152,7 @@
 	{ USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
 	{ USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
 	{ USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
+	{ USB_DEVICE(0x155A, 0x1006) },	/* ELDAT Easywave RX09 */
 	{ USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
 	{ USB_DEVICE(0x166A, 0x0301) }, /* Clipsal 5800PC C-Bus Wireless PC Interface */
 	{ USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
@@ -210,6 +211,7 @@
 	{ USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
 	{ USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
 	{ USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
+	{ USB_DEVICE(0x3923, 0x7A0B) }, /* National Instruments USB Serial Console */
 	{ USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
 	{ } /* Terminating Entry */
 };
@@ -743,7 +745,7 @@
 	unsigned int cflag;
 	struct cp210x_flow_ctl flow_ctl;
 	u32 baud;
-	u16 bits;
+	u16 bits = 0;
 	u32 ctl_hs;
 
 	cp210x_read_u32_reg(port, CP210X_GET_BAUDRATE, &baud);
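The cp210x hunk above initializes bits before it is used; presumably this guards the case where a later register read fails and leaves the output parameter untouched. A minimal illustration of that defensive pattern, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical getter: may fail and leave *val untouched, like a USB control read. */
static int read_line_ctl(uint16_t *val, int fail)
{
	if (fail)
		return -1;	/* on error the caller's buffer is not written */
	*val = 0x0800;
	return 0;
}

int main(void)
{
	uint16_t bits = 0;	/* safe default in case the read below fails */

	if (read_line_ctl(&bits, 1))
		fprintf(stderr, "read failed, using default line control\n");

	printf("bits = 0x%04x\n", bits);
	return 0;
}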
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 0c743e4..2e2f7363 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -773,6 +773,7 @@
 		.driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
 	{ USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
 	{ USB_DEVICE(NOVITUS_VID, NOVITUS_BONO_E_PID) },
+	{ USB_DEVICE(FTDI_VID, RTSYSTEMS_USB_VX8_PID) },
 	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) },
 	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) },
 	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) },
@@ -935,6 +936,7 @@
 	{ USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_CINTERION_MC55I_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_FHE_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) },
 	{ USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
@@ -1909,7 +1911,8 @@
 		return ftdi_jtag_probe(serial);
 
 	if (udev->product &&
-		(!strcmp(udev->product, "BeagleBone/XDS100V2") ||
+		(!strcmp(udev->product, "Arrow USB Blaster") ||
+		 !strcmp(udev->product, "BeagleBone/XDS100V2") ||
 		 !strcmp(udev->product, "SNAP Connect E10")))
 		return ftdi_jtag_probe(serial);
 
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 543d280..76a10b2 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -922,6 +922,9 @@
 /*
  * RT Systems programming cables for various ham radios
  */
+/* This device uses the VID of FTDI */
+#define RTSYSTEMS_USB_VX8_PID   0x9e50  /* USB-VX8 USB to 7 pin modular plug for Yaesu VX-8 radio */
+
 #define RTSYSTEMS_VID		0x2100	/* Vendor ID */
 #define RTSYSTEMS_USB_S03_PID	0x9001	/* RTS-03 USB to Serial Adapter */
 #define RTSYSTEMS_USB_59_PID	0x9e50	/* USB-59 USB to 8 pin plug */
@@ -1441,6 +1444,12 @@
 #define FTDI_CINTERION_MC55I_PID	0xA951
 
 /*
+ * Product: FirmwareHubEmulator
+ * Manufacturer: Harman Becker Automotive Systems
+ */
+#define FTDI_FHE_PID		0xA9A0
+
+/*
  * Product: Comet Caller ID decoder
  * Manufacturer: Crucible Technologies
  */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 1799aa0..d982c45 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -236,6 +236,8 @@
 /* These Quectel products use Qualcomm's vendor ID */
 #define QUECTEL_PRODUCT_UC20			0x9003
 #define QUECTEL_PRODUCT_UC15			0x9090
+/* These u-blox products use Qualcomm's vendor ID */
+#define UBLOX_PRODUCT_R410M			0x90b2
 /* These Yuga products use Qualcomm's vendor ID */
 #define YUGA_PRODUCT_CLM920_NC5			0x9625
 
@@ -244,6 +246,7 @@
 #define QUECTEL_PRODUCT_EC21			0x0121
 #define QUECTEL_PRODUCT_EC25			0x0125
 #define QUECTEL_PRODUCT_BG96			0x0296
+#define QUECTEL_PRODUCT_EP06			0x0306
 
 #define CMOTECH_VENDOR_ID			0x16d8
 #define CMOTECH_PRODUCT_6001			0x6001
@@ -550,147 +553,15 @@
 #define WETELECOM_PRODUCT_6802			0x6802
 #define WETELECOM_PRODUCT_WMD300		0x6803
 
-struct option_blacklist_info {
-	/* bitmask of interface numbers blacklisted for send_setup */
-	const unsigned long sendsetup;
-	/* bitmask of interface numbers that are reserved */
-	const unsigned long reserved;
-};
 
-static const struct option_blacklist_info four_g_w14_blacklist = {
-	.sendsetup = BIT(0) | BIT(1),
-};
+/* Device flags */
 
-static const struct option_blacklist_info four_g_w100_blacklist = {
-	.sendsetup = BIT(1) | BIT(2),
-	.reserved = BIT(3),
-};
+/* Interface does not support modem-control requests */
+#define NCTRL(ifnum)	((BIT(ifnum) & 0xff) << 8)
 
-static const struct option_blacklist_info alcatel_x200_blacklist = {
-	.sendsetup = BIT(0) | BIT(1),
-	.reserved = BIT(4),
-};
+/* Interface is reserved */
+#define RSVD(ifnum)	((BIT(ifnum) & 0xff) << 0)
 
-static const struct option_blacklist_info zte_0037_blacklist = {
-	.sendsetup = BIT(0) | BIT(1),
-};
-
-static const struct option_blacklist_info zte_k3765_z_blacklist = {
-	.sendsetup = BIT(0) | BIT(1) | BIT(2),
-	.reserved = BIT(4),
-};
-
-static const struct option_blacklist_info zte_ad3812_z_blacklist = {
-	.sendsetup = BIT(0) | BIT(1) | BIT(2),
-};
-
-static const struct option_blacklist_info zte_mc2718_z_blacklist = {
-	.sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4),
-};
-
-static const struct option_blacklist_info zte_mc2716_z_blacklist = {
-	.sendsetup = BIT(1) | BIT(2) | BIT(3),
-};
-
-static const struct option_blacklist_info zte_me3620_mbim_blacklist = {
-	.reserved = BIT(2) | BIT(3) | BIT(4),
-};
-
-static const struct option_blacklist_info zte_me3620_xl_blacklist = {
-	.reserved = BIT(3) | BIT(4) | BIT(5),
-};
-
-static const struct option_blacklist_info zte_zm8620_x_blacklist = {
-	.reserved = BIT(3) | BIT(4) | BIT(5),
-};
-
-static const struct option_blacklist_info huawei_cdc12_blacklist = {
-	.reserved = BIT(1) | BIT(2),
-};
-
-static const struct option_blacklist_info net_intf0_blacklist = {
-	.reserved = BIT(0),
-};
-
-static const struct option_blacklist_info net_intf1_blacklist = {
-	.reserved = BIT(1),
-};
-
-static const struct option_blacklist_info net_intf2_blacklist = {
-	.reserved = BIT(2),
-};
-
-static const struct option_blacklist_info net_intf3_blacklist = {
-	.reserved = BIT(3),
-};
-
-static const struct option_blacklist_info net_intf4_blacklist = {
-	.reserved = BIT(4),
-};
-
-static const struct option_blacklist_info net_intf5_blacklist = {
-	.reserved = BIT(5),
-};
-
-static const struct option_blacklist_info net_intf6_blacklist = {
-	.reserved = BIT(6),
-};
-
-static const struct option_blacklist_info zte_mf626_blacklist = {
-	.sendsetup = BIT(0) | BIT(1),
-	.reserved = BIT(4),
-};
-
-static const struct option_blacklist_info zte_1255_blacklist = {
-	.reserved = BIT(3) | BIT(4),
-};
-
-static const struct option_blacklist_info simcom_sim7100e_blacklist = {
-	.reserved = BIT(5) | BIT(6),
-};
-
-static const struct option_blacklist_info telit_me910_blacklist = {
-	.sendsetup = BIT(0),
-	.reserved = BIT(1) | BIT(3),
-};
-
-static const struct option_blacklist_info telit_me910_dual_modem_blacklist = {
-	.sendsetup = BIT(0),
-	.reserved = BIT(3),
-};
-
-static const struct option_blacklist_info telit_le910_blacklist = {
-	.sendsetup = BIT(0),
-	.reserved = BIT(1) | BIT(2),
-};
-
-static const struct option_blacklist_info telit_le920_blacklist = {
-	.sendsetup = BIT(0),
-	.reserved = BIT(1) | BIT(5),
-};
-
-static const struct option_blacklist_info telit_le920a4_blacklist_1 = {
-	.sendsetup = BIT(0),
-	.reserved = BIT(1),
-};
-
-static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = {
-	.sendsetup = BIT(2),
-	.reserved = BIT(0) | BIT(1) | BIT(3),
-};
-
-static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = {
-	.sendsetup = BIT(0),
-	.reserved = BIT(1) | BIT(2) | BIT(3),
-};
-
-static const struct option_blacklist_info cinterion_rmnet2_blacklist = {
-	.reserved = BIT(4) | BIT(5),
-};
-
-static const struct option_blacklist_info yuga_clm920_nc5_blacklist = {
-	.reserved = BIT(1) | BIT(4),
-};
 
 static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
@@ -724,26 +595,26 @@
 	{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) },
 	{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) },
 	{ USB_DEVICE(QUANTA_VENDOR_ID, 0xea42),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c05, USB_CLASS_COMM, 0x02, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c1f, USB_CLASS_COMM, 0x02, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t) &net_intf1_blacklist },
+	  .driver_info = RSVD(1) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S6, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t) &net_intf1_blacklist },
+	  .driver_info = RSVD(1) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t) &net_intf2_blacklist },
+	  .driver_info = RSVD(2) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+	  .driver_info = RSVD(1) | RSVD(2) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+	  .driver_info = RSVD(1) | RSVD(2) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x14ac, 0xff, 0xff, 0xff),	/* Huawei E1820 */
-		.driver_info = (kernel_ulong_t) &net_intf1_blacklist },
+	  .driver_info = RSVD(1) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+	  .driver_info = RSVD(1) | RSVD(2) },
 	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) },
 	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x01) },
 	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x02) },
@@ -1188,65 +1059,70 @@
 	{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
 	{ USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
-	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+	  .driver_info = RSVD(3) },
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
 	/* Quectel products using Qualcomm vendor ID */
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	/* Yuga products use Qualcomm vendor ID */
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5),
-	  .driver_info = (kernel_ulong_t)&yuga_clm920_nc5_blacklist },
+	  .driver_info = RSVD(1) | RSVD(4) },
+	/* u-blox products using Qualcomm vendor ID */
+	{ USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M),
+	  .driver_info = RSVD(1) | RSVD(3) },
 	/* Quectel products using Quectel vendor ID */
 	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
+	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06),
+	  .driver_info = RSVD(4) | RSVD(5) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
-	  .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+	  .driver_info = RSVD(0) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6004) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6005) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_628A) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHE_628S),
-	  .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+	  .driver_info = RSVD(0) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_301),
-	  .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+	  .driver_info = RSVD(0) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628),
-	  .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+	  .driver_info = RSVD(0) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628S) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_680) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_685A) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720S),
-	  .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+	  .driver_info = RSVD(0) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7002),
-	  .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+	  .driver_info = RSVD(0) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629K),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7004),
-	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+	  .driver_info = RSVD(3) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7005) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_629),
-	  .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+	  .driver_info = RSVD(5) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629S),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720I),
-	  .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+	  .driver_info = RSVD(0) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7212),
-	  .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+	  .driver_info = RSVD(0) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7213),
-	  .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+	  .driver_info = RSVD(0) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7251),
-	  .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+	  .driver_info = RSVD(1) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7252),
-	  .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+	  .driver_info = RSVD(1) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7253),
-	  .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+	  .driver_info = RSVD(1) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },
@@ -1254,38 +1130,38 @@
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
-		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+	  .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
-		.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG2),
-		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
-		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
-		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+	  .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
-		.driver_info = (kernel_ulong_t)&telit_me910_blacklist },
+	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
-		.driver_info = (kernel_ulong_t)&telit_me910_dual_modem_blacklist },
+	  .driver_info = NCTRL(0) | RSVD(3) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
-		.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
-		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
-		.driver_info = (kernel_ulong_t)&telit_le920_blacklist },
+	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(5) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1207) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1208),
-		.driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
+	  .driver_info = NCTRL(0) | RSVD(1) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1211),
-		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1212),
-		.driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
+	  .driver_info = NCTRL(0) | RSVD(1) },
 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
-		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+	  .driver_info = RSVD(1) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0003, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0004, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0005, 0xff, 0xff, 0xff) },
@@ -1301,58 +1177,58 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0010, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+	  .driver_info = RSVD(1) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+	  .driver_info = RSVD(3) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+	  .driver_info = RSVD(3) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0022, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+	  .driver_info = RSVD(1) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff,
-	  0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_mf626_blacklist },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff),
+	  .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0034, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&zte_0037_blacklist },
+	  .driver_info = NCTRL(0) | NCTRL(1) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0038, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0040, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0044, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+	  .driver_info = RSVD(5) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0050, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+	  .driver_info = RSVD(1) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0056, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0065, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) },
@@ -1377,26 +1253,26 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0096, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0097, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0105, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+	  .driver_info = RSVD(5) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+	  .driver_info = RSVD(5) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+	  .driver_info = RSVD(5) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+	  .driver_info = RSVD(5) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+	  .driver_info = RSVD(6) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+	  .driver_info = RSVD(5) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0135, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0136, 0xff, 0xff, 0xff) },
@@ -1412,50 +1288,50 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+	  .driver_info = RSVD(5) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+	  .driver_info = RSVD(3) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0164, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0196, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0197, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0199, 0xff, 0xff, 0xff), /* ZTE MF820S */
-	  .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+	  .driver_info = RSVD(1) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0200, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0201, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */
-	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+	  .driver_info = RSVD(3) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff), /* ONDA MT8205 */
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff), /* ZTE MF880 */
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0412, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G */
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1018, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1021, 0xff, 0xff, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+	  .driver_info = RSVD(2) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) },
@@ -1572,23 +1448,23 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff),
-	  .driver_info = (kernel_ulong_t)&zte_1255_blacklist },
+	  .driver_info = RSVD(3) | RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) },
@@ -1603,7 +1479,7 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+	  .driver_info = RSVD(5) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
@@ -1639,17 +1515,17 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1303, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1333, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+	  .driver_info = RSVD(2) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+	  .driver_info = RSVD(2) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1424, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+	  .driver_info = RSVD(2) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1425, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+	  .driver_info = RSVD(2) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff),  /* ZTE MF91 */
-		.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+	  .driver_info = RSVD(2) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff),  /* Telewell TW-LTE 4G v2 */
-		.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+	  .driver_info = RSVD(2) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1667,8 +1543,8 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1596, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1598, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1600, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
-	  0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff),
+	  .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
 
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
@@ -1679,20 +1555,20 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+	  .driver_info = RSVD(1) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+	  .driver_info = RSVD(3) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+	  .driver_info = RSVD(5) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+	  .driver_info = RSVD(3) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+	  .driver_info = RSVD(3) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff42, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff43, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff44, 0xff, 0xff, 0xff) },
@@ -1844,19 +1720,19 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff),
-	 .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist },
+	 .driver_info = NCTRL(1) | NCTRL(2) | NCTRL(3) | NCTRL(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff),
-	 .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
+	 .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
-	 .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
+	 .driver_info = NCTRL(1) | NCTRL(2) | NCTRL(3) },
 	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L),
-	 .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+	 .driver_info = RSVD(3) | RSVD(4) | RSVD(5) },
 	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM),
-	 .driver_info = (kernel_ulong_t)&zte_me3620_mbim_blacklist },
+	 .driver_info = RSVD(2) | RSVD(3) | RSVD(4) },
 	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X),
-	 .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+	 .driver_info = RSVD(3) | RSVD(4) | RSVD(5) },
 	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X),
-	 .driver_info = (kernel_ulong_t)&zte_zm8620_x_blacklist },
+	 .driver_info = RSVD(3) | RSVD(4) | RSVD(5) },
 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
@@ -1876,37 +1752,34 @@
 	{ USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
-	  .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist },
+	  .driver_info = RSVD(5) | RSVD(6) },
 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
-	  .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
-	},
+	  .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
-	  .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+	  .driver_info = RSVD(6) },
 	{ USB_DEVICE(ALCATEL_VENDOR_ID, 0x0052),
-	  .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+	  .driver_info = RSVD(6) },
 	{ USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b6),
-	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+	  .driver_info = RSVD(3) },
 	{ USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b7),
-	  .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+	  .driver_info = RSVD(5) },
 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L800MA),
-	  .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+	  .driver_info = RSVD(2) },
 	{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
 	{ USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
-  	  .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
-  	},
+	  .driver_info = NCTRL(0) | NCTRL(1) },
 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
-	  .driver_info = (kernel_ulong_t)&four_g_w100_blacklist
-	},
+	  .driver_info = NCTRL(1) | NCTRL(2) | RSVD(3) },
 	{USB_DEVICE(LONGCHEER_VENDOR_ID, FUJISOFT_PRODUCT_FS040U),
-	 .driver_info = (kernel_ulong_t)&net_intf3_blacklist},
+	 .driver_info = RSVD(3)},
 	{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
 	{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+	  .driver_info = RSVD(3) },
 	{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
 	{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
@@ -1932,14 +1805,14 @@
 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_2RMNET, 0xff),
-		.driver_info = (kernel_ulong_t)&cinterion_rmnet2_blacklist },
+	  .driver_info = RSVD(4) | RSVD(5) },
 	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_AUDIO, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) },
 	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
@@ -1949,20 +1822,20 @@
 	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
 	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD140),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD155),
-		.driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+	  .driver_info = RSVD(6) },
 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
-		.driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+	  .driver_info = RSVD(6) },
 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD160),
-		.driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+	  .driver_info = RSVD(6) },
 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
 	{ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
 	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
@@ -2039,9 +1912,9 @@
 	{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) },	/* TP-Link LTE Module */
 	{ USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE(TPLINK_VENDOR_ID, 0x9000),					/* TP-Link MA260 */
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x02, 0x01) },	/* D-Link DWM-156 (variant) */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x00, 0x00) },	/* D-Link DWM-156 (variant) */
@@ -2052,9 +1925,9 @@
 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) },			/* D-Link DWM-158 */
 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d0e, 0xff) },			/* D-Link DWM-157 C1 */
 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff),			/* D-Link DWM-221 B1 */
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff),			/* D-Link DWM-222 */
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
@@ -2114,7 +1987,7 @@
 	struct usb_interface_descriptor *iface_desc =
 				&serial->interface->cur_altsetting->desc;
 	struct usb_device_descriptor *dev_desc = &serial->dev->descriptor;
-	const struct option_blacklist_info *blacklist;
+	unsigned long device_flags = id->driver_info;
 
 	/* Never bind to the CD-Rom emulation interface	*/
 	if (iface_desc->bInterfaceClass == 0x08)
@@ -2125,9 +1998,7 @@
 	 * the same class/subclass/protocol as the serial interfaces.  Look at
 	 * the Windows driver .INF files for reserved interface numbers.
 	 */
-	blacklist = (void *)id->driver_info;
-	if (blacklist && test_bit(iface_desc->bInterfaceNumber,
-						&blacklist->reserved))
+	if (device_flags & RSVD(iface_desc->bInterfaceNumber))
 		return -ENODEV;
 	/*
 	 * Don't bind network interface on Samsung GT-B3730, it is handled by
@@ -2138,8 +2009,8 @@
 	    iface_desc->bInterfaceClass != USB_CLASS_CDC_DATA)
 		return -ENODEV;
 
-	/* Store the blacklist info so we can use it during attach. */
-	usb_set_serial_data(serial, (void *)blacklist);
+	/* Store the device flags so we can use them during attach. */
+	usb_set_serial_data(serial, (void *)device_flags);
 
 	return 0;
 }
@@ -2147,22 +2018,21 @@
 static int option_attach(struct usb_serial *serial)
 {
 	struct usb_interface_descriptor *iface_desc;
-	const struct option_blacklist_info *blacklist;
 	struct usb_wwan_intf_private *data;
+	unsigned long device_flags;
 
 	data = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
-	/* Retrieve blacklist info stored at probe. */
-	blacklist = usb_get_serial_data(serial);
+	/* Retrieve device flags stored at probe. */
+	device_flags = (unsigned long)usb_get_serial_data(serial);
 
 	iface_desc = &serial->interface->cur_altsetting->desc;
 
-	if (!blacklist || !test_bit(iface_desc->bInterfaceNumber,
-						&blacklist->sendsetup)) {
+	if (!(device_flags & NCTRL(iface_desc->bInterfaceNumber)))
 		data->use_send_setup = 1;
-	}
+
 	spin_lock_init(&data->susp_lock);
 
 	usb_set_serial_data(serial, data);
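
The option.c hunks above drop the per-device option_blacklist_info structures and pack the same information into driver_info as bit flags: one group of bits marks interfaces the driver must never bind (checked in option_probe()), another marks interfaces that must not receive modem-control requests (checked in option_attach()). The RSVD()/NCTRL() macros themselves live earlier in option.c and are outside this excerpt; the standalone sketch below only assumes a plausible encoding -- low byte for "reserved", next byte for "no send_setup" -- consistent with those checks.

/*
 * Hedged sketch only: the real RSVD()/NCTRL() macros are defined earlier in
 * option.c and are not shown in this excerpt.  This assumes a plausible
 * layout that matches the probe/attach checks above.
 */
#include <stdio.h>

#define BIT(n)		(1UL << (n))
#define RSVD(ifnum)	((BIT(ifnum) & 0xff) << 0)	/* do not bind this interface */
#define NCTRL(ifnum)	((BIT(ifnum) & 0xff) << 8)	/* no send_setup on this interface */

int main(void)
{
	/* e.g. the 0x2002 entry above: NCTRL(0) | NCTRL(1) | NCTRL(2) | RSVD(4) */
	unsigned long device_flags = NCTRL(0) | NCTRL(1) | NCTRL(2) | RSVD(4);
	int ifnum;

	for (ifnum = 0; ifnum < 6; ifnum++)
		printf("interface %d: reserved=%d no-ctrl=%d\n", ifnum,
		       !!(device_flags & RSVD(ifnum)),
		       !!(device_flags & NCTRL(ifnum)));
	return 0;
}

The loop prints which of the first six interfaces the flags would exclude from binding and from modem-control requests, mirroring the two tests in the probe and attach paths above.
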
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 6aa7ff2..2674da4 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -66,6 +66,11 @@
 					0x01) }
 DEVICE(google, GOOGLE_IDS);
 
+/* Libtransistor USB console */
+#define LIBTRANSISTOR_IDS()			\
+	{ USB_DEVICE(0x1209, 0x8b00) }
+DEVICE(libtransistor, LIBTRANSISTOR_IDS);
+
 /* ViVOpay USB Serial Driver */
 #define VIVOPAY_IDS()			\
 	{ USB_DEVICE(0x1d5f, 0x1004) }	/* ViVOpay 8800 */
@@ -113,6 +118,7 @@
 	&funsoft_device,
 	&flashloader_device,
 	&google_device,
+	&libtransistor_device,
 	&vivopay_device,
 	&moto_modem_device,
 	&motorola_tetra_device,
@@ -129,6 +135,7 @@
 	FUNSOFT_IDS(),
 	FLASHLOADER_IDS(),
 	GOOGLE_IDS(),
+	LIBTRANSISTOR_IDS(),
 	VIVOPAY_IDS(),
 	MOTO_IDS(),
 	MOTOROLA_TETRA_IDS(),
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 337a0be..dbc3801 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -338,47 +338,48 @@
 		goto exit;
 	}
 
-	if (retval == sizeof(*connection_info)) {
-			connection_info = (struct visor_connection_info *)
-							transfer_buffer;
-
-		num_ports = le16_to_cpu(connection_info->num_ports);
-		for (i = 0; i < num_ports; ++i) {
-			switch (
-			   connection_info->connections[i].port_function_id) {
-			case VISOR_FUNCTION_GENERIC:
-				string = "Generic";
-				break;
-			case VISOR_FUNCTION_DEBUGGER:
-				string = "Debugger";
-				break;
-			case VISOR_FUNCTION_HOTSYNC:
-				string = "HotSync";
-				break;
-			case VISOR_FUNCTION_CONSOLE:
-				string = "Console";
-				break;
-			case VISOR_FUNCTION_REMOTE_FILE_SYS:
-				string = "Remote File System";
-				break;
-			default:
-				string = "unknown";
-				break;
-			}
-			dev_info(dev, "%s: port %d, is for %s use\n",
-				serial->type->description,
-				connection_info->connections[i].port, string);
-		}
+	if (retval != sizeof(*connection_info)) {
+		dev_err(dev, "Invalid connection information received from device\n");
+		retval = -ENODEV;
+		goto exit;
 	}
-	/*
-	* Handle devices that report invalid stuff here.
-	*/
+
+	connection_info = (struct visor_connection_info *)transfer_buffer;
+
+	num_ports = le16_to_cpu(connection_info->num_ports);
+
+	/* Handle devices that report invalid stuff here. */
 	if (num_ports == 0 || num_ports > 2) {
 		dev_warn(dev, "%s: No valid connect info available\n",
 			serial->type->description);
 		num_ports = 2;
 	}
 
+	for (i = 0; i < num_ports; ++i) {
+		switch (connection_info->connections[i].port_function_id) {
+		case VISOR_FUNCTION_GENERIC:
+			string = "Generic";
+			break;
+		case VISOR_FUNCTION_DEBUGGER:
+			string = "Debugger";
+			break;
+		case VISOR_FUNCTION_HOTSYNC:
+			string = "HotSync";
+			break;
+		case VISOR_FUNCTION_CONSOLE:
+			string = "Console";
+			break;
+		case VISOR_FUNCTION_REMOTE_FILE_SYS:
+			string = "Remote File System";
+			break;
+		default:
+			string = "unknown";
+			break;
+		}
+		dev_info(dev, "%s: port %d, is for %s use\n",
+			serial->type->description,
+			connection_info->connections[i].port, string);
+	}
 	dev_info(dev, "%s: Number of ports: %d\n", serial->type->description,
 		num_ports);
 
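
The visor.c rework above first rejects a transfer that is not exactly sizeof(*connection_info), then clamps num_ports to the two-entry connections[] array before the reporting loop indexes it, instead of walking the device-supplied count first. A standalone sketch of that validate-then-iterate shape, with a hypothetical struct layout:

/*
 * Illustration only: hypothetical struct layout, not the driver's
 * visor_connection_info.  Validate the reported length, clamp the count to
 * the fixed-size array, and only then walk it.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct connection_info {
	uint16_t num_ports;
	struct {
		uint8_t port_function_id;
		uint8_t port;
	} connections[2];
};

static int parse(const void *buf, size_t len)
{
	const struct connection_info *info = buf;
	unsigned int num_ports, i;

	if (len != sizeof(*info))		/* reject short transfers first */
		return -1;

	num_ports = info->num_ports;
	if (num_ports == 0 || num_ports > 2)	/* clamp before indexing */
		num_ports = 2;

	for (i = 0; i < num_ports; i++)
		printf("port %u is for function %u\n",
		       (unsigned int)info->connections[i].port,
		       (unsigned int)info->connections[i].port_function_id);
	return 0;
}

int main(void)
{
	/* A device reporting a bogus count of 9 is still walked safely. */
	struct connection_info info = {
		.num_ports = 9,
		.connections = { { 1, 0 }, { 2, 1 } },
	};

	return parse(&info, sizeof(info));
}
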
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 4340b49..4d6eb48 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -1942,6 +1942,8 @@
 	bcb->CDB[0] = 0xEF;
 
 	result = ene_send_scsi_cmd(us, FDIR_WRITE, buf, 0);
+	if (us->srb != NULL)
+		scsi_set_resid(us->srb, 0);
 	info->BIN_FLAG = flag;
 	kfree(buf);
 
@@ -2295,21 +2297,22 @@
 
 static int ene_transport(struct scsi_cmnd *srb, struct us_data *us)
 {
-	int result = 0;
+	int result = USB_STOR_XFER_GOOD;
 	struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);
 
 	/*US_DEBUG(usb_stor_show_command(us, srb)); */
 	scsi_set_resid(srb, 0);
-	if (unlikely(!(info->SD_Status.Ready || info->MS_Status.Ready))) {
+	if (unlikely(!(info->SD_Status.Ready || info->MS_Status.Ready)))
 		result = ene_init(us);
-	} else {
+	if (result == USB_STOR_XFER_GOOD) {
+		result = USB_STOR_TRANSPORT_ERROR;
 		if (info->SD_Status.Ready)
 			result = sd_scsi_irp(us, srb);
 
 		if (info->MS_Status.Ready)
 			result = ms_scsi_irp(us, srb);
 	}
-	return 0;
+	return result;
 }
 
 static struct scsi_host_template ene_ub6250_host_template;
diff --git a/drivers/usb/usbip/stub.h b/drivers/usb/usbip/stub.h
index 910f027..84c0599 100644
--- a/drivers/usb/usbip/stub.h
+++ b/drivers/usb/usbip/stub.h
@@ -87,6 +87,7 @@
 	struct stub_device *sdev;
 	struct usb_device *udev;
 	char shutdown_busid;
+	spinlock_t busid_lock;
 };
 
 /* stub_priv is allocated from stub_priv_cache */
@@ -97,6 +98,7 @@
 
 /* stub_main.c */
 struct bus_id_priv *get_busid_priv(const char *busid);
+void put_busid_priv(struct bus_id_priv *bid);
 int del_match_busid(char *busid);
 void stub_device_cleanup_urbs(struct stub_device *sdev);
 
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index 3550224..8e629b6 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -314,9 +314,9 @@
 	struct stub_device *sdev = NULL;
 	const char *udev_busid = dev_name(&udev->dev);
 	struct bus_id_priv *busid_priv;
-	int rc;
+	int rc = 0;
 
-	dev_dbg(&udev->dev, "Enter\n");
+	dev_dbg(&udev->dev, "Enter probe\n");
 
 	/* check we should claim or not by busid_table */
 	busid_priv = get_busid_priv(udev_busid);
@@ -331,13 +331,15 @@
 		 * other matched drivers by the driver core.
 		 * See driver_probe_device() in driver/base/dd.c
 		 */
-		return -ENODEV;
+		rc = -ENODEV;
+		goto call_put_busid_priv;
 	}
 
 	if (udev->descriptor.bDeviceClass == USB_CLASS_HUB) {
 		dev_dbg(&udev->dev, "%s is a usb hub device... skip!\n",
 			 udev_busid);
-		return -ENODEV;
+		rc = -ENODEV;
+		goto call_put_busid_priv;
 	}
 
 	if (!strcmp(udev->bus->bus_name, "vhci_hcd")) {
@@ -345,13 +347,16 @@
 			"%s is attached on vhci_hcd... skip!\n",
 			udev_busid);
 
-		return -ENODEV;
+		rc = -ENODEV;
+		goto call_put_busid_priv;
 	}
 
 	/* ok, this is my device */
 	sdev = stub_device_alloc(udev);
-	if (!sdev)
-		return -ENOMEM;
+	if (!sdev) {
+		rc = -ENOMEM;
+		goto call_put_busid_priv;
+	}
 
 	dev_info(&udev->dev,
 		"usbip-host: register new device (bus %u dev %u)\n",
@@ -383,7 +388,9 @@
 	}
 	busid_priv->status = STUB_BUSID_ALLOC;
 
-	return 0;
+	rc = 0;
+	goto call_put_busid_priv;
+
 err_files:
 	usb_hub_release_port(udev->parent, udev->portnum,
 			     (struct usb_dev_state *) udev);
@@ -393,6 +400,9 @@
 
 	busid_priv->sdev = NULL;
 	stub_device_free(sdev);
+
+call_put_busid_priv:
+	put_busid_priv(busid_priv);
 	return rc;
 }
 
@@ -418,7 +428,7 @@
 	struct bus_id_priv *busid_priv;
 	int rc;
 
-	dev_dbg(&udev->dev, "Enter\n");
+	dev_dbg(&udev->dev, "Enter disconnect\n");
 
 	busid_priv = get_busid_priv(udev_busid);
 	if (!busid_priv) {
@@ -431,7 +441,7 @@
 	/* get stub_device */
 	if (!sdev) {
 		dev_err(&udev->dev, "could not get device");
-		return;
+		goto call_put_busid_priv;
 	}
 
 	dev_set_drvdata(&udev->dev, NULL);
@@ -446,12 +456,12 @@
 				  (struct usb_dev_state *) udev);
 	if (rc) {
 		dev_dbg(&udev->dev, "unable to release port\n");
-		return;
+		goto call_put_busid_priv;
 	}
 
 	/* If usb reset is called from event handler */
 	if (usbip_in_eh(current))
-		return;
+		goto call_put_busid_priv;
 
 	/* shutdown the current connection */
 	shutdown_busid(busid_priv);
@@ -462,12 +472,11 @@
 	busid_priv->sdev = NULL;
 	stub_device_free(sdev);
 
-	if (busid_priv->status == STUB_BUSID_ALLOC) {
+	if (busid_priv->status == STUB_BUSID_ALLOC)
 		busid_priv->status = STUB_BUSID_ADDED;
-	} else {
-		busid_priv->status = STUB_BUSID_OTHER;
-		del_match_busid((char *)udev_busid);
-	}
+
+call_put_busid_priv:
+	put_busid_priv(busid_priv);
 }
 
 #ifdef CONFIG_PM
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index 325b4c0..fa90496 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -28,6 +28,7 @@
 #define DRIVER_DESC "USB/IP Host Driver"
 
 struct kmem_cache *stub_priv_cache;
+
 /*
  * busid_tables defines matching busids that usbip can grab. A user can change
  * dynamically what device is locally used and what device is exported to a
@@ -39,6 +40,8 @@
 
 static void init_busid_table(void)
 {
+	int i;
+
 	/*
 	 * This also sets the bus_table[i].status to
 	 * STUB_BUSID_OTHER, which is 0.
@@ -46,6 +49,9 @@
 	memset(busid_table, 0, sizeof(busid_table));
 
 	spin_lock_init(&busid_table_lock);
+
+	for (i = 0; i < MAX_BUSID; i++)
+		spin_lock_init(&busid_table[i].busid_lock);
 }
 
 /*
@@ -57,15 +63,20 @@
 	int i;
 	int idx = -1;
 
-	for (i = 0; i < MAX_BUSID; i++)
+	for (i = 0; i < MAX_BUSID; i++) {
+		spin_lock(&busid_table[i].busid_lock);
 		if (busid_table[i].name[0])
 			if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) {
 				idx = i;
+				spin_unlock(&busid_table[i].busid_lock);
 				break;
 			}
+		spin_unlock(&busid_table[i].busid_lock);
+	}
 	return idx;
 }
 
+/* Returns holding busid_lock. Should call put_busid_priv() to unlock */
 struct bus_id_priv *get_busid_priv(const char *busid)
 {
 	int idx;
@@ -73,13 +84,22 @@
 
 	spin_lock(&busid_table_lock);
 	idx = get_busid_idx(busid);
-	if (idx >= 0)
+	if (idx >= 0) {
 		bid = &(busid_table[idx]);
+		/* get busid_lock before returning */
+		spin_lock(&bid->busid_lock);
+	}
 	spin_unlock(&busid_table_lock);
 
 	return bid;
 }
 
+void put_busid_priv(struct bus_id_priv *bid)
+{
+	if (bid)
+		spin_unlock(&bid->busid_lock);
+}
+
 static int add_match_busid(char *busid)
 {
 	int i;
@@ -92,15 +112,19 @@
 		goto out;
 	}
 
-	for (i = 0; i < MAX_BUSID; i++)
+	for (i = 0; i < MAX_BUSID; i++) {
+		spin_lock(&busid_table[i].busid_lock);
 		if (!busid_table[i].name[0]) {
 			strlcpy(busid_table[i].name, busid, BUSID_SIZE);
 			if ((busid_table[i].status != STUB_BUSID_ALLOC) &&
 			    (busid_table[i].status != STUB_BUSID_REMOV))
 				busid_table[i].status = STUB_BUSID_ADDED;
 			ret = 0;
+			spin_unlock(&busid_table[i].busid_lock);
 			break;
 		}
+		spin_unlock(&busid_table[i].busid_lock);
+	}
 
 out:
 	spin_unlock(&busid_table_lock);
@@ -121,6 +145,8 @@
 	/* found */
 	ret = 0;
 
+	spin_lock(&busid_table[idx].busid_lock);
+
 	if (busid_table[idx].status == STUB_BUSID_OTHER)
 		memset(busid_table[idx].name, 0, BUSID_SIZE);
 
@@ -128,6 +154,7 @@
 	    (busid_table[idx].status != STUB_BUSID_ADDED))
 		busid_table[idx].status = STUB_BUSID_REMOV;
 
+	spin_unlock(&busid_table[idx].busid_lock);
 out:
 	spin_unlock(&busid_table_lock);
 
@@ -140,9 +167,12 @@
 	char *out = buf;
 
 	spin_lock(&busid_table_lock);
-	for (i = 0; i < MAX_BUSID; i++)
+	for (i = 0; i < MAX_BUSID; i++) {
+		spin_lock(&busid_table[i].busid_lock);
 		if (busid_table[i].name[0])
 			out += sprintf(out, "%s ", busid_table[i].name);
+		spin_unlock(&busid_table[i].busid_lock);
+	}
 	spin_unlock(&busid_table_lock);
 	out += sprintf(out, "\n");
 
@@ -184,6 +214,51 @@
 static DRIVER_ATTR(match_busid, S_IRUSR | S_IWUSR, show_match_busid,
 		   store_match_busid);
 
+static int do_rebind(char *busid, struct bus_id_priv *busid_priv)
+{
+	int ret;
+
+	/* device_attach() callers should hold parent lock for USB */
+	if (busid_priv->udev->dev.parent)
+		device_lock(busid_priv->udev->dev.parent);
+	ret = device_attach(&busid_priv->udev->dev);
+	if (busid_priv->udev->dev.parent)
+		device_unlock(busid_priv->udev->dev.parent);
+	if (ret < 0) {
+		dev_err(&busid_priv->udev->dev, "rebind failed\n");
+		return ret;
+	}
+	return 0;
+}
+
+static void stub_device_rebind(void)
+{
+#if IS_MODULE(CONFIG_USBIP_HOST)
+	struct bus_id_priv *busid_priv;
+	int i;
+
+	/* update status to STUB_BUSID_OTHER so probe ignores the device */
+	spin_lock(&busid_table_lock);
+	for (i = 0; i < MAX_BUSID; i++) {
+		if (busid_table[i].name[0] &&
+		    busid_table[i].shutdown_busid) {
+			busid_priv = &(busid_table[i]);
+			busid_priv->status = STUB_BUSID_OTHER;
+		}
+	}
+	spin_unlock(&busid_table_lock);
+
+	/* now run rebind - no need to hold locks. driver files are removed */
+	for (i = 0; i < MAX_BUSID; i++) {
+		if (busid_table[i].name[0] &&
+		    busid_table[i].shutdown_busid) {
+			busid_priv = &(busid_table[i]);
+			do_rebind(busid_table[i].name, busid_priv);
+		}
+	}
+#endif
+}
+
 static ssize_t rebind_store(struct device_driver *dev, const char *buf,
 				 size_t count)
 {
@@ -201,11 +276,17 @@
 	if (!bid)
 		return -ENODEV;
 
-	ret = device_attach(&bid->udev->dev);
-	if (ret < 0) {
-		dev_err(&bid->udev->dev, "rebind failed\n");
+	/* mark the device for deletion so probe ignores it during rescan */
+	bid->status = STUB_BUSID_OTHER;
+	/* release the busid lock */
+	put_busid_priv(bid);
+
+	ret = do_rebind((char *) buf, bid);
+	if (ret < 0)
 		return ret;
-	}
+
+	/* delete device from busid_table */
+	del_match_busid((char *) buf);
 
 	return count;
 }
@@ -328,6 +409,9 @@
 	 */
 	usb_deregister_device_driver(&stub_driver);
 
+	/* initiate scan to attach devices */
+	stub_device_rebind();
+
 	kmem_cache_destroy(stub_priv_cache);
 }
 
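
In the usbip hunks above, get_busid_priv() now returns the table entry with its new per-entry busid_lock held, and every exit path in stub_probe() and stub_disconnect() funnels through put_busid_priv(); rebind_store() likewise drops the lock before calling do_rebind(). A userspace analogue of that lookup-returns-locked discipline:

/*
 * Userspace analogue of the pattern added above; pthread mutexes stand in
 * for the kernel spinlocks and the names mirror the driver purely for
 * readability.  Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define MAX_BUSID 4

struct bus_id_priv {
	char name[32];
	pthread_mutex_t busid_lock;
};

static struct bus_id_priv busid_table[MAX_BUSID];
static pthread_mutex_t busid_table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns with the entry's busid_lock held (or NULL); pair with put_busid_priv(). */
static struct bus_id_priv *get_busid_priv(const char *busid)
{
	struct bus_id_priv *bid = NULL;
	int i;

	pthread_mutex_lock(&busid_table_lock);
	for (i = 0; i < MAX_BUSID; i++) {
		if (!strcmp(busid_table[i].name, busid)) {
			bid = &busid_table[i];
			pthread_mutex_lock(&bid->busid_lock);
			break;
		}
	}
	pthread_mutex_unlock(&busid_table_lock);
	return bid;
}

static void put_busid_priv(struct bus_id_priv *bid)
{
	if (bid)
		pthread_mutex_unlock(&bid->busid_lock);
}

int main(void)
{
	struct bus_id_priv *bid;
	int i;

	for (i = 0; i < MAX_BUSID; i++)
		pthread_mutex_init(&busid_table[i].busid_lock, NULL);
	strcpy(busid_table[0].name, "1-1");

	bid = get_busid_priv("1-1");
	if (bid) {
		printf("found %s with busid_lock held\n", bid->name);
		put_busid_priv(bid);	/* every exit path must drop the lock */
	}
	return 0;
}
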
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index f0b955f..109e65b 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -258,7 +258,7 @@
 #define	VUDC_EVENT_ERROR_USB	(USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
 #define	VUDC_EVENT_ERROR_MALLOC	(USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
 
-#define	VDEV_EVENT_REMOVED	(USBIP_EH_SHUTDOWN | USBIP_EH_BYE)
+#define	VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_RESET | USBIP_EH_BYE)
 #define	VDEV_EVENT_DOWN		(USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
 #define	VDEV_EVENT_ERROR_TCP	(USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
 #define	VDEV_EVENT_ERROR_MALLOC	(USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
diff --git a/drivers/usb/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c
index f163566..f8f7f38 100644
--- a/drivers/usb/usbip/usbip_event.c
+++ b/drivers/usb/usbip/usbip_event.c
@@ -105,10 +105,6 @@
 			unset_event(ud, USBIP_EH_UNUSABLE);
 		}
 
-		/* Stop the error handler. */
-		if (ud->event & USBIP_EH_BYE)
-			usbip_dbg_eh("removed %p\n", ud);
-
 		wake_up(&ud->eh_waitq);
 	}
 }
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 9f1ec43..7b8a957 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -810,6 +810,7 @@
 {
 	__le16 *ctrl = (__le16 *)(vdev->vconfig + pos -
 				  offset + PCI_EXP_DEVCTL);
+	int readrq = le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ;
 
 	count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
 	if (count < 0)
@@ -835,6 +836,27 @@
 			pci_try_reset_function(vdev->pdev);
 	}
 
+	/*
+	 * MPS is virtualized to the user, writes do not change the physical
+	 * register since determining a proper MPS value requires a system-wide
+	 * device view.  The MRRS is largely independent of MPS, but since the
+	 * user does not have that system-wide view, they might set a safe, but
+	 * inefficiently low value.  Here we allow writes through to hardware,
+	 * but we set the floor to the physical device MPS setting, so that
+	 * we can at least use full TLPs, as defined by the MPS value.
+	 *
+	 * NB, if any devices actually depend on an artificially low MRRS
+	 * setting, this will need to be revisited, perhaps with a quirk
+	 * through pcie_set_readrq().
+	 */
+	if (readrq != (le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ)) {
+		readrq = 128 <<
+			((le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ) >> 12);
+		readrq = max(readrq, pcie_get_mps(vdev->pdev));
+
+		pcie_set_readrq(vdev->pdev, readrq);
+	}
+
 	return count;
 }
 
@@ -853,11 +875,12 @@
 	 * Allow writes to device control fields, except devctl_phantom,
 	 * which could confuse IOMMU, MPS, which can break communication
 	 * with other physical devices, and the ARI bit in devctl2, which
-	 * is set at probe time.  FLR gets virtualized via our writefn.
+	 * is set at probe time.  FLR and MRRS get virtualized via our
+	 * writefn.
 	 */
 	p_setw(perm, PCI_EXP_DEVCTL,
-	       PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_PAYLOAD,
-	       ~PCI_EXP_DEVCTL_PHANTOM);
+	       PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_PAYLOAD |
+	       PCI_EXP_DEVCTL_READRQ, ~PCI_EXP_DEVCTL_PHANTOM);
 	p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI);
 	return 0;
 }
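
The writefn above decodes the 3-bit Device Control MRRS field as 128 << field bytes, raises the result to at least the physical MPS reported by pcie_get_mps(), and only then programs the hardware via pcie_set_readrq(). A small standalone check of that arithmetic, with illustrative values rather than vfio code:

#include <stdio.h>

#define PCI_EXP_DEVCTL_READRQ	0x7000	/* bits 14:12 of Device Control */

/* Mirror of the floor computation shown above (illustrative, not vfio code). */
static int mrrs_floor(unsigned int devctl, int phys_mps)
{
	int readrq = 128 << ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);

	return readrq > phys_mps ? readrq : phys_mps;	/* max(readrq, MPS) */
}

int main(void)
{
	/* User asks for a 128-byte MRRS (field 0) while the device MPS is 256:
	 * the value actually programmed is raised to 256. */
	printf("%d\n", mrrs_floor(0x0000, 256));	/* 256 */

	/* A 512-byte request (field 2) already exceeds the MPS and stands. */
	printf("%d\n", mrrs_floor(0x2000, 256));	/* 512 */
	return 0;
}
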
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index e5b7652..487586e 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -524,7 +524,7 @@
 
 	if (!len && vq->busyloop_timeout) {
 		/* Both tx vq and rx socket were polled here */
-		mutex_lock(&vq->mutex);
+		mutex_lock_nested(&vq->mutex, 1);
 		vhost_disable_notify(&net->dev, vq);
 
 		preempt_disable();
@@ -657,7 +657,7 @@
 	struct iov_iter fixup;
 	__virtio16 num_buffers;
 
-	mutex_lock(&vq->mutex);
+	mutex_lock_nested(&vq->mutex, 0);
 	sock = vq->private_data;
 	if (!sock)
 		goto out;
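
The two vhost/net.c changes above give the nested virtqueue mutexes distinct lockdep subclasses: the queue being handled is taken with subclass 0 and the peer queue polled from the busy-loop helper with subclass 1, so holding two mutexes of the same lock class at once is not reported as a possible self-deadlock. A kernel-context sketch of the pattern (not standalone, and demo_vq is a made-up type):

#include <linux/mutex.h>

struct demo_vq {
	struct mutex mutex;
};

/* Sketch: one handler legitimately nests two mutexes of the same class. */
static void handle_one_queue(struct demo_vq *own, struct demo_vq *peer)
{
	mutex_lock_nested(&own->mutex, 0);	/* outer lock, subclass 0 */
	mutex_lock_nested(&peer->mutex, 1);	/* nested peer lock, subclass 1 */
	/* ... busy-poll the peer queue ... */
	mutex_unlock(&peer->mutex);
	mutex_unlock(&own->mutex);
}
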
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index cd38f5a..fce49eb 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -211,8 +211,7 @@
 	if (mask)
 		vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
 	if (mask & POLLERR) {
-		if (poll->wqh)
-			remove_wait_queue(poll->wqh, &poll->wait);
+		vhost_poll_stop(poll);
 		ret = -EINVAL;
 	}
 
@@ -1176,14 +1175,14 @@
 /* Caller should have vq mutex and device mutex */
 int vhost_vq_access_ok(struct vhost_virtqueue *vq)
 {
-	if (vq->iotlb) {
-		/* When device IOTLB was used, the access validation
-		 * will be validated during prefetching.
-		 */
+	if (!vq_log_access_ok(vq, vq->log_base))
+		return 0;
+
+	/* Access validation occurs at prefetch time with IOTLB */
+	if (vq->iotlb)
 		return 1;
-	}
-	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used) &&
-		vq_log_access_ok(vq, vq->log_base);
+
+	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
 }
 EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
 
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 16c8fcf..8c835f1 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -134,7 +134,7 @@
 {
 	int rc;
 	struct backlight_device *bd = to_backlight_device(dev);
-	unsigned long power;
+	unsigned long power, old_power;
 
 	rc = kstrtoul(buf, 0, &power);
 	if (rc)
@@ -145,10 +145,16 @@
 	if (bd->ops) {
 		pr_debug("set power to %lu\n", power);
 		if (bd->props.power != power) {
+			old_power = bd->props.power;
 			bd->props.power = power;
-			backlight_update_status(bd);
+			rc = backlight_update_status(bd);
+			if (rc)
+				bd->props.power = old_power;
+			else
+				rc = count;
+		} else {
+			rc = count;
 		}
-		rc = count;
 	}
 	mutex_unlock(&bd->ops_lock);
 
@@ -176,8 +182,7 @@
 		else {
 			pr_debug("set brightness to %lu\n", brightness);
 			bd->props.brightness = brightness;
-			backlight_update_status(bd);
-			rc = 0;
+			rc = backlight_update_status(bd);
 		}
 	}
 	mutex_unlock(&bd->ops_lock);
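
The backlight.c change above makes the sysfs stores return whatever backlight_update_status() returned, and bl_power additionally restores the previous props.power when the update fails, so the cached value never disagrees with the hardware. A minimal standalone sketch of that cache-then-rollback shape, with hypothetical types:

#include <stdio.h>

struct bl_dev {
	int power;
	int (*update_status)(struct bl_dev *bd);
};

/* Cache the new value, try the hardware update, roll back on failure. */
static int store_power(struct bl_dev *bd, int new_power)
{
	int old_power, rc;

	if (bd->power == new_power)
		return 0;

	old_power = bd->power;
	bd->power = new_power;
	rc = bd->update_status(bd);
	if (rc)
		bd->power = old_power;	/* keep the cache consistent */
	return rc;
}

static int failing_update(struct bl_dev *bd)
{
	(void)bd;
	return -5;	/* pretend -EIO */
}

int main(void)
{
	struct bl_dev bd = { .power = 0, .update_status = failing_update };
	int rc = store_power(&bd, 4);

	printf("rc=%d power=%d\n", rc, bd.power);	/* rc=-5 power=0 */
	return 0;
}
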
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index d7c239e..f557406 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -177,7 +177,7 @@
 	struct spi_message msg;
 	struct spi_transfer xfer = {
 		.len		= 1,
-		.cs_change	= 1,
+		.cs_change	= 0,
 		.tx_buf		= lcd->buf,
 	};
 
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
index eab1f84..e4bd63e 100644
--- a/drivers/video/backlight/tdo24m.c
+++ b/drivers/video/backlight/tdo24m.c
@@ -369,7 +369,7 @@
 
 	spi_message_init(m);
 
-	x->cs_change = 1;
+	x->cs_change = 0;
 	x->tx_buf = &lcd->buf[0];
 	spi_message_add_tail(x, m);
 
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index 6a41ea9..4dc5ee8 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -49,7 +49,7 @@
 	struct spi_message msg;
 	struct spi_transfer xfer = {
 		.len		= 1,
-		.cs_change	= 1,
+		.cs_change	= 0,
 		.tx_buf		= buf,
 	};
 
diff --git a/drivers/video/fbdev/msm/Kconfig b/drivers/video/fbdev/msm/Kconfig
index e8f902b..a5e0662 100644
--- a/drivers/video/fbdev/msm/Kconfig
+++ b/drivers/video/fbdev/msm/Kconfig
@@ -82,6 +82,16 @@
 	MHL (Mobile High-Definition Link) technology
 	uses USB connector to output HDMI content
 
+config FB_MSM_MDSS_SPI_PANEL
+	depends on FB_MSM_MDSS
+	bool "Support SPI panel feature"
+	default n
+	---help---
+	The MDSS SPI panel feature transmits MDSS frame buffer data to the
+	connected panel over SPI. Limited by the SPI rate, the maximum frame
+	rate currently reaches only about 27 fps, and due to the MDP hardware
+	architecture it is supported only on MDP3.
+
 config FB_MSM_MDSS_MHL3
 	depends on FB_MSM_MDSS_HDMI_PANEL
 	bool "MHL3 SII8620 Support"
diff --git a/drivers/video/fbdev/msm/Makefile b/drivers/video/fbdev/msm/Makefile
index 81d4953..26a940c 100644
--- a/drivers/video/fbdev/msm/Makefile
+++ b/drivers/video/fbdev/msm/Makefile
@@ -46,6 +46,11 @@
 obj-$(CONFIG_FB_MSM_MDSS) += mdss-dsi.o
 obj-$(CONFIG_FB_MSM_MDSS) += mdss_panel.o
 
+ifeq ($(CONFIG_SPI_QUP), y)
+obj-$(CONFIG_FB_MSM_MDSS_SPI_PANEL) += mdss_spi_client.o
+obj-$(CONFIG_FB_MSM_MDSS_SPI_PANEL) += mdss_spi_panel.o
+endif
+
 ifneq ($(CONFIG_FB_MSM_MDSS_MDP3), y)
 obj-$(CONFIG_FB_MSM_MDSS) += mdss_hdmi_util.o
 obj-$(CONFIG_FB_MSM_MDSS) += mdss_hdmi_edid.o
diff --git a/drivers/video/fbdev/msm/dsi_host_v2.c b/drivers/video/fbdev/msm/dsi_host_v2.c
index 33775ec..2cca469 100644
--- a/drivers/video/fbdev/msm/dsi_host_v2.c
+++ b/drivers/video/fbdev/msm/dsi_host_v2.c
@@ -753,8 +753,8 @@
 			}
 
 			if (dchdr->wait)
-				usleep_range(dchdr->wait * 1000,
-					     dchdr->wait * 1000);
+				usleep_range((dchdr->wait * 1000),
+					     (dchdr->wait * 1000) + 10);
 
 			mdss_dsi_buf_init(tp);
 			len = 0;
diff --git a/drivers/video/fbdev/msm/dsi_status_6g.c b/drivers/video/fbdev/msm/dsi_status_6g.c
index 88bf0aa..8a32902 100644
--- a/drivers/video/fbdev/msm/dsi_status_6g.c
+++ b/drivers/video/fbdev/msm/dsi_status_6g.c
@@ -18,6 +18,7 @@
 
 #include "mdss_dsi.h"
 #include "mdss_mdp.h"
+#include "mdss_debug.h"
 
 /*
  * mdss_check_te_status() - Check the status of panel for TE based ESD.
@@ -155,6 +156,7 @@
 		ctl->ops.wait_pingpong(ctl, NULL);
 
 	pr_debug("%s: DSI ctrl wait for ping pong done\n", __func__);
+	MDSS_XLOG(mipi->mode);
 
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
 	ret = ctrl_pdata->check_status(ctrl_pdata);
diff --git a/drivers/video/fbdev/msm/dsi_status_v2.c b/drivers/video/fbdev/msm/dsi_status_v2.c
index 35b0984..87720cf 100644
--- a/drivers/video/fbdev/msm/dsi_status_v2.c
+++ b/drivers/video/fbdev/msm/dsi_status_v2.c
@@ -18,6 +18,7 @@
 
 #include "mdss_dsi.h"
 #include "mdp3_ctrl.h"
+#include "mdss_spi_panel.h"
 
 /*
  * mdp3_check_te_status() - Check the status of panel for TE based ESD.
@@ -165,3 +166,79 @@
 	mdss_fb_report_panel_dead(pdsi_status->mfd);
 }
 
+#if defined(CONFIG_FB_MSM_MDSS_SPI_PANEL)
+void mdp3_check_spi_panel_status(struct work_struct *work, uint32_t interval)
+{
+	struct dsi_status_data *pdsi_status = NULL;
+	struct mdss_panel_data *pdata = NULL;
+	struct spi_panel_data *ctrl_pdata = NULL;
+	struct mdp3_session_data *mdp3_session = NULL;
+	int ret = 0;
+
+	pdsi_status = container_of(to_delayed_work(work),
+			struct dsi_status_data, check_status);
+
+	if (!pdsi_status || !(pdsi_status->mfd)) {
+		pr_err("%s: mfd not available\n", __func__);
+		return;
+	}
+
+	pdata = dev_get_platdata(&pdsi_status->mfd->pdev->dev);
+	if (!pdata) {
+		pr_err("%s: panel data not available\n", __func__);
+		return;
+	}
+
+	ctrl_pdata = container_of(pdata, struct spi_panel_data, panel_data);
+	if (!ctrl_pdata || !ctrl_pdata->check_status) {
+		pr_err("%s: SPI ctrl or status_check callback not available\n",
+				__func__);
+		return;
+	}
+
+	mdp3_session = pdsi_status->mfd->mdp.private1;
+	if (!mdp3_session) {
+		pr_err("%s: Display is off\n", __func__);
+		return;
+	}
+
+	if (mdp3_session->in_splash_screen) {
+		schedule_delayed_work(&pdsi_status->check_status,
+			msecs_to_jiffies(interval));
+		pr_debug("%s: cont splash is on\n", __func__);
+		return;
+	}
+
+	mutex_lock(&mdp3_session->lock);
+	if (!mdp3_session->status) {
+		pr_debug("%s: display off already\n", __func__);
+		mutex_unlock(&mdp3_session->lock);
+		return;
+	}
+
+	if (!ret)
+		ret = ctrl_pdata->check_status(ctrl_pdata);
+	else
+		pr_err("%s:wait_for_dma_done error\n", __func__);
+	mutex_unlock(&mdp3_session->lock);
+
+	if (mdss_fb_is_power_on_interactive(pdsi_status->mfd)) {
+		if (ret > 0) {
+			schedule_delayed_work(&pdsi_status->check_status,
+				msecs_to_jiffies(interval));
+		} else {
+			char *envp[2] = {"PANEL_ALIVE=0", NULL};
+
+			pdata->panel_info.panel_dead = true;
+			ret = kobject_uevent_env(
+			&pdsi_status->mfd->fbi->dev->kobj, KOBJ_CHANGE, envp);
+			pr_err("%s:panel has gone bad, sending uevent - %s\n",
+			 __func__, envp[0]);
+		}
+	}
+}
+#else
+void mdp3_check_spi_panel_status(struct work_struct *work, uint32_t interval)
+{
+}
+#endif
diff --git a/drivers/video/fbdev/msm/mdp3.c b/drivers/video/fbdev/msm/mdp3.c
index 591c240..5e9e49c 100644
--- a/drivers/video/fbdev/msm/mdp3.c
+++ b/drivers/video/fbdev/msm/mdp3.c
@@ -56,6 +56,7 @@
 #include "mdss_debug.h"
 #include "mdss_smmu.h"
 #include "mdss.h"
+#include "mdss_spi_panel.h"
 
 #ifndef EXPORT_COMPAT
 #define EXPORT_COMPAT(x)
@@ -109,6 +110,7 @@
 
 static struct mdss_panel_intf pan_types[] = {
 	{"dsi", MDSS_PANEL_INTF_DSI},
+	{"spi", MDSS_PANEL_INTF_SPI},
 };
 static char mdss_mdp3_panel[MDSS_MAX_PANEL_LEN];
 
@@ -127,6 +129,13 @@
 	},
 };
 
+#ifndef CONFIG_FB_MSM_MDSS_SPI_PANEL
+void mdss_spi_panel_bl_ctrl_update(struct mdss_panel_data *pdata, u32 bl_level)
+{
+
+}
+#endif
+
 static irqreturn_t mdp3_irq_handler(int irq, void *ptr)
 {
 	int i = 0;
@@ -1350,7 +1359,11 @@
 						client == MDP3_CLIENT_DMA_P)
 				mdss_smmu_unmap_dma_buf(data->tab_clone,
 					dom, dir, data->srcp_dma_buf);
-			else
+			else if (client == MDP3_CLIENT_SPI) {
+				ion_unmap_kernel(iclient, data->srcp_ihdl);
+				ion_free(iclient, data->srcp_ihdl);
+				data->srcp_ihdl = NULL;
+			} else
 				mdss_smmu_unmap_dma_buf(data->srcp_table,
 					dom, dir, data->srcp_dma_buf);
 			data->mapped = false;
@@ -1416,7 +1429,15 @@
 				data->srcp_dma_buf = NULL;
 				return ret;
 			}
-
+			if (client == MDP3_CLIENT_SPI) {
+				data->srcp_ihdl = ion_import_dma_buf(iclient,
+					data->srcp_dma_buf);
+				if (IS_ERR_OR_NULL(data->srcp_ihdl)) {
+					pr_err("error on ion_import_fd\n");
+					data->srcp_ihdl = NULL;
+					return -EIO;
+				}
+			}
 			data->srcp_attachment =
 			mdss_smmu_dma_buf_attach(data->srcp_dma_buf,
 					&mdp3_res->pdev->dev, dom);
@@ -1449,6 +1470,25 @@
 					data->tab_clone, dom,
 					&data->addr, &data->len,
 					DMA_BIDIRECTIONAL);
+			} else if (client == MDP3_CLIENT_SPI) {
+				void *vaddr;
+
+				if (ion_handle_get_size(iclient,
+						data->srcp_ihdl,
+						(size_t *)&data->len) < 0) {
+					pr_err("get size failed\n");
+					return -EINVAL;
+				}
+				vaddr = ion_map_kernel(iclient,
+						data->srcp_ihdl);
+				if (IS_ERR_OR_NULL(vaddr)) {
+					pr_err("Mapping failed\n");
+					mdp3_put_img(data, client);
+					return -EINVAL;
+				}
+				data->addr = (dma_addr_t) vaddr;
+				data->len -= img->offset;
+				return 0;
 			} else {
 				ret = mdss_smmu_map_dma_buf(data->srcp_dma_buf,
 					data->srcp_table, dom, &data->addr,
@@ -1741,6 +1781,8 @@
 	if (pdata->panel_info.type == MIPI_VIDEO_PANEL) {
 		status = MDP3_REG_READ(MDP3_REG_DSI_VIDEO_EN);
 		rc = status & 0x1;
+	} else if (pdata->panel_info.type == SPI_PANEL) {
+		rc = is_spi_panel_continuous_splash_on(pdata);
 	} else {
 		status = MDP3_REG_READ(MDP3_REG_DMA_P_CONFIG);
 		status &= 0x180000;
@@ -1777,7 +1819,11 @@
 	mdp3_clk_set_rate(MDP3_CLK_MDP_SRC, mdp_clk_rate,
 			MDP3_CLIENT_DMA_P);
 
-	rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_DMA_P, ab, ib);
+	/* DMA not used on SPI interface, remove DMA bus voting */
+	if (panel_info->type == SPI_PANEL)
+		rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_DMA_P, 0, 0);
+	else
+		rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_DMA_P, ab, ib);
 	bus_handle->restore_ab[MDP3_CLIENT_DMA_P] = ab;
 	bus_handle->restore_ib[MDP3_CLIENT_DMA_P] = ib;
 
@@ -1795,6 +1841,8 @@
 
 	if (panel_info->type == MIPI_VIDEO_PANEL)
 		mdp3_res->intf[MDP3_DMA_OUTPUT_SEL_DSI_VIDEO].active = 1;
+	else if (panel_info->type == SPI_PANEL)
+		mdp3_res->intf[MDP3_DMA_OUTPUT_SEL_SPI_CMD].active = 1;
 	else
 		mdp3_res->intf[MDP3_DMA_OUTPUT_SEL_DSI_CMD].active = 1;
 
@@ -2111,9 +2159,41 @@
 static DEVICE_ATTR(smart_blit, 0664,
 			mdp3_show_smart_blit, mdp3_store_smart_blit);
 
+static ssize_t mdp3_store_twm(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	u32 data = -1;
+	ssize_t rc = 0;
+
+	rc = kstrtoint(buf, 10, &data);
+	if (rc) {
+		pr_err("kstrtoint failed. rc=%d\n", rc);
+		return rc;
+	}
+	mdp3_res->twm_en = data ? true : false;
+	pr_err("TWM :  %s\n", (mdp3_res->twm_en) ?
+		"ENABLED" : "DISABLED");
+	return len;
+}
+
+static ssize_t mdp3_show_twm(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = 0;
+
+	pr_err("TWM :  %s\n", (mdp3_res->twm_en) ?
+		"ENABLED" : "DISABLED");
+	ret = snprintf(buf, PAGE_SIZE, "%d\n", mdp3_res->twm_en);
+	return ret;
+}
+
+static DEVICE_ATTR(twm_enable, 0664,
+		mdp3_show_twm, mdp3_store_twm);
+
 static struct attribute *mdp3_fs_attrs[] = {
 	&dev_attr_caps.attr,
 	&dev_attr_smart_blit.attr,
+	&dev_attr_twm_enable.attr,
 	NULL
 };
 
@@ -2378,6 +2458,7 @@
 		mdp3_dynamic_clock_gating_ctrl;
 	mdp3_res->mdss_util->panel_intf_type = mdp3_panel_intf_type;
 	mdp3_res->mdss_util->panel_intf_status = mdp3_panel_get_intf_status;
+	mdp3_res->twm_en = false;
 
 	if (mdp3_res->mdss_util->param_check(mdss_mdp3_panel)) {
 		mdp3_res->mdss_util->display_disabled = true;
@@ -2452,6 +2533,9 @@
 	mdp3_res->mdss_util->mdp_probe_done = true;
 	pr_debug("%s: END\n", __func__);
 
+	if (mdp3_res->pan_cfg.pan_intf == MDSS_PANEL_INTF_SPI)
+		mdp3_interface.check_dsi_status = mdp3_check_spi_panel_status;
+
 probe_done:
 	if (IS_ERR_VALUE(rc))
 		kfree(mdp3_res->mdp3_hw.irq_info);
diff --git a/drivers/video/fbdev/msm/mdp3.h b/drivers/video/fbdev/msm/mdp3.h
index 6fb39a7..6b56052 100644
--- a/drivers/video/fbdev/msm/mdp3.h
+++ b/drivers/video/fbdev/msm/mdp3.h
@@ -84,6 +84,7 @@
 	MDP3_CLIENT_DSI = 1,
 	MDP3_CLIENT_PPP,
 	MDP3_CLIENT_IOMMU,
+	MDP3_CLIENT_SPI,
 	MDP3_CLIENT_MAX,
 };
 
@@ -208,7 +209,9 @@
 	bool solid_fill_vote_en;
 	struct list_head reg_bus_clist;
 	struct mutex reg_bus_lock;
-
+	int bklt_level;
+	int bklt_update;
+	bool twm_en;
 	u32 max_bw;
 
 	u8 ppp_formats[BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1)];
@@ -267,6 +270,7 @@
 void mdp3_enable_regulator(int enable);
 void mdp3_check_dsi_ctrl_status(struct work_struct *work,
 				uint32_t interval);
+void mdp3_check_spi_panel_status(struct work_struct *work, uint32_t interval);
 int mdp3_dynamic_clock_gating_ctrl(int enable);
 int mdp3_footswitch_ctrl(int enable);
 int mdp3_qos_remapper_setup(struct mdss_panel_data *panel);
@@ -279,6 +283,8 @@
 void mdp3_clear_irq(u32 interrupt_mask);
 int mdp3_enable_panic_ctrl(void);
 
+void mdss_spi_panel_bl_ctrl_update(struct mdss_panel_data *pdata, u32 bl_level);
+
 int mdp3_layer_pre_commit(struct msm_fb_data_type *mfd,
 	struct file *file, struct mdp_layer_commit_v1 *commit);
 int mdp3_layer_atomic_validate(struct msm_fb_data_type *mfd,
diff --git a/drivers/video/fbdev/msm/mdp3_ctrl.c b/drivers/video/fbdev/msm/mdp3_ctrl.c
index 589f2df..c976c0e 100644
--- a/drivers/video/fbdev/msm/mdp3_ctrl.c
+++ b/drivers/video/fbdev/msm/mdp3_ctrl.c
@@ -23,11 +23,13 @@
 #include <linux/dma-buf.h>
 #include <linux/pm_runtime.h>
 #include <linux/iommu.h>
+#include <linux/msm_ion.h>
 
 #include "mdp3_ctrl.h"
 #include "mdp3.h"
 #include "mdp3_ppp.h"
 #include "mdss_smmu.h"
+#include "mdss_spi_panel.h"
 #include "mdss_sync.h"
 
 #define VSYNC_EXPIRE_TICK	4
@@ -72,7 +74,7 @@
 	bufq->pop_idx = 0;
 }
 
-void mdp3_bufq_deinit(struct mdp3_buffer_queue *bufq)
+void mdp3_bufq_deinit(struct mdp3_buffer_queue *bufq, int client)
 {
 	int count = bufq->count;
 
@@ -83,7 +85,7 @@
 		struct mdp3_img_data *data = &bufq->img_data[bufq->pop_idx];
 
 		bufq->pop_idx = (bufq->pop_idx + 1) % MDP3_MAX_BUF_QUEUE;
-		mdp3_put_img(data, MDP3_CLIENT_DMA_P);
+		mdp3_put_img(data, client);
 	}
 	bufq->count = 0;
 	bufq->push_idx = 0;
@@ -122,6 +124,18 @@
 	return bufq->count;
 }
 
+int mdp3_get_ion_client(struct msm_fb_data_type *mfd)
+{
+	int intf_type;
+
+	intf_type = mdp3_ctrl_get_intf_type(mfd);
+
+	if (intf_type == MDP3_DMA_OUTPUT_SEL_SPI_CMD)
+		return MDP3_CLIENT_SPI;
+	else
+		return MDP3_CLIENT_DMA_P;
+}
+
 void mdp3_ctrl_notifier_register(struct mdp3_session_data *ses,
 	struct notifier_block *notifier)
 {
@@ -174,6 +188,7 @@
 		return;
 
 	mutex_lock(&session->lock);
+	MDSS_XLOG(0x111);
 	if (session->vsync_enabled ||
 		atomic_read(&session->vsync_countdown) > 0) {
 		mutex_unlock(&session->lock);
@@ -182,10 +197,17 @@
 		return;
 	}
 
+	if (!session->clk_on) {
+		mutex_unlock(&session->lock);
+		pr_debug("%s: Clk shut down is done\n", __func__);
+		MDSS_XLOG(XLOG_FUNC_EXIT, __LINE__);
+		return;
+	}
 	if (session->intf->active) {
 retry_dma_done:
 		rc = wait_for_completion_timeout(&session->dma_completion,
 							WAIT_DMA_TIMEOUT);
+		MDSS_XLOG(0x222);
 		if (rc <= 0) {
 			struct mdss_panel_data *panel;
 
@@ -196,6 +218,7 @@
 				if (--retry_count) {
 					pr_err("dmap is busy, retry %d\n",
 						retry_count);
+					MDSS_XLOG(__LINE__, retry_count);
 					goto retry_dma_done;
 				}
 				pr_err("dmap is still busy, bug_on\n");
@@ -299,8 +322,11 @@
 	struct mdp3_notification vsync_client;
 	struct mdp3_notification *arg = NULL;
 	bool mod_vsync_timer = false;
+	int intf_type;
 
 	pr_debug("mdp3_ctrl_vsync_enable =%d\n", enable);
+
+	intf_type = mdp3_ctrl_get_intf_type(mfd);
 	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
 	if (!mdp3_session || !mdp3_session->panel || !mdp3_session->dma ||
 		!mdp3_session->intf)
@@ -338,18 +364,25 @@
 		}
 	}
 
-	mdp3_clk_enable(1, 0);
-	mdp3_session->dma->vsync_enable(mdp3_session->dma, arg);
-	mdp3_clk_enable(0, 0);
+	if (intf_type == MDP3_DMA_OUTPUT_SEL_SPI_CMD) {
+		mdp3_spi_vsync_enable(mdp3_session->panel, arg);
+	} else {
+		mdp3_clk_enable(1, 0);
+		mdp3_session->dma->vsync_enable(mdp3_session->dma, arg);
+		mdp3_clk_enable(0, 0);
+	}
 
 	/*
 	 * Need to fake vsync whenever dsi interface is not
 	 * active or when dsi clocks are currently off
 	 */
-	if (mod_vsync_timer) {
+	if (mod_vsync_timer && (intf_type != MDP3_DMA_OUTPUT_SEL_SPI_CMD)) {
 		mod_timer(&mdp3_session->vsync_timer,
 			jiffies + msecs_to_jiffies(mdp3_session->vsync_period));
-	} else if (!enable) {
+	} else if (enable && !mdp3_session->clk_on) {
+		mdp3_ctrl_reset_countdown(mdp3_session, mfd);
+		mdp3_ctrl_clk_enable(mfd, 1);
+	} else if (!enable && (intf_type != MDP3_DMA_OUTPUT_SEL_SPI_CMD)) {
 		del_timer(&mdp3_session->vsync_timer);
 	}
 
@@ -380,7 +413,7 @@
 		return -EFAULT;
 	p_req = p + sizeof(req_list_header);
 	count = req_list_header.count;
-	if (count < 0 || count >= MAX_BLIT_REQ)
+	if (count < 0 || count > MAX_BLIT_REQ)
 		return -EINVAL;
 	rc = mdp3_ppp_parse_req(p_req, &req_list_header, 1);
 	if (!rc)
@@ -399,7 +432,7 @@
 		return -EFAULT;
 	p_req = p + sizeof(struct mdp_blit_req_list);
 	count = req_list_header.count;
-	if (count < 0 || count >= MAX_BLIT_REQ)
+	if (count < 0 || count > MAX_BLIT_REQ)
 		return -EINVAL;
 	req_list_header.sync.acq_fen_fd_cnt = 0;
 	rc = mdp3_ppp_parse_req(p_req, &req_list_header, 0);
@@ -588,14 +621,32 @@
 static int mdp3_ctrl_res_req_bus(struct msm_fb_data_type *mfd, int status)
 {
 	int rc = 0;
+	u32 vtotal = 0;
+	int frame_rate = DEFAULT_FRAME_RATE;
 
 	if (status) {
+		struct mdss_panel_info *panel_info = mfd->panel_info;
 		u64 ab = 0;
 		u64 ib = 0;
 
+		frame_rate = mdss_panel_get_framerate(mfd->panel_info,
+			FPS_RESOLUTION_HZ);
 		mdp3_calc_dma_res(mfd->panel_info, NULL, &ab, &ib,
 			ppp_bpp(mfd->fb_imgType));
-		rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_DMA_P, ab, ib);
+		vtotal = panel_info->yres + panel_info->lcdc.v_back_porch +
+			panel_info->lcdc.v_front_porch +
+			panel_info->lcdc.v_pulse_width;
+		ab = panel_info->xres * vtotal * ppp_bpp(mfd->fb_imgType);
+		ab *= frame_rate;
+		ib = ab;
+
+		/* DMA not used on SPI interface, remove DMA bus voting */
+		if (mdp3_ctrl_get_intf_type(mfd) ==
+			 MDP3_DMA_OUTPUT_SEL_SPI_CMD)
+			rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_DMA_P, 0, 0);
+		else
+			rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_DMA_P,
+				ab, ib);
 	} else {
 		rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_DMA_P, 0, 0);
 	}
@@ -644,6 +695,9 @@
 	case LCDC_PANEL:
 		type = MDP3_DMA_OUTPUT_SEL_LCDC;
 		break;
+	case SPI_PANEL:
+		type = MDP3_DMA_OUTPUT_SEL_SPI_CMD;
+		break;
 	default:
 		type = MDP3_DMA_OUTPUT_SEL_MAX;
 	}
@@ -661,8 +715,11 @@
 	case MDP_RGB_888:
 		format = MDP3_DMA_IBUF_FORMAT_RGB888;
 		break;
+	case MDP_XRGB_8888:
 	case MDP_ARGB_8888:
 	case MDP_RGBA_8888:
+	case MDP_BGRA_8888:
+	case MDP_RGBX_8888:
 		format = MDP3_DMA_IBUF_FORMAT_XRGB8888;
 		break;
 	default:
@@ -705,7 +762,8 @@
 
 	cfg.type = mdp3_ctrl_get_intf_type(mfd);
 	if (cfg.type == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO ||
-		cfg.type == MDP3_DMA_OUTPUT_SEL_LCDC) {
+		cfg.type == MDP3_DMA_OUTPUT_SEL_LCDC ||
+		cfg.type == MDP3_DMA_OUTPUT_SEL_SPI_CMD) {
 		video->hsync_period = hsync_period;
 		video->hsync_pulse_width = h_pulse_width;
 		video->vsync_period = vsync_period;
@@ -753,7 +811,7 @@
 	struct fb_var_screeninfo *var;
 	struct mdp3_dma_output_config outputConfig;
 	struct mdp3_dma_source sourceConfig;
-	int frame_rate = mfd->panel_info->mipi.frame_rate;
+	int frame_rate = DEFAULT_FRAME_RATE;
 	int vbp, vfp, vspw;
 	int vtotal, vporch;
 	struct mdp3_notification dma_done_callback;
@@ -762,6 +820,7 @@
 
 	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
 
+	frame_rate = mdss_panel_get_framerate(panel_info, FPS_RESOLUTION_HZ);
 	vbp = panel_info->lcdc.v_back_porch;
 	vfp = panel_info->lcdc.v_front_porch;
 	vspw = panel_info->lcdc.v_pulse_width;
@@ -802,7 +861,7 @@
 		sourceConfig.stride = fix->line_length;
 	}
 
-	te.frame_rate = panel_info->mipi.frame_rate;
+	te.frame_rate = frame_rate;
 	te.hw_vsync_mode = panel_info->mipi.hw_vsync_mode;
 	te.tear_check_en = panel_info->te.tear_check_en;
 	te.sync_cfg_height = panel_info->te.sync_cfg_height;
@@ -983,9 +1042,15 @@
 	return rc;
 }
 
+static bool mdp3_is_twm_en(void)
+{
+	return mdp3_res->twm_en;
+}
+
 static int mdp3_ctrl_off(struct msm_fb_data_type *mfd)
 {
 	int rc = 0;
+	int client = 0;
 	bool intf_stopped = true;
 	struct mdp3_session_data *mdp3_session;
 	struct mdss_panel_data *panel;
@@ -1009,8 +1074,10 @@
 	MDSS_XLOG(XLOG_FUNC_ENTRY, __LINE__, mdss_fb_is_power_on_ulp(mfd),
 		mfd->panel_power_state);
 	panel = mdp3_session->panel;
-	mutex_lock(&mdp3_session->lock);
 
+	cancel_work_sync(&mdp3_session->clk_off_work);
+	mutex_lock(&mdp3_session->lock);
+	MDSS_XLOG(0x111);
 	pr_debug("Requested power state = %d\n", mfd->panel_power_state);
 	if (mdss_fb_is_power_on_lp(mfd)) {
 		/*
@@ -1025,8 +1092,10 @@
 			pr_debug("fb%d is off already", mfd->index);
 			goto off_error;
 		}
-		if (panel && panel->set_backlight)
-			panel->set_backlight(panel, 0);
+		if (panel && panel->set_backlight) {
+			if (!mdp3_is_twm_en())
+				panel->set_backlight(panel, 0);
+		}
 	}
 
 	/*
@@ -1034,12 +1103,13 @@
 	 * events need to be sent to the interface so that the
 	 * panel can be configured in low power mode
 	 */
-	if (panel->event_handler)
-		rc = panel->event_handler(panel, MDSS_EVENT_BLANK,
-			(void *) (long int)mfd->panel_power_state);
-	if (rc)
-		pr_err("EVENT_BLANK error (%d)\n", rc);
-
+	if (!mdp3_is_twm_en()) {
+		if (panel->event_handler)
+			rc = panel->event_handler(panel, MDSS_EVENT_BLANK,
+				(void *) (long int)mfd->panel_power_state);
+		if (rc)
+			pr_err("EVENT_BLANK error (%d)\n", rc);
+	}
 	if (intf_stopped) {
 		if (!mdp3_session->clk_on)
 			mdp3_ctrl_clk_enable(mfd, 1);
@@ -1065,9 +1135,27 @@
 		mdp3_irq_deregister();
 	}
 
-	if (panel->event_handler)
-		rc = panel->event_handler(panel, MDSS_EVENT_PANEL_OFF,
-			(void *) (long int)mfd->panel_power_state);
+	if (panel->event_handler) {
+		if (mdp3_is_twm_en()) {
+			pr_info("TWM active skip panel off, disable disp_en\n");
+			if (gpio_is_valid(panel->panel_en_gpio)) {
+				rc = gpio_direction_output(
+					panel->panel_en_gpio, 1);
+				if (rc) {
+					pr_err("%s:set dir for gpio(%d) FAIL\n",
+						__func__, panel->panel_en_gpio);
+				} else {
+					gpio_set_value((panel->panel_en_gpio), 0);
+					usleep_range(100, 110);
+					pr_debug("%s:set disp_en_gpio_%d Low\n",
+						__func__, panel->panel_en_gpio);
+				}
+			}
+		} else {
+			rc = panel->event_handler(panel, MDSS_EVENT_PANEL_OFF,
+				(void *) (long int)mfd->panel_power_state);
+		}
+	}
 	if (rc)
 		pr_err("EVENT_PANEL_OFF error (%d)\n", rc);
 
@@ -1116,10 +1204,11 @@
 				pr_err("%s: pm_runtime_put failed (rc %d)\n",
 					__func__, rc);
 		}
-		mdp3_bufq_deinit(&mdp3_session->bufq_out);
+		client = mdp3_get_ion_client(mfd);
+		mdp3_bufq_deinit(&mdp3_session->bufq_out, client);
 		if (mdp3_session->overlay.id != MSMFB_NEW_REQUEST) {
 			mdp3_session->overlay.id = MSMFB_NEW_REQUEST;
-			mdp3_bufq_deinit(&mdp3_session->bufq_in);
+			mdp3_bufq_deinit(&mdp3_session->bufq_in, client);
 		}
 	}
 
@@ -1149,6 +1238,10 @@
 		mdp3_ctrl_clk_enable(mdp3_session->mfd, 0);
 	}
 off_error:
+	if (mdp3_session->overlay.id != MSMFB_NEW_REQUEST) {
+		mdp3_session->overlay.id = MSMFB_NEW_REQUEST;
+		mdp3_bufq_deinit(&mdp3_session->bufq_in, client);
+	}
 	MDSS_XLOG(XLOG_FUNC_EXIT, __LINE__);
 	mutex_unlock(&mdp3_session->lock);
 	/* Release the last reference to the runtime device */
@@ -1296,14 +1389,16 @@
 	struct fb_info *fbi = mfd->fbi;
 	struct fb_fix_screeninfo *fix;
 	int format;
+	int client;
 
 	fix = &fbi->fix;
 	format = mdp3_ctrl_get_source_format(mfd->fb_imgType);
 	mutex_lock(&mdp3_session->lock);
 
+	client = mdp3_get_ion_client(mfd);
 	if (mdp3_session->overlay.id == ndx && ndx == 1) {
 		mdp3_session->overlay.id = MSMFB_NEW_REQUEST;
-		mdp3_bufq_deinit(&mdp3_session->bufq_in);
+		mdp3_bufq_deinit(&mdp3_session->bufq_in, client);
 	} else {
 		rc = -EINVAL;
 	}
@@ -1322,18 +1417,20 @@
 	struct msmfb_data *img = &req->data;
 	struct mdp3_img_data data;
 	struct mdp3_dma *dma = mdp3_session->dma;
+	int client;
 
+	client = mdp3_get_ion_client(mfd);
 	memset(&data, 0, sizeof(struct mdp3_img_data));
-	if (mfd->panel.type == MIPI_CMD_PANEL)
+	if (mfd->panel.type == MIPI_CMD_PANEL || client == MDP3_CLIENT_SPI)
 		is_panel_type_cmd = true;
 	if (is_panel_type_cmd) {
-		rc = mdp3_iommu_enable(MDP3_CLIENT_DMA_P);
+		rc = mdp3_iommu_enable(client);
 		if (rc) {
 			pr_err("fail to enable iommu\n");
 			return rc;
 		}
 	}
-	rc = mdp3_get_img(img, &data, MDP3_CLIENT_DMA_P);
+	rc = mdp3_get_img(img, &data, client);
 	if (rc) {
 		pr_err("fail to get overlay buffer\n");
 		goto err;
@@ -1343,14 +1440,14 @@
 		pr_err("buf size(0x%lx) is smaller than dma config(0x%x)\n",
 			data.len, (dma->source_config.stride *
 			dma->source_config.height));
-		mdp3_put_img(&data, MDP3_CLIENT_DMA_P);
+		mdp3_put_img(&data, client);
 		rc = -EINVAL;
 		goto err;
 	}
 	rc = mdp3_bufq_push(&mdp3_session->bufq_in, &data);
 	if (rc) {
 		pr_err("fail to queue the overlay buffer, buffer drop\n");
-		mdp3_put_img(&data, MDP3_CLIENT_DMA_P);
+		mdp3_put_img(&data, client);
 		goto err;
 	}
 	rc = 0;
@@ -1409,8 +1506,11 @@
 	struct mdp3_img_data *data;
 	struct mdss_panel_info *panel_info;
 	int rc = 0;
+	int client;
 	static bool splash_done;
 	struct mdss_panel_data *panel;
+	int frame_rate = DEFAULT_FRAME_RATE;
+	int stride;
 
 	if (!mfd || !mfd->mdp.private1)
 		return -EINVAL;
@@ -1420,6 +1520,9 @@
 	if (!mdp3_session || !mdp3_session->dma)
 		return -EINVAL;
 
+	frame_rate = mdss_panel_get_framerate(panel_info, FPS_RESOLUTION_HZ);
+	client = mdp3_get_ion_client(mfd);
+
 	if (mdp3_bufq_count(&mdp3_session->bufq_in) == 0) {
 		pr_debug("no buffer in queue yet\n");
 		return -EPERM;
@@ -1456,6 +1559,7 @@
 	}
 	mutex_unlock(&mdp3_res->fs_idle_pc_lock);
 
+	cancel_work_sync(&mdp3_session->clk_off_work);
 	mutex_lock(&mdp3_session->lock);
 
 	if (!mdp3_session->status) {
@@ -1463,13 +1567,23 @@
 		mutex_unlock(&mdp3_session->lock);
 		return -EPERM;
 	}
-
+	MDSS_XLOG(0x111);
 	mdp3_ctrl_notify(mdp3_session, MDP_NOTIFY_FRAME_BEGIN);
 	data = mdp3_bufq_pop(&mdp3_session->bufq_in);
 	if (data) {
 		mdp3_ctrl_reset_countdown(mdp3_session, mfd);
 		mdp3_ctrl_clk_enable(mfd, 1);
-		if (mdp3_session->dma->update_src_cfg &&
+		stride = mdp3_session->dma->source_config.stride;
+		if (mdp3_ctrl_get_intf_type(mfd) ==
+			MDP3_DMA_OUTPUT_SEL_SPI_CMD) {
+			mdp3_session->intf->active = false;
+			msm_ion_do_cache_op(mdp3_res->ion_client,
+				data->srcp_ihdl, (void *)(int)data->addr,
+					data->len, ION_IOC_INV_CACHES);
+			rc = mdss_spi_panel_kickoff(mdp3_session->panel,
+				(void *)(int)data->addr, (int)data->len,
+					stride);
+		} else if (mdp3_session->dma->update_src_cfg &&
 				panel_info->partial_update_enabled) {
 			panel->panel_info.roi.x = mdp3_session->dma->roi.x;
 			panel->panel_info.roi.y = mdp3_session->dma->roi.y;
@@ -1489,13 +1603,15 @@
 				MDP_NOTIFY_FRAME_TIMEOUT);
 		} else {
 			if (mdp3_ctrl_get_intf_type(mfd) ==
-						MDP3_DMA_OUTPUT_SEL_DSI_VIDEO) {
+				MDP3_DMA_OUTPUT_SEL_DSI_VIDEO ||
+				mdp3_ctrl_get_intf_type(mfd) ==
+					MDP3_DMA_OUTPUT_SEL_SPI_CMD) {
 				mdp3_ctrl_notify(mdp3_session,
 					MDP_NOTIFY_FRAME_DONE);
 			}
 		}
 		mdp3_session->dma_active = 1;
-		init_completion(&mdp3_session->dma_completion);
+		reinit_completion(&mdp3_session->dma_completion);
 		mdp3_ctrl_notify(mdp3_session, MDP_NOTIFY_FRAME_FLUSHED);
 		mdp3_bufq_push(&mdp3_session->bufq_out, data);
 	}
@@ -1504,16 +1620,16 @@
 		mdp3_release_splash_memory(mfd);
 		data = mdp3_bufq_pop(&mdp3_session->bufq_out);
 		if (data)
-			mdp3_put_img(data, MDP3_CLIENT_DMA_P);
+			mdp3_put_img(data, client);
 	}
 
 	if (mdp3_session->first_commit) {
 		/*wait to ensure frame is sent to panel*/
 		if (panel_info->mipi.post_init_delay)
-			msleep(((1000 / panel_info->mipi.frame_rate) + 1) *
+			msleep(((1000 / frame_rate) + 1) *
 					panel_info->mipi.post_init_delay);
 		else
-			msleep(1000 / panel_info->mipi.frame_rate);
+			msleep((1000 / frame_rate) + 1);
 		mdp3_session->first_commit = false;
 		if (panel)
 			rc |= panel->event_handler(panel,
@@ -1528,6 +1644,12 @@
 		mdp3_session->esd_recovery = false;
 	}
 
+	/* Update backlight only if it's changed */
+	if (mdp3_res->bklt_level && mdp3_res->bklt_update) {
+		mdss_spi_panel_bl_ctrl_update(panel, mdp3_res->bklt_level);
+		mdp3_res->bklt_update = false;
+	}
+
 	/* start vsync tick countdown for cmd mode if vsync isn't enabled */
 	if (mfd->panel.type == MIPI_CMD_PANEL && !mdp3_session->vsync_enabled)
 		mdp3_ctrl_vsync_enable(mdp3_session->mfd, 0);
@@ -1644,7 +1766,7 @@
 			}
 		}
 		mdp3_session->dma_active = 1;
-		init_completion(&mdp3_session->dma_completion);
+		reinit_completion(&mdp3_session->dma_completion);
 		mdp3_ctrl_notify(mdp3_session, MDP_NOTIFY_FRAME_FLUSHED);
 	} else {
 		pr_debug("mdp3_ctrl_pan_display no memory, stop interface");
@@ -1654,17 +1776,18 @@
 	}
 
 	panel = mdp3_session->panel;
-	if (mdp3_session->first_commit) {
-		/*wait to ensure frame is sent to panel*/
-		if (panel_info->mipi.post_init_delay)
-			msleep(((1000 / panel_info->mipi.frame_rate) + 1) *
-					panel_info->mipi.post_init_delay);
-		else
-			msleep(1000 / panel_info->mipi.frame_rate);
-		mdp3_session->first_commit = false;
-		if (panel)
-			panel->event_handler(panel, MDSS_EVENT_POST_PANEL_ON,
-					NULL);
+	if (mdp3_ctrl_get_intf_type(mfd) != MDP3_DMA_OUTPUT_SEL_SPI_CMD) {
+		if (mdp3_session->first_commit) {
+			/* wait to ensure frame is sent to panel */
+			if (panel_info->mipi.post_init_delay)
+				msleep(((1000 / panel_info->mipi.frame_rate)
+				+ 1) * panel_info->mipi.post_init_delay);
+			else
+				msleep(1000 / panel_info->mipi.frame_rate);
+			mdp3_session->first_commit = false;
+			if (panel)
+				panel->event_handler(panel,
+					MDSS_EVENT_POST_PANEL_ON, NULL);
+		}
 	}
 
 	mdp3_session->vsync_before_commit = 0;
@@ -1675,6 +1798,11 @@
 		mdp3_session->esd_recovery = false;
 	}
 
+	/* Update backlight only if it's changed */
+	if (mdp3_res->bklt_level && mdp3_res->bklt_update) {
+		mdss_spi_panel_bl_ctrl_update(panel, mdp3_res->bklt_level);
+		mdp3_res->bklt_update = false;
+	}
 
 pan_error:
 	mutex_unlock(&mdp3_session->lock);
@@ -1715,7 +1843,8 @@
 	switch (metadata->op) {
 	case metadata_op_frame_rate:
 		metadata->data.panel_frame_rate =
-			mfd->panel_info->mipi.frame_rate;
+			 mdss_panel_get_framerate(mfd->panel_info,
+				FPS_RESOLUTION_HZ);
 		break;
 	case metadata_op_get_caps:
 		metadata->data.caps.mdp_rev = 305;
@@ -2685,6 +2814,8 @@
 		}
 		mutex_unlock(&mdp3_res->fs_idle_pc_lock);
 		rc = mdp3_ctrl_async_blit_req(mfd, argp);
+		if (!rc)
+			cancel_work_sync(&mdp3_session->clk_off_work);
 		break;
 	case MSMFB_BLIT:
 		mutex_lock(&mdp3_res->fs_idle_pc_lock);
@@ -2692,6 +2823,8 @@
 			mdp3_ctrl_reset(mfd);
 		mutex_unlock(&mdp3_res->fs_idle_pc_lock);
 		rc = mdp3_ctrl_blit_req(mfd, argp);
+		if (!rc)
+			cancel_work_sync(&mdp3_session->clk_off_work);
 		break;
 	case MSMFB_METADATA_GET:
 		rc = copy_from_user(&metadata, argp, sizeof(metadata));
@@ -2848,6 +2981,7 @@
 	struct msm_mdp_interface *mdp3_interface = &mfd->mdp;
 	struct mdp3_session_data *mdp3_session = NULL;
 	u32 intf_type = MDP3_DMA_OUTPUT_SEL_DSI_VIDEO;
+	int frame_rate = DEFAULT_FRAME_RATE;
 	int rc;
 	int splash_mismatch = 0;
 	struct sched_param sched = { .sched_priority = 16 };
@@ -2857,6 +2991,8 @@
 	if (rc)
 		splash_mismatch = 1;
 
+	frame_rate = mdss_panel_get_framerate(mfd->panel_info,
+		FPS_RESOLUTION_HZ);
 	mdp3_interface->on_fnc = mdp3_ctrl_on;
 	mdp3_interface->off_fnc = mdp3_ctrl_off;
 	mdp3_interface->do_histogram = NULL;
@@ -2870,6 +3006,7 @@
 	mdp3_interface->configure_panel = mdp3_update_panel_info;
 	mdp3_interface->input_event_handler = NULL;
 	mdp3_interface->signal_retire_fence = NULL;
+	mdp3_interface->is_twm_en = mdp3_is_twm_en;
 
 	mdp3_session = kzalloc(sizeof(struct mdp3_session_data), GFP_KERNEL);
 	if (!mdp3_session)
@@ -2934,10 +3071,11 @@
 	init_timer(&mdp3_session->vsync_timer);
 	mdp3_session->vsync_timer.function = mdp3_vsync_timer_func;
 	mdp3_session->vsync_timer.data = (u32)mdp3_session;
-	mdp3_session->vsync_period = 1000 / mfd->panel_info->mipi.frame_rate;
+	mdp3_session->vsync_period = 1000 / frame_rate;
 	mfd->mdp.private1 = mdp3_session;
 	init_completion(&mdp3_session->dma_completion);
-	if (intf_type != MDP3_DMA_OUTPUT_SEL_DSI_VIDEO)
+	if (intf_type != MDP3_DMA_OUTPUT_SEL_DSI_VIDEO &&
+		intf_type != MDP3_DMA_OUTPUT_SEL_SPI_CMD)
 		mdp3_session->wait_for_dma_done = mdp3_wait_for_dma_done;
 
 	rc = sysfs_create_group(&dev->kobj, &vsync_fs_attr_group);
diff --git a/drivers/video/fbdev/msm/mdp3_ctrl.h b/drivers/video/fbdev/msm/mdp3_ctrl.h
index b7b667b..5193af1 100644
--- a/drivers/video/fbdev/msm/mdp3_ctrl.h
+++ b/drivers/video/fbdev/msm/mdp3_ctrl.h
@@ -84,12 +84,13 @@
 	struct work_struct retire_work;
 };
 
-void mdp3_bufq_deinit(struct mdp3_buffer_queue *bufq);
+void mdp3_bufq_deinit(struct mdp3_buffer_queue *bufq, int client);
 int mdp3_ctrl_init(struct msm_fb_data_type *mfd);
 int mdp3_bufq_push(struct mdp3_buffer_queue *bufq,
 			struct mdp3_img_data *data);
 int mdp3_ctrl_get_source_format(u32 imgType);
 int mdp3_ctrl_get_pack_pattern(u32 imgType);
 int mdp3_ctrl_reset(struct msm_fb_data_type *mfd);
+int mdp3_get_ion_client(struct msm_fb_data_type *mfd);
 
 #endif /* MDP3_CTRL_H */
diff --git a/drivers/video/fbdev/msm/mdp3_dma.c b/drivers/video/fbdev/msm/mdp3_dma.c
index 089d32d..b223c87 100644
--- a/drivers/video/fbdev/msm/mdp3_dma.c
+++ b/drivers/video/fbdev/msm/mdp3_dma.c
@@ -114,7 +114,8 @@
 	}
 
 	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO ||
-		dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_LCDC) {
+		dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_LCDC ||
+		dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_SPI_CMD) {
 		if (type & MDP3_DMA_CALLBACK_TYPE_VSYNC)
 			mdp3_irq_enable(MDP3_INTR_LCDC_START_OF_FRAME);
 	} else if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
@@ -150,7 +151,8 @@
 	}
 
 	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO ||
-		dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_LCDC) {
+		dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_LCDC ||
+		dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_SPI_CMD) {
 		if (type & MDP3_DMA_CALLBACK_TYPE_VSYNC)
 			mdp3_irq_disable(MDP3_INTR_LCDC_START_OF_FRAME);
 	} else if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
@@ -665,6 +667,7 @@
 	ATRACE_BEGIN(__func__);
 	pr_debug("mdp3_dmap_update\n");
 
+	MDSS_XLOG(XLOG_FUNC_ENTRY, __LINE__);
 	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
 		cb_type = MDP3_DMA_CALLBACK_TYPE_DMA_DONE;
 		if (intf->active) {
@@ -757,6 +760,7 @@
 	unsigned long flag;
 	int cb_type = MDP3_DMA_CALLBACK_TYPE_VSYNC;
 
+	MDSS_XLOG(XLOG_FUNC_ENTRY, __LINE__);
 	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
 		cb_type = MDP3_DMA_CALLBACK_TYPE_DMA_DONE;
 		if (intf->active)
@@ -965,6 +969,8 @@
 	val = MDP3_REG_READ(MDP3_REG_DISPLAY_STATUS);
 	pr_err("%s DMAP Status %s\n", __func__,
 		(val & MDP3_DMA_P_BUSY_BIT) ? "BUSY":"IDLE");
+	MDSS_XLOG(XLOG_FUNC_ENTRY, __LINE__,
+		(val & MDP3_DMA_P_BUSY_BIT) ? 1:0);
 	return val & MDP3_DMA_P_BUSY_BIT;
 }
 
@@ -1056,7 +1062,7 @@
 	MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, 0);
 	MDP3_REG_WRITE(MDP3_REG_INTR_CLEAR, 0xfffffff);
 
-	init_completion(&dma->dma_comp);
+	reinit_completion(&dma->dma_comp);
 	dma->vsync_client.handler = NULL;
 	return ret;
 }
@@ -1264,6 +1270,22 @@
 	return 0;
 }
 
+static int spi_cmd_config(struct mdp3_intf *intf, struct mdp3_intf_cfg *cfg)
+{
+	return 0;
+}
+
+static int spi_cmd_start(struct mdp3_intf *intf)
+{
+	intf->active = true;
+	return 0;
+}
+
+static int spi_cmd_stop(struct mdp3_intf *intf)
+{
+	intf->active = false;
+	return 0;
+}
 int mdp3_intf_init(struct mdp3_intf *intf)
 {
 	switch (intf->cfg.type) {
@@ -1282,6 +1304,11 @@
 		intf->start = dsi_cmd_start;
 		intf->stop = dsi_cmd_stop;
 		break;
+	case MDP3_DMA_OUTPUT_SEL_SPI_CMD:
+		intf->config = spi_cmd_config;
+		intf->start = spi_cmd_start;
+		intf->stop = spi_cmd_stop;
+		break;
 
 	default:
 		return -EINVAL;
diff --git a/drivers/video/fbdev/msm/mdp3_dma.h b/drivers/video/fbdev/msm/mdp3_dma.h
index 24caedb9..ec327b6 100644
--- a/drivers/video/fbdev/msm/mdp3_dma.h
+++ b/drivers/video/fbdev/msm/mdp3_dma.h
@@ -47,6 +47,7 @@
 	MDP3_DMA_OUTPUT_SEL_DSI_CMD,
 	MDP3_DMA_OUTPUT_SEL_LCDC,
 	MDP3_DMA_OUTPUT_SEL_DSI_VIDEO,
+	MDP3_DMA_OUTPUT_SEL_SPI_CMD,
 	MDP3_DMA_OUTPUT_SEL_MAX
 };
 
diff --git a/drivers/video/fbdev/msm/mdp3_layer.c b/drivers/video/fbdev/msm/mdp3_layer.c
index 0078466..98b5c19 100644
--- a/drivers/video/fbdev/msm/mdp3_layer.c
+++ b/drivers/video/fbdev/msm/mdp3_layer.c
@@ -197,6 +197,7 @@
 	struct msmfb_data img;
 	bool is_panel_type_cmd = false;
 	struct mdp3_img_data data;
+	int intf_type;
 	int rc = 0;
 
 	layer = &input_layer[0];
@@ -208,23 +209,24 @@
 		goto err;
 	}
 
+	intf_type = mdp3_get_ion_client(mfd);
 	memset(&img, 0, sizeof(img));
 	img.memory_id = buffer->planes[0].fd;
 	img.offset = buffer->planes[0].offset;
 
 	memset(&data, 0, sizeof(struct mdp3_img_data));
 
-	if (mfd->panel.type == MIPI_CMD_PANEL)
+	if (mfd->panel.type == MIPI_CMD_PANEL || intf_type == MDP3_CLIENT_SPI)
 		is_panel_type_cmd = true;
 	if (is_panel_type_cmd) {
-		rc = mdp3_iommu_enable(MDP3_CLIENT_DMA_P);
+		rc = mdp3_iommu_enable(intf_type);
 		if (rc) {
 			pr_err("fail to enable iommu\n");
 			return rc;
 		}
 	}
 
-	rc = mdp3_get_img(&img, &data, MDP3_CLIENT_DMA_P);
+	rc = mdp3_get_img(&img, &data, intf_type);
 	if (rc) {
 		pr_err("fail to get overlay buffer\n");
 		goto err;
@@ -260,7 +262,7 @@
 	struct mdp3_session_data *mdp3_session;
 	struct mdp3_dma *dma;
 	int layer_count = commit->input_layer_cnt;
-	int stride, format;
+	int stride, format, client;
 
 	/* Handle NULL commit */
 	if (!layer_count) {
@@ -273,7 +275,8 @@
 
 	mutex_lock(&mdp3_session->lock);
 
-	mdp3_bufq_deinit(&mdp3_session->bufq_in);
+	client = mdp3_get_ion_client(mfd);
+	mdp3_bufq_deinit(&mdp3_session->bufq_in, client);
 
 	layer_list = commit->input_layers;
 	layer = &layer_list[0];
diff --git a/drivers/video/fbdev/msm/mdp3_ppp.c b/drivers/video/fbdev/msm/mdp3_ppp.c
index 9253a69..5459510 100644
--- a/drivers/video/fbdev/msm/mdp3_ppp.c
+++ b/drivers/video/fbdev/msm/mdp3_ppp.c
@@ -196,12 +196,20 @@
 
 	if (((req->src_rect.x + req->src_rect.w) > req->src.width) ||
 	    ((req->src_rect.y + req->src_rect.h) > req->src.height)) {
+		pr_err("%s: src roi (x=%d,y=%d,w=%d, h=%d) WxH(%dx%d)\n",
+			__func__, req->src_rect.x, req->src_rect.y,
+			 req->src_rect.w, req->src_rect.h, req->src.width,
+			 req->src.height);
 		pr_err("%s: src roi larger than boundary\n", __func__);
 		return -EINVAL;
 	}
 
 	if (((req->dst_rect.x + req->dst_rect.w) > req->dst.width) ||
 	    ((req->dst_rect.y + req->dst_rect.h) > req->dst.height)) {
+		pr_err("%s: dst roi (x=%d,y=%d,w=%d, h=%d) WxH(%dx%d)\n",
+			__func__, req->dst_rect.x, req->dst_rect.y,
+			req->dst_rect.w, req->dst_rect.h, req->dst.width,
+			req->dst.height);
 		pr_err("%s: dst roi larger than boundary\n", __func__);
 		return -EINVAL;
 	}
@@ -534,6 +542,7 @@
 {
 	struct mdss_panel_info *panel_info = mfd->panel_info;
 	int i, lcount = 0;
+	int frame_rate = DEFAULT_FRAME_RATE;
 	struct mdp_blit_req *req;
 	struct bpp_info bpp;
 	u64 old_solid_fill_pixel = 0;
@@ -548,6 +557,7 @@
 
 	ATRACE_BEGIN(__func__);
 	lcount = lreq->count;
+	frame_rate = mdss_panel_get_framerate(panel_info, FPS_RESOLUTION_HZ);
 	if (lcount == 0) {
 		pr_err("Blit with request count 0, continue to recover!!!\n");
 		ATRACE_END(__func__);
@@ -575,11 +585,11 @@
 		is_blit_optimization_possible(lreq, i);
 		req = &(lreq->req_list[i]);
 
-		if (req->fps > 0 && req->fps <= panel_info->mipi.frame_rate) {
+		if (req->fps > 0 && req->fps <= frame_rate) {
 			if (fps == 0)
 				fps = req->fps;
 			else
-				fps = panel_info->mipi.frame_rate;
+				fps = frame_rate;
 		}
 
 		mdp3_get_bpp_info(req->src.format, &bpp);
@@ -637,7 +647,7 @@
 	}
 
 	if (fps == 0)
-		fps = panel_info->mipi.frame_rate;
+		fps = frame_rate;
 
 	if (lreq->req_list[0].flags & MDP_SOLID_FILL) {
 		honest_ppp_ab = ppp_res.solid_fill_byte * 4;
@@ -1454,6 +1464,18 @@
 			(!(check_if_rgb(bg_req.src.format))) &&
 			(!(hw_woraround_active))) {
 			/*
+			 * Disable SMART blit for BG(YUV) layer when
+			 * Scaling on BG layer
+			 * Rotation on BG layer
+			 * UD flip on BG layer
+			 */
+			if (is_scaling_needed(bg_req) &&
+				(bg_req.flags & MDP_ROT_90) &&
+				(bg_req.flags & MDP_FLIP_UD)) {
+				pr_debug("YUV layer with ROT+UD_FLIP+Scaling Not supported\n");
+				return false;
+			}
+			/*
 			 * swap blit requests at index 0 and 1. YUV layer at
 			 * index 0 is replaced with UI layer request present
 			 * at index 1. Since UI layer will be in  background
diff --git a/drivers/video/fbdev/msm/mdp3_ppp_data.c b/drivers/video/fbdev/msm/mdp3_ppp_data.c
index ac88d9b..dd2cce0 100644
--- a/drivers/video/fbdev/msm/mdp3_ppp_data.c
+++ b/drivers/video/fbdev/msm/mdp3_ppp_data.c
@@ -89,8 +89,8 @@
 };
 
 const uint32_t swapped_pack_patt_lut[MDP_IMGTYPE_LIMIT] = {
-	[MDP_RGB_565] = PPP_GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8),
-	[MDP_BGR_565] = PPP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8),
+	[MDP_RGB_565] = PPP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8),
+	[MDP_BGR_565] = PPP_GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8),
 	[MDP_RGB_888] = PPP_GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8),
 	[MDP_BGR_888] = PPP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8),
 	[MDP_BGRA_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R,
diff --git a/drivers/video/fbdev/msm/mdss.h b/drivers/video/fbdev/msm/mdss.h
index e6d55b5..6fa7906 100644
--- a/drivers/video/fbdev/msm/mdss.h
+++ b/drivers/video/fbdev/msm/mdss.h
@@ -163,6 +163,7 @@
 	MDSS_QUIRK_NEED_SECURE_MAP,
 	MDSS_QUIRK_SRC_SPLIT_ALWAYS,
 	MDSS_QUIRK_HDR_SUPPORT_ENABLED,
+	MDSS_QUIRK_MDP_CLK_SET_RATE,
 	MDSS_QUIRK_MAX,
 };
 
@@ -289,6 +290,7 @@
 	u32 max_mdp_clk_rate;
 	struct mdss_util_intf *mdss_util;
 	struct mdss_panel_data *pdata;
+	unsigned long mdp_clk_rate;
 
 	struct platform_device *pdev;
 	struct mdss_io_data mdss_io;
@@ -414,6 +416,7 @@
 	u32 enable_gate;
 	u32 enable_bw_release;
 	u32 enable_rotator_bw_release;
+	u32 enable_cdp;
 	u32 serialize_wait4pp;
 	u32 wait4autorefresh;
 	u32 lines_before_active;
@@ -466,6 +469,7 @@
 	u32 nmax_concurrent_ad_hw;
 	struct workqueue_struct *ad_calc_wq;
 	u32 ad_debugen;
+	bool mem_retain;
 
 	struct mdss_intr hist_intr;
 
@@ -522,6 +526,7 @@
 
 	u32 splash_intf_sel;
 	u32 splash_split_disp;
+	struct mult_factor bus_throughput_factor;
 };
 
 extern struct mdss_data_type *mdss_res;
diff --git a/drivers/video/fbdev/msm/mdss_compat_utils.c b/drivers/video/fbdev/msm/mdss_compat_utils.c
index 9a9f5e4..a81f149 100644
--- a/drivers/video/fbdev/msm/mdss_compat_utils.c
+++ b/drivers/video/fbdev/msm/mdss_compat_utils.c
@@ -129,6 +129,7 @@
 		commit32->commit_v1.input_layer_cnt;
 	commit->commit_v1.left_roi = commit32->commit_v1.left_roi;
 	commit->commit_v1.right_roi = commit32->commit_v1.right_roi;
+	commit->commit_v1.bl_level = commit32->commit_v1.bl_level;
 	memcpy(&commit->commit_v1.reserved, &commit32->commit_v1.reserved,
 		count);
 }
diff --git a/drivers/video/fbdev/msm/mdss_compat_utils.h b/drivers/video/fbdev/msm/mdss_compat_utils.h
index ebae393..819106b 100644
--- a/drivers/video/fbdev/msm/mdss_compat_utils.h
+++ b/drivers/video/fbdev/msm/mdss_compat_utils.h
@@ -539,6 +539,7 @@
 	compat_caddr_t		dest_scaler;
 	uint32_t                dest_scaler_cnt;
 	compat_caddr_t		frc_info;
+	uint32_t		bl_level; /* BL level to be updated in commit */
 	uint32_t		reserved[MDP_LAYER_COMMIT_V1_PAD];
 };
 
diff --git a/drivers/video/fbdev/msm/mdss_debug.c b/drivers/video/fbdev/msm/mdss_debug.c
index f38d40c..86f3bce 100644
--- a/drivers/video/fbdev/msm/mdss_debug.c
+++ b/drivers/video/fbdev/msm/mdss_debug.c
@@ -46,6 +46,42 @@
 
 #define INVALID_XIN_ID     0xFF
 
+static u32 dsi_dbg_bus_sdm660[] = {
+	0x0001, 0x1001, 0x0001, 0x0011,
+	0x1021, 0x0021, 0x0031, 0x0041,
+	0x0051, 0x0061, 0x3061, 0x0061,
+	0x2061, 0x2061, 0x1061, 0x1061,
+	0x1061, 0x0071, 0x0071, 0x0071,
+	0x0081, 0x0081, 0x00A1, 0x00A1,
+	0x10A1, 0x20A1, 0x30A1, 0x10A1,
+	0x10A1, 0x30A1, 0x20A1, 0x00B1,
+	0x00C1, 0x00C1, 0x10C1, 0x20C1,
+	0x30C1, 0x00D1, 0x00D1, 0x20D1,
+	0x30D1, 0x00E1, 0x00E1, 0x00E1,
+	0x00F1, 0x00F1, 0x0101, 0x0101,
+	0x1101, 0x2101, 0x3101, 0x0111,
+	0x0141, 0x1141, 0x0141, 0x1141,
+	0x1141, 0x0151, 0x0151, 0x1151,
+	0x2151, 0x3151, 0x0161, 0x0161,
+	0x1161, 0x0171, 0x0171, 0x0181,
+	0x0181, 0x0191, 0x0191, 0x01A1,
+	0x01A1, 0x01B1, 0x01B1, 0x11B1,
+	0x21B1, 0x01C1, 0x01C1, 0x11C1,
+	0x21C1, 0x31C1, 0x01D1, 0x01D1,
+	0x01D1, 0x01D1, 0x11D1, 0x21D1,
+	0x21D1, 0x01E1, 0x01E1, 0x01F1,
+	0x01F1, 0x0201, 0x0201, 0x0211,
+	0x0221, 0x0231, 0x0241, 0x0251,
+	0x0281, 0x0291, 0x0281, 0x0291,
+	0x02A1, 0x02B1, 0x02C1, 0x0321,
+	0x0321, 0x1321, 0x2321, 0x3321,
+	0x0331, 0x0331, 0x1331, 0x0341,
+	0x0341, 0x1341, 0x2341, 0x3341,
+	0x0351, 0x0361, 0x0361, 0x1361,
+	0x2361, 0x0371, 0x0381, 0x0391,
+	0x03C1, 0x03D1, 0x03E1, 0x03F1,
+};
+
 static DEFINE_MUTEX(mdss_debug_lock);
 
 static char panel_reg[2] = {DEFAULT_READ_PANEL_POWER_MODE_REG, 0x00};
@@ -1073,7 +1109,7 @@
 	struct mdss_data_type *mdata = file->private_data;
 	struct mdss_max_bw_settings *temp_settings;
 	int len = 0, i;
-	char buf[256];
+	char buf[256] = {'\0'};
 
 	if (!mdata)
 		return -ENODEV;
@@ -1377,6 +1413,38 @@
 			}
 		} else {
 			if (block_id <= DISPLAY_MISR_HDMI) {
+				/*
+				 * In Dual LM single display configuration,
+				 * the interface number (i.e. block_id)
+				 * might not be the one given from ISR.
+				 * We should always check with the actual
+				 * intf_num from ctl.
+				 */
+				struct msm_fb_data_type *mfd = NULL;
+
+				/*
+				 * ISR pass in NULL ctl, so we need to get it
+				 * from the mdata.
+				 */
+				if (!ctl && mdata->mixer_intf)
+					ctl = mdata->mixer_intf->ctl;
+				if (ctl)
+					mfd = ctl->mfd;
+				if (mfd && is_dual_lm_single_display(mfd)) {
+					switch (ctl->intf_num) {
+					case MDSS_MDP_INTF1:
+						block_id = DISPLAY_MISR_DSI0;
+						break;
+					case MDSS_MDP_INTF2:
+						block_id = DISPLAY_MISR_DSI1;
+						break;
+					default:
+						pr_err("Unmatch INTF for Dual LM single display configuration, INTF:%d\n",
+								ctl->intf_num);
+						return NULL;
+					}
+				}
+
 				intf_base = (char *)mdss_mdp_get_intf_base_addr(
 						mdata, block_id);
 
@@ -1390,11 +1458,15 @@
 
 					/*
 					 * extra offset required for
-					 * cmd misr in 8996
+					 * cmd misr in 8996 and mdss3.x
 					 */
 					if (IS_MDSS_MAJOR_MINOR_SAME(
 						  mdata->mdp_rev,
-						  MDSS_MDP_HW_REV_107)) {
+						  MDSS_MDP_HW_REV_107) ||
+						(mdata->mdp_rev ==
+							MDSS_MDP_HW_REV_300) ||
+						(mdata->mdp_rev ==
+							MDSS_MDP_HW_REV_301)) {
 						ctrl_reg += 0x8;
 						value_reg += 0x8;
 					}
@@ -1786,6 +1858,24 @@
 
 }
 
+void mdss_dsi_debug_bus_init(struct mdss_dsi_data *sdata)
+{
+	if (!sdata)
+		return;
+
+	sdata->dbg_bus = NULL;
+	sdata->dbg_bus_size = 0;
+
+	switch (sdata->shared_data->hw_rev) {
+	case MDSS_DSI_HW_REV_201:
+		sdata->dbg_bus = dsi_dbg_bus_sdm660;
+		sdata->dbg_bus_size = ARRAY_SIZE(dsi_dbg_bus_sdm660);
+		break;
+	default:
+		break;
+	}
+}
+
 int mdss_dump_misr_data(char **buf, u32 size)
 {
 	struct mdss_mdp_misr_map  *dsi0_map;
diff --git a/drivers/video/fbdev/msm/mdss_debug.h b/drivers/video/fbdev/msm/mdss_debug.h
index 0d482c0..64df339 100644
--- a/drivers/video/fbdev/msm/mdss_debug.h
+++ b/drivers/video/fbdev/msm/mdss_debug.h
@@ -78,8 +78,8 @@
 #define MDSS_XLOG_IOMMU(...) mdss_xlog(__func__, __LINE__, MDSS_XLOG_IOMMU, \
 		##__VA_ARGS__, DATA_LIMITER)
 
-#define ATRACE_END(name) trace_mdss_mark_write(current->tgid, name, 0)
-#define ATRACE_BEGIN(name) trace_mdss_mark_write(current->tgid, name, 1)
+#define ATRACE_END(name) trace_tracing_mark_write(current->tgid, name, 0)
+#define ATRACE_BEGIN(name) trace_tracing_mark_write(current->tgid, name, 1)
 #define ATRACE_FUNC() ATRACE_BEGIN(__func__)
 
 #define ATRACE_INT(name, value) \
@@ -173,6 +173,7 @@
 void mdss_dump_reg(const char *dump_name, u32 reg_dump_flag, char *addr,
 	int len, u32 **dump_mem, bool from_isr);
 void mdss_mdp_debug_mid(u32 mid);
+void mdss_dump_dsi_debug_bus(u32 bus_dump_flag, u32 **dump_mem);
 #else
 struct mdss_debug_base;
 struct dump_offset;
diff --git a/drivers/video/fbdev/msm/mdss_debug_xlog.c b/drivers/video/fbdev/msm/mdss_debug_xlog.c
index a651b55..ed00fc5 100644
--- a/drivers/video/fbdev/msm/mdss_debug_xlog.c
+++ b/drivers/video/fbdev/msm/mdss_debug_xlog.c
@@ -32,6 +32,7 @@
 #define XLOG_DEFAULT_REGDUMP 0x2 /* dump in RAM */
 #define XLOG_DEFAULT_DBGBUSDUMP 0x2 /* dump in RAM */
 #define XLOG_DEFAULT_VBIF_DBGBUSDUMP 0x2 /* dump in RAM */
+#define XLOG_DEFAULT_DSI_DBGBUSDUMP 0x2 /* dump in RAM */
 
 /*
  * xlog will print this number of entries when it is called through
@@ -73,14 +74,17 @@
 	u32 enable_reg_dump;
 	u32 enable_dbgbus_dump;
 	u32 enable_vbif_dbgbus_dump;
+	u32 enable_dsi_dbgbus_dump;
 	struct work_struct xlog_dump_work;
 	struct mdss_debug_base *blk_arr[MDSS_DEBUG_BASE_MAX];
 	bool work_panic;
 	bool work_dbgbus;
 	bool work_vbif_dbgbus;
+	bool work_dsi_dbgbus;
 	u32 *dbgbus_dump; /* address for the debug bus dump */
 	u32 *vbif_dbgbus_dump; /* address for the vbif debug bus dump */
 	u32 *nrt_vbif_dbgbus_dump; /* address for the nrt vbif debug bus dump */
+	u32 *dsi_dbgbus_dump; /* address for the dsi debug bus dump */
 } mdss_dbg_xlog;
 
 static inline bool mdss_xlog_is_enabled(u32 flag)
@@ -89,6 +93,48 @@
 		(flag == MDSS_XLOG_ALL && mdss_dbg_xlog.xlog_enable);
 }
 
+static void __halt_vbif_xin(void)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	pr_err("Halting VBIF-XIN\n");
+	MDSS_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0, 0xFFFFFFFF, false);
+}
+
+static void __halt_vbif_axi(void)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	pr_err("Halting VBIF-AXI\n");
+	MDSS_VBIF_WRITE(mdata, MMSS_VBIF_AXI_HALT_CTRL0, 0xFFFFFFFF, false);
+}
+
+static void __dump_vbif_state(void)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	unsigned int reg_vbif_src_err, reg_vbif_err_info,
+		reg_vbif_xin_halt_ctrl0, reg_vbif_xin_halt_ctrl1,
+		reg_vbif_axi_halt_ctrl0, reg_vbif_axi_halt_ctrl1;
+
+	reg_vbif_src_err = MDSS_VBIF_READ(mdata,
+					MMSS_VBIF_SRC_ERR, false);
+	reg_vbif_err_info = MDSS_VBIF_READ(mdata,
+					MMSS_VBIF_ERR_INFO, false);
+	reg_vbif_xin_halt_ctrl0 = MDSS_VBIF_READ(mdata,
+					MMSS_VBIF_XIN_HALT_CTRL0, false);
+	reg_vbif_xin_halt_ctrl1 = MDSS_VBIF_READ(mdata,
+					MMSS_VBIF_XIN_HALT_CTRL1, false);
+	reg_vbif_axi_halt_ctrl0 = MDSS_VBIF_READ(mdata,
+					MMSS_VBIF_AXI_HALT_CTRL0, false);
+	reg_vbif_axi_halt_ctrl1 = MDSS_VBIF_READ(mdata,
+					MMSS_VBIF_AXI_HALT_CTRL1, false);
+	pr_err("VBIF SRC_ERR=%x, ERR_INFO=%x\n",
+				reg_vbif_src_err, reg_vbif_err_info);
+	pr_err("VBIF XIN_HALT_CTRL0=%x, XIN_HALT_CTRL1=%x, AXI_HALT_CTRL0=%x, AXI_HALT_CTRL1=%x\n"
+			, reg_vbif_xin_halt_ctrl0, reg_vbif_xin_halt_ctrl1,
+			reg_vbif_axi_halt_ctrl0, reg_vbif_axi_halt_ctrl1);
+}
+
 void mdss_xlog(const char *name, int line, int flag, ...)
 {
 	unsigned long flags;
@@ -572,7 +618,7 @@
 
 static void mdss_xlog_dump_array(struct mdss_debug_base *blk_arr[],
 	u32 len, bool dead, const char *name, bool dump_dbgbus,
-	bool dump_vbif_dbgbus)
+	bool dump_vbif_dbgbus, bool dump_dsi_dbgbus)
 {
 	int i;
 
@@ -596,8 +642,21 @@
 			&mdss_dbg_xlog.nrt_vbif_dbgbus_dump, false);
 	}
 
-	if (dead && mdss_dbg_xlog.panic_on_err)
+	if (dump_dsi_dbgbus)
+		mdss_dump_dsi_debug_bus(mdss_dbg_xlog.enable_dsi_dbgbus_dump,
+			&mdss_dbg_xlog.dsi_dbgbus_dump);
+
+	if (dead && mdss_dbg_xlog.panic_on_err) {
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+		__dump_vbif_state();
+		__halt_vbif_xin();
+		usleep_range(10000, 10010);
+		__halt_vbif_axi();
+		usleep_range(10000, 10010);
+		__dump_vbif_state();
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
 		panic(name);
+	}
 }
 
 static void xlog_debug_work(struct work_struct *work)
@@ -607,7 +666,8 @@
 		ARRAY_SIZE(mdss_dbg_xlog.blk_arr),
 		mdss_dbg_xlog.work_panic, "xlog_workitem",
 		mdss_dbg_xlog.work_dbgbus,
-		mdss_dbg_xlog.work_vbif_dbgbus);
+		mdss_dbg_xlog.work_vbif_dbgbus,
+		mdss_dbg_xlog.work_dsi_dbgbus);
 }
 
 void mdss_xlog_tout_handler_default(bool queue, const char *name, ...)
@@ -615,6 +675,7 @@
 	int i, index = 0;
 	bool dead = false;
 	bool dump_dbgbus = false, dump_vbif_dbgbus = false;
+	bool dump_dsi_dbgbus = false;
 	va_list args;
 	char *blk_name = NULL;
 	struct mdss_debug_base *blk_base = NULL;
@@ -650,6 +711,9 @@
 		if (!strcmp(blk_name, "vbif_dbg_bus"))
 			dump_vbif_dbgbus = true;
 
+		if (!strcmp(blk_name, "dsi_dbg_bus"))
+			dump_dsi_dbgbus = true;
+
 		if (!strcmp(blk_name, "panic"))
 			dead = true;
 	}
@@ -660,10 +724,11 @@
 		mdss_dbg_xlog.work_panic = dead;
 		mdss_dbg_xlog.work_dbgbus = dump_dbgbus;
 		mdss_dbg_xlog.work_vbif_dbgbus = dump_vbif_dbgbus;
+		mdss_dbg_xlog.work_dsi_dbgbus = dump_dsi_dbgbus;
 		schedule_work(&mdss_dbg_xlog.xlog_dump_work);
 	} else {
 		mdss_xlog_dump_array(blk_arr, blk_len, dead, name, dump_dbgbus,
-			dump_vbif_dbgbus);
+			dump_vbif_dbgbus, dump_dsi_dbgbus);
 	}
 }
 
@@ -752,6 +817,7 @@
 	mdss_dbg_xlog.enable_reg_dump = XLOG_DEFAULT_REGDUMP;
 	mdss_dbg_xlog.enable_dbgbus_dump = XLOG_DEFAULT_DBGBUSDUMP;
 	mdss_dbg_xlog.enable_vbif_dbgbus_dump = XLOG_DEFAULT_VBIF_DBGBUSDUMP;
+	mdss_dbg_xlog.enable_dsi_dbgbus_dump = XLOG_DEFAULT_DSI_DBGBUSDUMP;
 
 	pr_info("xlog_status: enable:%d, panic:%d, dump:%d\n",
 		mdss_dbg_xlog.xlog_enable, mdss_dbg_xlog.panic_on_err,
diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c
index d8e74da..b4e4bdd 100644
--- a/drivers/video/fbdev/msm/mdss_dsi.c
+++ b/drivers/video/fbdev/msm/mdss_dsi.c
@@ -27,6 +27,7 @@
 #include <linux/msm-bus.h>
 #include <linux/pm_qos.h>
 #include <linux/mdss_io_util.h>
+#include <linux/dma-buf.h>
 
 #include "mdss.h"
 #include "mdss_panel.h"
@@ -46,6 +47,97 @@
 
 static struct pm_qos_request mdss_dsi_pm_qos_request;
 
+void mdss_dump_dsi_debug_bus(u32 bus_dump_flag,
+	u32 **dump_mem)
+{
+	struct mdss_dsi_data *sdata = mdss_dsi_res;
+	struct mdss_dsi_ctrl_pdata *m_ctrl, *s_ctrl;
+	bool in_log, in_mem;
+	u32 *dump_addr = NULL;
+	u32 status0 = 0, status1 = 0;
+	phys_addr_t phys = 0;
+	int list_size = 0;
+	int i;
+	bool dsi0_active = false, dsi1_active = false;
+
+	if (!sdata || !sdata->dbg_bus || !sdata->dbg_bus_size)
+		return;
+
+	m_ctrl = sdata->ctrl_pdata[0];
+	s_ctrl = sdata->ctrl_pdata[1];
+
+	if (!m_ctrl)
+		return;
+
+	if (m_ctrl && m_ctrl->shared_data->dsi0_active)
+		dsi0_active = true;
+	if (s_ctrl && s_ctrl->shared_data->dsi1_active)
+		dsi1_active = true;
+
+	list_size = (sdata->dbg_bus_size * sizeof(sdata->dbg_bus[0]) * 4);
+
+	in_log = (bus_dump_flag & MDSS_DBG_DUMP_IN_LOG);
+	in_mem = (bus_dump_flag & MDSS_DBG_DUMP_IN_MEM);
+
+	if (in_mem) {
+		if (!(*dump_mem))
+			*dump_mem = dma_alloc_coherent(&sdata->pdev->dev,
+				list_size, &phys, GFP_KERNEL);
+
+		if (*dump_mem) {
+			dump_addr = *dump_mem;
+			pr_info("%s: start_addr:0x%pK end_addr:0x%pK\n",
+				__func__, dump_addr, dump_addr + list_size);
+		} else {
+			in_mem = false;
+			pr_err("dump_mem: allocation fails\n");
+		}
+	}
+
+	pr_info("========= Start DSI Debug Bus =========\n");
+
+	mdss_dsi_clk_ctrl(m_ctrl, m_ctrl->dsi_clk_handle,
+			  MDSS_DSI_CORE_CLK, MDSS_DSI_CLK_ON);
+
+	for (i = 0; i < sdata->dbg_bus_size; i++) {
+		if (dsi0_active) {
+			writel_relaxed(sdata->dbg_bus[i],
+					m_ctrl->ctrl_base + 0x124);
+					wmb(); /* ensure register is committed */
+		}
+		if (dsi1_active) {
+			writel_relaxed(sdata->dbg_bus[i],
+					s_ctrl->ctrl_base + 0x124);
+			wmb(); /* ensure register is committed */
+		}
+
+		if (dsi0_active) {
+			status0 = readl_relaxed(m_ctrl->ctrl_base + 0x128);
+			if (in_log)
+				pr_err("CTRL:0 bus_ctrl: 0x%x status: 0x%x\n",
+					sdata->dbg_bus[i], status0);
+		}
+		if (dsi1_active) {
+			status1 = readl_relaxed(s_ctrl->ctrl_base + 0x128);
+			if (in_log)
+				pr_err("CTRL:1 bus_ctrl: 0x%x status: 0x%x\n",
+					sdata->dbg_bus[i], status1);
+		}
+
+		if (dump_addr && in_mem) {
+			dump_addr[i*4]     = sdata->dbg_bus[i];
+			dump_addr[i*4 + 1] = status0;
+			dump_addr[i*4 + 2] = status1;
+			dump_addr[i*4 + 3] = 0x0;
+		}
+	}
+
+	mdss_dsi_clk_ctrl(m_ctrl, m_ctrl->dsi_clk_handle,
+			  MDSS_DSI_CORE_CLK, MDSS_DSI_CLK_OFF);
+
+	pr_info("========End DSI Debug Bus=========\n");
+}
+
 static void mdss_dsi_pm_qos_add_request(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
 {
 	struct irq_info *irq_info;
@@ -291,6 +383,14 @@
 		ret = 0;
 	}
 
+	if (gpio_is_valid(ctrl_pdata->vdd_ext_gpio)) {
+		ret = gpio_direction_output(
+			ctrl_pdata->vdd_ext_gpio, 0);
+		if (ret)
+			pr_err("%s: unable to set dir for vdd gpio\n",
+					__func__);
+	}
+
 	if (mdss_dsi_pinctrl_set_state(ctrl_pdata, false))
 		pr_debug("reset disable: pinctrl not enabled\n");
 
@@ -318,6 +418,15 @@
 	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
 				panel_data);
 
+	if (gpio_is_valid(ctrl_pdata->vdd_ext_gpio)) {
+		ret = gpio_direction_output(
+				ctrl_pdata->vdd_ext_gpio, 1);
+		usleep_range(3000, 4000); /* h/w recommended delay */
+		if (ret)
+			pr_err("%s: unable to set dir for vdd gpio\n",
+					__func__);
+	}
+
 	ret = msm_mdss_enable_vreg(
 		ctrl_pdata->panel_power_data.vreg_config,
 		ctrl_pdata->panel_power_data.num_vreg, 1);
@@ -1704,7 +1813,8 @@
 			ATRACE_BEGIN("dsi_panel_off");
 			ret = ctrl_pdata->off(pdata);
 			if (ret) {
-				pr_err("%s: Panel OFF failed\n", __func__);
+				pr_err("%s: Panel OFF failed\n",
+					__func__);
 				goto error;
 			}
 			ATRACE_END("dsi_panel_off");
@@ -2658,7 +2768,6 @@
 		break;
 	case MDSS_EVENT_PANEL_OFF:
 		power_state = (int) (unsigned long) arg;
-		disable_esd_thread();
 		ctrl_pdata->ctrl_state &= ~CTRL_STATE_MDP_ACTIVE;
 		if (ctrl_pdata->off_cmds.link_state == DSI_LP_MODE)
 			rc = mdss_dsi_blank(pdata, power_state);
@@ -3171,6 +3280,7 @@
 	struct mdss_util_intf *util;
 	static int te_irq_registered;
 	struct mdss_panel_data *pdata;
+	struct mdss_panel_cfg *pan_cfg = NULL;
 
 	if (!pdev || !pdev->dev.of_node) {
 		pr_err("%s: pdev not found for DSI controller\n", __func__);
@@ -3203,6 +3313,14 @@
 		return -ENODEV;
 	}
 
+	pan_cfg = util->panel_intf_type(MDSS_PANEL_INTF_SPI);
+	if (IS_ERR(pan_cfg)) {
+		return PTR_ERR(pan_cfg);
+	} else if (pan_cfg) {
+		pr_debug("%s: SPI is primary\n", __func__);
+		return -ENODEV;
+	}
+
 	ctrl_pdata->mdss_util = util;
 	atomic_set(&ctrl_pdata->te_irq_ready, 0);
 
@@ -3340,6 +3458,8 @@
 	else
 		ctrl_pdata->shared_data->dsi1_active = true;
 
+	mdss_dsi_debug_bus_init(mdss_dsi_res);
+
 	return 0;
 
 error_shadow_clk_deinit:
@@ -4069,6 +4189,7 @@
 		if (!gpio_is_valid(ctrl_pdata->disp_en_gpio))
 			pr_debug("%s:%d, Disp_en gpio not specified\n",
 					__func__, __LINE__);
+		pdata->panel_en_gpio = ctrl_pdata->disp_en_gpio;
 	}
 
 	ctrl_pdata->disp_te_gpio = of_get_named_gpio(ctrl_pdev->dev.of_node,
@@ -4088,6 +4209,11 @@
 			of_property_read_bool(ctrl_pdev->dev.of_node,
 				"qcom,platform-bklight-en-gpio-invert");
 
+	ctrl_pdata->vdd_ext_gpio = of_get_named_gpio(ctrl_pdev->dev.of_node,
+		"qcom,ext-vdd-gpio", 0);
+	if (!gpio_is_valid(ctrl_pdata->vdd_ext_gpio))
+		pr_info("%s: ext vdd gpio not specified\n", __func__);
+
 	ctrl_pdata->rst_gpio = of_get_named_gpio(ctrl_pdev->dev.of_node,
 			 "qcom,platform-reset-gpio", 0);
 	if (!gpio_is_valid(ctrl_pdata->rst_gpio))
diff --git a/drivers/video/fbdev/msm/mdss_dsi.h b/drivers/video/fbdev/msm/mdss_dsi.h
index fd6c4d9..5d2d677 100644
--- a/drivers/video/fbdev/msm/mdss_dsi.h
+++ b/drivers/video/fbdev/msm/mdss_dsi.h
@@ -57,7 +57,7 @@
 #define MDSS_DSI_HW_REV_104             0x10040000      /* 8996   */
 #define MDSS_DSI_HW_REV_104_1           0x10040001      /* 8996   */
 #define MDSS_DSI_HW_REV_104_2           0x10040002      /* 8937   */
-
+#define MDSS_DSI_HW_REV_201		0x20010000	/* 660 */
 #define MDSS_DSI_HW_REV_STEP_0		0x0
 #define MDSS_DSI_HW_REV_STEP_1		0x1
 #define MDSS_DSI_HW_REV_STEP_2		0x2
@@ -297,6 +297,8 @@
 	 * mutex, clocks, regulator information, setup information
 	 */
 	struct dsi_shared_data *shared_data;
+	u32 *dbg_bus;
+	int dbg_bus_size;
 };
 
 /*
@@ -407,7 +409,7 @@
 	int (*cmdlist_commit)(struct mdss_dsi_ctrl_pdata *ctrl, int from_mdp);
 	void (*switch_mode)(struct mdss_panel_data *pdata, int mode);
 	struct mdss_panel_data panel_data;
-	unsigned char *ctrl_base;
+	unsigned char __iomem *ctrl_base;
 	struct mdss_io_data ctrl_io;
 	struct mdss_io_data mmss_misc_io;
 	struct mdss_io_data phy_io;
@@ -433,6 +435,7 @@
 	int rst_gpio;
 	int disp_en_gpio;
 	int bklt_en_gpio;
+	int vdd_ext_gpio;
 	int mode_gpio;
 	int intf_mux_gpio;
 	bool bklt_en_gpio_invert;
@@ -683,6 +686,9 @@
 	u32 mask, u32 val);
 int mdss_dsi_phy_pll_reset_status(struct mdss_dsi_ctrl_pdata *ctrl);
 int mdss_dsi_panel_power_ctrl(struct mdss_panel_data *pdata, int power_state);
+void mdss_dsi_ctrl_phy_reset(struct mdss_dsi_ctrl_pdata *ctrl);
+
+void mdss_dsi_debug_bus_init(struct mdss_dsi_data *sdata);
 
 static inline const char *__mdss_dsi_pm_name(enum dsi_pm_type module)
 {
diff --git a/drivers/video/fbdev/msm/mdss_dsi_host.c b/drivers/video/fbdev/msm/mdss_dsi_host.c
index 56ed15d..751a463 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_host.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_host.c
@@ -82,6 +82,8 @@
 static struct mdss_dsi_event dsi_event;
 
 static int dsi_event_thread(void *data);
+static void dsi_send_events(struct mdss_dsi_ctrl_pdata *ctrl,
+					u32 events, u32 arg);
 
 void mdss_dsi_ctrl_init(struct device *ctrl_dev,
 			struct mdss_dsi_ctrl_pdata *ctrl)
@@ -820,8 +822,10 @@
 		 * Disable PHY contention detection and receive.
 		 * Configure the strength ctrl 1 register.
 		 */
-		MIPI_OUTP((ctrl0->phy_io.base) + 0x0188, 0);
-		MIPI_OUTP((ctrl1->phy_io.base) + 0x0188, 0);
+		if (ctrl0->shared_data->phy_rev != DSI_PHY_REV_12NM) {
+			MIPI_OUTP((ctrl0->phy_io.base) + 0x0188, 0);
+			MIPI_OUTP((ctrl1->phy_io.base) + 0x0188, 0);
+		}
 
 		data0 = MIPI_INP(ctrl0->ctrl_base + 0x0004);
 		data1 = MIPI_INP(ctrl1->ctrl_base + 0x0004);
@@ -876,6 +880,18 @@
 			udelay(u_dly);
 		}
 		if (i == loop) {
+			if ((ctrl0->shared_data->phy_rev == DSI_PHY_REV_12NM) &&
+				(event == DSI_EV_LP_RX_TIMEOUT)) {
+				struct mdss_panel_info *pinfo =
+					&ctrl0->panel_data.panel_info;
+				/* If ESD is not enabled, report panel dead */
+				if (!pinfo->esd_check_enabled &&
+					ctrl0->recovery)
+					ctrl0->recovery->fxn(
+						ctrl0->recovery->data,
+						MDP_INTF_DSI_PANEL_DEAD);
+				return;
+			}
 			MDSS_XLOG(ctrl0->ndx, ln0, 0x1f1f);
 			MDSS_XLOG(ctrl1->ndx, ln1, 0x1f1f);
 			pr_err("%s: Clock lane still in stop state\n",
@@ -896,13 +912,20 @@
 		 * Enable PHY contention detection and receive.
 		 * Configure the strength ctrl 1 register.
 		 */
-		MIPI_OUTP((ctrl0->phy_io.base) + 0x0188, 0x6);
-		MIPI_OUTP((ctrl1->phy_io.base) + 0x0188, 0x6);
+		if (ctrl0->shared_data->phy_rev != DSI_PHY_REV_12NM) {
+			MIPI_OUTP((ctrl0->phy_io.base) + 0x0188, 0x6);
+			MIPI_OUTP((ctrl1->phy_io.base) + 0x0188, 0x6);
+		}
 		/*
 		 * Add sufficient delay to make sure
 		 * pixel transmission as started
 		 */
 		udelay(200);
+		/* Un-mask LP_RX_TIMEOUT error if recovery successful */
+		if (event == DSI_EV_LP_RX_TIMEOUT) {
+			mdss_dsi_set_reg(ctrl0, 0x10c, BIT(5), 0);
+			mdss_dsi_set_reg(ctrl1, 0x10c, BIT(5), 0);
+		}
 	} else {
 		if (ctrl->recovery) {
 			rc = ctrl->recovery->fxn(ctrl->recovery->data,
@@ -914,7 +937,8 @@
 			}
 		}
 		/* Disable PHY contention detection and receive */
-		MIPI_OUTP((ctrl->phy_io.base) + 0x0188, 0);
+		if (ctrl->shared_data->phy_rev != DSI_PHY_REV_12NM)
+			MIPI_OUTP((ctrl->phy_io.base) + 0x0188, 0);
 
 		data0 = MIPI_INP(ctrl->ctrl_base + 0x0004);
 		/* Disable DSI video mode */
@@ -955,6 +979,17 @@
 			udelay(u_dly);
 		}
 		if (i == loop) {
+			if ((ctrl->shared_data->phy_rev == DSI_PHY_REV_12NM) &&
+				(event == DSI_EV_LP_RX_TIMEOUT)) {
+				struct mdss_panel_info *pinfo =
+					&ctrl->panel_data.panel_info;
+				/* If ESD is not enabled, report panel dead */
+				if (!pinfo->esd_check_enabled && ctrl->recovery)
+					ctrl->recovery->fxn(
+						ctrl->recovery->data,
+						MDP_INTF_DSI_PANEL_DEAD);
+				return;
+			}
 			MDSS_XLOG(ctrl->ndx, ln0, 0x1f1f);
 			pr_err("%s: Clock lane still in stop state\n",
 					__func__);
@@ -968,12 +1003,16 @@
 		/* Enable Video mode for DSI controller */
 		MIPI_OUTP(ctrl->ctrl_base + 0x004, data0);
 		/* Enable PHY contention detection and receiver */
-		MIPI_OUTP((ctrl->phy_io.base) + 0x0188, 0x6);
+		if (ctrl->shared_data->phy_rev != DSI_PHY_REV_12NM)
+			MIPI_OUTP((ctrl->phy_io.base) + 0x0188, 0x6);
 		/*
 		 * Add sufficient delay to make sure
 		 * pixel transmission as started
 		 */
 		udelay(200);
+		/* Un-mask LP_RX_TIMEOUT error if recovery successful */
+		if (event == DSI_EV_LP_RX_TIMEOUT)
+			mdss_dsi_set_reg(ctrl, 0x10c, BIT(5), 0);
 	}
 	pr_debug("Recovery done\n");
 }
@@ -1251,6 +1290,15 @@
 {
 	u32 data, offset;
 
+	if (!dsc) {
+		if (ctrl->panel_mode == DSI_VIDEO_MODE)
+			offset = MDSS_DSI_VIDEO_COMPRESSION_MODE_CTRL;
+		else
+			offset = MDSS_DSI_COMMAND_COMPRESSION_MODE_CTRL;
+		MIPI_OUTP((ctrl->ctrl_base) + offset, 0);
+		return;
+	}
+
 	if (dsc->pkt_per_line <= 0) {
 		pr_err("%s: Error: pkt_per_line cannot be negative or 0\n",
 			__func__);
@@ -1439,8 +1487,7 @@
 		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x5C, stream_total);
 	}
 
-	if (dsc)	/* compressed */
-		mdss_dsi_dsc_config(ctrl_pdata, dsc);
+	mdss_dsi_dsc_config(ctrl_pdata, dsc);
 }
 
 void mdss_dsi_ctrl_setup(struct mdss_dsi_ctrl_pdata *ctrl)
@@ -1497,21 +1544,53 @@
 	/* mask out overflow errors */
 	if (ignore_underflow)
 		mdss_dsi_set_reg(ctrl_pdata, 0x10c, 0x0f0000, 0x0f0000);
+
+	MDSS_XLOG(ctrl_pdata->ndx, ctrl_pdata->mdp_busy);
 	MIPI_OUTP(ctrl_pdata->ctrl_base + 0x098, 0x01); /* trigger  */
 	wmb(); /* ensure write is finished before progressing */
 
 	ret = wait_for_completion_killable_timeout(&ctrl_pdata->bta_comp,
 						DSI_BTA_EVENT_TIMEOUT);
 	if (ret <= 0) {
-		mdss_dsi_disable_irq(ctrl_pdata, DSI_BTA_TERM);
-		pr_err("%s: DSI BTA error: %i\n", __func__, ret);
+		u32 reg_val, status;
+
+		reg_val = MIPI_INP(ctrl_pdata->ctrl_base + 0x0110);
+		status = reg_val & DSI_INTR_BTA_DONE;
+		if (status) {
+			reg_val &= DSI_INTR_MASK_ALL;
+			/* clear BTA_DONE isr only */
+			reg_val |= DSI_INTR_BTA_DONE;
+			MIPI_OUTP(ctrl_pdata->ctrl_base + 0x0110, reg_val);
+			mdss_dsi_disable_irq(ctrl_pdata, DSI_BTA_TERM);
+			complete(&ctrl_pdata->bta_comp);
+			ret = 1;
+			pr_warn("%s: bta done but irq not triggered\n",
+				__func__);
+		} else {
+			pr_err("%s: DSI BTA error: %i\n", __func__, ret);
+			/*
+			 * For 12nm DSI PHY, BTA_TO interrupt may not trigger.
+			 * Treat software timer timeout as BTA_TO.
+			 */
+			if (ctrl_pdata->shared_data->phy_rev ==
+				DSI_PHY_REV_12NM) {
+				/* Mask BTA_TIMEOUT/LP_RX_TIMEOUT error */
+				mdss_dsi_set_reg(ctrl_pdata, 0x10c,
+					(BIT(5) | BIT(7)), (BIT(5) | BIT(7)));
+				dsi_send_events(ctrl_pdata,
+					DSI_EV_LP_RX_TIMEOUT, 0);
+			}
+			ret = -ETIMEDOUT;
+		}
 	}
 
 	if (ignore_underflow) {
+		u32 data = MIPI_INP((ctrl_pdata->ctrl_base) + 0x10c);
 		/* clear pending overflow status */
 		mdss_dsi_set_reg(ctrl_pdata, 0xc, 0xffffffff, 0x44440000);
-		/* restore overflow isr */
-		mdss_dsi_set_reg(ctrl_pdata, 0x10c, 0x0f0000, 0);
+		/* restore overflow isr if LP_RX_TO not masked */
+		if (!(data & BIT(5)))
+			mdss_dsi_set_reg(ctrl_pdata, 0x10c, 0x0f0000, 0);
 	}
 
 	mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
@@ -1698,8 +1777,8 @@
 					__func__,  cm->payload[0], len);
 
 			if (!wait || dchdr->wait > VSYNC_PERIOD)
-				usleep_range(dchdr->wait * 1000,
-					     dchdr->wait * 1000);
+				usleep_range((dchdr->wait * 1000),
+					     (dchdr->wait * 1000) + 10);
 
 			mdss_dsi_buf_init(tp);
 			len = 0;
@@ -2117,6 +2196,7 @@
 
 	MIPI_OUTP((ctrl->ctrl_base) + 0x090, 0x01);
 	wmb(); /* ensure write is finished before progressing */
+	MDSS_XLOG(ctrl->dma_addr, len);
 
 	if (ctrl->do_unicast) {
 		/* let cmd_trigger to kickoff later */
@@ -2153,10 +2233,12 @@
 
 	if (mctrl && mctrl->dma_addr) {
 		if (ignored) {
+			u32 data = MIPI_INP((mctrl->ctrl_base) + 0x10c);
 			/* clear pending overflow status */
 			mdss_dsi_set_reg(mctrl, 0xc, 0xffffffff, 0x44440000);
-			/* restore overflow isr */
-			mdss_dsi_set_reg(mctrl, 0x10c, 0x0f0000, 0);
+			/* restore overflow isr if LP_RX_TO not masked */
+			if (!(data & BIT(5)))
+				mdss_dsi_set_reg(mctrl, 0x10c, 0x0f0000, 0);
 		}
 		if (mctrl->dmap_iommu_map) {
 			mdss_smmu_dsi_unmap_buffer(mctrl->dma_addr, domain,
@@ -2174,10 +2256,12 @@
 	}
 
 	if (ignored) {
+		u32 data = MIPI_INP((ctrl->ctrl_base) + 0x10c);
 		/* clear pending overflow status */
 		mdss_dsi_set_reg(ctrl, 0xc, 0xffffffff, 0x44440000);
-		/* restore overflow isr */
-		mdss_dsi_set_reg(ctrl, 0x10c, 0x0f0000, 0);
+		/* restore overflow isr if LP_RX_TO/BTA_TO not masked */
+		if (!(data & BIT(5)))
+			mdss_dsi_set_reg(ctrl, 0x10c, 0x0f0000, 0);
 	}
 	ctrl->dma_addr = 0;
 	ctrl->dma_size = 0;
@@ -2193,7 +2277,7 @@
 	u32 *lp, *temp, data;
 	int i, j = 0, off, cnt;
 	bool ack_error = false;
-	char reg[16];
+	char reg[16] = {0x0};
 	int repeated_bytes = 0;
 	struct mdss_dsi_ctrl_pdata *mctrl = mdss_dsi_get_other_ctrl(ctrl);
 
@@ -3013,8 +3097,12 @@
 
 	if (status & 0x0111) {
 		MIPI_OUTP(base + 0x00c0, status);
-		if (status & 0x0110)
+		if (status & 0x0110) {
+			/* Mask BTA_TIMEOUT/LP_RX_TIMEOUT error */
+			mdss_dsi_set_reg(ctrl, 0x10c,
+				(BIT(5) | BIT(7)), (BIT(5) | BIT(7)));
 			dsi_send_events(ctrl, DSI_EV_LP_RX_TIMEOUT, 0);
+		}
 		pr_err("%s: status=%x\n", __func__, status);
 		ret = true;
 	}
@@ -3144,7 +3232,7 @@
 		pr_err("%s: panic in WQ as dsi error intrs within:%dms\n",
 				__func__, err_container->err_time_delta);
 		MDSS_XLOG_TOUT_HANDLER_WQ("mdp", "dsi0_ctrl", "dsi0_phy",
-			"dsi1_ctrl", "dsi1_phy");
+			"dsi1_ctrl", "dsi1_phy", "dsi_dbg_bus", "panic");
 	}
 }
 
@@ -3234,6 +3322,7 @@
 	}
 
 	if (isr & DSI_INTR_VIDEO_DONE) {
+		MDSS_XLOG(ctrl->ndx, isr, 0x111);
 		spin_lock(&ctrl->mdp_lock);
 		mdss_dsi_disable_irq_nosync(ctrl, DSI_VIDEO_TERM);
 		complete(&ctrl->video_comp);
diff --git a/drivers/video/fbdev/msm/mdss_dsi_panel.c b/drivers/video/fbdev/msm/mdss_dsi_panel.c
index 1688503..cde2bc3 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_panel.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_panel.c
@@ -24,6 +24,7 @@
 #include <linux/string.h>
 
 #include "mdss_dsi.h"
+#include "mdss_debug.h"
 #ifdef TARGET_HW_MDSS_HDMI
 #include "mdss_dba_utils.h"
 #endif
@@ -260,11 +261,12 @@
 	ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
 						panel_data);
 
-	pr_debug("%s: Idle (%d->%d)\n", __func__, ctrl->idle, enable);
+	pr_info("%s: Idle (%d->%d)\n", __func__, ctrl->idle, enable);
 
 	if (ctrl->idle == enable)
 		return;
 
+	MDSS_XLOG(ctrl->idle, enable);
 	if (enable) {
 		if (ctrl->idle_on_cmds.cmd_cnt) {
 			mdss_dsi_panel_cmds_send(ctrl, &ctrl->idle_on_cmds,
@@ -324,6 +326,15 @@
 			goto bklt_en_gpio_err;
 		}
 	}
+	if (gpio_is_valid(ctrl_pdata->vdd_ext_gpio)) {
+		rc = gpio_request(ctrl_pdata->vdd_ext_gpio,
+						"vdd_enable");
+		if (rc) {
+			pr_err("request vdd enable gpio failed, rc=%d\n",
+				rc);
+			goto vdd_en_gpio_err;
+		}
+	}
 	if (gpio_is_valid(ctrl_pdata->mode_gpio)) {
 		rc = gpio_request(ctrl_pdata->mode_gpio, "panel_mode");
 		if (rc) {
@@ -335,6 +346,9 @@
 	return rc;
 
 mode_gpio_err:
+	if (gpio_is_valid(ctrl_pdata->vdd_ext_gpio))
+		gpio_free(ctrl_pdata->vdd_ext_gpio);
+vdd_en_gpio_err:
 	if (gpio_is_valid(ctrl_pdata->bklt_en_gpio))
 		gpio_free(ctrl_pdata->bklt_en_gpio);
 bklt_en_gpio_err:
@@ -427,6 +441,8 @@
 						__func__);
 					goto exit;
 				}
+				gpio_set_value((ctrl_pdata->disp_en_gpio), 1);
+				usleep_range(100, 110);
 			}
 
 			if (pdata->panel_info.rst_seq_len) {
@@ -443,8 +459,8 @@
 				gpio_set_value((ctrl_pdata->rst_gpio),
 					pdata->panel_info.rst_seq[i]);
 				if (pdata->panel_info.rst_seq[++i])
-					usleep_range(pinfo->rst_seq[i] * 1000,
-						     pinfo->rst_seq[i] * 1000);
+					usleep_range((pinfo->rst_seq[i] * 1000),
+					(pinfo->rst_seq[i] * 1000) + 10);
 			}
 
 			if (gpio_is_valid(ctrl_pdata->bklt_en_gpio)) {
@@ -497,6 +513,7 @@
 		}
 		if (gpio_is_valid(ctrl_pdata->disp_en_gpio)) {
 			gpio_set_value((ctrl_pdata->disp_en_gpio), 0);
+			usleep_range(100, 110);
 			gpio_free(ctrl_pdata->disp_en_gpio);
 		}
 		gpio_set_value((ctrl_pdata->rst_gpio), 0);
@@ -1047,6 +1064,48 @@
 	return 0;
 }
 
+static void mdss_dsi_parse_mdp_kickoff_threshold(struct device_node *np,
+	struct mdss_panel_info *pinfo)
+{
+	int len, rc;
+	const u32 *src;
+	u32 tmp;
+	u32 max_delay_us;
+
+	pinfo->mdp_koff_thshold = false;
+	src = of_get_property(np, "qcom,mdss-mdp-kickoff-threshold", &len);
+	if (!src || (len == 0))
+		return;
+
+	rc = of_property_read_u32(np, "qcom,mdss-mdp-kickoff-delay", &tmp);
+	if (!rc)
+		pinfo->mdp_koff_delay = tmp;
+	else
+		return;
+
+	if (pinfo->mipi.frame_rate == 0) {
+		pr_err("cannot enable guard window, unexpected panel fps\n");
+		return;
+	}
+
+	pinfo->mdp_koff_thshold_low = be32_to_cpu(src[0]);
+	pinfo->mdp_koff_thshold_high = be32_to_cpu(src[1]);
+	max_delay_us = 1000000 / pinfo->mipi.frame_rate;
+
+	/* enable the feature if threshold is valid */
+	if ((pinfo->mdp_koff_thshold_low < pinfo->mdp_koff_thshold_high) &&
+	   ((pinfo->mdp_koff_delay > 0) &&
+	    (pinfo->mdp_koff_delay < max_delay_us)))
+		pinfo->mdp_koff_thshold = true;
+
+	pr_debug("panel kickoff thshold:[%d, %d] delay:%d (max:%d) enable:%d\n",
+		pinfo->mdp_koff_thshold_low,
+		pinfo->mdp_koff_thshold_high,
+		pinfo->mdp_koff_delay,
+		max_delay_us,
+		pinfo->mdp_koff_thshold);
+}
+
 static void mdss_dsi_parse_trigger(struct device_node *np, char *trigger,
 		char *trigger_key)
 {
@@ -2828,6 +2887,8 @@
 	rc = of_property_read_u32(np, "qcom,mdss-mdp-transfer-time-us", &tmp);
 	pinfo->mdp_transfer_time_us = (!rc ? tmp : DEFAULT_MDP_TRANSFER_TIME);
 
+	mdss_dsi_parse_mdp_kickoff_threshold(np, pinfo);
+
 	pinfo->mipi.lp11_init = of_property_read_bool(np,
 					"qcom,mdss-dsi-lp11-init");
 	rc = of_property_read_u32(np, "qcom,mdss-dsi-init-delay-us", &tmp);
diff --git a/drivers/video/fbdev/msm/mdss_dsi_phy_12nm.c b/drivers/video/fbdev/msm/mdss_dsi_phy_12nm.c
index 9bd2c4d..ec179e8 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_phy_12nm.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_phy_12nm.c
@@ -107,6 +107,7 @@
 {
 	DSI_PHY_W32(ctrl->phy_io.base, SYS_CTRL, BIT(0) | BIT(3));
 	wmb(); /* make sure DSI PHY is disabled */
+	mdss_dsi_ctrl_phy_reset(ctrl);
 	return 0;
 }
 
diff --git a/drivers/video/fbdev/msm/mdss_dsi_status.c b/drivers/video/fbdev/msm/mdss_dsi_status.c
index c5af962..326768d 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_status.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_status.c
@@ -146,15 +146,18 @@
 		return NOTIFY_DONE;
 
 	mfd = evdata->info->par;
-	ctrl_pdata = container_of(dev_get_platdata(&mfd->pdev->dev),
+	if (mfd->panel_info->type == SPI_PANEL) {
+		pinfo = mfd->panel_info;
+	} else {
+		ctrl_pdata = container_of(dev_get_platdata(&mfd->pdev->dev),
 				struct mdss_dsi_ctrl_pdata, panel_data);
-	if (!ctrl_pdata) {
-		pr_err("%s: DSI ctrl not available\n", __func__);
-		return NOTIFY_BAD;
+		if (!ctrl_pdata) {
+			pr_err("%s: DSI ctrl not available\n", __func__);
+			return NOTIFY_BAD;
+		}
+
+		pinfo = &ctrl_pdata->panel_data.panel_info;
 	}
-
-	pinfo = &ctrl_pdata->panel_data.panel_info;
-
 	if ((!(pinfo->esd_check_enabled) &&
 			dsi_status_disable) ||
 			(dsi_status_disable == DSI_STATUS_CHECK_DISABLE)) {
diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c
index 7ca94df..72adb17 100644
--- a/drivers/video/fbdev/msm/mdss_fb.c
+++ b/drivers/video/fbdev/msm/mdss_fb.c
@@ -243,9 +243,11 @@
 		}
 	} else if (notify == NOTIFY_UPDATE_STOP) {
 		mutex_lock(&mfd->update.lock);
-		if (mfd->update.init_done)
+		if (mfd->update.init_done) {
+			mutex_unlock(&mfd->update.lock);
+			mutex_lock(&mfd->no_update.lock);
 			reinit_completion(&mfd->no_update.comp);
-		else {
+		} else {
 			mutex_unlock(&mfd->update.lock);
 			pr_err("notify update stop called without init\n");
 			return -EINVAL;
@@ -306,10 +308,23 @@
 	}
 }
 
+static enum led_brightness mdss_fb_get_bl_brightness(
+	struct led_classdev *led_cdev)
+{
+	struct msm_fb_data_type *mfd = dev_get_drvdata(led_cdev->dev->parent);
+	u64 value;
+
+	MDSS_BL_TO_BRIGHT(value, mfd->bl_level, mfd->panel_info->bl_max,
+			  mfd->panel_info->brightness_max);
+
+	return value;
+}
+
 static struct led_classdev backlight_led = {
 	.name           = "lcd-backlight",
 	.brightness     = MDSS_MAX_BL_BRIGHTNESS / 2,
 	.brightness_set = mdss_fb_set_bl_brightness,
+	.brightness_get = mdss_fb_get_bl_brightness,
 	.max_brightness = MDSS_MAX_BL_BRIGHTNESS,
 };
 
@@ -345,6 +360,9 @@
 	case EDP_PANEL:
 		ret = snprintf(buf, PAGE_SIZE, "edp panel\n");
 		break;
+	case SPI_PANEL:
+		ret = snprintf(buf, PAGE_SIZE, "spi panel\n");
+		break;
 	default:
 		ret = snprintf(buf, PAGE_SIZE, "unknown panel\n");
 		break;
@@ -592,7 +610,8 @@
 			"white_chromaticity_x=%d\nwhite_chromaticity_y=%d\n"
 			"red_chromaticity_x=%d\nred_chromaticity_y=%d\n"
 			"green_chromaticity_x=%d\ngreen_chromaticity_y=%d\n"
-			"blue_chromaticity_x=%d\nblue_chromaticity_y=%d\n",
+			"blue_chromaticity_x=%d\nblue_chromaticity_y=%d\n"
+			"panel_orientation=%d\n",
 			pinfo->partial_update_enabled,
 			pinfo->roi_alignment.xstart_pix_align,
 			pinfo->roi_alignment.width_pix_align,
@@ -615,7 +634,8 @@
 			pinfo->hdr_properties.display_primaries[4],
 			pinfo->hdr_properties.display_primaries[5],
 			pinfo->hdr_properties.display_primaries[6],
-			pinfo->hdr_properties.display_primaries[7]);
+			pinfo->hdr_properties.display_primaries[7],
+			pinfo->panel_orientation);
 
 	return ret;
 }
@@ -885,6 +905,12 @@
 	return ret;
 }
 
+static ssize_t mdss_fb_idle_pc_notify(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "idle power collapsed\n");
+}
+
 static DEVICE_ATTR(msm_fb_type, 0444, mdss_fb_get_type, NULL);
 static DEVICE_ATTR(msm_fb_split, 0644, mdss_fb_show_split,
 					mdss_fb_store_split);
@@ -905,6 +931,8 @@
 	mdss_fb_get_fps_info, NULL);
 static DEVICE_ATTR(msm_fb_persist_mode, 0644,
 	mdss_fb_get_persist_mode, mdss_fb_change_persist_mode);
+static DEVICE_ATTR(idle_power_collapse, 0444, mdss_fb_idle_pc_notify, NULL);
+
 static struct attribute *mdss_fb_attrs[] = {
 	&dev_attr_msm_fb_type.attr,
 	&dev_attr_msm_fb_split.attr,
@@ -918,6 +946,7 @@
 	&dev_attr_msm_fb_dfps_mode.attr,
 	&dev_attr_measured_fps.attr,
 	&dev_attr_msm_fb_persist_mode.attr,
+	&dev_attr_idle_power_collapse.attr,
 	NULL,
 };
 
@@ -1214,6 +1243,7 @@
 	struct mdss_panel_data *pdata;
 	struct fb_info *fbi;
 	int rc;
+	const char *data;
 
 	if (fbi_list_index >= MAX_FBI_LIST)
 		return -ENOMEM;
@@ -1259,9 +1289,25 @@
 	mfd->fb_imgType = MDP_RGBA_8888;
 	mfd->calib_mode_bl = 0;
 	mfd->unset_bl_level = U32_MAX;
+	mfd->bl_extn_level = -1;
 
 	mfd->pdev = pdev;
 
+	if (mfd->panel.type == SPI_PANEL)
+		mfd->fb_imgType = MDP_RGB_565;
+	if (mfd->panel.type == MIPI_VIDEO_PANEL ||
+		mfd->panel.type == MIPI_CMD_PANEL ||
+		mfd->panel.type == SPI_PANEL) {
+		rc = of_property_read_string(pdev->dev.of_node,
+			"qcom,mdss-fb-format", &data);
+		if (!rc) {
+			if (!strcmp(data, "rgb888"))
+				mfd->fb_imgType = MDP_RGB_888;
+			else if (!strcmp(data, "rgb565"))
+				mfd->fb_imgType = MDP_RGB_565;
+			else
+				mfd->fb_imgType = MDP_RGBA_8888;
+		}
+	}
 	mfd->split_fb_left = mfd->split_fb_right = 0;
 
 	mdss_fb_set_split_mode(mfd, pdata);
@@ -1374,6 +1420,7 @@
 		mfd->mdp_sync_pt_data.threshold = 1;
 		mfd->mdp_sync_pt_data.retire_threshold = 0;
 		break;
+	case SPI_PANEL:
 	case MIPI_CMD_PANEL:
 		mfd->mdp_sync_pt_data.threshold = 1;
 		mfd->mdp_sync_pt_data.retire_threshold = 1;
@@ -1666,6 +1713,7 @@
 	u32 temp = bkl_lvl;
 	bool ad_bl_notify_needed = false;
 	bool bl_notify_needed = false;
+	bool twm_en = false;
 
 	if ((((mdss_fb_is_power_off(mfd) && mfd->dcm_state != DCM_ENTER)
 		|| !mfd->allow_bl_update) && !IS_CALIB_MODE_BL(mfd)) ||
@@ -1700,9 +1748,17 @@
 			if (mfd->bl_level != bkl_lvl)
 				bl_notify_needed = true;
 			pr_debug("backlight sent to panel :%d\n", temp);
-			pdata->set_backlight(pdata, temp);
-			mfd->bl_level = bkl_lvl;
-			mfd->bl_level_scaled = temp;
+
+			if (mfd->mdp.is_twm_en)
+				twm_en = mfd->mdp.is_twm_en();
+
+			if (twm_en) {
+				pr_info("TWM Enabled skip backlight update\n");
+			} else {
+				pdata->set_backlight(pdata, temp);
+				mfd->bl_level = bkl_lvl;
+				mfd->bl_level_scaled = temp;
+			}
 		}
 		if (ad_bl_notify_needed)
 			mdss_fb_bl_update_notify(mfd,
@@ -2077,6 +2133,7 @@
 	}
 
 	ret = mdss_fb_blank_sub(blank_mode, info, mfd->op_enable);
+	MDSS_XLOG(blank_mode);
 
 end:
 	mutex_unlock(&mfd->mdss_sysfs_lock);
@@ -2834,7 +2891,9 @@
 		 * enabling ahead of unblank. for some special cases like
 		 * adb shell stop/start.
 		 */
+		mutex_lock(&mfd->bl_lock);
 		mdss_fb_set_backlight(mfd, 0);
+		mutex_unlock(&mfd->bl_lock);
 
 		ret = mdss_fb_blank_sub(FB_BLANK_POWERDOWN, info,
 			mfd->op_enable);
@@ -3431,6 +3490,15 @@
 	mfd->msm_fb_backup.atomic_commit = true;
 	mfd->msm_fb_backup.disp_commit.l_roi =  commit_v1->left_roi;
 	mfd->msm_fb_backup.disp_commit.r_roi =  commit_v1->right_roi;
+	mfd->msm_fb_backup.disp_commit.flags =  commit_v1->flags;
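+	/* cache the commit's brightness as a backlight level; -1 means no update */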
+	if (commit_v1->flags & MDP_COMMIT_UPDATE_BRIGHTNESS) {
+		MDSS_BRIGHT_TO_BL(mfd->bl_extn_level, commit_v1->bl_level,
+			mfd->panel_info->bl_max,
+			mfd->panel_info->brightness_max);
+		if (!mfd->bl_extn_level && commit_v1->bl_level)
+			mfd->bl_extn_level = 1;
+	} else
+		mfd->bl_extn_level = -1;
 
 	mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
 	atomic_inc(&mfd->mdp_sync_pt_data.commit_cnt);
@@ -4520,6 +4588,9 @@
 		 * In case of an ESD attack, since we early return from the
 		 * commits, we need to signal the outstanding fences.
 		 */
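+		/* bump commit_cnt so the fence timeline advances on release */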
+		mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
+		atomic_inc(&mfd->mdp_sync_pt_data.commit_cnt);
+		mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex);
 		mdss_fb_release_fences(mfd);
 		if ((mfd->panel.type == MIPI_CMD_PANEL) &&
 			mfd->mdp.signal_retire_fence && mdp5_data)
@@ -4743,6 +4814,9 @@
 	if (!mfd || !mfd->panel_info)
 		return -EINVAL;
 
+	/* make sure that we are idle while switching */
+	mdss_fb_wait_for_kickoff(mfd);
+
 	pinfo = mfd->panel_info;
 	if (pinfo->mipi.dms_mode == DYNAMIC_MODE_SWITCH_SUSPEND_RESUME) {
 		ret = mdss_fb_blanking_mode_switch(mfd, mode);
@@ -5136,3 +5210,18 @@
 		mfd->fps_info.frame_count = 0;
 	}
 }
+
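+/*
+ * mdss_fb_idle_pc - notify userspace through the "idle_power_collapse"
+ * sysfs node when a command mode display enters idle power collapse.
+ */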
+void mdss_fb_idle_pc(struct msm_fb_data_type *mfd)
+{
+	struct mdss_overlay_private *mdp5_data;
+
+	if (!mfd || mdss_fb_is_power_off(mfd))
+		return;
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+
+	if ((mfd->panel_info->type == MIPI_CMD_PANEL) && mdp5_data) {
+		pr_debug("Notify fb%d idle power collapsed\n", mfd->index);
+		sysfs_notify(&mfd->fbi->dev->kobj, NULL, "idle_power_collapse");
+	}
+}
diff --git a/drivers/video/fbdev/msm/mdss_fb.h b/drivers/video/fbdev/msm/mdss_fb.h
index c85f033..3515e37 100644
--- a/drivers/video/fbdev/msm/mdss_fb.h
+++ b/drivers/video/fbdev/msm/mdss_fb.h
@@ -236,6 +236,7 @@
 	int (*pp_release_fnc)(struct msm_fb_data_type *mfd);
 	void (*signal_retire_fence)(struct msm_fb_data_type *mfd,
 					int retire_cnt);
+	bool (*is_twm_en)(void);
 	void *private1;
 };
 
@@ -244,6 +245,10 @@
 				out = (2 * (v) * (bl_max) + max_bright);\
 				do_div(out, 2 * max_bright);\
 				} while (0)
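+/* inverse of MDSS_BRIGHT_TO_BL: map a backlight level back to brightness */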
+#define MDSS_BL_TO_BRIGHT(out, v, bl_max, max_bright) do {\
+				out = ((v) * (max_bright));\
+				do_div(out, bl_max);\
+				} while (0)
 
 struct mdss_fb_file_info {
 	struct file *file;
@@ -308,6 +313,7 @@
 	u32 calib_mode_bl;
 	u32 ad_bl_level;
 	u64 bl_level;
+	u64 bl_extn_level;
 	u32 bl_scale;
 	u32 bl_min_lvl;
 	u32 unset_bl_level;
@@ -472,4 +478,5 @@
 void mdss_panelinfo_to_fb_var(struct mdss_panel_info *pinfo,
 						struct fb_var_screeninfo *var);
 void mdss_fb_calc_fps(struct msm_fb_data_type *mfd);
+void mdss_fb_idle_pc(struct msm_fb_data_type *mfd);
 #endif /* MDSS_FB_H */
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c b/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c
index 8dce151..98f4c658 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c
@@ -666,7 +666,7 @@
 
 static void hdmi_hdcp2p2_recv_msg(struct hdmi_hdcp2p2_ctrl *ctrl)
 {
-	int rc = 0, timeout_hsync;
+	int timeout_hsync = 0, rc = 0;
 	char *recvd_msg_buf = NULL;
 	struct hdmi_tx_hdcp2p2_ddc_data *ddc_data;
 	struct hdmi_tx_ddc_ctrl *ddc_ctrl;
@@ -1077,7 +1077,7 @@
 
 static bool hdmi_hdcp2p2_supported(struct hdmi_hdcp2p2_ctrl *ctrl)
 {
-	u8 hdcp2version;
+	u8 hdcp2version = 0;
 
 	int rc = hdmi_hdcp2p2_read_version(ctrl, &hdcp2version);
 
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.c b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
index 4f2bb09..42e2181 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_tx.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
@@ -70,8 +70,10 @@
 #define HDMI_TX_3_MAX_PCLK_RATE            297000
 #define HDMI_TX_4_MAX_PCLK_RATE            600000
 
-#define hdmi_tx_get_fd(x) (x ? hdmi_ctrl->feature_data[ffs(x) - 1] : 0)
-#define hdmi_tx_set_fd(x, y) {if (x) hdmi_ctrl->feature_data[ffs(x) - 1] = y; }
+#define hdmi_tx_get_fd(x) ((x && (ffs(x) > 0))  ? \
+			hdmi_ctrl->feature_data[ffs(x) - 1] : 0)
+#define hdmi_tx_set_fd(x, y) {if (x && (ffs(x) > 0)) \
+			hdmi_ctrl->feature_data[ffs(x) - 1] = y; }
 
 #define MAX_EDID_READ_RETRY	5
 
diff --git a/drivers/video/fbdev/msm/mdss_io_util.c b/drivers/video/fbdev/msm/mdss_io_util.c
index 3117793..2f70ad3 100644
--- a/drivers/video/fbdev/msm/mdss_io_util.c
+++ b/drivers/video/fbdev/msm/mdss_io_util.c
@@ -275,8 +275,8 @@
 			}
 			need_sleep = !regulator_is_enabled(in_vreg[i].vreg);
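+			/* use a small sleep window so timer wakeups can coalesce */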
 			if (in_vreg[i].pre_on_sleep && need_sleep)
-				usleep_range(in_vreg[i].pre_on_sleep * 1000,
-					in_vreg[i].pre_on_sleep * 1000);
+				usleep_range((in_vreg[i].pre_on_sleep * 1000),
+					(in_vreg[i].pre_on_sleep * 1000) + 10);
 			rc = regulator_set_load(in_vreg[i].vreg,
 				in_vreg[i].load[DSS_REG_MODE_ENABLE]);
 			if (rc < 0) {
@@ -287,8 +287,8 @@
 			}
 			rc = regulator_enable(in_vreg[i].vreg);
 			if (in_vreg[i].post_on_sleep && need_sleep)
-				usleep_range(in_vreg[i].post_on_sleep * 1000,
-					in_vreg[i].post_on_sleep * 1000);
+				usleep_range((in_vreg[i].post_on_sleep * 1000),
+					(in_vreg[i].post_on_sleep * 1000) + 10);
 			if (rc < 0) {
 				DEV_ERR("%pS->%s: %s enable failed\n",
 					__builtin_return_address(0), __func__,
@@ -299,8 +299,8 @@
 	} else {
 		for (i = num_vreg-1; i >= 0; i--) {
 			if (in_vreg[i].pre_off_sleep)
-				usleep_range(in_vreg[i].pre_off_sleep * 1000,
-					in_vreg[i].pre_off_sleep * 1000);
+				usleep_range((in_vreg[i].pre_off_sleep * 1000),
+					(in_vreg[i].pre_off_sleep * 1000) + 10);
 			regulator_set_load(in_vreg[i].vreg,
 				in_vreg[i].load[DSS_REG_MODE_DISABLE]);
 
@@ -308,8 +308,8 @@
 				regulator_disable(in_vreg[i].vreg);
 
 			if (in_vreg[i].post_off_sleep)
-				usleep_range(in_vreg[i].post_off_sleep * 1000,
-					in_vreg[i].post_off_sleep * 1000);
+				usleep_range((in_vreg[i].post_off_sleep * 1000),
+				(in_vreg[i].post_off_sleep * 1000) + 10);
 		}
 	}
 	return rc;
@@ -321,14 +321,14 @@
 vreg_set_opt_mode_fail:
 	for (i--; i >= 0; i--) {
 		if (in_vreg[i].pre_off_sleep)
-			usleep_range(in_vreg[i].pre_off_sleep * 1000,
-				in_vreg[i].pre_off_sleep * 1000);
+			usleep_range((in_vreg[i].pre_off_sleep * 1000),
+				(in_vreg[i].pre_off_sleep * 1000) + 10);
 		regulator_set_load(in_vreg[i].vreg,
 			in_vreg[i].load[DSS_REG_MODE_DISABLE]);
 		regulator_disable(in_vreg[i].vreg);
 		if (in_vreg[i].post_off_sleep)
-			usleep_range(in_vreg[i].post_off_sleep * 1000,
-				in_vreg[i].post_off_sleep * 1000);
+			usleep_range((in_vreg[i].post_off_sleep * 1000),
+				(in_vreg[i].post_off_sleep * 1000) + 10);
 	}
 
 	return rc;
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index e472e7f..24f7521 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -811,7 +811,7 @@
 
 int mdss_mdp_irq_enable(u32 intr_type, u32 intf_num)
 {
-	int irq_idx;
+	int irq_idx = 0;
 	unsigned long irq_flags;
 	int ret = 0;
 	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
@@ -830,7 +830,7 @@
 	spin_lock_irqsave(&mdp_lock, irq_flags);
 	if (mdata->mdp_irq_mask[irq.reg_idx] & irq.irq_mask) {
 		pr_warn("MDSS MDP IRQ-0x%x is already set, mask=%x\n",
-			irq.irq_mask, mdata->mdp_irq_mask[irq.reg_idx]);
+				irq.irq_mask, mdata->mdp_irq_mask[irq.reg_idx]);
 		ret = -EBUSY;
 	} else {
 		pr_debug("MDP IRQ mask old=%x new=%x\n",
@@ -1129,12 +1129,31 @@
 {
 	int ret = -ENODEV;
 	struct clk *clk = mdss_mdp_get_clk(clk_idx);
+	struct mdss_data_type *mdata = mdss_res;
 
 	if (clk) {
 		pr_debug("clk=%d en=%d\n", clk_idx, enable);
 		if (enable) {
 			if (clk_idx == MDSS_CLK_MDP_VSYNC)
 				clk_set_rate(clk, 19200000);
+			if (mdss_has_quirk(mdata, MDSS_QUIRK_MDP_CLK_SET_RATE)
+					&& (clk_idx == MDSS_CLK_MDP_CORE)) {
+
+				if (WARN_ON(!mdata->mdp_clk_rate)) {
+					/*
+					 * rate should have been set in probe
+					 * or during clk scaling; but if this
+					 * is not the case, set max clk rate.
+					 */
+					pr_warn("set max mdp clk rate:%u\n",
+						mdata->max_mdp_clk_rate);
+					mdss_mdp_set_clk_rate(
+						mdata->max_mdp_clk_rate, true);
+				} else {
+					clk_set_rate(clk, mdata->mdp_clk_rate);
+				}
+			}
+
 			ret = clk_prepare_enable(clk);
 		} else {
 			clk_disable_unprepare(clk);
@@ -1163,7 +1182,7 @@
 	return ret;
 }
 
-void mdss_mdp_set_clk_rate(unsigned long rate)
+void mdss_mdp_set_clk_rate(unsigned long rate, bool locked)
 {
 	struct mdss_data_type *mdata = mdss_res;
 	unsigned long clk_rate;
@@ -1173,7 +1192,9 @@
 	min_clk_rate = max(rate, mdata->perf_tune.min_mdp_clk);
 
 	if (clk) {
-		mutex_lock(&mdp_clk_lock);
+
+		if (!locked)
+			mutex_lock(&mdp_clk_lock);
 		if (min_clk_rate < mdata->max_mdp_clk_rate)
 			clk_rate = clk_round_rate(clk, min_clk_rate);
 		else
@@ -1181,13 +1202,15 @@
 		if (IS_ERR_VALUE(clk_rate)) {
 			pr_err("unable to round rate err=%ld\n", clk_rate);
 		} else if (clk_rate != clk_get_rate(clk)) {
-			if (IS_ERR_VALUE((unsigned long)
-					 clk_set_rate(clk, clk_rate)))
+			mdata->mdp_clk_rate = clk_rate;
+			if (IS_ERR_VALUE(
+				(unsigned long)clk_set_rate(clk, clk_rate)))
 				pr_err("clk_set_rate failed\n");
 			else
 				pr_debug("mdp clk rate=%lu\n", clk_rate);
 		}
-		mutex_unlock(&mdp_clk_lock);
+		if (!locked)
+			mutex_unlock(&mdp_clk_lock);
 	} else {
 		pr_err("mdp src clk not setup properly\n");
 	}
@@ -1285,6 +1308,68 @@
 	}
 }
 
+/*
+ * __mdss_mdp_clk_control - Overall MDSS clock control for power on/off
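+ * @mdata: pointer to the global mdss data structure
+ * @enable: true to vote for bus/iommu and enable the MDSS clocks,
+ *	    false to release them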
+ */
+static void __mdss_mdp_clk_control(struct mdss_data_type *mdata, bool enable)
+{
+	int rc = 0;
+	unsigned long flags;
+
+	if (enable) {
+		pm_runtime_get_sync(&mdata->pdev->dev);
+
+		mdss_update_reg_bus_vote(mdata->reg_bus_clt,
+			VOTE_INDEX_LOW);
+
+		rc = mdss_iommu_ctrl(1);
+		if (IS_ERR_VALUE((unsigned long)rc))
+			pr_err("IOMMU attach failed\n");
+
+		/* Active+Sleep */
+		msm_bus_scale_client_update_context(mdata->bus_hdl,
+			false, mdata->curr_bw_uc_idx);
+
+		spin_lock_irqsave(&mdp_lock, flags);
+		mdata->clk_ena = enable;
+		spin_unlock_irqrestore(&mdp_lock, flags);
+
+		mdss_mdp_clk_update(MDSS_CLK_MNOC_AHB, 1);
+		mdss_mdp_clk_update(MDSS_CLK_AHB, 1);
+		mdss_mdp_clk_update(MDSS_CLK_AXI, 1);
+		mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, 1);
+		mdss_mdp_clk_update(MDSS_CLK_MDP_LUT, 1);
+		if (mdata->vsync_ena)
+			mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, 1);
+	} else {
+		spin_lock_irqsave(&mdp_lock, flags);
+		mdata->clk_ena = enable;
+		spin_unlock_irqrestore(&mdp_lock, flags);
+
+		if (mdata->vsync_ena)
+			mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, 0);
+
+		mdss_mdp_clk_update(MDSS_CLK_MDP_LUT, 0);
+		mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, 0);
+		mdss_mdp_clk_update(MDSS_CLK_AXI, 0);
+		mdss_mdp_clk_update(MDSS_CLK_AHB, 0);
+		mdss_mdp_clk_update(MDSS_CLK_MNOC_AHB, 0);
+
+		/* release iommu control */
+		mdss_iommu_ctrl(0);
+
+		/* Active-Only */
+		msm_bus_scale_client_update_context(mdata->bus_hdl,
+			true, mdata->ao_bw_uc_idx);
+
+		mdss_update_reg_bus_vote(mdata->reg_bus_clt,
+			VOTE_INDEX_DISABLE);
+
+		pm_runtime_mark_last_busy(&mdata->pdev->dev);
+		pm_runtime_put_autosuspend(&mdata->pdev->dev);
+	}
+}
+
 int __mdss_mdp_vbif_halt(struct mdss_data_type *mdata, bool is_nrt)
 {
 	int rc = 0;
@@ -1382,10 +1467,17 @@
 		return mdata->iommu_ref_cnt;
 }
 
-static void mdss_mdp_memory_retention_enter(void)
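+/* arguments for mdss_mdp_memory_retention_ctrl() */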
+#define MEM_RETAIN_ON 1
+#define MEM_RETAIN_OFF 0
+#define PERIPH_RETAIN_ON 1
+#define PERIPH_RETAIN_OFF 0
+
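+/*
+ * mdss_mdp_memory_retention_ctrl - set the memory/peripheral retention
+ * flags on the MDP clocks via clk_set_flags().
+ */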
+static void mdss_mdp_memory_retention_ctrl(bool mem_ctrl, bool periph_ctrl)
 {
 	struct clk *mdss_mdp_clk = NULL;
 	struct clk *mdp_vote_clk = mdss_mdp_get_clk(MDSS_CLK_MDP_CORE);
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct clk *mdss_mdp_lut_clk = NULL;
 
 	if (mdp_vote_clk) {
 		mdss_mdp_clk = clk_get_parent(mdp_vote_clk);
@@ -1395,21 +1487,40 @@
 			clk_set_flags(mdss_mdp_clk, CLKFLAG_NORETAIN_PERIPH);
 		}
 	}
-}
 
-static void mdss_mdp_memory_retention_exit(void)
-{
-	struct clk *mdss_mdp_clk = NULL;
-	struct clk *mdp_vote_clk = mdss_mdp_get_clk(MDSS_CLK_MDP_CORE);
-
-	if (mdp_vote_clk) {
-		mdss_mdp_clk = clk_get_parent(mdp_vote_clk);
-		if (mdss_mdp_clk) {
+	__mdss_mdp_reg_access_clk_enable(mdata, true);
+	if (mdss_mdp_clk) {
+		if (mem_ctrl)
 			clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_MEM);
+		else
+			clk_set_flags(mdss_mdp_clk, CLKFLAG_NORETAIN_MEM);
+
+		if (periph_ctrl) {
 			clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_PERIPH);
 			clk_set_flags(mdss_mdp_clk, CLKFLAG_PERIPH_OFF_CLEAR);
+		} else {
+			clk_set_flags(mdss_mdp_clk, CLKFLAG_PERIPH_OFF_SET);
+			clk_set_flags(mdss_mdp_clk, CLKFLAG_NORETAIN_PERIPH);
 		}
 	}
+
+	if (mdss_mdp_lut_clk) {
+		if (mem_ctrl)
+			clk_set_flags(mdss_mdp_lut_clk, CLKFLAG_RETAIN_MEM);
+		else
+			clk_set_flags(mdss_mdp_lut_clk, CLKFLAG_NORETAIN_MEM);
+
+		if (periph_ctrl) {
+			clk_set_flags(mdss_mdp_lut_clk, CLKFLAG_RETAIN_PERIPH);
+			clk_set_flags(mdss_mdp_lut_clk,
+				CLKFLAG_PERIPH_OFF_CLEAR);
+		} else {
+			clk_set_flags(mdss_mdp_lut_clk, CLKFLAG_PERIPH_OFF_SET);
+			clk_set_flags(mdss_mdp_lut_clk,
+				CLKFLAG_NORETAIN_PERIPH);
+		}
+	}
+	__mdss_mdp_reg_access_clk_enable(mdata, false);
 }
 
 /**
@@ -1440,22 +1551,55 @@
 	mdss_hw_init(mdata);
 	mdss_iommu_ctrl(0);
 
-	/**
-	 * sleep 10 microseconds to make sure AD auto-reinitialization
-	 * is done
-	 */
-	udelay(10);
-	mdss_mdp_memory_retention_exit();
-
 	mdss_mdp_ctl_restore(true);
 	mdata->idle_pc = false;
 
 end:
+	if (mdata->mem_retain) {
+		/**
+		 * sleep 10 microseconds to make sure AD auto-reinitialization
+		 * is done
+		 */
+		udelay(10);
+		mdss_mdp_memory_retention_ctrl(MEM_RETAIN_ON,
+			PERIPH_RETAIN_ON);
+		mdata->mem_retain = false;
+	}
+
 	mutex_unlock(&mdp_fs_idle_pc_lock);
 	return rc;
 }
 
 /**
+ * mdss_mdp_retention_init() - initialize retention setting
+ * @mdata: pointer to the global mdss data structure.
+ */
+static int mdss_mdp_retention_init(struct mdss_data_type *mdata)
+{
+	struct clk *mdss_axi_clk = mdss_mdp_get_clk(MDSS_CLK_AXI);
+	int rc;
+
+	if (!mdss_axi_clk) {
+		pr_err("failed to get AXI clock\n");
+		return -EINVAL;
+	}
+
+	rc = clk_set_flags(mdss_axi_clk, CLKFLAG_NORETAIN_MEM);
+	if (rc) {
+		pr_err("failed to set AXI no memory retention %d\n", rc);
+		return rc;
+	}
+
+	rc = clk_set_flags(mdss_axi_clk, CLKFLAG_NORETAIN_PERIPH);
+	if (rc) {
+		pr_err("failed to set AXI no periphery retention %d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+/**
  * mdss_bus_bandwidth_ctrl() -- place bus bandwidth request
  * @enable:	value of enable or disable
  *
@@ -1514,9 +1658,7 @@
 {
 	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
 	static int mdp_clk_cnt;
-	unsigned long flags;
 	int changed = 0;
-	int rc = 0;
 
 	mutex_lock(&mdp_clk_lock);
 	if (enable) {
@@ -1540,49 +1682,8 @@
 		__builtin_return_address(0), current->group_leader->comm,
 		mdata->bus_ref_cnt, changed, enable);
 
-	if (changed) {
-		if (enable) {
-			pm_runtime_get_sync(&mdata->pdev->dev);
-
-			mdss_update_reg_bus_vote(mdata->reg_bus_clt,
-				VOTE_INDEX_LOW);
-
-			rc = mdss_iommu_ctrl(1);
-			if (IS_ERR_VALUE((unsigned long)rc))
-				pr_err("IOMMU attach failed\n");
-
-			/* Active+Sleep */
-			msm_bus_scale_client_update_context(mdata->bus_hdl,
-				false, mdata->curr_bw_uc_idx);
-		}
-
-		spin_lock_irqsave(&mdp_lock, flags);
-		mdata->clk_ena = enable;
-		spin_unlock_irqrestore(&mdp_lock, flags);
-
-		mdss_mdp_clk_update(MDSS_CLK_MNOC_AHB, enable);
-		mdss_mdp_clk_update(MDSS_CLK_AHB, enable);
-		mdss_mdp_clk_update(MDSS_CLK_AXI, enable);
-		mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, enable);
-		mdss_mdp_clk_update(MDSS_CLK_MDP_LUT, enable);
-		if (mdata->vsync_ena)
-			mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, enable);
-
-		if (!enable) {
-			/* release iommu control */
-			mdss_iommu_ctrl(0);
-
-			/* Active-Only */
-			msm_bus_scale_client_update_context(mdata->bus_hdl,
-				true, mdata->ao_bw_uc_idx);
-
-			mdss_update_reg_bus_vote(mdata->reg_bus_clt,
-				VOTE_INDEX_DISABLE);
-
-			pm_runtime_mark_last_busy(&mdata->pdev->dev);
-			pm_runtime_put_autosuspend(&mdata->pdev->dev);
-		}
-	}
+	if (changed)
+		__mdss_mdp_clk_control(mdata, enable);
 
 	if (enable && changed)
 		mdss_mdp_idle_pc_restore();
@@ -1728,7 +1829,7 @@
 	mdss_mdp_irq_clk_register(mdata, "mnoc_clk", MDSS_CLK_MNOC_AHB);
 
 	/* Setting the default clock rate to the max supported.*/
-	mdss_mdp_set_clk_rate(mdata->max_mdp_clk_rate);
+	mdss_mdp_set_clk_rate(mdata->max_mdp_clk_rate, false);
 	pr_debug("mdp clk rate=%ld\n",
 		mdss_mdp_get_clk_rate(MDSS_CLK_MDP_CORE, false));
 
@@ -1838,7 +1939,8 @@
 		mdata->pixel_ram_size = 50 * 1024;
 		set_bit(MDSS_QOS_PER_PIPE_IB, mdata->mdss_qos_map);
 		set_bit(MDSS_QOS_OVERHEAD_FACTOR, mdata->mdss_qos_map);
-		set_bit(MDSS_QOS_CDP, mdata->mdss_qos_map);
+		set_bit(MDSS_QOS_CDP, mdata->mdss_qos_map); /* cdp supported */
+		mdata->enable_cdp = true; /* enable cdp */
 		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
 		set_bit(MDSS_QOS_PER_PIPE_LUT, mdata->mdss_qos_map);
 		set_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map);
@@ -1932,7 +2034,8 @@
 		set_bit(MDSS_QOS_PER_PIPE_IB, mdata->mdss_qos_map);
 		set_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map);
 		set_bit(MDSS_QOS_OVERHEAD_FACTOR, mdata->mdss_qos_map);
-		set_bit(MDSS_QOS_CDP, mdata->mdss_qos_map);
+		set_bit(MDSS_QOS_CDP, mdata->mdss_qos_map); /* cdp supported */
+		mdata->enable_cdp = false; /* disable cdp */
 		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
 		set_bit(MDSS_QOS_PER_PIPE_LUT, mdata->mdss_qos_map);
 		set_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map);
@@ -1949,6 +2052,7 @@
 		mdss_set_quirk(mdata, MDSS_QUIRK_DSC_RIGHT_ONLY_PU);
 		mdss_set_quirk(mdata, MDSS_QUIRK_DSC_2SLICE_PU_THRPUT);
 		mdss_set_quirk(mdata, MDSS_QUIRK_SRC_SPLIT_ALWAYS);
+		mdss_set_quirk(mdata, MDSS_QUIRK_MDP_CLK_SET_RATE);
 		mdata->has_wb_ubwc = true;
 		set_bit(MDSS_CAPS_10_BIT_SUPPORTED, mdata->mdss_caps_map);
 		break;
@@ -2311,6 +2415,8 @@
 	size_t len = PAGE_SIZE;
 	int num_bytes = BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1);
 
+	if (!pipe)
+		return;
 #define SPRINT(fmt, ...) \
 		(*cnt += scnprintf(buf + *cnt, len - *cnt, fmt, ##__VA_ARGS__))
 
@@ -2695,6 +2801,12 @@
 		goto probe_done;
 	}
 
+	rc = mdss_mdp_retention_init(mdata);
+	if (rc) {
+		pr_err("unable to initialize mdss mdp retention\n");
+		goto probe_done;
+	}
+
 	pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT_MS);
 	if (mdata->idle_pc_enabled)
 		pm_runtime_use_autosuspend(&pdev->dev);
@@ -2789,11 +2901,9 @@
 		MDSS_MDP_REG_SPLIT_DISPLAY_EN);
 	mdata->splash_intf_sel = intf_sel;
 	mdata->splash_split_disp = split_display;
-
 	if (intf_sel != 0) {
 		for (i = 0; i < 4; i++)
-			if ((intf_sel >> i*8) & 0x000000FF)
-				num_of_display_on++;
+			num_of_display_on += ((intf_sel >> i*8) & 0x000000FF);
 
 		/*
 		 * For split display enabled - DSI0, DSI1 interfaces are
@@ -3883,6 +3993,7 @@
 static void mdss_mdp_parse_vbif_qos(struct platform_device *pdev)
 {
 	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+	u32 npriority_lvl_nrt;
 	int rc;
 
 	mdata->npriority_lvl = mdss_mdp_parse_dt_prop_len(pdev,
@@ -3906,8 +4017,20 @@
 		return;
 	}
 
-	mdata->npriority_lvl = mdss_mdp_parse_dt_prop_len(pdev,
+	npriority_lvl_nrt = mdss_mdp_parse_dt_prop_len(pdev,
 			"qcom,mdss-vbif-qos-nrt-setting");
+
+	if (!npriority_lvl_nrt) {
+		pr_debug("no vbif nrt priorities found rt:%d\n",
+			mdata->npriority_lvl);
+		return;
+	} else if (npriority_lvl_nrt != mdata->npriority_lvl) {
+		/* driver expects same number for both nrt and rt */
+		pr_err("invalid nrt settings nrt(%d) != rt(%d)\n",
+			npriority_lvl_nrt, mdata->npriority_lvl);
+		return;
+	}
+
 	if (mdata->npriority_lvl == MDSS_VBIF_QOS_REMAP_ENTRIES) {
 		mdata->vbif_nrt_qos = kcalloc(mdata->npriority_lvl,
 					      sizeof(u32), GFP_KERNEL);
@@ -3923,7 +4046,7 @@
 		}
 	} else {
 		mdata->npriority_lvl = 0;
-		pr_debug("Invalid or no vbif qos nrt seting\n");
+		pr_debug("Invalid or no vbif qos nrt setting\n");
 	}
 }
 
@@ -4148,6 +4271,15 @@
 	mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-clk-factor",
 		&mdata->clk_factor);
 
+	/*
+	 * Bus throughput factor will be used during high downscale cases.
+	 * The recommended default factor is 1.1.
+	 */
+	mdata->bus_throughput_factor.numer = 11;
+	mdata->bus_throughput_factor.denom = 10;
+	mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-bus-througput-factor",
+		&mdata->bus_throughput_factor);
+
 	rc = of_property_read_u32(pdev->dev.of_node,
 			"qcom,max-bandwidth-low-kbps", &mdata->max_bw_low);
 	if (rc)
@@ -4786,6 +4918,22 @@
 }
 
 /**
+ * mdss_mdp_notify_idle_pc() - Notify fb driver of idle power collapse
+ * @mdata: MDP private data
+ *
+ * This function is called if there are active overlays.
+ */
+static void mdss_mdp_notify_idle_pc(struct mdss_data_type *mdata)
+{
+	int i;
+
+	for (i = 0; i < mdata->nctl; i++)
+		if ((mdata->ctl_off[i].ref_cnt) &&
+			!mdss_mdp_ctl_is_power_off(&mdata->ctl_off[i]))
+			mdss_fb_idle_pc(mdata->ctl_off[i].mfd);
+}
+
+/**
  * mdss_mdp_footswitch_ctrl() - Disable/enable MDSS GDSC and CX/Batfet rails
  * @mdata: MDP private data
  * @on: 1 to turn on footswitch, 0 to turn off footswitch
@@ -4835,14 +4983,21 @@
 				 * Turning off GDSC while overlays are still
 				 * active.
 				 */
+
+				mdss_mdp_memory_retention_ctrl(MEM_RETAIN_ON,
+					PERIPH_RETAIN_OFF);
 				mdata->idle_pc = true;
+				mdss_mdp_notify_idle_pc(mdata);
 				pr_debug("idle pc. active overlays=%d\n",
 					active_cnt);
-				mdss_mdp_memory_retention_enter();
 			} else {
 				mdss_mdp_cx_ctrl(mdata, false);
 				mdss_mdp_batfet_ctrl(mdata, false);
+				mdss_mdp_memory_retention_ctrl(
+					MEM_RETAIN_OFF,
+					PERIPH_RETAIN_OFF);
 			}
+			mdata->mem_retain = true;
 			if (mdata->en_svs_high)
 				mdss_mdp_config_cx_voltage(mdata, false);
 			regulator_disable(mdata->fs);
diff --git a/drivers/video/fbdev/msm/mdss_mdp.h b/drivers/video/fbdev/msm/mdss_mdp.h
index 9497318..78ee489 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.h
+++ b/drivers/video/fbdev/msm/mdss_mdp.h
@@ -350,6 +350,9 @@
 
 	/* to update lineptr, [1..yres] - enable, 0 - disable */
 	int (*update_lineptr)(struct mdss_mdp_ctl *ctl, bool enable);
+
+	/* to wait for vsync */
+	int (*wait_for_vsync_fnc)(struct mdss_mdp_ctl *ctl);
 };
 
 /* FRC info used for Deterministic Frame Rate Control */
@@ -648,6 +651,7 @@
 
 	/* vsync handler for FRC */
 	struct mdss_mdp_vsync_handler frc_vsync_handler;
+	bool need_vsync_on;
 };
 
 struct mdss_mdp_mixer {
@@ -664,6 +668,8 @@
 	bool valid_roi;
 	bool roi_changed;
 	struct mdss_rect roi;
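+	/* DSC state currently programmed on this mixer */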
+	bool dsc_enabled;
+	bool dsc_merge_enabled;
 
 	u8 cursor_enabled;
 	u16 cursor_hotx;
@@ -738,6 +744,7 @@
 	struct dma_buf *srcp_dma_buf;
 	struct dma_buf_attachment *srcp_attachment;
 	struct sg_table *srcp_table;
+	struct ion_handle *ihandle;
 };
 
 enum mdss_mdp_data_state {
@@ -1026,6 +1033,8 @@
 	struct kthread_worker worker;
 	struct kthread_work vsync_work;
 	struct task_struct *thread;
+
+	bool cache_null_commit; /* Cache if preceding commit was NULL */
 };
 
 struct mdss_mdp_set_ot_params {
@@ -1620,7 +1629,7 @@
 
 void mdss_mdp_footswitch_ctrl_splash(int on);
 void mdss_mdp_batfet_ctrl(struct mdss_data_type *mdata, int enable);
-void mdss_mdp_set_clk_rate(unsigned long min_clk_rate);
+void mdss_mdp_set_clk_rate(unsigned long min_clk_rate, bool locked);
 unsigned long mdss_mdp_get_clk_rate(u32 clk_idx, bool locked);
 int mdss_mdp_vsync_clk_enable(int enable, bool locked);
 void mdss_mdp_clk_ctrl(int enable);
@@ -1730,7 +1739,7 @@
 void mdss_mdp_ctl_notifier_unregister(struct mdss_mdp_ctl *ctl,
 	struct notifier_block *notifier);
 u32 mdss_mdp_ctl_perf_get_transaction_status(struct mdss_mdp_ctl *ctl);
-u32 apply_comp_ratio_factor(u32 quota, struct mdss_mdp_format_params *fmt,
+u64 apply_comp_ratio_factor(u64 quota, struct mdss_mdp_format_params *fmt,
 	struct mult_factor *factor);
 
 int mdss_mdp_scan_pipes(void);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_cdm.c b/drivers/video/fbdev/msm/mdss_mdp_cdm.c
index ab680f5..3d1be10 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_cdm.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_cdm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -378,6 +378,17 @@
 		return -EINVAL;
 	}
 
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	mutex_lock(&cdm->lock);
+	/* Disable HDMI packer */
+	writel_relaxed(0x0, cdm->base + MDSS_MDP_REG_CDM_HDMI_PACK_OP_MODE);
+
+	/* Put CDM in bypass */
+	writel_relaxed(0x0, cdm->mdata->mdp_base + MDSS_MDP_MDP_OUT_CTL_0);
+
+	mutex_unlock(&cdm->lock);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
 	kref_put(&cdm->kref, mdss_mdp_cdm_free);
 
 	return rc;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index 344d6ef..4cfb48d 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -31,6 +31,7 @@
 #define MDSS_MDP_QSEED3_VER_DOWNSCALE_LIM 2
 #define NUM_MIXERCFG_REGS 3
 #define MDSS_MDP_WB_OUTPUT_BPP	3
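+/* downscale ratio (x10) at or above which the throughput factor applies */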
+#define MIN_BUS_THROUGHPUT_SCALE_FACTOR 35
 struct mdss_mdp_mixer_cfg {
 	u32 config_masks[NUM_MIXERCFG_REGS];
 	bool border_enabled;
@@ -571,11 +572,12 @@
 	u32 fps, u32 v_total)
 {
 	u32 active_line_cycle, backfill_cycle, total_cycle;
-	u32 ver_dwnscale;
+	u64 ver_dwnscale;
 	u32 active_line;
 	u32 backfill_line;
 
-	ver_dwnscale = (src_h << PHASE_STEP_SHIFT) / dst.h;
+	ver_dwnscale = src_h << PHASE_STEP_SHIFT;
+	do_div(ver_dwnscale, dst.h);
 
 	if (ver_dwnscale > (MDSS_MDP_QSEED3_VER_DOWNSCALE_LIM
 			<< PHASE_STEP_SHIFT)) {
@@ -599,7 +601,7 @@
 	total_cycle = active_line_cycle + backfill_cycle;
 
 	pr_debug("line: active=%d backfill=%d vds=%d\n",
-		active_line, backfill_line, ver_dwnscale);
+		active_line, backfill_line, (u32)ver_dwnscale);
 	pr_debug("cycle: total=%d active=%d backfill=%d\n",
 		total_cycle, active_line_cycle, backfill_cycle);
 
@@ -607,11 +609,21 @@
 }
 
 static inline bool __is_vert_downscaling(u32 src_h,
-	struct mdss_rect dst){
-
+	struct mdss_rect dst)
+{
 	return (src_h > dst.h);
 }
 
+static inline bool __is_bus_throughput_factor_required(u32 src_h,
+	struct mdss_rect dst)
+{
+	u64 scale_factor = src_h * 10;
+
+	do_div(scale_factor, dst.h);
+	return (__is_vert_downscaling(src_h, dst) &&
+		(scale_factor >= MIN_BUS_THROUGHPUT_SCALE_FACTOR));
+}
+
 static u32 get_pipe_mdp_clk_rate(struct mdss_mdp_pipe *pipe,
 	struct mdss_rect src, struct mdss_rect dst,
 	u32 fps, u32 v_total, u32 flags)
@@ -658,6 +670,15 @@
 		}
 	}
 
+	/*
+	 * If the downscale factor is >= 3.5 for a 32 BPP surface,
+	 * it is recommended to add a 10% bus throughput factor to
+	 * the clock rate.
+	 */
+	if ((pipe->src_fmt->bpp == 4) &&
+		__is_bus_throughput_factor_required(src_h, dst))
+		rate = apply_fudge_factor(rate, &mdata->bus_throughput_factor);
+
 	if (flags & PERF_CALC_PIPE_APPLY_CLK_FUDGE)
 		rate = mdss_mdp_clk_fudge_factor(mixer, rate);
 
@@ -815,7 +836,7 @@
 	return factor->numer && factor->denom;
 }
 
-u32 apply_comp_ratio_factor(u32 quota,
+u64 apply_comp_ratio_factor(u64 quota,
 	struct mdss_mdp_format_params *fmt,
 	struct mult_factor *factor)
 {
@@ -906,7 +927,7 @@
 
 static u32 __get_min_prefill_line_time_us(struct mdss_mdp_ctl *ctl)
 {
-	u32 vbp_min = 0;
+	u32 vbp_min = UINT_MAX;
 	int i;
 	struct mdss_data_type *mdata;
 
@@ -928,6 +949,9 @@
 		}
 	}
 
+	if (vbp_min == UINT_MAX)
+		vbp_min = 0;
+
 	return vbp_min;
 }
 
@@ -2054,12 +2078,11 @@
 	return bw_sum_of_intfs;
 }
 
-static void mdss_mdp_ctl_update_client_vote(struct mdss_data_type *mdata,
+/* apply any adjustments to the ib quota */
+static inline u64 __calc_bus_ib_quota(struct mdss_data_type *mdata,
 	struct mdss_mdp_perf_params *perf, bool nrt_client, u64 bw_vote)
 {
-	u64 bus_ab_quota, bus_ib_quota;
-
-	bus_ab_quota = max(bw_vote, mdata->perf_tune.min_bus_vote);
+	u64 bus_ib_quota;
 
 	if (test_bit(MDSS_QOS_PER_PIPE_IB, mdata->mdss_qos_map)) {
 		if (!nrt_client)
@@ -2086,6 +2109,18 @@
 		bus_ib_quota = apply_fudge_factor(bus_ib_quota,
 			&mdata->per_pipe_ib_factor);
 
+	return bus_ib_quota;
+}
+
+static void mdss_mdp_ctl_update_client_vote(struct mdss_data_type *mdata,
+	struct mdss_mdp_perf_params *perf, bool nrt_client, u64 bw_vote)
+{
+	u64 bus_ab_quota, bus_ib_quota;
+
+	bus_ab_quota = max(bw_vote, mdata->perf_tune.min_bus_vote);
+	bus_ib_quota = __calc_bus_ib_quota(mdata, perf, nrt_client, bw_vote);
+
 	bus_ab_quota = apply_fudge_factor(bus_ab_quota, &mdss_res->ab_factor);
 	ATRACE_INT("bus_quota", bus_ib_quota);
 
@@ -2251,6 +2286,48 @@
 	return false;
 }
 
+static bool __mdss_mdp_compare_bw(
+	struct mdss_mdp_ctl *ctl,
+	struct mdss_mdp_perf_params *new_perf,
+	struct mdss_mdp_perf_params *old_perf,
+	bool params_changed,
+	bool stop_req)
+{
+	struct mdss_data_type *mdata = ctl->mdata;
+	bool is_nrt = mdss_mdp_is_nrt_ctl_path(ctl);
+	u64 new_ib =
+		__calc_bus_ib_quota(mdata, new_perf, is_nrt, new_perf->bw_ctl);
+	u64 old_ib =
+		__calc_bus_ib_quota(mdata, old_perf, is_nrt, old_perf->bw_ctl);
+	u64 new_ab = new_perf->bw_ctl;
+	u64 old_ab = old_perf->bw_ctl;
+	bool update_bw = false;
+
+	/*
+	 * three cases for bus bandwidth update.
+	 * 1. new bandwidth vote (ab or ib) or writeback output vote
+	 *		are higher than current vote for update request.
+	 * 2. new bandwidth vote or writeback output vote are
+	 *		lower than current vote at end of commit or stop.
+	 * 3. end of writeback/rotator session - last chance for the
+	 *		non-realtime client to remove its vote.
+	 */
+	if ((params_changed &&
+			(((new_ib > old_ib) || (new_ab > old_ab)) ||
+			(new_perf->bw_writeback > old_perf->bw_writeback))) ||
+			(!params_changed &&
+			(((new_ib < old_ib) || (new_ab < old_ab)) ||
+			(new_perf->bw_writeback < old_perf->bw_writeback))) ||
+			(stop_req && is_nrt))
+		update_bw = true;
+
+	trace_mdp_compare_bw(new_perf->bw_ctl, new_ib, new_perf->bw_writeback,
+		old_perf->bw_ctl, old_ib, old_perf->bw_writeback,
+		params_changed, update_bw);
+
+	return update_bw;
+}
+
 static void mdss_mdp_ctl_perf_update(struct mdss_mdp_ctl *ctl,
 		int params_changed, bool stop_req)
 {
@@ -2286,20 +2363,8 @@
 		else if (is_bw_released || params_changed)
 			mdss_mdp_perf_calc_ctl(ctl, new);
 
-		/*
-		 * three cases for bus bandwidth update.
-		 * 1. new bandwidth vote or writeback output vote
-		 *    are higher than current vote for update request.
-		 * 2. new bandwidth vote or writeback output vote are
-		 *    lower than current vote at end of commit or stop.
-		 * 3. end of writeback/rotator session - last chance to
-		 *    non-realtime remove vote.
-		 */
-		if ((params_changed && ((new->bw_ctl > old->bw_ctl) ||
-			(new->bw_writeback > old->bw_writeback))) ||
-		    (!params_changed && ((new->bw_ctl < old->bw_ctl) ||
-			(new->bw_writeback < old->bw_writeback))) ||
-			(stop_req && mdss_mdp_is_nrt_ctl_path(ctl))) {
+		if (__mdss_mdp_compare_bw(ctl, new, old, params_changed,
+				stop_req)) {
 
 			pr_debug("c=%d p=%d new_bw=%llu,old_bw=%llu\n",
 				ctl->num, params_changed, new->bw_ctl,
@@ -2354,7 +2419,7 @@
 	 */
 	if (update_clk) {
 		ATRACE_INT("mdp_clk", clk_rate);
-		mdss_mdp_set_clk_rate(clk_rate);
+		mdss_mdp_set_clk_rate(clk_rate, false);
 		pr_debug("update clk rate = %d HZ\n", clk_rate);
 	}
 
@@ -2453,6 +2518,7 @@
 	u32 nmixers_wb;
 	u32 i;
 	u32 nmixers;
+	u32 nmixers_active;
 	struct mdss_mdp_mixer *mixer_pool = NULL;
 
 	if (!ctl || !ctl->mdata)
@@ -2466,10 +2532,21 @@
 	case MDSS_MDP_MIXER_TYPE_INTF:
 		mixer_pool = ctl->mdata->mixer_intf;
 		nmixers = nmixers_intf;
+		nmixers_active = nmixers;
+
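+		/* count the interface mixers that are still unclaimed */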
+		for (i = 0; i < nmixers; i++) {
+			mixer = mixer_pool + i;
+			if (mixer->ref_cnt)
+				nmixers_active--;
+		}
+		mixer = NULL;
 
 		/*
 		 * try to reserve first layer mixer for write back if
-		 * assertive display needs to be supported through wfd
+		 * assertive display needs to be supported through wfd.
+		 * For external displays (pluggable) and writeback, avoid
+		 * allocating mixers LM0 and LM1, which are allocated
+		 * to the primary display first.
 		 */
 		if (ctl->mdata->has_wb_ad && ctl->intf_num &&
 			((ctl->panel_data->panel_info.type != MIPI_CMD_PANEL) ||
@@ -2481,6 +2558,10 @@
 			&& (ctl->mdata->ndspp < nmixers)) {
 			mixer_pool += ctl->mdata->ndspp;
 			nmixers -= ctl->mdata->ndspp;
+		} else if ((ctl->panel_data->panel_info.is_pluggable) &&
+				nmixers_active) {
+			mixer_pool += ctl->mdata->ndspp;
+			nmixers -= ctl->mdata->ndspp;
 		}
 		break;
 
@@ -2694,10 +2775,7 @@
 	if (!clk_period)
 		return -EINVAL;
 
-	time_of_line = (pinfo->lcdc.h_back_porch +
-		 pinfo->lcdc.h_front_porch +
-		 pinfo->lcdc.h_pulse_width +
-		 pinfo->xres) * clk_period;
+	time_of_line = mdss_panel_get_htotal(pinfo, true) * clk_period;
 
 	time_of_line /= 1000;	/* in nano second */
 	if (!time_of_line)
@@ -2705,10 +2783,7 @@
 
 	current_line = ctl->ops.read_line_cnt_fnc(ctl);
 
-	total_line = pinfo->lcdc.v_back_porch +
-		pinfo->lcdc.v_front_porch +
-		pinfo->lcdc.v_pulse_width +
-		pinfo->yres;
+	total_line = mdss_panel_get_vtotal(pinfo);
 
 	if (current_line >= total_line)
 		time_to_vsync = time_of_line * total_line;
@@ -2834,6 +2909,7 @@
 {
 	mdss_mdp_pingpong_write(mixer->pingpong_base,
 			MDSS_MDP_REG_PP_DSC_MODE, 1);
+	mixer->dsc_enabled = true;
 }
 
 static inline void __dsc_disable(struct mdss_mdp_mixer *mixer)
@@ -2853,6 +2929,13 @@
 		return;
 	}
 	writel_relaxed(0, offset + MDSS_MDP_REG_DSC_COMMON_MODE);
+	mixer->dsc_enabled = false;
+	mixer->dsc_merge_enabled = false;
+}
+
+static bool __is_dsc_merge_enabled(u32 common_mode)
+{
+	return common_mode & BIT(1);
 }
 
 static void __dsc_config(struct mdss_mdp_mixer *mixer,
@@ -2865,6 +2948,7 @@
 	u32 initial_lines = dsc->initial_lines;
 	bool is_cmd_mode = !(mode & BIT(2));
 
+	mixer->dsc_merge_enabled = __is_dsc_merge_enabled(mode);
 	data = mdss_mdp_pingpong_read(mixer->pingpong_base,
 			MDSS_MDP_REG_PP_DCE_DATA_OUT_SWAP);
 	data |= BIT(18); /* endian flip */
@@ -3019,11 +3103,6 @@
 	}
 }
 
-static bool __is_dsc_merge_enabled(u32 common_mode)
-{
-	return common_mode & BIT(1);
-}
-
 static bool __dsc_is_3d_mux_enabled(struct mdss_mdp_ctl *ctl,
 	struct mdss_panel_info *pinfo)
 {
@@ -3365,8 +3444,19 @@
 	struct mdss_mdp_ctl *sctl;
 	struct mdss_panel_info *spinfo;
 
-	if (!is_dsc_compression(pinfo))
+	/*
+	 * Check for dynamic resolution switch from DSC On to DSC Off
+	 * and disable DSC
+	 */
+	if ((ctl->pending_mode_switch == SWITCH_RESOLUTION) &&
+	    ctl->is_master &&
+	    (!is_dsc_compression(pinfo))) {
+		if (ctl->mixer_left && ctl->mixer_left->dsc_enabled)
+			__dsc_disable(ctl->mixer_left);
+		if (ctl->mixer_right && ctl->mixer_right->dsc_enabled)
+			__dsc_disable(ctl->mixer_right);
 		return;
+	}
 
 	if (!ctl->is_master) {
 		pr_debug("skip slave ctl because master will program for both\n");
@@ -3652,6 +3742,30 @@
 			ctl->mixer_right->width = ctl->width / 2;
 			ctl->mixer_right->height = ctl->height;
 		}
+
+		/*
+		 * If we are transitioning from DSC On + DSC Merge to DSC Off,
+		 * the 3D mux needs to be enabled.
+		 */
+		if (!is_dsc_compression(&pdata->panel_info) &&
+		    ctl->mixer_left &&
+		    ctl->mixer_left->dsc_enabled &&
+		    ctl->mixer_left->dsc_merge_enabled) {
+			ctl->opmode |= MDSS_MDP_CTL_OP_PACK_3D_ENABLE |
+				       MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT;
+		}
+
+		/*
+		 * If we are transitioning from DSC Off to DSC On + DSC Merge,
+		 * the 3D mux needs to be disabled.
+		 */
+		if (is_dsc_compression(&pdata->panel_info) &&
+		    ctl->mixer_left &&
+		    !ctl->mixer_left->dsc_enabled &&
+		    pdata->panel_info.dsc_enc_total != 1) {
+			ctl->opmode &= ~(MDSS_MDP_CTL_OP_PACK_3D_ENABLE |
+				  MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT);
+		}
 	} else {
 		/*
 		 * Handles MDP_SPLIT_MODE_NONE, MDP_DUAL_LM_DUAL_DISPLAY and
@@ -3666,7 +3780,6 @@
 
 	ctl->border_x_off = pdata->panel_info.lcdc.border_left;
 	ctl->border_y_off = pdata->panel_info.lcdc.border_top;
-
 	return ret;
 }
 
@@ -4340,11 +4453,13 @@
 		return;
 
 	pr_debug("hw ctl reset is set for ctl:%d\n", ctl->num);
-	status = mdss_mdp_poll_ctl_reset_status(ctl, 5);
+	/* poll for at least ~1 frame */
+	status = mdss_mdp_poll_ctl_reset_status(ctl, 320);
 	if (status) {
-		pr_err("hw recovery is not complete for ctl:%d\n", ctl->num);
+		pr_err("hw recovery is not complete for ctl:%d status:0x%x\n",
+			ctl->num, status);
 		MDSS_XLOG_TOUT_HANDLER("mdp", "vbif", "vbif_nrt", "dbg_bus",
-			"vbif_dbg_bus", "panic");
+			"vbif_dbg_bus", "dsi_dbg_bus", "panic");
 	}
 }
 
@@ -5574,10 +5689,10 @@
 				sctl->flush_bits = 0;
 				sctl_flush_bits = 0;
 			} else {
-				sctl_flush_bits = sctl->flush_bits;
+				sctl_flush_bits |= sctl->flush_bits;
 			}
 		}
-		ctl_flush_bits = ctl->flush_bits;
+		ctl_flush_bits |= ctl->flush_bits;
 		mutex_unlock(&ctl->flush_lock);
 	}
 	/*
@@ -5667,6 +5782,10 @@
 		mdss_mdp_bwcpanic_ctrl(mdata, true);
 
 	ATRACE_BEGIN("flush_kickoff");
+
+	MDSS_XLOG(ctl->intf_num, ctl_flush_bits, sctl_flush_bits,
+		mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_FLUSH), split_lm_valid);
+
 	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, ctl_flush_bits);
 	if (sctl && sctl_flush_bits) {
 		mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_FLUSH,
@@ -5700,6 +5819,18 @@
 	if (ret)
 		pr_warn("ctl %d error displaying frame\n", ctl->num);
 
+	/* update backlight in commit */
+	if (ctl->intf_type == MDSS_INTF_DSI && !ctl->is_video_mode &&
+	    ctl->mfd && ctl->mfd->bl_extn_level > 0) {
+		if (!IS_CALIB_MODE_BL(ctl->mfd) && (!ctl->mfd->ext_bl_ctrl ||
+						!ctl->mfd->bl_level)) {
+			mutex_lock(&ctl->mfd->bl_lock);
+			mdss_fb_set_backlight(ctl->mfd,
+					      ctl->mfd->bl_extn_level);
+			mutex_unlock(&ctl->mfd->bl_lock);
+		}
+	}
+
 	ctl->play_cnt++;
 	ATRACE_END("flush_kickoff");
 
diff --git a/drivers/video/fbdev/msm/mdss_mdp_debug.c b/drivers/video/fbdev/msm/mdss_mdp_debug.c
index 6024ea1..bd4ee23 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_debug.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_debug.c
@@ -955,6 +955,11 @@
 	mdata->dbg_bus_size = 0;
 
 	switch (mdata->mdp_rev) {
+	case MDSS_MDP_HW_REV_114:
+	case MDSS_MDP_HW_REV_116:
+		mdata->dbg_bus = dbg_bus_8996;
+		mdata->dbg_bus_size = ARRAY_SIZE(dbg_bus_8996);
+		break;
 	case MDSS_MDP_HW_REV_107:
 	case MDSS_MDP_HW_REV_107_1:
 	case MDSS_MDP_HW_REV_107_2:
@@ -1061,12 +1066,14 @@
 	seq_puts(s, "\n");
 }
 
-static void __dump_pipe(struct seq_file *s, struct mdss_mdp_pipe *pipe)
+static void __dump_pipe(struct seq_file *s, struct mdss_mdp_pipe *pipe,
+		struct msm_fb_data_type *mfd)
 {
 	struct mdss_mdp_data *buf;
 	int format;
 	int smps[4];
 	int i;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
 
 	seq_printf(s, "\nSSPP #%d type=%s ndx=%x flags=0x%08x play_cnt=%u xin_id=%d\n",
 			pipe->num, mdss_mdp_pipetype2str(pipe->type),
@@ -1121,11 +1128,14 @@
 
 	seq_puts(s, "Data:\n");
 
+	mutex_lock(&mdp5_data->list_lock);
 	list_for_each_entry(buf, &pipe->buf_queue, pipe_list)
 		__print_buf(s, buf, false);
+	mutex_unlock(&mdp5_data->list_lock);
 }
 
-static void __dump_mixer(struct seq_file *s, struct mdss_mdp_mixer *mixer)
+static void __dump_mixer(struct seq_file *s, struct mdss_mdp_mixer *mixer,
+		struct msm_fb_data_type *mfd)
 {
 	struct mdss_mdp_pipe *pipe;
 	int i, cnt = 0;
@@ -1142,7 +1152,7 @@
 	for (i = 0; i < ARRAY_SIZE(mixer->stage_pipe); i++) {
 		pipe = mixer->stage_pipe[i];
 		if (pipe) {
-			__dump_pipe(s, pipe);
+			__dump_pipe(s, pipe, mfd);
 			cnt++;
 		}
 	}
@@ -1219,8 +1229,8 @@
 	seq_printf(s, "Play Count=%u  Underrun Count=%u\n",
 			ctl->play_cnt, ctl->underrun_cnt);
 
-	__dump_mixer(s, ctl->mixer_left);
-	__dump_mixer(s, ctl->mixer_right);
+	__dump_mixer(s, ctl->mixer_left, ctl->mfd);
+	__dump_mixer(s, ctl->mixer_right, ctl->mfd);
 }
 
 static int __dump_mdp(struct seq_file *s, struct mdss_data_type *mdata)
@@ -1414,7 +1424,7 @@
 		seq_printf(s, "vsync: %08u \tunderrun: %08u\n",
 				ctl->vsync_cnt, ctl->underrun_cnt);
 		if (ctl->mfd) {
-			seq_printf(s, "user_bl: %08llu \tmod_bl: %08u\n",
+			seq_printf(s, "user_bl: %llu \tmod_bl: %08u\n",
 				ctl->mfd->bl_level, ctl->mfd->bl_level_scaled);
 		}
 	} else {
diff --git a/drivers/video/fbdev/msm/mdss_mdp_hwio.h b/drivers/video/fbdev/msm/mdss_mdp_hwio.h
index 54ceb19..5bff904 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_hwio.h
+++ b/drivers/video/fbdev/msm/mdss_mdp_hwio.h
@@ -820,6 +820,8 @@
 #define MMSS_VBIF_WR_LIM_CONF			0x0C0
 #define MMSS_VBIF_OUT_RD_LIM_CONF0		0x0D0
 
+#define MMSS_VBIF_SRC_ERR		0x194
+#define MMSS_VBIF_ERR_INFO		0x1A0
 #define MMSS_VBIF_XIN_HALT_CTRL0	0x200
 #define MMSS_VBIF_XIN_HALT_CTRL1	0x204
 #define MMSS_VBIF_AXI_HALT_CTRL0	0x208
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
index 764b830..947a3fe 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
@@ -52,8 +52,10 @@
 	u32 current_pp_num;
 	/*
 	 * aux_pp_num will be set only when topology is using split-lm.
-	 * aux_pp_num will be used only when MDSS_QUIRK_DSC_RIGHT_ONLY_PU
-	 * quirk is set and on following partial updates.
+	 * aux_pp_num will be used for a right-only update on
+	 * DUAL_LM_SINGLE_DISPLAY with 3D Mux, or when the
+	 * MDSS_QUIRK_DSC_RIGHT_ONLY_PU quirk is set, on the
+	 * following partial updates:
 	 *
 	 * right-only update on DUAL_LM_SINGLE_DISPLAY with DSC_MERGE
 	 * right-only update on DUAL_LM_DUAL_DISPLAY with DSC
@@ -76,6 +78,7 @@
 	struct mutex clk_mtx;
 	spinlock_t clk_lock;
 	spinlock_t koff_lock;
+	spinlock_t ctlstart_lock;
 	struct work_struct gate_clk_work;
 	struct delayed_work delayed_off_clk_work;
 	struct work_struct pp_done_work;
@@ -119,14 +122,32 @@
 static bool __mdss_mdp_cmd_is_aux_pp_needed(struct mdss_data_type *mdata,
 	struct mdss_mdp_ctl *mctl)
 {
-	return (mdata && mctl && mctl->is_master &&
-		mdss_has_quirk(mdata, MDSS_QUIRK_DSC_RIGHT_ONLY_PU) &&
-		is_dsc_compression(&mctl->panel_data->panel_info) &&
-		((mctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) ||
-		 ((mctl->mfd->split_mode == MDP_DUAL_LM_SINGLE_DISPLAY) &&
-		  (mctl->panel_data->panel_info.dsc_enc_total == 1))) &&
-		!mctl->mixer_left->valid_roi &&
-		mctl->mixer_right->valid_roi);
+	bool mux3d, merge, quirk, rightonly;
+
+	if (!mdata || !mctl || !mctl->is_master)
+		return false;
+
+	/*
+	 * aux_pp_num will be used for a right-only update on
+	 * DUAL_LM_SINGLE_DISPLAY with 3D Mux, or when the
+	 * MDSS_QUIRK_DSC_RIGHT_ONLY_PU quirk is set, on the
+	 * following partial updates:
+	 *
+	 * right-only update on DUAL_LM_SINGLE_DISPLAY with DSC_MERGE
+	 * right-only update on DUAL_LM_DUAL_DISPLAY with DSC
+	 */
+	mux3d = ((mctl->mfd->split_mode == MDP_DUAL_LM_SINGLE_DISPLAY) &&
+			(mctl->opmode & MDSS_MDP_CTL_OP_PACK_3D_ENABLE));
+	merge = ((mctl->mfd->split_mode == MDP_DUAL_LM_SINGLE_DISPLAY) &&
+			(mctl->panel_data->panel_info.dsc_enc_total == 1));
+	quirk = (mdss_has_quirk(mdata, MDSS_QUIRK_DSC_RIGHT_ONLY_PU) &&
+			is_dsc_compression(&mctl->panel_data->panel_info) &&
+			((mctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) ||
+			merge));
+	rightonly = !mctl->mixer_left->valid_roi &&
+			mctl->mixer_right->valid_roi;
+
+	return ((mux3d || quirk) && rightonly);
 }
 
 static bool __mdss_mdp_cmd_is_panel_power_off(struct mdss_mdp_cmd_ctx *ctx)
@@ -147,15 +168,11 @@
 	u32 init;
 	u32 height;
 
-	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
-
 	mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
 	if (!mixer) {
 		mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_RIGHT);
-		if (!mixer) {
-			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+		if (!mixer)
 			goto exit;
-		}
 	}
 
 	init = mdss_mdp_pingpong_read(mixer->pingpong_base,
@@ -163,10 +180,8 @@
 	height = mdss_mdp_pingpong_read(mixer->pingpong_base,
 		MDSS_MDP_REG_PP_SYNC_CONFIG_HEIGHT) & 0xffff;
 
-	if (height < init) {
-		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	if (height < init)
 		goto exit;
-	}
 
 	cnt = mdss_mdp_pingpong_read(mixer->pingpong_base,
 		MDSS_MDP_REG_PP_INT_COUNT_VAL) & 0xffff;
@@ -176,13 +191,21 @@
 	else
 		cnt -= init;
 
-	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
-
 	pr_debug("cnt=%d init=%d height=%d\n", cnt, init, height);
 exit:
 	return cnt;
 }
 
+static inline u32 mdss_mdp_cmd_line_count_wrapper(struct mdss_mdp_ctl *ctl)
+{
+	u32 ret;
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	ret = mdss_mdp_cmd_line_count(ctl);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	return ret;
+}
+
 static int mdss_mdp_tearcheck_enable(struct mdss_mdp_ctl *ctl, bool enable)
 {
 	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
@@ -335,6 +358,138 @@
 	return 0;
 }
 
+/**
+ * mdss_mdp_cmd_autorefresh_pp_done() - pp done irq callback for autorefresh
+ * @arg: void pointer to the controller context.
+ *
+ * This function is the pp_done interrupt callback while disabling
+ * autorefresh. This function does not modify the kickoff count (koff_cnt).
+ */
+static void mdss_mdp_cmd_autorefresh_pp_done(void *arg)
+{
+	struct mdss_mdp_ctl *ctl = arg;
+	struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+
+	if (!ctx) {
+		pr_err("%s: invalid ctx\n", __func__);
+		return;
+	}
+
+	mdss_mdp_irq_disable_nosync(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+		ctx->current_pp_num);
+	mdss_mdp_set_intr_callback_nosync(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+		ctx->current_pp_num, NULL, NULL);
+
+	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), ctx->current_pp_num);
+	complete_all(&ctx->autorefresh_ppdone);
+
+	pr_debug("%s: ctl_num=%d intf_num=%d ctx=%d cnt=%d\n", __func__,
+		ctl->num, ctl->intf_num, ctx->current_pp_num,
+		atomic_read(&ctx->koff_cnt));
+}
+
+static void mdss_mdp_cmd_wait4_autorefresh_pp(struct mdss_mdp_ctl *ctl)
+{
+	int rc;
+	u32 val, line_out, intr_type = MDSS_MDP_IRQ_TYPE_PING_PONG_COMP;
+	char __iomem *pp_base = ctl->mixer_left->pingpong_base;
+	struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+
+	line_out = mdss_mdp_pingpong_read(pp_base, MDSS_MDP_REG_PP_LINE_COUNT);
+
+	MDSS_XLOG(ctl->num, line_out, ctl->mixer_left->roi.h);
+	pr_debug("ctl:%d line_out:%d\n", ctl->num, line_out);
+
+	if (!ctx) {
+		pr_err("%s: invalid ctx\n", __func__);
+		return;
+	}
+
+	if (line_out < ctl->mixer_left->roi.h) {
+		reinit_completion(&ctx->autorefresh_ppdone);
+
+		/* enable ping pong done */
+		mdss_mdp_set_intr_callback(intr_type, ctx->current_pp_num,
+					mdss_mdp_cmd_autorefresh_pp_done, ctl);
+		mdss_mdp_irq_enable(intr_type, ctx->current_pp_num);
+
+		/* wait for ping pong done */
+		rc = wait_for_completion_timeout(&ctx->autorefresh_ppdone,
+				KOFF_TIMEOUT);
+		if (rc <= 0) {
+			val = mdss_mdp_pingpong_read(pp_base,
+				MDSS_MDP_REG_PP_LINE_COUNT);
+			if (val == ctl->mixer_left->roi.h) {
+				mdss_mdp_irq_clear(ctl->mdata,
+					MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+					ctx->current_pp_num);
+				mdss_mdp_irq_disable_nosync(intr_type,
+					ctx->current_pp_num);
+				mdss_mdp_set_intr_callback(intr_type,
+					ctx->current_pp_num, NULL, NULL);
+			} else {
+				pr_err("timedout waiting for ctl%d autorefresh pp done\n",
+					ctl->num);
+				MDSS_XLOG(0xbad3);
+				MDSS_XLOG_TOUT_HANDLER("mdp",
+					"vbif", "dbg_bus", "vbif_dbg_bus",
+					"panic");
+			}
+		}
+	}
+}
+
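+/*
+ * BIT(20) of PP_SYNC_CONFIG_VSYNC controls whether MDP listens to the
+ * external panel TE to drive the read pointer.
+ */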
+static bool __disable_rd_ptr_from_te(char __iomem *pingpong_base)
+{
+	u32 cfg;
+	bool disabled;
+
+	cfg = mdss_mdp_pingpong_read(pingpong_base,
+		MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC);
+
+	disabled = BIT(20) & cfg;
+	cfg &= ~BIT(20);
+	mdss_mdp_pingpong_write(pingpong_base,
+		MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC, cfg);
+
+	return disabled;
+}
+
+static inline void __enable_rd_ptr_from_te(char __iomem *pingpong_base)
+{
+	u32 cfg;
+
+	cfg = mdss_mdp_pingpong_read(pingpong_base,
+		MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC);
+	cfg |= BIT(20);
+	mdss_mdp_pingpong_write(pingpong_base,
+		MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC, cfg);
+}
+
+/*
+ * __disable_autorefresh - disables autorefresh feature in the hw.
+ *
+ * To disable autorefresh, the driver needs to make sure no transactions
+ * are on-going; to ensure this, the driver must:
+ *
+ * 1. Disable listening to the external TE (this gives extra time before
+ *    triggering the next transaction).
+ * 2. Wait for any on-going transaction (wait for ping pong done interrupt).
+ * 3. Disable auto-refresh.
+ * 4. Re-enable listening to the external panel TE.
+ *
+ * So it is the responsibility of the caller of this function to only
+ * disable autorefresh when no hw transaction is on-going (wait for ping
+ * pong) and when listening for the external TE is disabled in the tear
+ * check logic (this is to prevent any race conditions with the hw), as
+ * mentioned in the above steps.
+ */
+static inline void __disable_autorefresh(char __iomem *pingpong_base)
+{
+	mdss_mdp_pingpong_write(pingpong_base,
+		MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG, 0x0);
+}
+
 static int mdss_mdp_cmd_tearcheck_setup(struct mdss_mdp_cmd_ctx *ctx,
 		bool locked)
 {
@@ -342,7 +497,7 @@
 	struct mdss_mdp_mixer *mixer = NULL, *mixer_right = NULL;
 	struct mdss_mdp_ctl *ctl = ctx->ctl;
 	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
-	u32 offset = 0;
+	bool rd_ptr_disabled = false;
 
 	mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
 	if (mixer) {
@@ -352,21 +507,35 @@
 		 */
 		if (mdss_mdp_pingpong_read(mixer->pingpong_base,
 			MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG) & BIT(31)) {
-			offset = MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG;
+
+			/* 1. disable rd pointer from the external te  */
+			rd_ptr_disabled =
+				__disable_rd_ptr_from_te(mixer->pingpong_base);
+
+			/* 2. wait for previous transfer to finish */
+			mdss_mdp_cmd_wait4_autorefresh_pp(ctl);
+
+			/* 3. disable autorefresh  */
 			if (is_pingpong_split(ctl->mfd))
-				writel_relaxed(0x0,
-					(mdata->slave_pingpong_base + offset));
+				__disable_autorefresh(
+					mdata->slave_pingpong_base);
+
 			if (is_split_lm(ctl->mfd)) {
-				mixer_right =
-					mdss_mdp_mixer_get(ctl,
-						MDSS_MDP_MIXER_MUX_RIGHT);
+				mixer_right = mdss_mdp_mixer_get(ctl,
+					MDSS_MDP_MIXER_MUX_RIGHT);
+
 				if (mixer_right)
-					writel_relaxed(0x0,
-					(mixer_right->pingpong_base + offset));
+					__disable_autorefresh(
+						mixer_right->pingpong_base);
 			}
-			mdss_mdp_pingpong_write(mixer->pingpong_base,
-				MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG, 0x0);
+
+			__disable_autorefresh(mixer->pingpong_base);
 			pr_debug("%s: disabling auto refresh\n", __func__);
+
+			/* 4. re-enable rd pointer from te (if was enabled) */
+			if (rd_ptr_disabled)
+				__enable_rd_ptr_from_te(mixer->pingpong_base);
+
 		}
 		rc = mdss_mdp_cmd_tearcheck_cfg(mixer, ctx, locked);
 		if (rc)
@@ -527,6 +696,8 @@
 				pr_err("%s cannot find master ctl\n",
 					__func__);
 				WARN_ON(1);
+				rc = -EINVAL;
+				goto exit;
 			}
 			/*
 			 * We have both controllers but sctl has the Master,
@@ -999,6 +1170,7 @@
 	vsync_time = ktime_get();
 	ctl->vsync_cnt++;
 	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt));
+	trace_mdp_cmd_readptr_done(ctl->num, atomic_read(&ctx->koff_cnt));
 	complete_all(&ctx->rdptr_done);
 
 	/* If caller is waiting for the read pointer, notify. */
@@ -1135,16 +1307,21 @@
 		return -EINVAL;
 
 	/*
-	 * Currently, only intf_fifo_underflow is
+	 * Currently, intf_fifo_overflow is not
 	 * supported for recovery sequence for command
 	 * mode DSI interface
 	 */
-	if (event != MDP_INTF_DSI_CMD_FIFO_UNDERFLOW) {
+	if (event == MDP_INTF_DSI_VIDEO_FIFO_OVERFLOW) {
 		pr_warn("%s: unsupported recovery event:%d\n",
 					__func__, event);
 		return -EPERM;
 	}
 
+	if (event == MDP_INTF_DSI_PANEL_DEAD) {
+		mdss_fb_report_panel_dead(ctx->ctl->mfd);
+		return 0;
+	}
+
 	if (atomic_read(&ctx->koff_cnt)) {
 		mdss_mdp_ctl_reset(ctx->ctl, true);
 		reset_done = true;
@@ -1419,36 +1596,6 @@
 	return 0;
 }
 
-/**
- * mdss_mdp_cmd_autorefresh_pp_done() - pp done irq callback for autorefresh
- * @arg: void pointer to the controller context.
- *
- * This function is the pp_done interrupt callback while disabling
- * autorefresh. This function does not modify the kickoff count (koff_cnt).
- */
-static void mdss_mdp_cmd_autorefresh_pp_done(void *arg)
-{
-	struct mdss_mdp_ctl *ctl = arg;
-	struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
-
-	if (!ctx) {
-		pr_err("%s: invalid ctx\n", __func__);
-		return;
-	}
-
-	mdss_mdp_irq_disable_nosync(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
-		ctx->current_pp_num);
-	mdss_mdp_set_intr_callback_nosync(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
-		ctx->current_pp_num, NULL, NULL);
-
-	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), ctx->current_pp_num);
-	complete_all(&ctx->autorefresh_ppdone);
-
-	pr_debug("%s: ctl_num=%d intf_num=%d ctx=%d cnt=%d\n", __func__,
-		ctl->num, ctl->intf_num, ctx->current_pp_num,
-		atomic_read(&ctx->koff_cnt));
-}
-
 static void pingpong_done_work(struct work_struct *work)
 {
 	u32 status;
@@ -1851,22 +1998,34 @@
 	bool handoff)
 {
 	struct mdss_panel_data *pdata;
-	struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
+	struct mdss_mdp_ctl *sctl = NULL;
+	struct mdss_mdp_cmd_ctx *sctx = NULL;
 	struct dsi_panel_clk_ctrl clk_ctrl;
 	int ret = 0;
 
+	/* Get both controllers in the correct order for dual displays */
+	mdss_mdp_get_split_display_ctls(&ctl, &sctl);
+
+	if (sctl)
+		sctx = (struct mdss_mdp_cmd_ctx *) sctl->intf_ctx[MASTER_CTX];
+
+	/* In pingpong split we have single controller, dual context */
+	if (is_pingpong_split(ctl->mfd))
+		sctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[SLAVE_CTX];
+
 	pdata = ctl->panel_data;
 
 	clk_ctrl.state = MDSS_DSI_CLK_OFF;
 	clk_ctrl.client = DSI_CLK_REQ_MDP_CLIENT;
-	if (sctl) {
+
+	if (sctx) { /* then slave */
 		u32 flags = CTL_INTF_EVENT_FLAG_SKIP_BROADCAST;
 
-		if (is_pingpong_split(sctl->mfd))
+		if (sctx->pingpong_split_slave)
 			flags |= CTL_INTF_EVENT_FLAG_SLAVE_INTF;
 
-		mdss_mdp_ctl_intf_event(sctl, MDSS_EVENT_PANEL_CLK_CTRL,
-			(void *)&clk_ctrl, flags);
+		mdss_mdp_ctl_intf_event(sctx->ctl, MDSS_EVENT_PANEL_CLK_CTRL,
+					(void *)&clk_ctrl, flags);
 	}
 
 	mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_PANEL_CLK_CTRL,
@@ -1975,12 +2134,14 @@
 			MDSS_XLOG(0xbad);
 			MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl", "dsi0_phy",
 				"dsi1_ctrl", "dsi1_phy", "vbif", "vbif_nrt",
-				"dbg_bus", "vbif_dbg_bus", "panic");
+				"dbg_bus", "vbif_dbg_bus",
+				"dsi_dbg_bus", "panic");
 		} else if (ctx->pp_timeout_report_cnt == MAX_RECOVERY_TRIALS) {
 			MDSS_XLOG(0xbad2);
 			MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl", "dsi0_phy",
 				"dsi1_ctrl", "dsi1_phy", "vbif", "vbif_nrt",
-				"dbg_bus", "vbif_dbg_bus", "panic");
+				"dbg_bus", "vbif_dbg_bus",
+				"dsi_dbg_bus", "panic");
 			mdss_fb_report_panel_dead(ctl->mfd);
 		}
 
@@ -2061,11 +2222,27 @@
 		return;
 
 	pinfo = &ctl->panel_data->panel_info;
-	if (pinfo->compression_mode != COMPRESSION_DSC)
-		return;
+	if (pinfo->compression_mode != COMPRESSION_DSC) {
+		/*
+		 * Check for a dynamic resolution switch from DSC On to
+		 * DSC Off and call mdss_mdp_ctl_dsc_setup to disable DSC
+		 */
+		if (ctl->pending_mode_switch == SWITCH_RESOLUTION) {
+			if (ctl->mixer_left && ctl->mixer_left->dsc_enabled)
+				changed = true;
+			if (is_split_lm(ctl->mfd) &&
+			    ctl->mixer_right &&
+			    ctl->mixer_right->dsc_enabled)
+				changed = true;
+		} else {
+			return;
+		}
+	}
 
-	changed = ctl->mixer_left->roi_changed;
-	if (is_split_lm(ctl->mfd))
+	if (ctl->mixer_left)
+		changed |= ctl->mixer_left->roi_changed;
+	if (is_split_lm(ctl->mfd) &&
+	    ctl->mixer_right)
 		changed |= ctl->mixer_right->roi_changed;
 
 	if (changed)
@@ -2305,7 +2482,6 @@
 	struct mdss_mdp_cmd_ctx *ctx = mctl->intf_ctx[MASTER_CTX];
 	char __iomem *pp_base;
 	u32 autorefresh_state;
-	u32 cfg;
 
 	if (!mctl->is_master)
 		return;
@@ -2325,11 +2501,8 @@
 		 * instruct MDP to ignore the panel TE so the next auto-refresh
 		 * is delayed until flush bits are set.
 		 */
-		cfg = mdss_mdp_pingpong_read(pp_base,
-			MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC);
-		cfg &= ~BIT(20);
-		mdss_mdp_pingpong_write(pp_base,
-			MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC, cfg);
+		__disable_rd_ptr_from_te(pp_base);
+
 		ctx->ignore_external_te = true;
 
 	}
@@ -2341,7 +2514,6 @@
 {
 	struct mdss_mdp_cmd_ctx *ctx = mctl->intf_ctx[MASTER_CTX];
 	char __iomem *pp_base;
-	u32 cfg;
 
 	if (!mctl->is_master)
 		return;
@@ -2360,60 +2532,12 @@
 		pp_base = mctl->mixer_left->pingpong_base;
 
 		/* enable MDP to listen to the TE */
-		cfg = mdss_mdp_pingpong_read(pp_base,
-			MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC);
-		cfg |= BIT(20);
-		mdss_mdp_pingpong_write(pp_base,
-			MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC, cfg);
+		__enable_rd_ptr_from_te(pp_base);
+
 		ctx->ignore_external_te = false;
 	}
 }
 
-static void mdss_mdp_cmd_wait4_autorefresh_pp(struct mdss_mdp_ctl *ctl)
-{
-	int rc;
-	u32 val, line_out, intr_type = MDSS_MDP_IRQ_TYPE_PING_PONG_COMP;
-	char __iomem *pp_base = ctl->mixer_left->pingpong_base;
-	struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
-
-	line_out = mdss_mdp_pingpong_read(pp_base, MDSS_MDP_REG_PP_LINE_COUNT);
-
-	MDSS_XLOG(ctl->num, line_out, ctl->mixer_left->roi.h);
-
-	if ((line_out < ctl->mixer_left->roi.h) && line_out) {
-		reinit_completion(&ctx->autorefresh_ppdone);
-
-		/* enable ping pong done */
-		mdss_mdp_set_intr_callback(intr_type, ctx->current_pp_num,
-					mdss_mdp_cmd_autorefresh_pp_done, ctl);
-		mdss_mdp_irq_enable(intr_type, ctx->current_pp_num);
-
-		/* wait for ping pong done */
-		rc = wait_for_completion_timeout(&ctx->autorefresh_ppdone,
-				KOFF_TIMEOUT);
-		if (rc <= 0) {
-			val = mdss_mdp_pingpong_read(pp_base,
-				MDSS_MDP_REG_PP_LINE_COUNT);
-			if (val == ctl->mixer_left->roi.h) {
-				mdss_mdp_irq_clear(ctl->mdata,
-					MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
-					ctx->current_pp_num);
-				mdss_mdp_irq_disable_nosync(intr_type,
-					ctx->current_pp_num);
-				mdss_mdp_set_intr_callback(intr_type,
-					ctx->current_pp_num, NULL, NULL);
-			} else {
-				pr_err("timedout waiting for ctl%d autorefresh pp done\n",
-					ctl->num);
-				MDSS_XLOG(0xbad3);
-				MDSS_XLOG_TOUT_HANDLER("mdp",
-					"vbif", "dbg_bus", "vbif_dbg_bus",
-					"panic");
-			}
-		}
-	}
-}
-
 static void mdss_mdp_cmd_autorefresh_done(void *arg)
 {
 	struct mdss_mdp_ctl *ctl = arg;
@@ -2487,10 +2611,11 @@
 	struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
 	unsigned long flags;
 	unsigned long autorefresh_timeout;
+	u32 timeout_us = 20000;
 
 	line_out = mdss_mdp_pingpong_read(pp_base, MDSS_MDP_REG_PP_LINE_COUNT);
 
-	MDSS_XLOG(ctl->num, line_out, ctl->mixer_left->roi.h);
+	MDSS_XLOG(ctl->num, line_out, ctl->mixer_left->roi.h, 0x111);
 
 	reinit_completion(&ctx->autorefresh_done);
 
@@ -2549,6 +2674,26 @@
 			"dsi1_ctrl", "dsi1_phy", "vbif", "vbif_nrt",
 			"dbg_bus", "vbif_dbg_bus", "panic");
 	}
+
+	/* once autorefresh is done, wait for write pointer */
+	line_out = mdss_mdp_pingpong_read(pp_base, MDSS_MDP_REG_PP_LINE_COUNT);
+	MDSS_XLOG(ctl->num, line_out, 0x222);
+
+	/* wait until the first line is out to make sure the transfer is ongoing */
+	rc = readl_poll_timeout(pp_base +
+		MDSS_MDP_REG_PP_LINE_COUNT, val,
+		(val & 0xffff) >= 1, 10, timeout_us);
+
+	if (rc) {
+		pr_err("timed out waiting for line out ctl:%d val:0x%x\n",
+			ctl->num, val);
+		MDSS_XLOG(0xbad5, val);
+		MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl", "dsi0_phy",
+			"dsi1_ctrl", "dsi1_phy", "vbif", "vbif_nrt",
+			"dbg_bus", "vbif_dbg_bus", "panic");
+	}
+
+	MDSS_XLOG(val, 0x333);
 }
 
 /* caller needs to hold autorefresh_lock before calling this function */
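
Note on the polling added in the hunk above: the wait for the first output line uses
readl_poll_timeout() from <linux/iopoll.h>. A minimal sketch of that pattern (assuming
the driver's pp_base pointer and the MDSS_MDP_REG_PP_LINE_COUNT define; error handling
elided) is:

    #include <linux/iopoll.h>

    static int wait_for_first_line_out(void __iomem *pp_base)
    {
            u32 val;

            /* sample every 10 us, give up after 20 ms, as in the hunk above */
            return readl_poll_timeout(pp_base + MDSS_MDP_REG_PP_LINE_COUNT,
                                      val, (val & 0xffff) >= 1, 10, 20000);
    }

readl_poll_timeout() returns 0 once the condition holds and -ETIMEDOUT otherwise, which
is why the hunk only logs and dumps the debug buses on a non-zero return.
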
@@ -2616,11 +2761,10 @@
 		mdss_mdp_cmd_wait4_autorefresh_pp(sctl);
 
 	/* disable autorefresh */
-	mdss_mdp_pingpong_write(pp_base, MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG, 0);
+	__disable_autorefresh(pp_base);
 
 	if (is_pingpong_split(ctl->mfd))
-		mdss_mdp_pingpong_write(mdata->slave_pingpong_base,
-				MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG, 0);
+		__disable_autorefresh(mdata->slave_pingpong_base);
 
 	ctx->autorefresh_state = MDP_AUTOREFRESH_OFF;
 	ctx->autorefresh_frame_cnt = 0;
@@ -2636,12 +2780,42 @@
 	return 0;
 }
 
+static bool wait_for_read_ptr_if_late(struct mdss_mdp_ctl *ctl,
+	struct mdss_mdp_ctl *sctl, struct mdss_panel_info *pinfo)
+{
+	u32 line_count;
+	u32 sline_count = 0;
+	bool ret = true;
+	u32 low_threshold = pinfo->mdp_koff_thshold_low;
+	u32 high_threshold = pinfo->mdp_koff_thshold_high;
+
+	/* read the line count */
+	line_count = mdss_mdp_cmd_line_count(ctl);
+	if (sctl)
+		sline_count = mdss_mdp_cmd_line_count(sctl);
+
+	/* line count within the range: return false so the transfer is triggered */
+	if (((line_count > low_threshold) && (line_count < high_threshold)) &&
+	     (!sctl || ((sline_count > low_threshold) &&
+			(sline_count < high_threshold))))
+		ret = false;
+
+	pr_debug("threshold:[%d, %d]\n", low_threshold, high_threshold);
+	pr_debug("line:%d sline:%d ret:%d\n", line_count, sline_count, ret);
+	MDSS_XLOG(line_count, sline_count, ret);
+
+	return ret;
+}
 
 static void __mdss_mdp_kickoff(struct mdss_mdp_ctl *ctl,
-	struct mdss_mdp_cmd_ctx *ctx)
+	struct mdss_mdp_ctl *sctl, struct mdss_mdp_cmd_ctx *ctx)
 {
 	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
 	bool is_pp_split = is_pingpong_split(ctl->mfd);
+	struct mdss_panel_info *pinfo = NULL;
+
+	if (ctl->panel_data)
+		pinfo = &ctl->panel_data->panel_info;
 
 	MDSS_XLOG(ctx->autorefresh_state);
 
@@ -2666,12 +2840,71 @@
 		ctx->autorefresh_state = MDP_AUTOREFRESH_ON;
 
 	} else {
+
+		/*
+		 * Some panels require that MDP is within a certain range
+		 * of scanlines in order to trigger the transfer.
+		 * If that is the case, make sure the panel scanline
+		 * is within the limit before starting.
+		 * Acquire a spinlock for this operation to raise the
+		 * priority of this thread and keep the context, so
+		 * the time between the scanline check and the kickoff
+		 * is as short as possible.
+		 */
+		if (pinfo && pinfo->mdp_koff_thshold) {
+			spin_lock(&ctx->ctlstart_lock);
+			if (wait_for_read_ptr_if_late(ctl, sctl, pinfo)) {
+				spin_unlock(&ctx->ctlstart_lock);
+				usleep_range(pinfo->mdp_koff_delay,
+						pinfo->mdp_koff_delay + 10);
+				spin_lock(&ctx->ctlstart_lock);
+			}
+		}
+
 		/* SW Kickoff */
 		mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_START, 1);
 		MDSS_XLOG(0x11, ctx->autorefresh_state);
+
+		if (pinfo && pinfo->mdp_koff_thshold)
+			spin_unlock(&ctx->ctlstart_lock);
 	}
 }
 
+static int mdss_mdp_cmd_wait4_vsync(struct mdss_mdp_ctl *ctl)
+{
+	int rc = 0;
+	struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+
+	if (!ctx) {
+		pr_err("invalid context to wait for vsync\n");
+		return -ENODEV;
+	}
+
+	atomic_inc(&ctx->rdptr_cnt);
+
+	/* enable clks and rd_ptr interrupt */
+	mdss_mdp_setup_vsync(ctx, true);
+
+	/* wait for read pointer */
+	MDSS_XLOG(atomic_read(&ctx->rdptr_cnt));
+	pr_debug("%s: wait for vsync cnt:%d\n",
+		__func__, atomic_read(&ctx->rdptr_cnt));
+
+	rc = mdss_mdp_cmd_wait4readptr(ctx);
+
+	/* wait 1 ms to make sure we are out of the trigger window */
+	usleep_range(1000, 1010);
+
+	/* disable rd_ptr interrupt */
+	mdss_mdp_setup_vsync(ctx, false);
+
+	MDSS_XLOG(ctl->num);
+	pr_debug("%s: out from wait for rd_ptr ctl:%d\n", __func__, ctl->num);
+
+	return rc;
+}
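
mdss_mdp_cmd_wait4_vsync() is exposed to the rest of the driver through the new
wait_for_vsync_fnc op: the command-mode start path below installs it, while the
video-mode path explicitly leaves it NULL. The call sites are outside this patch,
so the guard shown here is only the presumed usage:

    /* Hypothetical caller; the actual call sites are not part of this diff. */
    if (ctl->ops.wait_for_vsync_fnc)
            rc = ctl->ops.wait_for_vsync_fnc(ctl);
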
+
+
 /*
  * There are 3 partial update possibilities
  * left only ==> enable left pingpong_done
@@ -2800,7 +3033,7 @@
 	}
 
 	/* Kickoff */
-	__mdss_mdp_kickoff(ctl, ctx);
+	__mdss_mdp_kickoff(ctl, sctl, ctx);
 
 	mdss_mdp_cmd_post_programming(ctl);
 
@@ -3037,8 +3270,18 @@
 		 * mode.
 		 */
 		send_panel_events = true;
-		if (mdss_panel_is_power_on_ulp(panel_power_state))
+		if (mdss_panel_is_power_on_ulp(panel_power_state)) {
 			turn_off_clocks = true;
+		} else if (atomic_read(&ctx->koff_cnt)) {
+			/*
+			 * Transition from interactive to low power
+			 * Wait for kickoffs to finish
+			 */
+			MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt));
+			mdss_mdp_cmd_wait4pingpong(ctl, NULL);
+			if (sctl)
+				mdss_mdp_cmd_wait4pingpong(sctl, NULL);
+		}
 	} else {
 		/* Transitions between low power and ultra low power */
 		if (mdss_panel_is_power_on_ulp(panel_power_state)) {
@@ -3067,6 +3310,8 @@
 				(void *)&ctx->intf_mdp_callback,
 				CTL_INTF_EVENT_FLAG_DEFAULT);
 
+			mdss_mdp_tearcheck_enable(ctl, true);
+
 			ctx->intf_stopped = 0;
 			if (sctx)
 				sctx->intf_stopped = 0;
@@ -3132,6 +3377,7 @@
 	ctl->ops.add_vsync_handler = NULL;
 	ctl->ops.remove_vsync_handler = NULL;
 	ctl->ops.reconfigure = NULL;
+	ctl->ops.wait_for_vsync_fnc = NULL;
 
 end:
 	if (!IS_ERR_VALUE((unsigned long)ret)) {
@@ -3234,6 +3480,7 @@
 	init_completion(&ctx->autorefresh_done);
 	spin_lock_init(&ctx->clk_lock);
 	spin_lock_init(&ctx->koff_lock);
+	spin_lock_init(&ctx->ctlstart_lock);
 	mutex_init(&ctx->clk_mtx);
 	mutex_init(&ctx->mdp_rdptr_lock);
 	mutex_init(&ctx->mdp_wrptr_lock);
@@ -3490,12 +3737,24 @@
 				}
 				ctl->switch_with_handoff = false;
 			}
+			/*
+			 * keep track of vsync, so it can be enabled as part
+			 * of the post switch sequence
+			 */
+			if (ctl->vsync_handler.enabled)
+				ctl->need_vsync_on = true;
 
 			mdss_mdp_ctl_stop(ctl, MDSS_PANEL_POWER_OFF);
 			mdss_mdp_ctl_intf_event(ctl,
 				MDSS_EVENT_DSI_DYNAMIC_SWITCH,
 				(void *) mode, CTL_INTF_EVENT_FLAG_DEFAULT);
 		} else {
+			if (ctl->need_vsync_on &&
+					ctl->ops.add_vsync_handler) {
+				ctl->ops.add_vsync_handler(ctl,
+						&ctl->vsync_handler);
+				ctl->need_vsync_on = false;
+			}
 			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
 		}
 	}
@@ -3522,12 +3781,13 @@
 	ctl->ops.wait_pingpong = mdss_mdp_cmd_wait4pingpong;
 	ctl->ops.add_vsync_handler = mdss_mdp_cmd_add_vsync_handler;
 	ctl->ops.remove_vsync_handler = mdss_mdp_cmd_remove_vsync_handler;
-	ctl->ops.read_line_cnt_fnc = mdss_mdp_cmd_line_count;
+	ctl->ops.read_line_cnt_fnc = mdss_mdp_cmd_line_count_wrapper;
 	ctl->ops.restore_fnc = mdss_mdp_cmd_restore;
 	ctl->ops.early_wake_up_fnc = mdss_mdp_cmd_early_wake_up;
 	ctl->ops.reconfigure = mdss_mdp_cmd_reconfigure;
 	ctl->ops.pre_programming = mdss_mdp_cmd_pre_programming;
 	ctl->ops.update_lineptr = mdss_mdp_cmd_update_lineptr;
+	ctl->ops.wait_for_vsync_fnc = mdss_mdp_cmd_wait4_vsync;
 	pr_debug("%s:-\n", __func__);
 
 	return 0;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
index 7b69aa3..dba3b0d 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
@@ -327,11 +327,11 @@
 	}
 
 	/*
-	 * Currently, only intf_fifo_overflow is
+	 * Currently, intf_fifo_underflow is not
 	 * supported for recovery sequence for video
 	 * mode DSI interface
 	 */
-	if (event != MDP_INTF_DSI_VIDEO_FIFO_OVERFLOW) {
+	if (event == MDP_INTF_DSI_CMD_FIFO_UNDERFLOW) {
 		pr_warn("%s: unsupported recovery event:%d\n",
 					__func__, event);
 		return -EPERM;
@@ -341,6 +341,11 @@
 	pr_debug("%s: ctl num = %d, event = %d\n",
 				__func__, ctl->num, event);
 
+	if (event == MDP_INTF_DSI_PANEL_DEAD) {
+		mdss_fb_report_panel_dead(ctx->ctl->mfd);
+		return 0;
+	}
+
 	pinfo = &ctl->panel_data->panel_info;
 	clk_rate = ((ctl->intf_type == MDSS_INTF_DSI) ?
 			pinfo->mipi.dsi_pclk_rate :
@@ -539,8 +544,10 @@
 			ctl_flush |= (BIT(31) >>
 					(sctx->intf_num - MDSS_MDP_INTF0));
 	}
+	MDSS_XLOG(ctl->intf_num, sctx?sctx->intf_num:0xf00, ctl_flush,
+				mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_FLUSH));
 	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, ctl_flush);
-	MDSS_XLOG(ctl->intf_num, sctx?sctx->intf_num:0xf00, ctl_flush);
+
 }
 
 static inline void video_vsync_irq_enable(struct mdss_mdp_ctl *ctl, bool clear)
@@ -924,6 +931,13 @@
 {
 	int intfs_num, ret = 0;
 
+	if (ctl->cdm) {
+		if (!mdss_mdp_cdm_destroy(ctl->cdm))
+			mdss_mdp_ctl_write(ctl,
+				MDSS_MDP_REG_CTL_FLUSH, BIT(26));
+		ctl->cdm = NULL;
+	}
+
 	intfs_num = ctl->intf_num - MDSS_MDP_INTF0;
 	ret = mdss_mdp_video_intfs_stop(ctl, ctl->panel_data, intfs_num);
 	if (IS_ERR_VALUE((unsigned long)ret)) {
@@ -936,10 +950,6 @@
 	mdss_mdp_ctl_reset(ctl, false);
 	ctl->intf_ctx[MASTER_CTX] = NULL;
 
-	if (ctl->cdm) {
-		mdss_mdp_cdm_destroy(ctl->cdm);
-		ctl->cdm = NULL;
-	}
 	return 0;
 }
 
@@ -1252,7 +1262,7 @@
 			pr_err("error polling for vsync\n");
 			MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl", "dsi0_phy",
 				"dsi1_ctrl", "dsi1_phy", "vbif", "dbg_bus",
-				"vbif_dbg_bus", "panic");
+				"vbif_dbg_bus", "dsi_dbg_bus", "panic");
 		}
 	} else {
 		rc = 0;
@@ -1551,13 +1561,43 @@
 	return 0;
 }
 
+static int mdss_mdp_video_splash_handoff(struct mdss_mdp_ctl *ctl)
+{
+	int i, ret = 0;
+	u32 data, flush;
+
+	ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_CONT_SPLASH_BEGIN,
+			NULL, CTL_INTF_EVENT_FLAG_DEFAULT);
+
+	if (ret) {
+		pr_err("%s:ctl%d failed to handle 'CONT_SPLASH_BEGIN' event\n"
+			, __func__, ctl->num);
+		return ret;
+	}
+
+	/* clear up mixer0 and mixer1 */
+	flush = 0;
+	for (i = 0; i < 2; i++) {
+		data = mdss_mdp_ctl_read(ctl,
+			MDSS_MDP_REG_CTL_LAYER(i));
+		if (data) {
+			mdss_mdp_ctl_write(ctl,
+				MDSS_MDP_REG_CTL_LAYER(i),
+				MDSS_MDP_LM_BORDER_COLOR);
+			flush |= (0x40 << i);
+		}
+	}
+	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, flush);
+
+	return ret;
+}
+
 int mdss_mdp_video_reconfigure_splash_done(struct mdss_mdp_ctl *ctl,
 	bool handoff)
 {
 	struct mdss_panel_data *pdata;
-	int i, ret = 0, off;
-	u32 data, flush;
-	struct mdss_mdp_video_ctx *ctx;
+	int ret = 0, off;
+	struct mdss_mdp_video_ctx *ctx, *sctx = NULL;
 	struct mdss_mdp_ctl *sctl;
 
 	if (!ctl) {
@@ -1581,41 +1621,43 @@
 	pdata->panel_info.cont_splash_enabled = 0;
 	sctl = mdss_mdp_get_split_ctl(ctl);
 
-	if (sctl)
+	if (sctl) {
 		sctl->panel_data->panel_info.cont_splash_enabled = 0;
-	else if (ctl->panel_data->next && is_pingpong_split(ctl->mfd))
+		sctx = (struct mdss_mdp_video_ctx *) sctl->intf_ctx[MASTER_CTX];
+	} else if (ctl->panel_data->next && is_pingpong_split(ctl->mfd)) {
 		ctl->panel_data->next->panel_info.cont_splash_enabled = 0;
+		sctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[SLAVE_CTX];
+	}
 
 	if (!handoff) {
-		ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_CONT_SPLASH_BEGIN,
-				      NULL, CTL_INTF_EVENT_FLAG_DEFAULT);
-		if (ret) {
-			pr_err("%s: Failed to handle 'CONT_SPLASH_BEGIN' event\n"
-				, __func__);
-			return ret;
-		}
+		ret = mdss_mdp_video_splash_handoff(ctl);
 
-		/* clear up mixer0 and mixer1 */
-		flush = 0;
-		for (i = 0; i < 2; i++) {
-			data = mdss_mdp_ctl_read(ctl,
-				MDSS_MDP_REG_CTL_LAYER(i));
-			if (data) {
-				mdss_mdp_ctl_write(ctl,
-					MDSS_MDP_REG_CTL_LAYER(i),
-					MDSS_MDP_LM_BORDER_COLOR);
-				flush |= (0x40 << i);
-			}
-		}
-		mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, flush);
+		if (!ret && sctl)
+			ret = mdss_mdp_video_splash_handoff(sctl);
+
+		if (ret)
+			return ret;
 
 		mdp_video_write(ctx, MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 0);
+
+		if (sctx)
+			mdp_video_write(sctx,
+				MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 0);
+
+		mdss_mdp_video_timegen_flush(ctl, sctx);
+
 		/* wait for 1 VSYNC for the pipe to be unstaged */
 		msleep(20);
 
 		ret = mdss_mdp_ctl_intf_event(ctl,
 			MDSS_EVENT_CONT_SPLASH_FINISH, NULL,
 			CTL_INTF_EVENT_FLAG_DEFAULT);
+
+		if (!ret && sctl)
+			ret = mdss_mdp_ctl_intf_event(sctl,
+				MDSS_EVENT_CONT_SPLASH_FINISH, NULL,
+				CTL_INTF_EVENT_FLAG_DEFAULT);
+
 	}
 
 	return ret;
@@ -1668,7 +1710,7 @@
 static void mdss_mdp_fetch_start_config(struct mdss_mdp_video_ctx *ctx,
 		struct mdss_mdp_ctl *ctl)
 {
-	int fetch_start, fetch_enable, v_total, h_total;
+	int fetch_start = 0, fetch_enable, v_total, h_total;
 	struct mdss_data_type *mdata;
 	struct mdss_panel_info *pinfo = &ctl->panel_data->panel_info;
 
@@ -1677,6 +1719,15 @@
 	pinfo->prg_fet = mdss_mdp_get_prefetch_lines(pinfo);
 	if (!pinfo->prg_fet) {
 		pr_debug("programmable fetch is not needed/supported\n");
+
+		fetch_enable = mdp_video_read(ctx, MDSS_MDP_REG_INTF_CONFIG);
+		fetch_enable &= ~BIT(31);
+
+		mdp_video_write(ctx, MDSS_MDP_REG_INTF_CONFIG, fetch_enable);
+		mdp_video_write(ctx, MDSS_MDP_REG_INTF_PROG_FETCH_START,
+						fetch_start);
+
+		MDSS_XLOG(ctx->intf_num, fetch_enable, fetch_start);
 		return;
 	}
 
@@ -2181,6 +2232,7 @@
 	ctl->ops.config_fps_fnc = mdss_mdp_video_config_fps;
 	ctl->ops.early_wake_up_fnc = mdss_mdp_video_early_wake_up;
 	ctl->ops.update_lineptr = mdss_mdp_video_lineptr_ctrl;
+	ctl->ops.wait_for_vsync_fnc = NULL;
 
 	return 0;
 }
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
index 46f25dd..3d63890 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
@@ -518,7 +518,9 @@
 	}
 
 	if (ctl->cdm) {
-		mdss_mdp_cdm_destroy(ctl->cdm);
+		if (!mdss_mdp_cdm_destroy(ctl->cdm))
+			mdss_mdp_ctl_write(ctl,
+				MDSS_MDP_REG_CTL_FLUSH, BIT(26));
 		ctl->cdm = NULL;
 	}
 	return 0;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_layer.c b/drivers/video/fbdev/msm/mdss_mdp_layer.c
index d898e7e..a7692f0 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_layer.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_layer.c
@@ -103,14 +103,15 @@
 	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
 
 	if ((layer->z_order != HW_CURSOR_STAGE(mdata))
+			|| layer->flags & MDP_LAYER_FLIP_LR
 			|| layer->src_rect.w > mdata->max_cursor_size
 			|| layer->src_rect.h > mdata->max_cursor_size
 			|| layer->src_rect.w != layer->dst_rect.w
 			|| layer->src_rect.h != layer->dst_rect.h
 			|| !mdata->ncursor_pipes) {
-		pr_err("Incorrect cursor configs for pipe:%d, cursor_pipes:%d, z_order:%d\n",
+		pr_err("Incorrect cursor configs for pipe:0x%x, ncursor_pipes:%d, z_order:%d, flags:0x%x\n",
 				layer->pipe_ndx, mdata->ncursor_pipes,
-				layer->z_order);
+				layer->z_order, layer->flags);
 		pr_err("src:{%d,%d,%d,%d}, dst:{%d,%d,%d,%d}\n",
 				layer->src_rect.x, layer->src_rect.y,
 				layer->src_rect.w, layer->src_rect.h,
@@ -344,12 +345,18 @@
 	 */
 	if (pipe->csc_coeff_set != layer->color_space) {
 		src_fmt = mdss_mdp_get_format_params(layer->buffer.format);
-		if (pipe->src_fmt->is_yuv && src_fmt && src_fmt->is_yuv) {
-			status = -EPERM;
-			pr_err("csc change is not permitted on used pipe\n");
+		if (!src_fmt) {
+			pr_err("Invalid layer format %d\n",
+						layer->buffer.format);
+			status = -EINVAL;
+		} else {
+			if (pipe->src_fmt->is_yuv && src_fmt &&
+							src_fmt->is_yuv) {
+				status = -EPERM;
+				pr_err("csc change is not permitted on used pipe\n");
+			}
 		}
 	}
-
 	return status;
 }
 
@@ -484,9 +491,6 @@
 
 	pipe->comp_ratio = layer->buffer.comp_ratio;
 
-	if (mfd->panel_orientation)
-		layer->flags ^= mfd->panel_orientation;
-
 	pipe->mixer_left = mixer;
 	pipe->mfd = mfd;
 	pipe->play_cnt = 0;
@@ -561,6 +565,15 @@
 		goto end;
 	}
 
+	/* scaling is not allowed for solid_fill layers */
+	if ((pipe->flags & MDP_SOLID_FILL) &&
+		((pipe->src.w != pipe->dst.w) ||
+			(pipe->src.h != pipe->dst.h))) {
+		pr_err("solid fill pipe:%d cannot have scaling\n", pipe->num);
+		ret = -EINVAL;
+		goto end;
+	}
+
 	/*
 	 * unstage the pipe if it's current z_order does not match with new
 	 * z_order because client may only call the validate.
@@ -625,13 +638,6 @@
 	pipe->multirect.mode = vinfo->multirect.mode;
 	pipe->mixer_stage = layer->z_order;
 
-	if (mfd->panel_orientation & MDP_FLIP_LR)
-		pipe->dst.x = pipe->mixer_left->width - pipe->dst.x -
-			pipe->dst.w;
-	if (mfd->panel_orientation & MDP_FLIP_UD)
-		pipe->dst.y = pipe->mixer_left->height - pipe->dst.y -
-			pipe->dst.h;
-
 	memcpy(&pipe->layer, layer, sizeof(struct mdp_input_layer));
 
 	mdss_mdp_overlay_set_chroma_sample(pipe);
@@ -1175,6 +1181,11 @@
 	pr_debug("pipe count:: secure display:%d non-secure:%d\n",
 		sd_pipes, nonsd_pipes);
 
+	if (mdss_get_sd_client_cnt() && !mdp5_data->sd_enabled) {
+		pr_err("Secure session already enabled for other client\n");
+		return -EINVAL;
+	}
+
 	mdp5_data->sd_transition_state = SD_TRANSITION_NONE;
 	if (!__is_sd_state_valid(sd_pipes, nonsd_pipes, panel_type,
 		mdp5_data->sd_enabled)) {
@@ -1188,7 +1199,14 @@
 	} else if (mdp5_data->sd_enabled && !sd_pipes) {
 		mdp5_data->sd_transition_state =
 			SD_TRANSITION_SECURE_TO_NON_SECURE;
+	} else if (mdp5_data->ctl->is_video_mode &&
+		((sd_pipes && !mdp5_data->sd_enabled) ||
+		(!sd_pipes && mdp5_data->sd_enabled)) &&
+		!mdp5_data->cache_null_commit) {
+		pr_err("NULL commit missing before display secure session entry/exit\n");
+		ret = -EINVAL;
 	}
+
 	return ret;
 }
 
diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
index 80708aa..af6e4ce 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_overlay.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
@@ -2904,6 +2904,7 @@
 	if (ctl->ops.wait_pingpong && mdp5_data->mdata->serialize_wait4pp)
 		mdss_mdp_display_wait4pingpong(ctl, true);
 
+	mdp5_data->cache_null_commit = list_empty(&mdp5_data->pipes_used);
 	sd_transition_state = mdp5_data->sd_transition_state;
 	if (sd_transition_state != SD_TRANSITION_NONE) {
 		ret = __config_secure_display(mdp5_data);
@@ -2936,6 +2937,7 @@
 	ATRACE_BEGIN("sspp_programming");
 	ret = __overlay_queue_pipes(mfd);
 	ATRACE_END("sspp_programming");
+
 	mutex_unlock(&mdp5_data->list_lock);
 
 	mdp5_data->kickoff_released = false;
@@ -3864,7 +3866,8 @@
 	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
 	struct dynamic_fps_data data = {0};
 
-	if (!mdp5_data->ctl || !mdss_mdp_ctl_is_power_on(mdp5_data->ctl)) {
+	if (!mdp5_data->ctl || !mdss_mdp_ctl_is_power_on(mdp5_data->ctl) ||
+			mdss_panel_is_power_off(mfd->panel_power_state)) {
 		pr_debug("panel is off\n");
 		return count;
 	}
@@ -6603,6 +6606,7 @@
 	mdp5_interface->configure_panel = mdss_mdp_update_panel_info;
 	mdp5_interface->input_event_handler = mdss_mdp_input_event_handler;
 	mdp5_interface->signal_retire_fence = mdss_mdp_signal_retire_fence;
+	mdp5_interface->is_twm_en = NULL;
 
 	if (mfd->panel_info->type == WRITEBACK_PANEL) {
 		mdp5_interface->atomic_validate =
@@ -6648,10 +6652,6 @@
 
 	mfd->panel_orientation = mfd->panel_info->panel_orientation;
 
-	if ((mfd->panel_info->panel_orientation & MDP_FLIP_LR) &&
-	    (mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY))
-		mdp5_data->mixer_swap = true;
-
 	rc = sysfs_create_group(&dev->kobj, &mdp_overlay_sysfs_group);
 	if (rc) {
 		pr_err("vsync sysfs group creation failed, ret=%d\n", rc);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pipe.c b/drivers/video/fbdev/msm/mdss_mdp_pipe.c
index 3a8df20..4231af4 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pipe.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pipe.c
@@ -934,17 +934,18 @@
 		data = readl_relaxed(mdata->mdp_base +
 			MDSS_MDP_REG_SMP_ALLOC_W0 + off);
 		client_id = (data >> s) & 0xFF;
-		if (test_bit(i, mdata->mmb_alloc_map)) {
-			/*
-			 * Certain pipes may have a dedicated set of
-			 * SMP MMBs statically allocated to them. In
-			 * such cases, we do not need to do anything
-			 * here.
-			 */
-			pr_debug("smp mmb %d already assigned to pipe %d (client_id %d)\n"
-				, i, pipe ? pipe->num : -1, client_id);
-			continue;
-		}
+		if (i < ARRAY_SIZE(mdata->mmb_alloc_map))
+			if (test_bit(i, mdata->mmb_alloc_map)) {
+				/*
+				 * Certain pipes may have a dedicated set of
+				 * SMP MMBs statically allocated to them. In
+				 * such cases, we do not need to do anything
+				 * here.
+				 */
+				pr_debug("smp mmb %d already assigned to pipe %d (client_id %d)\n"
+					, i, pipe ? pipe->num : -1, client_id);
+				continue;
+			}
 
 		if (client_id) {
 			if (client_id != prev_id) {
@@ -1011,8 +1012,10 @@
 	u32 mask, reg_val, reg_val_lvl, i, vbif_qos;
 	u32 reg_high;
 	bool is_nrt_vbif = mdss_mdp_is_nrt_vbif_client(mdata, pipe);
+	u32 *vbif_qos_ptr = is_realtime ? mdata->vbif_rt_qos :
+		mdata->vbif_nrt_qos;
 
-	if (mdata->npriority_lvl == 0)
+	if ((mdata->npriority_lvl == 0) || !vbif_qos_ptr)
 		return;
 
 	if (test_bit(MDSS_QOS_REMAPPER, mdata->mdss_qos_map)) {
@@ -1028,8 +1031,7 @@
 				is_nrt_vbif);
 
 			mask = 0x3 << (pipe->xin_id * 4);
-			vbif_qos = is_realtime ?
-				mdata->vbif_rt_qos[i] : mdata->vbif_nrt_qos[i];
+			vbif_qos = vbif_qos_ptr[i];
 
 			reg_val &= ~(mask);
 			reg_val |= vbif_qos << (pipe->xin_id * 4);
@@ -1053,8 +1055,7 @@
 
 			mask = 0x3 << (pipe->xin_id * 2);
 			reg_val &= ~(mask);
-			vbif_qos = is_realtime ?
-				mdata->vbif_rt_qos[i] : mdata->vbif_nrt_qos[i];
+			vbif_qos = vbif_qos_ptr[i];
 			reg_val |= vbif_qos << (pipe->xin_id * 2);
 			MDSS_VBIF_WRITE(mdata, MDSS_VBIF_QOS_REMAP_BASE + i*4,
 				reg_val, is_nrt_vbif);
@@ -2072,8 +2073,9 @@
 	u32 cdp_settings = 0x0;
 	bool is_rotator = (pipe->mixer_left && pipe->mixer_left->rotator_mode);
 
-	/* Disable CDP for rotator pipe in v1 */
-	if (is_rotator && mdss_has_quirk(mdata, MDSS_QUIRK_ROTCDP))
+	/* Disable CDP for rotator pipe or if not requested for the target */
+	if (!mdata->enable_cdp || (is_rotator &&
+			mdss_has_quirk(mdata, MDSS_QUIRK_ROTCDP)))
 		goto exit;
 
 	cdp_settings = MDSS_MDP_CDP_ENABLE;
@@ -2290,6 +2292,9 @@
 		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC3_ADDR, addr[2]);
 	}
 
+	MDSS_XLOG(pipe->num, pipe->multirect.num, pipe->mixer_left->num,
+		pipe->play_cnt, addr[0], addr[1], addr[2], addr[3]);
+
 	return 0;
 }
 
@@ -2311,7 +2316,7 @@
 
 	/* support ARGB color format only */
 	unpack = (C3_ALPHA << 24) | (C2_R_Cr << 16) |
-		(C1_B_Cb << 8) | (C0_G_Y << 0);
+		(C0_G_Y << 8) | (C1_B_Cb << 0);
 	if (pipe->scaler.enable)
 		opmode |= (1 << 31);
 
@@ -2677,9 +2682,6 @@
 		goto update_nobuf;
 	}
 
-	MDSS_XLOG(pipe->num, pipe->multirect.num, pipe->mixer_left->num,
-						pipe->play_cnt, 0x222);
-
 	if (params_changed) {
 		pipe->params_changed = 0;
 
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp.c b/drivers/video/fbdev/msm/mdss_mdp_pp.c
index 13afa46..8f4d437 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp.c
@@ -2445,7 +2445,7 @@
 	}
 
 	if (flags & PP_FLAGS_DIRTY_DITHER) {
-		if (!pp_ops[DITHER].pp_set_config) {
+		if (!pp_ops[DITHER].pp_set_config && addr) {
 			pp_dither_config(addr, pp_sts,
 				&mdss_pp_res->dither_disp_cfg[disp_num]);
 		} else {
@@ -5156,7 +5156,7 @@
 				u32 block)
 {
 	int ret = 0;
-	u32 sum;
+	int sum = 0;
 	char __iomem *v_base = NULL;
 	unsigned long flag;
 	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
@@ -5186,7 +5186,8 @@
 		else if (block == SSPP_VIG)
 			v_base = ctl_base +
 				MDSS_MDP_REG_VIG_HIST_CTL_BASE;
-		sum = pp_hist_read(v_base, hist_info);
+		if (v_base)
+			sum = pp_hist_read(v_base, hist_info);
 	}
 	writel_relaxed(0, hist_info->base);
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_splash_logo.c b/drivers/video/fbdev/msm/mdss_mdp_splash_logo.c
index 44817c3..cf19501 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_splash_logo.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_splash_logo.c
@@ -149,7 +149,7 @@
 {
 	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
 	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
-	int rc, ret;
+	int ret;
 
 	/*
 	 * iommu dynamic attach for following conditions.
@@ -166,26 +166,41 @@
 		return -EPERM;
 	}
 
-	rc = mdss_smmu_map(MDSS_IOMMU_DOMAIN_UNSECURE,
+	/*
+	 * Set handoff_pending to false so that the SMMU attach happens
+	 * with the early-map attribute
+	 */
+	mdata->handoff_pending = false;
+
+	ret = mdss_smmu_set_attribute(MDSS_IOMMU_DOMAIN_UNSECURE, EARLY_MAP, 1);
+	if (ret) {
+		pr_debug("mdss set attribute failed for early map\n");
+		goto end;
+	}
+
+	ret = mdss_iommu_ctrl(1);
+	if (IS_ERR_VALUE((unsigned long)ret)) {
+		pr_err("mdss iommu attach failed\n");
+		goto end;
+	}
+
+	ret = mdss_smmu_map(MDSS_IOMMU_DOMAIN_UNSECURE,
 				mdp5_data->splash_mem_addr,
 				mdp5_data->splash_mem_addr,
 				mdp5_data->splash_mem_size,
 				IOMMU_READ | IOMMU_NOEXEC);
-	if (rc) {
-		pr_debug("iommu memory mapping failed rc=%d\n", rc);
+	if (ret) {
+		pr_err("iommu memory mapping failed ret=%d\n", ret);
 	} else {
-		ret = mdss_iommu_ctrl(1);
-		if (IS_ERR_VALUE((unsigned long)ret)) {
-			pr_err("mdss iommu attach failed\n");
-			mdss_smmu_unmap(MDSS_IOMMU_DOMAIN_UNSECURE,
-					mdp5_data->splash_mem_addr,
-					mdp5_data->splash_mem_size);
-		} else {
-			mfd->splash_info.iommu_dynamic_attached = true;
-		}
+		pr_debug("iommu map passed for PA=VA\n");
+		mfd->splash_info.iommu_dynamic_attached = true;
 	}
 
-	return rc;
+	ret = mdss_smmu_set_attribute(MDSS_IOMMU_DOMAIN_UNSECURE, EARLY_MAP, 0);
+end:
+	mdata->handoff_pending = true;
+
+	return ret;
 }
 
 static void mdss_mdp_splash_unmap_splash_mem(struct msm_fb_data_type *mfd)
@@ -193,12 +208,10 @@
 	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
 
 	if (mfd->splash_info.iommu_dynamic_attached) {
-
 		mdss_smmu_unmap(MDSS_IOMMU_DOMAIN_UNSECURE,
 				mdp5_data->splash_mem_addr,
 				mdp5_data->splash_mem_size);
 		mdss_iommu_ctrl(0);
-
 		mfd->splash_info.iommu_dynamic_attached = false;
 	}
 }
diff --git a/drivers/video/fbdev/msm/mdss_mdp_trace.h b/drivers/video/fbdev/msm/mdss_mdp_trace.h
index f8a6baf..35a126b 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_trace.h
+++ b/drivers/video/fbdev/msm/mdss_mdp_trace.h
@@ -295,6 +295,42 @@
 			__entry->ib_quota)
 );
 
+TRACE_EVENT(mdp_compare_bw,
+	TP_PROTO(unsigned long long new_ab, unsigned long long new_ib,
+		unsigned long long new_wb,
+		unsigned long long old_ab, unsigned long long old_ib,
+		unsigned long long old_wb,
+		u32 params_changed, bool update_bw),
+	TP_ARGS(new_ab, new_ib, new_wb,
+					old_ab, old_ib, old_wb,
+					params_changed, update_bw),
+	TP_STRUCT__entry(
+			__field(u64, new_ab)
+			__field(u64, new_ib)
+			__field(u64, new_wb)
+			__field(u64, old_ab)
+			__field(u64, old_ib)
+			__field(u64, old_wb)
+			__field(u32, params_changed)
+			__field(bool, update_bw)
+	),
+	TP_fast_assign(
+			__entry->new_ab = new_ab;
+			__entry->new_ib = new_ib;
+			__entry->new_wb = new_wb;
+			__entry->old_ab = old_ab;
+			__entry->old_ib = old_ib;
+			__entry->old_wb = old_wb;
+			__entry->params_changed = params_changed;
+			__entry->update_bw = update_bw;
+	),
+	TP_printk("[ab,ib,wb]new[%llu,%llu,%llu]old[%llu,%llu,%llu]chgd:%d %d",
+		__entry->new_ab, __entry->new_ib, __entry->new_wb,
+		__entry->old_ab, __entry->old_ib,
+		__entry->old_wb, __entry->params_changed,
+		__entry->update_bw)
+);
+
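
TRACE_EVENT(mdp_compare_bw, ...) generates a trace_mdp_compare_bw() helper with the
TP_PROTO signature above; the caller is not part of this hunk, so the following is only
a hypothetical call site:

    /* Hypothetical call site, mirroring the TP_PROTO argument order. */
    trace_mdp_compare_bw(new_ab, new_ib, new_wb,
                         old_ab, old_ib, old_wb,
                         params_changed, update_bw);
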
 TRACE_EVENT(mdp_misr_crc,
 	TP_PROTO(u32 block_id, u32 vsync_cnt, u32 crc),
 	TP_ARGS(block_id, vsync_cnt, crc),
@@ -332,6 +368,22 @@
 			__entry->koff_cnt)
 );
 
+TRACE_EVENT(mdp_cmd_readptr_done,
+	TP_PROTO(u32 ctl_num, int koff_cnt),
+	TP_ARGS(ctl_num, koff_cnt),
+	TP_STRUCT__entry(
+			__field(u32, ctl_num)
+			__field(int, koff_cnt)
+	),
+	TP_fast_assign(
+			__entry->ctl_num = ctl_num;
+			__entry->koff_cnt = koff_cnt;
+	),
+	TP_printk("ctl num:%d kickoff:%d",
+			__entry->ctl_num,
+			__entry->koff_cnt)
+);
+
 TRACE_EVENT(mdp_cmd_release_bw,
 	TP_PROTO(u32 ctl_num),
 	TP_ARGS(ctl_num),
@@ -376,7 +428,7 @@
 			__entry->kickoff_cnt)
 );
 
-TRACE_EVENT(mdss_mark_write,
+TRACE_EVENT(tracing_mark_write,
 	TP_PROTO(int pid, const char *name, bool trace_begin),
 	TP_ARGS(pid, name, trace_begin),
 	TP_STRUCT__entry(
diff --git a/drivers/video/fbdev/msm/mdss_mdp_util.c b/drivers/video/fbdev/msm/mdss_mdp_util.c
index c030109..dd73d2c 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_util.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_util.c
@@ -969,7 +969,9 @@
 		 * be filled due to map call which will be unmapped above.
 		 *
 		 */
-		pr_debug("skip memory unmapping for secure display content\n");
+		if (data->ihandle)
+			ion_free(iclient, data->ihandle);
+		pr_debug("free memory handle for secure display/camera content\n");
 	} else {
 		return -ENOMEM;
 	}
@@ -1048,19 +1050,18 @@
 			ret = 0;
 			goto done;
 		} else {
-			struct ion_handle *ihandle = NULL;
 			struct sg_table *sg_ptr = NULL;
 
+			data->ihandle = ion_import_dma_buf_fd(iclient,
+					img->memory_id);
+			if (IS_ERR_OR_NULL(data->ihandle)) {
+				ret = -EINVAL;
+				pr_err("ion import buffer failed\n");
+				data->ihandle = NULL;
+				goto done;
+			}
 			do {
-				ihandle = ion_import_dma_buf_fd(iclient,
-							     img->memory_id);
-				if (IS_ERR_OR_NULL(ihandle)) {
-					ret = -EINVAL;
-					pr_err("ion import buffer failed\n");
-					break;
-				}
-
-				sg_ptr = ion_sg_table(iclient, ihandle);
+				sg_ptr = ion_sg_table(iclient, data->ihandle);
 				if (sg_ptr == NULL) {
 					pr_err("ion sg table get failed\n");
 					ret = -EINVAL;
@@ -1086,8 +1087,6 @@
 				ret = 0;
 			} while (0);
 
-			if (!IS_ERR_OR_NULL(ihandle))
-				ion_free(iclient, ihandle);
 			return ret;
 		}
 	}
diff --git a/drivers/video/fbdev/msm/mdss_panel.h b/drivers/video/fbdev/msm/mdss_panel.h
index a3f9349..c66d5ab 100644
--- a/drivers/video/fbdev/msm/mdss_panel.h
+++ b/drivers/video/fbdev/msm/mdss_panel.h
@@ -60,6 +60,7 @@
 #define WRITEBACK_PANEL		10	/* Wifi display */
 #define LVDS_PANEL		11	/* LVDS */
 #define EDP_PANEL		12	/* LVDS */
+#define SPI_PANEL		13
 
 #define DSC_PPS_LEN		128
 
@@ -108,6 +109,7 @@
 	MDSS_PANEL_INTF_DSI,
 	MDSS_PANEL_INTF_EDP,
 	MDSS_PANEL_INTF_HDMI,
+	MDSS_PANEL_INTF_SPI,
 };
 
 enum {
@@ -118,6 +120,12 @@
 };
 
 enum {
+	MDSS_PANEL_BLANK_BLANK = 0,
+	MDSS_PANEL_BLANK_UNBLANK,
+	MDSS_PANEL_BLANK_LOW_POWER,
+};
+
+enum {
 	MDSS_PANEL_LOW_PERSIST_MODE_OFF = 0,
 	MDSS_PANEL_LOW_PERSIST_MODE_ON,
 };
@@ -167,6 +175,7 @@
 
 #define MDP_INTF_DSI_CMD_FIFO_UNDERFLOW		0x0001
 #define MDP_INTF_DSI_VIDEO_FIFO_OVERFLOW	0x0002
+#define MDP_INTF_DSI_PANEL_DEAD			0x0003
 
 
 enum {
@@ -429,6 +438,10 @@
 	char frame_rate;	/* fps */
 };
 
+struct spi_panel_info {
+	char frame_rate;
+};
+
 /**
  * struct dynamic_fps_data - defines dynamic fps related data
  * @hfp: horizontal front porch
@@ -659,6 +672,10 @@
 	u32 saved_fporch;
 	/* current fps, once is programmed in hw */
 	int current_fps;
+	u32 mdp_koff_thshold_low;
+	u32 mdp_koff_thshold_high;
+	bool mdp_koff_thshold;
+	u32 mdp_koff_delay;
 
 	int panel_max_fps;
 	int panel_max_vtotal;
@@ -676,6 +693,7 @@
 	u32 partial_update_roi_merge;
 	struct ion_handle *splash_ihdl;
 	int panel_power_state;
+	int blank_state;
 	int compression_mode;
 
 	uint32_t panel_dead;
@@ -735,6 +753,7 @@
 	struct lcd_panel_info lcdc;
 	struct fbc_panel_info fbc;
 	struct mipi_panel_info mipi;
+	struct spi_panel_info spi;
 	struct lvds_panel_info lvds;
 	struct edp_panel_info edp;
 
@@ -821,6 +840,7 @@
 	struct mdss_panel_data *next;
 
 	int panel_te_gpio;
+	int panel_en_gpio;
 	struct completion te_done;
 };
 
@@ -873,6 +893,9 @@
 			frame_rate = panel_info->lcdc.frame_rate;
 			break;
 		}
+	case SPI_PANEL:
+		frame_rate = panel_info->spi.frame_rate;
+		break;
 	default:
 		pixel_total = (panel_info->lcdc.h_back_porch +
 			  panel_info->lcdc.h_front_porch +
diff --git a/drivers/video/fbdev/msm/mdss_qpic.c b/drivers/video/fbdev/msm/mdss_qpic.c
index aa8d783..2f170cc 100644
--- a/drivers/video/fbdev/msm/mdss_qpic.c
+++ b/drivers/video/fbdev/msm/mdss_qpic.c
@@ -231,6 +231,7 @@
 	qpic_interface->dma_fnc = mdss_qpic_pan_display;
 	qpic_interface->ioctl_handler = NULL;
 	qpic_interface->kickoff_fnc = NULL;
+	qpic_interface->is_twm_en = NULL;
 	return 0;
 }
 
diff --git a/drivers/video/fbdev/msm/mdss_smmu.c b/drivers/video/fbdev/msm/mdss_smmu.c
index 2d8b5d5..c213f70 100644
--- a/drivers/video/fbdev/msm/mdss_smmu.c
+++ b/drivers/video/fbdev/msm/mdss_smmu.c
@@ -180,6 +180,27 @@
 	return rc;
 }
 
+int mdss_smmu_set_attribute(int domain, int flag, int val)
+{
+	int rc = 0, domain_attr = 0;
+	struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);
+
+	if (!mdss_smmu) {
+		pr_err("not able to get smmu context\n");
+		return -EINVAL;
+	}
+
+	if (flag == EARLY_MAP)
+		domain_attr = DOMAIN_ATTR_EARLY_MAP;
+	else
+		goto end;
+
+	rc = iommu_domain_set_attr(mdss_smmu->mmu_mapping->domain,
+			domain_attr, &val);
+end:
+	return rc;
+}
+
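
mdss_smmu_set_attribute() currently understands only the EARLY_MAP flag, which it maps
to DOMAIN_ATTR_EARLY_MAP. The splash-logo hunk earlier in this patch is the intended
user; its sequence, condensed with error paths elided (splash_pa/splash_size stand in
for mdp5_data->splash_mem_addr/size), is roughly:

    /* Condensed from the mdss_mdp_splash_logo.c hunk above. */
    mdata->handoff_pending = false;
    mdss_smmu_set_attribute(MDSS_IOMMU_DOMAIN_UNSECURE, EARLY_MAP, 1);
    mdss_iommu_ctrl(1);                                    /* attach */
    mdss_smmu_map(MDSS_IOMMU_DOMAIN_UNSECURE, splash_pa, splash_pa,
                  splash_size, IOMMU_READ | IOMMU_NOEXEC); /* PA == VA map */
    mdss_smmu_set_attribute(MDSS_IOMMU_DOMAIN_UNSECURE, EARLY_MAP, 0);
    mdata->handoff_pending = true;
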
 /*
  * mdss_smmu_v2_attach()
  *
@@ -474,24 +495,30 @@
 		(struct mdss_smmu_client *)user_data;
 	u32 fsynr1, mid, i;
 
-	if (!mdss_smmu || !mdss_smmu->mmu_base)
+	if (!mdss_smmu)
 		goto end;
 
-	fsynr1 = readl_relaxed(mdss_smmu->mmu_base + SMMU_CBN_FSYNR1);
-	mid = fsynr1 & 0xff;
-	pr_err("mdss_smmu: iova:0x%lx flags:0x%x fsynr1: 0x%x mid: 0x%x\n",
-		iova, flags, fsynr1, mid);
+	if (mdss_smmu->mmu_base) {
+		fsynr1 = readl_relaxed(mdss_smmu->mmu_base + SMMU_CBN_FSYNR1);
+		mid = fsynr1 & 0xff;
+		pr_err("mdss_smmu: iova:0x%lx flags:0x%x fsynr1: 0x%x mid: 0x%x\n",
+			iova, flags, fsynr1, mid);
 
-	/* get domain id information */
-	for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
-		if (mdss_smmu == mdss_smmu_get_cb(i))
-			break;
+		/* get domain id information */
+		for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+			if (mdss_smmu == mdss_smmu_get_cb(i))
+				break;
+		}
+
+		if (i == MDSS_IOMMU_MAX_DOMAIN)
+			goto end;
+
+		mdss_mdp_debug_mid(mid);
+	} else {
+		pr_err("mdss_smmu: iova:0x%lx flags:0x%x\n",
+			iova, flags);
+		MDSS_XLOG_TOUT_HANDLER("mdp");
 	}
-
-	if (i == MDSS_IOMMU_MAX_DOMAIN)
-		goto end;
-
-	mdss_mdp_debug_mid(mid);
 end:
 	return -ENODEV;
 }
@@ -693,13 +720,13 @@
 }
 
 static struct mdss_smmu_domain mdss_mdp_unsec = {
-	"mdp_0", MDSS_IOMMU_DOMAIN_UNSECURE, SZ_1M, (SZ_4G - SZ_1M)};
+	"mdp_0", MDSS_IOMMU_DOMAIN_UNSECURE, SZ_128M, (SZ_4G - SZ_128M)};
 static struct mdss_smmu_domain mdss_rot_unsec = {
-	NULL, MDSS_IOMMU_DOMAIN_ROT_UNSECURE, SZ_1M, (SZ_4G - SZ_1M)};
+	NULL, MDSS_IOMMU_DOMAIN_ROT_UNSECURE, SZ_128M, (SZ_4G - SZ_128M)};
 static struct mdss_smmu_domain mdss_mdp_sec = {
-	"mdp_1", MDSS_IOMMU_DOMAIN_SECURE, SZ_1M, (SZ_4G - SZ_1M)};
+	"mdp_1", MDSS_IOMMU_DOMAIN_SECURE, SZ_128M, (SZ_4G - SZ_128M)};
 static struct mdss_smmu_domain mdss_rot_sec = {
-	NULL, MDSS_IOMMU_DOMAIN_ROT_SECURE, SZ_1M, (SZ_4G - SZ_1M)};
+	NULL, MDSS_IOMMU_DOMAIN_ROT_SECURE, SZ_128M, (SZ_4G - SZ_128M)};
 
 static const struct of_device_id mdss_smmu_dt_match[] = {
 	{ .compatible = "qcom,smmu_mdp_unsec", .data = &mdss_mdp_unsec},
@@ -840,14 +867,13 @@
 
 	mdss_smmu->dev = dev;
 
+	iommu_set_fault_handler(mdss_smmu->mmu_mapping->domain,
+			mdss_smmu_fault_handler, mdss_smmu);
 	address = of_get_address_by_name(pdev->dev.of_node, "mmu_cb", 0, 0);
 	if (address) {
 		size = address + 1;
 		mdss_smmu->mmu_base = ioremap(be32_to_cpu(*address),
 			be32_to_cpu(*size));
-		if (mdss_smmu->mmu_base)
-			iommu_set_fault_handler(mdss_smmu->mmu_mapping->domain,
-				mdss_smmu_fault_handler, mdss_smmu);
 	} else {
 		pr_debug("unable to map context bank base\n");
 	}
diff --git a/drivers/video/fbdev/msm/mdss_smmu.h b/drivers/video/fbdev/msm/mdss_smmu.h
index 091af3b..97f0933 100644
--- a/drivers/video/fbdev/msm/mdss_smmu.h
+++ b/drivers/video/fbdev/msm/mdss_smmu.h
@@ -42,6 +42,11 @@
 
 void mdss_smmu_register(struct device *dev);
 int mdss_smmu_init(struct mdss_data_type *mdata, struct device *dev);
+int mdss_smmu_set_attribute(int domain, int flag, int val);
+
+enum smmu_attributes {
+	EARLY_MAP
+};
 
 static inline int mdss_smmu_dma_data_direction(int dir)
 {
diff --git a/drivers/video/fbdev/msm/mdss_spi_client.c b/drivers/video/fbdev/msm/mdss_spi_client.c
new file mode 100644
index 0000000..541e382
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_spi_client.c
@@ -0,0 +1,198 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/qpnp/pin.h>
+#include <linux/delay.h>
+
+#include "mdss_spi_client.h"
+
+#define MAX_READ_SPEED_HZ	9600000
+#define SPI_PANEL_COMMAND_LEN	1
+static struct spi_device *mdss_spi_client;
+
+int mdss_spi_read_data(u8 reg_addr, u8 *data, u8 len)
+{
+	int rc = 0;
+	u32 max_speed_hz;
+	u8 memory_write_reg = 0x2c;
+	u8 empty_pack[] = {0x29, 0x29, 0x29};
+	struct spi_transfer t[4] = {
+		[0] = {
+			.tx_buf = &reg_addr,
+			.len = 1,
+		},
+		[1] = {
+			.rx_buf = data,
+			.len = len,
+		},
+		[2] = {
+			.tx_buf = &empty_pack,
+			.len = 3,
+		},
+		[3] = {
+			.tx_buf = &memory_write_reg,
+			.len = 1,
+		}
+	};
+	struct spi_message m;
+
+	if (!mdss_spi_client) {
+		pr_err("%s: spi client not available\n", __func__);
+		return -EINVAL;
+	}
+
+	mdss_spi_client->bits_per_word = 8;
+	max_speed_hz = mdss_spi_client->max_speed_hz;
+	mdss_spi_client->max_speed_hz = MAX_READ_SPEED_HZ;
+
+	spi_message_init(&m);
+	spi_message_add_tail(&t[0], &m);
+	spi_message_add_tail(&t[1], &m);
+	rc = spi_sync(mdss_spi_client, &m);
+
+	spi_message_init(&m);
+	spi_message_add_tail(&t[2], &m);
+	rc = spi_sync(mdss_spi_client, &m);
+	spi_message_init(&m);
+	spi_message_add_tail(&t[3], &m);
+	rc = spi_sync(mdss_spi_client, &m);
+	mdss_spi_client->max_speed_hz = max_speed_hz;
+
+	return rc;
+}
+
+int mdss_spi_tx_command(const void *buf)
+{
+	int rc = 0;
+	struct spi_transfer t = {
+		.tx_buf = buf,
+		.len    = SPI_PANEL_COMMAND_LEN,
+	};
+	struct spi_message m;
+
+	if (!mdss_spi_client) {
+		pr_err("%s: spi client not available\n", __func__);
+		return -EINVAL;
+	}
+
+	mdss_spi_client->bits_per_word = 8;
+
+	spi_message_init(&m);
+	spi_message_add_tail(&t, &m);
+	rc = spi_sync(mdss_spi_client, &m);
+
+	return rc;
+}
+
+int mdss_spi_tx_parameter(const void *buf, size_t len)
+{
+	int rc = 0;
+	struct spi_transfer t = {
+		.tx_buf = buf,
+		.len    = len,
+	};
+	struct spi_message m;
+
+	if (!mdss_spi_client) {
+		pr_err("%s: spi client not available\n", __func__);
+		return -EINVAL;
+	}
+
+	mdss_spi_client->bits_per_word = 8;
+
+	spi_message_init(&m);
+	spi_message_add_tail(&t, &m);
+	rc = spi_sync(mdss_spi_client, &m);
+
+	return rc;
+}
+
+int mdss_spi_tx_pixel(const void *buf, size_t len)
+{
+	int rc = 0;
+	struct spi_transfer t = {
+		.tx_buf = buf,
+		.len    = len,
+		};
+	struct spi_message m;
+
+	if (!mdss_spi_client) {
+		pr_err("%s: spi client not available\n", __func__);
+		return -EINVAL;
+	}
+
+	mdss_spi_client->bits_per_word = 16;
+	spi_message_init(&m);
+	spi_message_add_tail(&t, &m);
+	rc = spi_sync(mdss_spi_client, &m);
+
+	return rc;
+}
+
+static int mdss_spi_client_probe(struct spi_device *spidev)
+{
+	int irq;
+	int cs;
+	int cpha, cpol, cs_high;
+	u32 max_speed;
+	struct device_node *np;
+
+	irq = spidev->irq;
+	cs = spidev->chip_select;
+	cpha = (spidev->mode & SPI_CPHA) ? 1:0;
+	cpol = (spidev->mode & SPI_CPOL) ? 1:0;
+	cs_high = (spidev->mode & SPI_CS_HIGH) ? 1:0;
+	max_speed = spidev->max_speed_hz;
+	np = spidev->dev.of_node;
+	pr_debug("cs[%x] CPHA[%x] CPOL[%x] CS_HIGH[%x] Max_speed[%d]\n",
+		cs, cpha, cpol, cs_high, max_speed);
+	mdss_spi_client = spidev;
+
+	return 0;
+}
+
+
+static const struct of_device_id mdss_spi_dt_match[] = {
+	{ .compatible = "qcom,mdss-spi-client" },
+	{},
+};
+
+static struct spi_driver mdss_spi_client_driver = {
+	.probe = mdss_spi_client_probe,
+	.driver = {
+		.name = "mdss-spi-client",
+		.owner  = THIS_MODULE,
+		.of_match_table = mdss_spi_dt_match,
+	},
+};
+
+static int __init mdss_spi_init(void)
+{
+	int ret;
+
+	ret = spi_register_driver(&mdss_spi_client_driver);
+
+	return 0;
+}
+module_init(mdss_spi_init);
+
+static void __exit mdss_spi_exit(void)
+{
+	spi_unregister_driver(&mdss_spi_client_driver);
+}
+module_exit(mdss_spi_exit);
+
diff --git a/drivers/video/fbdev/msm/mdss_spi_client.h b/drivers/video/fbdev/msm/mdss_spi_client.h
new file mode 100644
index 0000000..2d15625
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_spi_client.h
@@ -0,0 +1,20 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_SPI_CLIENT_H__
+#define __MDSS_SPI_CLIENT_H__
+
+int mdss_spi_tx_command(const void *buf);
+int mdss_spi_tx_parameter(const void *buf, size_t len);
+int mdss_spi_tx_pixel(const void *buf, size_t len);
+int mdss_spi_read_data(u8 reg_addr, u8 *data, u8 len);
+#endif /* End of __MDSS_SPI_CLIENT_H__ */
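
The client above leaves the command/data (DC) GPIO to its callers: the panel driver added
below pulls DC low for the command byte and high again for its parameters. A condensed
usage sketch (dc_gpio and the 0x36/0x48 values are made up for illustration):

    u8 cmd = 0x36, param = 0x48;

    gpio_direction_output(dc_gpio, 0);      /* DC low: next byte is a command */
    mdss_spi_tx_command(&cmd);
    gpio_direction_output(dc_gpio, 1);      /* DC high: following bytes are data */
    mdss_spi_tx_parameter(&param, 1);
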
diff --git a/drivers/video/fbdev/msm/mdss_spi_panel.c b/drivers/video/fbdev/msm/mdss_spi_panel.c
new file mode 100644
index 0000000..86a1c1c
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_spi_panel.c
@@ -0,0 +1,1713 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/qpnp/pin.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/leds.h>
+#include <linux/qpnp/pwm.h>
+#include <linux/of_device.h>
+
+#include "mdss.h"
+#include "mdss_panel.h"
+#include "mdss_spi_panel.h"
+#include "mdss_spi_client.h"
+#include "mdp3.h"
+
+DEFINE_LED_TRIGGER(bl_led_trigger);
+static int mdss_spi_panel_reset(struct mdss_panel_data *pdata, int enable)
+{
+	struct spi_panel_data *ctrl_pdata = NULL;
+	struct mdss_panel_info *pinfo = NULL;
+	int i, rc = 0;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct spi_panel_data,
+				panel_data);
+
+	if (!gpio_is_valid(ctrl_pdata->rst_gpio)) {
+		pr_debug("%s:%d, reset line not configured\n",
+			   __func__, __LINE__);
+		return rc;
+	}
+
+	if (!gpio_is_valid(ctrl_pdata->disp_dc_gpio)) {
+		pr_debug("%s:%d, dc line not configured\n",
+			   __func__, __LINE__);
+		return rc;
+	}
+
+	pr_debug("%s: enable = %d\n", __func__, enable);
+	pinfo = &(ctrl_pdata->panel_data.panel_info);
+
+	if (enable) {
+		rc = gpio_request(ctrl_pdata->rst_gpio, "disp_rst_n");
+		if (rc) {
+			pr_err("display reset gpio request failed\n");
+			return rc;
+		}
+
+		rc = gpio_request(ctrl_pdata->disp_dc_gpio, "disp_dc");
+		if (rc) {
+			pr_err("display dc gpio request failed\n");
+			return rc;
+		}
+
+		if (!pinfo->cont_splash_enabled) {
+			for (i = 0; i < pdata->panel_info.rst_seq_len; ++i) {
+				gpio_direction_output((ctrl_pdata->rst_gpio),
+					pdata->panel_info.rst_seq[i]);
+				if (pdata->panel_info.rst_seq[++i])
+					usleep_range(pinfo->rst_seq[i] * 1000,
+					pinfo->rst_seq[i] * 1000);
+			}
+		}
+
+		if (ctrl_pdata->ctrl_state & CTRL_STATE_PANEL_INIT) {
+			pr_debug("%s: Panel Not properly turned OFF\n",
+						__func__);
+			ctrl_pdata->ctrl_state &= ~CTRL_STATE_PANEL_INIT;
+			pr_err("%s: Reset panel done\n", __func__);
+		}
+	} else {
+		gpio_direction_output((ctrl_pdata->rst_gpio), 0);
+		gpio_free(ctrl_pdata->rst_gpio);
+
+		gpio_direction_output(ctrl_pdata->disp_dc_gpio, 0);
+		gpio_free(ctrl_pdata->disp_dc_gpio);
+	}
+	return rc;
+}
+
+
+static int mdss_spi_panel_pinctrl_set_state(
+	struct spi_panel_data *ctrl_pdata,
+	bool active)
+{
+	struct pinctrl_state *pin_state;
+	int rc = -EFAULT;
+
+	if (IS_ERR_OR_NULL(ctrl_pdata->pin_res.pinctrl))
+		return PTR_ERR(ctrl_pdata->pin_res.pinctrl);
+
+	pin_state = active ? ctrl_pdata->pin_res.gpio_state_active
+				: ctrl_pdata->pin_res.gpio_state_suspend;
+	if (!IS_ERR_OR_NULL(pin_state)) {
+		rc = pinctrl_select_state(ctrl_pdata->pin_res.pinctrl,
+				pin_state);
+		if (rc)
+			pr_err("%s: can not set %s pins\n", __func__,
+			       active ? MDSS_PINCTRL_STATE_DEFAULT
+			       : MDSS_PINCTRL_STATE_SLEEP);
+	} else {
+		pr_err("%s: invalid '%s' pinstate\n", __func__,
+		       active ? MDSS_PINCTRL_STATE_DEFAULT
+		       : MDSS_PINCTRL_STATE_SLEEP);
+	}
+	return rc;
+}
+
+
+static int mdss_spi_panel_pinctrl_init(struct platform_device *pdev)
+{
+	struct spi_panel_data *ctrl_pdata;
+
+	ctrl_pdata = platform_get_drvdata(pdev);
+	ctrl_pdata->pin_res.pinctrl = devm_pinctrl_get(&pdev->dev);
+	if (IS_ERR_OR_NULL(ctrl_pdata->pin_res.pinctrl)) {
+		pr_err("%s: failed to get pinctrl\n", __func__);
+		return PTR_ERR(ctrl_pdata->pin_res.pinctrl);
+	}
+
+	ctrl_pdata->pin_res.gpio_state_active
+		= pinctrl_lookup_state(ctrl_pdata->pin_res.pinctrl,
+				MDSS_PINCTRL_STATE_DEFAULT);
+	if (IS_ERR_OR_NULL(ctrl_pdata->pin_res.gpio_state_active))
+		pr_warn("%s: can not get default pinstate\n", __func__);
+
+	ctrl_pdata->pin_res.gpio_state_suspend
+		= pinctrl_lookup_state(ctrl_pdata->pin_res.pinctrl,
+				MDSS_PINCTRL_STATE_SLEEP);
+	if (IS_ERR_OR_NULL(ctrl_pdata->pin_res.gpio_state_suspend))
+		pr_warn("%s: can not get sleep pinstate\n", __func__);
+
+	return 0;
+}
+
+
+static int mdss_spi_panel_power_on(struct mdss_panel_data *pdata)
+{
+	int ret = 0;
+	struct spi_panel_data *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct spi_panel_data,
+				panel_data);
+	ret = msm_mdss_enable_vreg(
+		ctrl_pdata->panel_power_data.vreg_config,
+		ctrl_pdata->panel_power_data.num_vreg, 1);
+	if (ret) {
+		pr_err("%s: failed to enable vregs for %s\n",
+			__func__, "PANEL_PM");
+	}
+
+	/*
+	 * If continuous splash screen feature is enabled, then we need to
+	 * request all the GPIOs that have already been configured in the
+	 * bootloader. This needs to be done irrespective of whether
+	 * the lp11_init flag is set or not.
+	 */
+	if (pdata->panel_info.cont_splash_enabled) {
+		if (mdss_spi_panel_pinctrl_set_state(ctrl_pdata, true))
+			pr_debug("reset enable: pinctrl not enabled\n");
+
+		ret = mdss_spi_panel_reset(pdata, 1);
+		if (ret)
+			pr_err("%s: Panel reset failed. rc=%d\n",
+					__func__, ret);
+	}
+
+	return ret;
+}
+
+
+static int mdss_spi_panel_power_off(struct mdss_panel_data *pdata)
+{
+	int ret = 0;
+	struct spi_panel_data *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		ret = -EINVAL;
+		goto end;
+	}
+	ctrl_pdata = container_of(pdata, struct spi_panel_data,
+				panel_data);
+
+	ret = mdss_spi_panel_reset(pdata, 0);
+	if (ret) {
+		pr_warn("%s: Panel reset failed. rc=%d\n", __func__, ret);
+		ret = 0;
+	}
+
+	if (mdss_spi_panel_pinctrl_set_state(ctrl_pdata, false))
+		pr_warn("reset disable: pinctrl not enabled\n");
+
+	ret = msm_mdss_enable_vreg(
+		ctrl_pdata->panel_power_data.vreg_config,
+		ctrl_pdata->panel_power_data.num_vreg, 0);
+	if (ret)
+		pr_err("%s: failed to disable vregs for %s\n",
+			__func__, "PANEL_PM");
+
+end:
+	return ret;
+}
+
+
+static int mdss_spi_panel_power_ctrl(struct mdss_panel_data *pdata,
+	int power_state)
+{
+	int ret;
+	struct mdss_panel_info *pinfo;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	pinfo = &pdata->panel_info;
+	pr_debug("%s: cur_power_state=%d req_power_state=%d\n", __func__,
+		pinfo->panel_power_state, power_state);
+
+	if (pinfo->panel_power_state == power_state) {
+		pr_debug("%s: no change needed\n", __func__);
+		return 0;
+	}
+
+	switch (power_state) {
+	case MDSS_PANEL_POWER_OFF:
+		ret = mdss_spi_panel_power_off(pdata);
+		break;
+	case MDSS_PANEL_POWER_ON:
+		ret = mdss_spi_panel_power_on(pdata);
+		break;
+	default:
+		pr_err("%s: unknown panel power state requested (%d)\n",
+			__func__, power_state);
+		ret = -EINVAL;
+	}
+
+	if (!ret)
+		pinfo->panel_power_state = power_state;
+
+	return ret;
+}
+
+static int mdss_spi_panel_unblank(struct mdss_panel_data *pdata)
+{
+	int ret = 0;
+	struct spi_panel_data *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct spi_panel_data,
+				panel_data);
+
+	if (!(ctrl_pdata->ctrl_state & CTRL_STATE_PANEL_INIT)) {
+		ret = ctrl_pdata->on(pdata);
+		if (ret) {
+			pr_err("%s: unable to initialize the panel\n",
+							__func__);
+			return ret;
+		}
+		ctrl_pdata->ctrl_state |= CTRL_STATE_PANEL_INIT;
+	}
+
+	return ret;
+}
+
+static int mdss_spi_panel_blank(struct mdss_panel_data *pdata, int power_state)
+{
+	int ret = 0;
+	struct spi_panel_data *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct spi_panel_data,
+				panel_data);
+
+	if (ctrl_pdata->ctrl_state & CTRL_STATE_PANEL_INIT) {
+		ret = ctrl_pdata->off(pdata);
+		if (ret) {
+			pr_err("%s: Panel OFF failed\n", __func__);
+			return ret;
+		}
+		ctrl_pdata->ctrl_state &= ~CTRL_STATE_PANEL_INIT;
+	}
+
+	return ret;
+}
+
+
+static int mdss_spi_panel_event_handler(struct mdss_panel_data *pdata,
+				  int event, void *arg)
+{
+	int rc = 0;
+	struct spi_panel_data *ctrl_pdata = NULL;
+	int power_state;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+	ctrl_pdata = container_of(pdata, struct spi_panel_data,
+				panel_data);
+
+	switch (event) {
+	case MDSS_EVENT_LINK_READY:
+		rc = mdss_spi_panel_power_ctrl(pdata, MDSS_PANEL_POWER_ON);
+		if (rc) {
+			pr_err("%s:Panel power on failed. rc=%d\n",
+							__func__, rc);
+			return rc;
+		}
+		mdss_spi_panel_pinctrl_set_state(ctrl_pdata, true);
+		mdss_spi_panel_reset(pdata, 1);
+		break;
+	case MDSS_EVENT_UNBLANK:
+		rc = mdss_spi_panel_unblank(pdata);
+		break;
+	case MDSS_EVENT_PANEL_ON:
+		ctrl_pdata->ctrl_state |= CTRL_STATE_MDP_ACTIVE;
+		break;
+	case MDSS_EVENT_BLANK:
+		power_state = (int) (unsigned long) arg;
+		break;
+	case MDSS_EVENT_PANEL_OFF:
+		power_state = (int) (unsigned long) arg;
+		ctrl_pdata->ctrl_state &= ~CTRL_STATE_MDP_ACTIVE;
+		rc = mdss_spi_panel_blank(pdata, power_state);
+		rc = mdss_spi_panel_power_ctrl(pdata, power_state);
+		break;
+	default:
+		pr_debug("%s: unhandled event=%d\n", __func__, event);
+		break;
+	}
+	pr_debug("%s-:event=%d, rc=%d\n", __func__, event, rc);
+	return rc;
+}
+
+int is_spi_panel_continuous_splash_on(struct mdss_panel_data *pdata)
+{
+	int i = 0, voltage = 0;
+	struct mdss_vreg *vreg;
+	int num_vreg;
+	struct spi_panel_data *ctrl_pdata = NULL;
+
+	ctrl_pdata = container_of(pdata, struct spi_panel_data,
+			panel_data);
+	vreg = ctrl_pdata->panel_power_data.vreg_config;
+	num_vreg = ctrl_pdata->panel_power_data.num_vreg;
+
+	for (i = 0; i < num_vreg; i++) {
+		if (regulator_is_enabled(vreg[i].vreg) <= 0)
+			return false;
+		voltage = regulator_get_voltage(vreg[i].vreg);
+		if (!(voltage >= vreg[i].min_voltage &&
+			 voltage <= vreg[i].max_voltage))
+			return false;
+	}
+
+	return true;
+}
+
+static void enable_spi_panel_te_irq(struct spi_panel_data *ctrl_pdata,
+							bool enable)
+{
+	static bool is_enabled = true;
+
+	if (is_enabled == enable)
+		return;
+
+	if (!gpio_is_valid(ctrl_pdata->disp_te_gpio)) {
+		pr_err("%s:%d, SPI panel TE GPIO not configured\n",
+			   __func__, __LINE__);
+		return;
+	}
+
+	if (enable)
+		enable_irq(gpio_to_irq(ctrl_pdata->disp_te_gpio));
+	else
+		disable_irq(gpio_to_irq(ctrl_pdata->disp_te_gpio));
+
+	is_enabled = enable;
+}
+
+int mdss_spi_panel_kickoff(struct mdss_panel_data *pdata,
+			char *buf, int len, int dma_stride)
+{
+	struct spi_panel_data *ctrl_pdata = NULL;
+	char *tx_buf;
+	int rc = 0;
+	int panel_yres;
+	int panel_xres;
+	int padding_length = 0;
+	int actual_stride = 0;
+	int byte_per_pixel = 0;
+	int scan_count = 0;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct spi_panel_data,
+				panel_data);
+
+	tx_buf = ctrl_pdata->tx_buf;
+	panel_xres = ctrl_pdata->panel_data.panel_info.xres;
+	panel_yres = ctrl_pdata->panel_data.panel_info.yres;
+
+	byte_per_pixel = ctrl_pdata->panel_data.panel_info.bpp / 8;
+	actual_stride = panel_xres * byte_per_pixel;
+	padding_length = dma_stride - actual_stride;
+
+	/* remove the padding and copy to continuous buffer */
+	while (scan_count < panel_yres) {
+		memcpy((tx_buf + scan_count * actual_stride),
+			(buf + scan_count * (actual_stride + padding_length)),
+				actual_stride);
+		scan_count++;
+	}
+
+	enable_spi_panel_te_irq(ctrl_pdata, true);
+
+	mutex_lock(&ctrl_pdata->spi_tx_mutex);
+	reinit_completion(&ctrl_pdata->spi_panel_te);
+
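+	/*
+	 * Wait for the next TE pulse before pushing the frame so the SPI
+	 * transfer does not race the panel scan-out.
+	 */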
+	rc = wait_for_completion_timeout(&ctrl_pdata->spi_panel_te,
+				   msecs_to_jiffies(SPI_PANEL_TE_TIMEOUT));
+
+	if (rc == 0)
+		pr_err("%s: wait for panel TE timed out\n", __func__);
+
+	rc = mdss_spi_tx_pixel(tx_buf, ctrl_pdata->byte_pre_frame);
+	mutex_unlock(&ctrl_pdata->spi_tx_mutex);
+
+	return rc;
+}
+
+static int mdss_spi_read_panel_data(struct mdss_panel_data *pdata,
+		u8 reg_addr, u8 *data, u8 len)
+{
+	int rc = 0;
+	struct spi_panel_data *ctrl_pdata = NULL;
+
+	ctrl_pdata = container_of(pdata, struct spi_panel_data,
+		panel_data);
+
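+	/*
+	 * Pull the DC gpio low to address the register, then restore it to
+	 * data mode once the read has completed.
+	 */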
+	mutex_lock(&ctrl_pdata->spi_tx_mutex);
+	gpio_direction_output(ctrl_pdata->disp_dc_gpio, 0);
+	rc = mdss_spi_read_data(reg_addr, data, len);
+	gpio_direction_output(ctrl_pdata->disp_dc_gpio, 1);
+	mutex_unlock(&ctrl_pdata->spi_tx_mutex);
+
+	return rc;
+}
+
+static int mdss_spi_panel_on(struct mdss_panel_data *pdata)
+{
+	struct spi_panel_data *ctrl = NULL;
+	struct mdss_panel_info *pinfo;
+	int i;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+	pinfo = &pdata->panel_info;
+	ctrl = container_of(pdata, struct spi_panel_data,
+				panel_data);
+
+	for (i = 0; i < ctrl->on_cmds.cmd_cnt; i++) {
+		/* pull down dc gpio indicate this is command */
+		gpio_direction_output(ctrl->disp_dc_gpio, 0);
+		mdss_spi_tx_command(ctrl->on_cmds.cmds[i].command);
+		gpio_direction_output((ctrl->disp_dc_gpio), 1);
+
+		if (ctrl->on_cmds.cmds[i].dchdr.dlen > 1) {
+			mdss_spi_tx_parameter(ctrl->on_cmds.cmds[i].parameter,
+					ctrl->on_cmds.cmds[i].dchdr.dlen-1);
+		}
+		if (ctrl->on_cmds.cmds[i].dchdr.wait != 0)
+			msleep(ctrl->on_cmds.cmds[i].dchdr.wait);
+	}
+
+	pinfo->blank_state = MDSS_PANEL_BLANK_UNBLANK;
+
+	pr_debug("%s:-\n", __func__);
+
+	return 0;
+}
+
+
+static int mdss_spi_panel_off(struct mdss_panel_data *pdata)
+{
+	struct spi_panel_data *ctrl = NULL;
+	struct mdss_panel_info *pinfo;
+	int i;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	pinfo = &pdata->panel_info;
+	ctrl = container_of(pdata, struct spi_panel_data,
+				panel_data);
+
+	for (i = 0; i < ctrl->off_cmds.cmd_cnt; i++) {
+		/* pull down dc gpio indicate this is command */
+		gpio_direction_output(ctrl->disp_dc_gpio, 0);
+		mdss_spi_tx_command(ctrl->off_cmds.cmds[i].command);
+		gpio_direction_output((ctrl->disp_dc_gpio), 1);
+
+		if (ctrl->off_cmds.cmds[i].dchdr.dlen > 1) {
+			mdss_spi_tx_parameter(ctrl->off_cmds.cmds[i].parameter,
+					ctrl->off_cmds.cmds[i].dchdr.dlen-1);
+		}
+
+		if (ctrl->off_cmds.cmds[i].dchdr.wait != 0)
+			msleep(ctrl->off_cmds.cmds[i].dchdr.wait);
+	}
+
+	pinfo->blank_state = MDSS_PANEL_BLANK_BLANK;
+
+	pr_debug("%s:-\n", __func__);
+	return 0;
+}
+
+static void mdss_spi_put_dt_vreg_data(struct device *dev,
+	struct mdss_module_power *module_power)
+{
+	if (!module_power) {
+		pr_err("%s: invalid input\n", __func__);
+		return;
+	}
+
+	if (module_power->vreg_config) {
+		devm_kfree(dev, module_power->vreg_config);
+		module_power->vreg_config = NULL;
+	}
+	module_power->num_vreg = 0;
+}
+
+
+static int mdss_spi_get_panel_vreg_data(struct device *dev,
+			struct mdss_module_power *mp)
+{
+	int i = 0, rc = 0;
+	u32 tmp = 0;
+	struct device_node *of_node = NULL, *supply_node = NULL;
+	struct device_node *supply_root_node = NULL;
+
+	if (!dev || !mp) {
+		pr_err("%s: invalid input\n", __func__);
+		rc = -EINVAL;
+		return rc;
+	}
+
+	of_node = dev->of_node;
+
+	mp->num_vreg = 0;
+
+	supply_root_node = of_get_child_by_name(of_node,
+				"qcom,panel-supply-entries");
+
+	for_each_available_child_of_node(supply_root_node, supply_node) {
+		mp->num_vreg++;
+	}
+	if (mp->num_vreg == 0) {
+		pr_debug("%s: no vreg\n", __func__);
+		goto novreg;
+	} else {
+		pr_debug("%s: vreg found. count=%d\n", __func__, mp->num_vreg);
+	}
+
+	mp->vreg_config = devm_kcalloc(dev, mp->num_vreg,
+				sizeof(struct mdss_vreg), GFP_KERNEL);
+
+	if (mp->vreg_config != NULL) {
+		for_each_available_child_of_node(supply_root_node,
+				supply_node) {
+			const char *st = NULL;
+			/* vreg-name */
+			rc = of_property_read_string(supply_node,
+				"qcom,supply-name", &st);
+			if (rc) {
+				pr_err("%s: error reading name. rc=%d\n",
+					__func__, rc);
+				goto error;
+			}
+			snprintf(mp->vreg_config[i].vreg_name,
+				ARRAY_SIZE((mp->vreg_config[i].vreg_name)),
+					"%s", st);
+			/* vreg-min-voltage */
+			rc = of_property_read_u32(supply_node,
+				"qcom,supply-min-voltage", &tmp);
+			if (rc) {
+				pr_err("%s: error reading min volt. rc=%d\n",
+					__func__, rc);
+				goto error;
+			}
+			mp->vreg_config[i].min_voltage = tmp;
+
+			/* vreg-max-voltage */
+			rc = of_property_read_u32(supply_node,
+				"qcom,supply-max-voltage", &tmp);
+			if (rc) {
+				pr_err("%s: error reading max volt. rc=%d\n",
+					__func__, rc);
+				goto error;
+			}
+			mp->vreg_config[i].max_voltage = tmp;
+
+			/* enable-load */
+			rc = of_property_read_u32(supply_node,
+				"qcom,supply-enable-load", &tmp);
+			if (rc) {
+				pr_err("%s: error read enable load. rc=%d\n",
+					__func__, rc);
+				goto error;
+			}
+			mp->vreg_config[i].load[DSS_REG_MODE_ENABLE] = tmp;
+
+			/* disable-load */
+			rc = of_property_read_u32(supply_node,
+				"qcom,supply-disable-load", &tmp);
+			if (rc) {
+				pr_err("%s: error read disable load. rc=%d\n",
+					__func__, rc);
+				goto error;
+			}
+			mp->vreg_config[i].load[DSS_REG_MODE_DISABLE] = tmp;
+
+			/* pre-sleep */
+			rc = of_property_read_u32(supply_node,
+				"qcom,supply-pre-on-sleep", &tmp);
+			if (rc) {
+				pr_debug("%s: error read pre on value\n",
+						__func__);
+				rc = 0;
+			} else {
+				mp->vreg_config[i].pre_on_sleep = tmp;
+			}
+
+			rc = of_property_read_u32(supply_node,
+				"qcom,supply-pre-off-sleep", &tmp);
+			if (rc) {
+				pr_debug("%s: error read pre off value\n",
+						__func__);
+				rc = 0;
+			} else {
+				mp->vreg_config[i].pre_off_sleep = tmp;
+			}
+
+			/* post-sleep */
+			rc = of_property_read_u32(supply_node,
+				"qcom,supply-post-on-sleep", &tmp);
+			if (rc) {
+				pr_debug("%s: error read post on value\n",
+						__func__);
+				rc = 0;
+			} else {
+				mp->vreg_config[i].post_on_sleep = tmp;
+			}
+
+			rc = of_property_read_u32(supply_node,
+				"qcom,supply-post-off-sleep", &tmp);
+			if (rc) {
+				pr_debug("%s: error read post off value\n",
+						__func__);
+				rc = 0;
+			} else {
+				mp->vreg_config[i].post_off_sleep = tmp;
+			}
+
+			++i;
+		}
+	} else {
+		rc = -ENOMEM;
+		goto novreg;
+	}
+
+	return rc;
+error:
+	devm_kfree(dev, mp->vreg_config);
+	mp->vreg_config = NULL;
+
+novreg:
+	mp->num_vreg = 0;
+
+	return rc;
+
+}
+
+static int mdss_spi_panel_parse_cmds(struct device_node *np,
+		struct spi_panel_cmds *pcmds, char *cmd_key)
+{
+	const char *data;
+	int blen = 0, len;
+	char *buf, *bp;
+	struct spi_ctrl_hdr *dchdr;
+	int i, cnt;
+
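+	/*
+	 * The property is a packed stream of entries: a two-byte
+	 * spi_ctrl_hdr (wait, dlen) followed by dlen payload bytes, the
+	 * first of which is the command and the rest its parameters.
+	 */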
+	data = of_get_property(np, cmd_key, &blen);
+	if (!data) {
+		pr_err("%s: failed, key=%s\n", __func__, cmd_key);
+		return -ENOMEM;
+	}
+
+	buf = kcalloc(blen, sizeof(char), GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	memcpy(buf, data, blen);
+
+	/* scan dcs commands */
+	bp = buf;
+	len = blen;
+	cnt = 0;
+	while (len >= sizeof(*dchdr)) {
+		dchdr = (struct spi_ctrl_hdr *)bp;
+		if (dchdr->dlen > len) {
+			pr_err("%s: dtsi parse error, len=%d\n",
+				__func__, dchdr->dlen);
+			goto exit_free;
+		}
+		bp += sizeof(*dchdr);
+		len -= sizeof(*dchdr);
+		bp += dchdr->dlen;
+		len -= dchdr->dlen;
+		cnt++;
+	}
+
+	if (len != 0) {
+		pr_err("%s: dcs_cmd=%x len=%d error\n",
+				__func__, buf[0], len);
+		goto exit_free;
+	}
+
+	pcmds->cmds = kcalloc(cnt, sizeof(struct spi_cmd_desc),
+						GFP_KERNEL);
+	if (!pcmds->cmds)
+		goto exit_free;
+
+	pcmds->cmd_cnt = cnt;
+	pcmds->buf = buf;
+	pcmds->blen = blen;
+
+	bp = buf;
+	len = blen;
+	for (i = 0; i < cnt; i++) {
+		dchdr = (struct spi_ctrl_hdr *)bp;
+		len -= sizeof(*dchdr);
+		bp += sizeof(*dchdr);
+		pcmds->cmds[i].dchdr = *dchdr;
+		pcmds->cmds[i].command = bp;
+		pcmds->cmds[i].parameter = bp + sizeof(char);
+		bp += dchdr->dlen;
+		len -= dchdr->dlen;
+	}
+
+	pr_debug("%s: dcs_cmd=%x, len=%d, cmd_cnt=%d\n", __func__,
+		pcmds->buf[0], pcmds->blen, pcmds->cmd_cnt);
+
+	return 0;
+
+exit_free:
+	kfree(buf);
+	return -ENOMEM;
+}
+static int mdss_spi_panel_parse_reset_seq(struct device_node *np,
+		u32 rst_seq[MDSS_SPI_RST_SEQ_LEN], u32 *rst_len,
+		const char *name)
+{
+	int num = 0, i;
+	int rc;
+	struct property *data;
+	u32 tmp[MDSS_SPI_RST_SEQ_LEN];
+
+	*rst_len = 0;
+	data = of_find_property(np, name, &num);
+	num /= sizeof(u32);
+	if (!data || !num || num > MDSS_SPI_RST_SEQ_LEN || num % 2) {
+		pr_err("%s:%d, error reading %s, length found = %d\n",
+			__func__, __LINE__, name, num);
+	} else {
+		rc = of_property_read_u32_array(np, name, tmp, num);
+		if (rc)
+			pr_err("%s:%d, error reading %s, rc = %d\n",
+				__func__, __LINE__, name, rc);
+		else {
+			for (i = 0; i < num; ++i)
+				rst_seq[i] = tmp[i];
+			*rst_len = num;
+		}
+	}
+	return 0;
+}
+
+static bool mdss_send_panel_cmd_for_esd(struct spi_panel_data *ctrl_pdata)
+{
+
+	if (ctrl_pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return false;
+	}
+
+	mutex_lock(&ctrl_pdata->spi_tx_mutex);
+	mdss_spi_panel_on(&ctrl_pdata->panel_data);
+	mutex_unlock(&ctrl_pdata->spi_tx_mutex);
+
+	return true;
+}
+
+static bool mdss_spi_reg_status_check(struct spi_panel_data *ctrl_pdata)
+{
+	int ret = 0;
+	int i = 0;
+
+	if (ctrl_pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return false;
+	}
+
+	pr_debug("%s: Checking Register status\n", __func__);
+
+	ret = mdss_spi_read_panel_data(&ctrl_pdata->panel_data,
+					ctrl_pdata->panel_status_reg,
+					ctrl_pdata->act_status_value,
+					ctrl_pdata->status_cmds_rlen);
+	if (ret < 0) {
+		pr_err("%s: Read status register returned error\n", __func__);
+	} else {
+		for (i = 0; i < ctrl_pdata->status_cmds_rlen; i++) {
+			pr_debug("act_value[%d] = %x, exp_value[%d] = %x\n",
+					i, ctrl_pdata->act_status_value[i],
+					i, ctrl_pdata->exp_status_value[i]);
+			if (ctrl_pdata->act_status_value[i] !=
+					ctrl_pdata->exp_status_value[i])
+				return false;
+		}
+	}
+
+	return true;
+}
+
+static void mdss_spi_parse_esd_params(struct device_node *np,
+		struct spi_panel_data	*ctrl)
+{
+	u32 tmp;
+	int rc;
+	struct property *data;
+	const char *string;
+	struct mdss_panel_info *pinfo = &ctrl->panel_data.panel_info;
+
+	pinfo->esd_check_enabled = of_property_read_bool(np,
+		"qcom,esd-check-enabled");
+
+	if (!pinfo->esd_check_enabled)
+		return;
+
+	ctrl->status_mode = SPI_ESD_MAX;
+
+	rc = of_property_read_string(np,
+			"qcom,mdss-spi-panel-status-check-mode", &string);
+	if (!rc) {
+		if (!strcmp(string, "reg_read")) {
+			ctrl->status_mode = SPI_ESD_REG;
+			ctrl->check_status =
+				mdss_spi_reg_status_check;
+		} else if (!strcmp(string, "send_init_command")) {
+			ctrl->status_mode = SPI_SEND_PANEL_COMMAND;
+			ctrl->check_status =
+				mdss_send_panel_cmd_for_esd;
+			return;
+		} else {
+			pr_err("No valid panel-status-check-mode string\n");
+			pinfo->esd_check_enabled = false;
+			return;
+		}
+	}
+
+	rc = of_property_read_u8(np, "qcom,mdss-spi-panel-status-reg",
+				&ctrl->panel_status_reg);
+	if (rc) {
+		pr_warn("%s:%d, Read status reg failed, disable ESD check\n",
+				__func__, __LINE__);
+		pinfo->esd_check_enabled = false;
+		return;
+	}
+
+	rc = of_property_read_u32(np, "qcom,mdss-spi-panel-status-read-length",
+					&tmp);
+	if (rc) {
+		pr_warn("%s:%d, Read reg length failed, disable ESD check\n",
+				__func__, __LINE__);
+		pinfo->esd_check_enabled = false;
+		return;
+	}
+
+	ctrl->status_cmds_rlen = (!rc ? tmp : 1);
+
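+	/*
+	 * exp_status_value holds the values the status register is expected
+	 * to report; act_status_value receives what is read back during the
+	 * ESD register check.
+	 */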
+	ctrl->exp_status_value = kzalloc(sizeof(u8) *
+				 (ctrl->status_cmds_rlen + 1), GFP_KERNEL);
+	ctrl->act_status_value = kzalloc(sizeof(u8) *
+				(ctrl->status_cmds_rlen + 1), GFP_KERNEL);
+
+	if (!ctrl->exp_status_value || !ctrl->act_status_value) {
+		pr_err("%s: Error allocating memory for status buffer\n",
+						__func__);
+		pinfo->esd_check_enabled = false;
+		return;
+	}
+
+	data = of_find_property(np, "qcom,mdss-spi-panel-status-value", &tmp);
+	tmp /= sizeof(u8);
+	if (!data || (tmp != ctrl->status_cmds_rlen)) {
+		pr_err("%s: Panel status values not found\n", __func__);
+		pinfo->esd_check_enabled = false;
+		memset(ctrl->exp_status_value, 0, ctrl->status_cmds_rlen);
+	} else {
+		rc = of_property_read_u8_array(np,
+			"qcom,mdss-spi-panel-status-value",
+			ctrl->exp_status_value, tmp);
+		if (rc) {
+			pr_err("%s: Error reading panel status values\n",
+				__func__);
+			pinfo->esd_check_enabled = false;
+			memset(ctrl->exp_status_value, 0,
+				ctrl->status_cmds_rlen);
+		}
+	}
+}
+
+static int mdss_spi_panel_parse_dt(struct device_node *np,
+		struct spi_panel_data	*ctrl_pdata)
+{
+	u32 tmp;
+	int rc;
+	const char *data;
+	struct mdss_panel_info *pinfo = &(ctrl_pdata->panel_data.panel_info);
+
+	pinfo->cont_splash_enabled = of_property_read_bool(np,
+					"qcom,cont-splash-enabled");
+
+	rc = of_property_read_u32(np, "qcom,mdss-spi-panel-width", &tmp);
+	if (rc) {
+		pr_err("%s: panel width not specified\n", __func__);
+		return -EINVAL;
+	}
+	pinfo->xres = (!rc ? tmp : 240);
+
+	rc = of_property_read_u32(np, "qcom,mdss-spi-panel-height", &tmp);
+	if (rc) {
+		pr_err("%s: panel height not specified\n", __func__);
+		return -EINVAL;
+	}
+	pinfo->yres = (!rc ? tmp : 320);
+
+	rc = of_property_read_u32(np,
+		"qcom,mdss-pan-physical-width-dimension", &tmp);
+	pinfo->physical_width = (!rc ? tmp : 0);
+	rc = of_property_read_u32(np,
+		"qcom,mdss-pan-physical-height-dimension", &tmp);
+	pinfo->physical_height = (!rc ? tmp : 0);
+	rc = of_property_read_u32(np, "qcom,mdss-spi-panel-framerate", &tmp);
+	pinfo->spi.frame_rate = (!rc ? tmp : 30);
+	rc = of_property_read_u32(np, "qcom,mdss-spi-h-front-porch", &tmp);
+	pinfo->lcdc.h_front_porch = (!rc ? tmp : 6);
+	rc = of_property_read_u32(np, "qcom,mdss-spi-h-back-porch", &tmp);
+	pinfo->lcdc.h_back_porch = (!rc ? tmp : 6);
+	rc = of_property_read_u32(np, "qcom,mdss-spi-h-pulse-width", &tmp);
+	pinfo->lcdc.h_pulse_width = (!rc ? tmp : 2);
+	rc = of_property_read_u32(np, "qcom,mdss-spi-h-sync-skew", &tmp);
+	pinfo->lcdc.hsync_skew = (!rc ? tmp : 0);
+	rc = of_property_read_u32(np, "qcom,mdss-spi-v-back-porch", &tmp);
+	pinfo->lcdc.v_back_porch = (!rc ? tmp : 6);
+	rc = of_property_read_u32(np, "qcom,mdss-spi-v-front-porch", &tmp);
+	pinfo->lcdc.v_front_porch = (!rc ? tmp : 6);
+	rc = of_property_read_u32(np, "qcom,mdss-spi-v-pulse-width", &tmp);
+	pinfo->lcdc.v_pulse_width = (!rc ? tmp : 2);
+
+
+	rc = of_property_read_u32(np, "qcom,mdss-spi-bpp", &tmp);
+	if (rc) {
+		pr_err("%s: bpp not specified\n", __func__);
+		return -EINVAL;
+	}
+	pinfo->bpp = (!rc ? tmp : 16);
+
+	pinfo->pdest = DISPLAY_1;
+
+	ctrl_pdata->bklt_ctrl = SPI_UNKNOWN_CTRL;
+	data = of_get_property(np, "qcom,mdss-spi-bl-pmic-control-type", NULL);
+	if (data) {
+		if (!strcmp(data, "bl_ctrl_wled")) {
+			led_trigger_register_simple("bkl-trigger",
+				&bl_led_trigger);
+			pr_debug("%s: SUCCESS-> WLED TRIGGER register\n",
+				__func__);
+			ctrl_pdata->bklt_ctrl = SPI_BL_WLED;
+		} else if (!strcmp(data, "bl_gpio_pulse")) {
+			led_trigger_register_simple("gpio-bklt-trigger",
+				&bl_led_trigger);
+			pr_debug("%s: SUCCESS-> GPIO PULSE TRIGGER register\n",
+				__func__);
+			ctrl_pdata->bklt_ctrl = SPI_BL_WLED;
+		} else if (!strcmp(data, "bl_ctrl_pwm")) {
+			ctrl_pdata->bklt_ctrl = SPI_BL_PWM;
+			ctrl_pdata->pwm_pmi = of_property_read_bool(np,
+					"qcom,mdss-spi-bl-pwm-pmi");
+			rc = of_property_read_u32(np,
+				"qcom,mdss-spi-bl-pmic-pwm-frequency", &tmp);
+			if (rc) {
+				pr_err("%s: Error, panel pwm_period\n",
+					 __func__);
+				return -EINVAL;
+			}
+			ctrl_pdata->pwm_period = tmp;
+			if (ctrl_pdata->pwm_pmi) {
+				ctrl_pdata->pwm_bl = of_pwm_get(np, NULL);
+				if (IS_ERR(ctrl_pdata->pwm_bl)) {
+					pr_err("%s: Error, pwm device\n",
+							__func__);
+					ctrl_pdata->pwm_bl = NULL;
+					return -EINVAL;
+				}
+			} else {
+				rc = of_property_read_u32(np,
+					"qcom,mdss-spi-bl-pmic-bank-select",
+								 &tmp);
+				if (rc) {
+					pr_err("%s: Error, lpg channel\n",
+						__func__);
+					return -EINVAL;
+				}
+				ctrl_pdata->pwm_lpg_chan = tmp;
+				tmp = of_get_named_gpio(np,
+					"qcom,mdss-spi-pwm-gpio", 0);
+				ctrl_pdata->pwm_pmic_gpio = tmp;
+				pr_debug("%s: Configured PWM bklt ctrl\n",
+								 __func__);
+			}
+		}
+	}
+	rc = of_property_read_u32(np, "qcom,mdss-brightness-max-level", &tmp);
+	pinfo->brightness_max = (!rc ? tmp : MDSS_MAX_BL_BRIGHTNESS);
+	rc = of_property_read_u32(np, "qcom,mdss-spi-bl-min-level", &tmp);
+	pinfo->bl_min = (!rc ? tmp : 0);
+	rc = of_property_read_u32(np, "qcom,mdss-spi-bl-max-level", &tmp);
+	pinfo->bl_max = (!rc ? tmp : 255);
+	ctrl_pdata->bklt_max = pinfo->bl_max;
+
+
+	mdss_spi_panel_parse_reset_seq(np, pinfo->rst_seq,
+					&(pinfo->rst_seq_len),
+					"qcom,mdss-spi-reset-sequence");
+
+	mdss_spi_panel_parse_cmds(np, &ctrl_pdata->on_cmds,
+		"qcom,mdss-spi-on-command");
+
+	mdss_spi_panel_parse_cmds(np, &ctrl_pdata->off_cmds,
+		"qcom,mdss-spi-off-command");
+
+	mdss_spi_parse_esd_params(np, ctrl_pdata);
+
+
+	return 0;
+}
+
+static void mdss_spi_panel_pwm_cfg(struct spi_panel_data *ctrl)
+{
+	if (ctrl->pwm_pmi)
+		return;
+
+	ctrl->pwm_bl = pwm_request(ctrl->pwm_lpg_chan, "lcd-bklt");
+	if (ctrl->pwm_bl == NULL || IS_ERR(ctrl->pwm_bl)) {
+		pr_err("%s: Error: lpg_chan=%d pwm request failed\n",
+				__func__, ctrl->pwm_lpg_chan);
+	}
+	ctrl->pwm_enabled = 0;
+}
+
+static void mdss_spi_panel_bklt_pwm(struct spi_panel_data *ctrl, int level)
+{
+	int ret;
+	u32 duty;
+	u32 period_ns;
+
+	if (ctrl->pwm_bl == NULL) {
+		pr_err("%s: no PWM\n", __func__);
+		return;
+	}
+
+	if (level == 0) {
+		if (ctrl->pwm_enabled) {
+			ret = pwm_config_us(ctrl->pwm_bl, level,
+					ctrl->pwm_period);
+			if (ret)
+				pr_err("%s: pwm_config_us() failed err=%d.\n",
+						__func__, ret);
+			pwm_disable(ctrl->pwm_bl);
+		}
+		ctrl->pwm_enabled = 0;
+		return;
+	}
+
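+	/* scale the level into the PWM period: duty/period = level/bklt_max */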
+	duty = level * ctrl->pwm_period;
+	duty /= ctrl->bklt_max;
+
+	pr_debug("%s: bklt_ctrl=%d pwm_period=%d pwm_gpio=%d pwm_lpg_chan=%d\n",
+			__func__, ctrl->bklt_ctrl, ctrl->pwm_period,
+				ctrl->pwm_pmic_gpio, ctrl->pwm_lpg_chan);
+
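+	/*
+	 * Long periods are programmed through the microsecond-based helper;
+	 * shorter ones are converted to nanoseconds for pwm_config().
+	 */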
+	if (ctrl->pwm_period >= USEC_PER_SEC) {
+		ret = pwm_config_us(ctrl->pwm_bl, duty, ctrl->pwm_period);
+		if (ret) {
+			pr_err("%s: pwm_config_us() failed err=%d\n",
+					__func__, ret);
+			return;
+		}
+	} else {
+		period_ns = ctrl->pwm_period * NSEC_PER_USEC;
+		ret = pwm_config(ctrl->pwm_bl,
+				level * period_ns / ctrl->bklt_max,
+				period_ns);
+		if (ret) {
+			pr_err("%s: pwm_config() failed err=%d\n",
+					__func__, ret);
+			return;
+		}
+	}
+
+	if (!ctrl->pwm_enabled) {
+		ret = pwm_enable(ctrl->pwm_bl);
+		if (ret)
+			pr_err("%s: pwm_enable() failed err=%d\n", __func__,
+				ret);
+		ctrl->pwm_enabled = 1;
+	}
+}
+
+static void mdss_spi_panel_bl_ctrl(struct mdss_panel_data *pdata,
+							u32 bl_level)
+{
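+	/*
+	 * Non-zero levels are only latched in mdp3_res (bklt_update);
+	 * a zero level is applied right away.
+	 */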
+	if (bl_level) {
+		mdp3_res->bklt_level = bl_level;
+		mdp3_res->bklt_update = true;
+	} else {
+		mdss_spi_panel_bl_ctrl_update(pdata, bl_level);
+	}
+}
+
+#if defined(CONFIG_FB_MSM_MDSS_SPI_PANEL) && defined(CONFIG_SPI_QUP)
+void mdss_spi_panel_bl_ctrl_update(struct mdss_panel_data *pdata,
+							u32 bl_level)
+{
+	struct spi_panel_data *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	ctrl_pdata = container_of(pdata, struct spi_panel_data,
+				panel_data);
+
+	if ((bl_level < pdata->panel_info.bl_min) && (bl_level != 0))
+		bl_level = pdata->panel_info.bl_min;
+
+	switch (ctrl_pdata->bklt_ctrl) {
+	case SPI_BL_WLED:
+		led_trigger_event(bl_led_trigger, bl_level);
+		break;
+	case SPI_BL_PWM:
+		mdss_spi_panel_bklt_pwm(ctrl_pdata, bl_level);
+		break;
+	default:
+		pr_err("%s: Unknown bl_ctrl configuration %d\n",
+			__func__, ctrl_pdata->bklt_ctrl);
+		break;
+	}
+}
+#endif
+
+static int mdss_spi_panel_init(struct device_node *node,
+	struct spi_panel_data	*ctrl_pdata,
+	bool cmd_cfg_cont_splash)
+{
+	int rc = 0;
+	static const char *panel_name;
+	struct mdss_panel_info *pinfo;
+
+	if (!node || !ctrl_pdata) {
+		pr_err("%s: Invalid arguments\n", __func__);
+		return -ENODEV;
+	}
+
+	pinfo = &ctrl_pdata->panel_data.panel_info;
+
+	pr_debug("%s:%d\n", __func__, __LINE__);
+	pinfo->panel_name[0] = '\0';
+	panel_name = of_get_property(node, "qcom,mdss-spi-panel-name", NULL);
+	if (!panel_name) {
+		pr_info("%s:%d, Panel name not specified\n",
+						__func__, __LINE__);
+	} else {
+		pr_debug("%s: Panel Name = %s\n", __func__, panel_name);
+		strlcpy(&pinfo->panel_name[0], panel_name, MDSS_MAX_PANEL_LEN);
+	}
+	rc = mdss_spi_panel_parse_dt(node, ctrl_pdata);
+	if (rc) {
+		pr_err("%s:%d panel dt parse failed\n", __func__, __LINE__);
+		return rc;
+	}
+
+	ctrl_pdata->byte_pre_frame = pinfo->xres * pinfo->yres * pinfo->bpp / 8;
+
+	ctrl_pdata->tx_buf = kmalloc(ctrl_pdata->byte_pre_frame, GFP_KERNEL);
+	if (!ctrl_pdata->tx_buf)
+		return -ENOMEM;
+
+	if (!cmd_cfg_cont_splash)
+		pinfo->cont_splash_enabled = false;
+
+	pr_info("%s: Continuous splash %s\n", __func__,
+		pinfo->cont_splash_enabled ? "enabled" : "disabled");
+
+	pinfo->dynamic_switch_pending = false;
+	pinfo->is_lpm_mode = false;
+	pinfo->esd_rdy = false;
+
+	ctrl_pdata->on = mdss_spi_panel_on;
+	ctrl_pdata->off = mdss_spi_panel_off;
+	ctrl_pdata->panel_data.set_backlight = mdss_spi_panel_bl_ctrl;
+
+	return 0;
+}
+
+static int mdss_spi_get_panel_cfg(char *panel_cfg,
+				struct spi_panel_data	*ctrl_pdata)
+{
+	int rc;
+	struct mdss_panel_cfg *pan_cfg = NULL;
+
+	if (!ctrl_pdata)
+		return MDSS_PANEL_INTF_INVALID;
+
+	pan_cfg = ctrl_pdata->mdss_util->panel_intf_type(MDSS_PANEL_INTF_SPI);
+	if (IS_ERR(pan_cfg)) {
+		return PTR_ERR(pan_cfg);
+	} else if (!pan_cfg) {
+		panel_cfg[0] = 0;
+		return 0;
+	}
+
+	pr_debug("%s:%d: cfg:[%s]\n", __func__, __LINE__,
+		 pan_cfg->arg_cfg);
+	ctrl_pdata->panel_data.panel_info.is_prim_panel = true;
+	rc = strlcpy(panel_cfg, pan_cfg->arg_cfg,
+		     sizeof(pan_cfg->arg_cfg));
+	return rc;
+}
+
+static int mdss_spi_panel_regulator_init(struct platform_device *pdev)
+{
+	int rc = 0;
+
+	struct spi_panel_data *ctrl_pdata = NULL;
+
+	if (!pdev) {
+		pr_err("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = platform_get_drvdata(pdev);
+	if (!ctrl_pdata) {
+		pr_err("%s: invalid driver data\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = msm_mdss_config_vreg(&pdev->dev,
+		ctrl_pdata->panel_power_data.vreg_config,
+		ctrl_pdata->panel_power_data.num_vreg, 1);
+	if (rc)
+		pr_err("%s: failed to init vregs for %s\n",
+			__func__, "PANEL_PM");
+
+	return rc;
+
+}
+
+static irqreturn_t spi_panel_te_handler(int irq, void *data)
+{
+	struct spi_panel_data *ctrl_pdata = (struct spi_panel_data *)data;
+	static int count = 2;
+
+	if (!ctrl_pdata) {
+		pr_err("%s: SPI display not available\n", __func__);
+		return IRQ_HANDLED;
+	}
+	complete(&ctrl_pdata->spi_panel_te);
+
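+	/* notify the registered vsync client on every other TE pulse */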
+	if (ctrl_pdata->vsync_client.handler && !(--count)) {
+		ctrl_pdata->vsync_client.handler(ctrl_pdata->vsync_client.arg);
+		count = 2;
+	}
+
+	return IRQ_HANDLED;
+}
+
+void mdp3_spi_vsync_enable(struct mdss_panel_data *pdata,
+				struct mdp3_notification *vsync_client)
+{
+	int updated = 0;
+	struct spi_panel_data *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	ctrl_pdata = container_of(pdata, struct spi_panel_data,
+				panel_data);
+
+	if (vsync_client) {
+		if (ctrl_pdata->vsync_client.handler != vsync_client->handler) {
+			ctrl_pdata->vsync_client = *vsync_client;
+			updated = 1;
+		}
+	} else {
+		if (ctrl_pdata->vsync_client.handler) {
+			ctrl_pdata->vsync_client.handler = NULL;
+			ctrl_pdata->vsync_client.arg = NULL;
+			updated = 1;
+		}
+	}
+
+	if (updated) {
+		if (vsync_client && vsync_client->handler)
+			enable_spi_panel_te_irq(ctrl_pdata, true);
+		else
+			enable_spi_panel_te_irq(ctrl_pdata, false);
+	}
+}
+
+static struct device_node *mdss_spi_pref_prim_panel(
+		struct platform_device *pdev)
+{
+	struct device_node *spi_pan_node = NULL;
+
+	pr_debug("%s:%d: Select primary panel from dt\n",
+					__func__, __LINE__);
+	spi_pan_node = of_parse_phandle(pdev->dev.of_node,
+					"qcom,spi-pref-prim-pan", 0);
+	if (!spi_pan_node)
+		pr_err("%s:can't find panel phandle\n", __func__);
+
+	return spi_pan_node;
+}
+
+static int spi_panel_device_register(struct device_node *pan_node,
+				struct spi_panel_data *ctrl_pdata)
+{
+	int rc;
+	struct mdss_panel_info *pinfo = &(ctrl_pdata->panel_data.panel_info);
+	struct device_node *spi_ctrl_np = NULL;
+	struct platform_device *ctrl_pdev = NULL;
+
+	pinfo->type = SPI_PANEL;
+
+	spi_ctrl_np = of_parse_phandle(pan_node,
+				"qcom,mdss-spi-panel-controller", 0);
+	if (!spi_ctrl_np) {
+		pr_err("%s: SPI controller node not initialized\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	ctrl_pdev = of_find_device_by_node(spi_ctrl_np);
+	if (!ctrl_pdev) {
+		of_node_put(spi_ctrl_np);
+		pr_err("%s: SPI controller node not found\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	rc = mdss_spi_panel_regulator_init(ctrl_pdev);
+	if (rc) {
+		pr_err("%s: failed to init regulator, rc=%d\n",
+						__func__, rc);
+		return rc;
+	}
+
+	pinfo->panel_max_fps = mdss_panel_get_framerate(pinfo,
+		FPS_RESOLUTION_HZ);
+	pinfo->panel_max_vtotal = mdss_panel_get_vtotal(pinfo);
+
+	ctrl_pdata->disp_te_gpio = of_get_named_gpio(ctrl_pdev->dev.of_node,
+		"qcom,platform-te-gpio", 0);
+	if (!gpio_is_valid(ctrl_pdata->disp_te_gpio))
+		pr_err("%s:%d, TE gpio not specified\n",
+						__func__, __LINE__);
+
+	ctrl_pdata->disp_dc_gpio = of_get_named_gpio(ctrl_pdev->dev.of_node,
+		"qcom,platform-spi-dc-gpio", 0);
+	if (!gpio_is_valid(ctrl_pdata->disp_dc_gpio))
+		pr_err("%s:%d, SPI DC gpio not specified\n",
+						__func__, __LINE__);
+
+	ctrl_pdata->rst_gpio = of_get_named_gpio(ctrl_pdev->dev.of_node,
+			 "qcom,platform-reset-gpio", 0);
+	if (!gpio_is_valid(ctrl_pdata->rst_gpio))
+		pr_err("%s:%d, reset gpio not specified\n",
+						__func__, __LINE__);
+
+	ctrl_pdata->panel_data.event_handler = mdss_spi_panel_event_handler;
+
+	if (ctrl_pdata->bklt_ctrl == SPI_BL_PWM)
+		mdss_spi_panel_pwm_cfg(ctrl_pdata);
+
+	ctrl_pdata->ctrl_state = CTRL_STATE_UNKNOWN;
+
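+	/*
+	 * If the bootloader left a splash screen up, keep the supplies on
+	 * and mark the panel as already initialized so the unblank path
+	 * does not re-send the init sequence.
+	 */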
+	if (pinfo->cont_splash_enabled) {
+		rc = mdss_spi_panel_power_ctrl(&(ctrl_pdata->panel_data),
+			MDSS_PANEL_POWER_ON);
+		if (rc) {
+			pr_err("%s: Panel power on failed\n", __func__);
+			return rc;
+		}
+		if (ctrl_pdata->bklt_ctrl == SPI_BL_PWM)
+			ctrl_pdata->pwm_enabled = 1;
+		pinfo->blank_state = MDSS_PANEL_BLANK_UNBLANK;
+		ctrl_pdata->ctrl_state |=
+			(CTRL_STATE_PANEL_INIT | CTRL_STATE_MDP_ACTIVE);
+	} else {
+		pinfo->panel_power_state = MDSS_PANEL_POWER_OFF;
+	}
+
+	rc = mdss_register_panel(ctrl_pdev, &(ctrl_pdata->panel_data));
+	if (rc) {
+		pr_err("%s: unable to register SPI panel\n", __func__);
+		return rc;
+	}
+
+	pr_debug("%s: Panel data initialized\n", __func__);
+	return 0;
+}
+
+
+/**
+ * mdss_spi_find_panel_of_node(): find device node of spi panel
+ * @pdev: platform_device of the spi ctrl node
+ * @panel_cfg: string containing intf specific config data
+ *
+ * Finds the panel device node using the interface-specific
+ * configuration data, which may be derived from the bootloader's
+ * GCDB panel detection mechanism. If no such config data exists,
+ * the default panel configured in the device tree is returned.
+ *
+ * returns pointer to panel node on success, NULL on error.
+ */
+static struct device_node *mdss_spi_find_panel_of_node(
+		struct platform_device *pdev, char *panel_cfg)
+{
+	int len, i;
+	int ctrl_id = pdev->id - 1;
+	char panel_name[MDSS_MAX_PANEL_LEN] = "";
+	char ctrl_id_stream[3] =  "0:";
+	char *stream = NULL, *pan = NULL;
+	struct device_node *spi_pan_node = NULL, *mdss_node = NULL;
+
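+	/*
+	 * panel_cfg is expected to look like "<ctrl id>:<panel name>[:...]";
+	 * pick out the entry that belongs to this controller.
+	 */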
+	len = strlen(panel_cfg);
+	if (!len) {
+		/* no panel cfg chg, parse dt */
+		pr_err("%s:%d: no cmd line cfg present\n",
+			 __func__, __LINE__);
+		goto end;
+	} else {
+		if (ctrl_id == 1)
+			strlcpy(ctrl_id_stream, "1:", 3);
+
+		stream = strnstr(panel_cfg, ctrl_id_stream, len);
+		if (!stream) {
+			pr_err("controller config is not present\n");
+			goto end;
+		}
+		stream += 2;
+
+		pan = strnchr(stream, strlen(stream), ':');
+		if (!pan) {
+			strlcpy(panel_name, stream, MDSS_MAX_PANEL_LEN);
+		} else {
+			for (i = 0; (stream + i) < pan; i++)
+				panel_name[i] = *(stream + i);
+			panel_name[i] = 0;
+		}
+
+		pr_debug("%s:%d:%s:%s\n", __func__, __LINE__,
+			 panel_cfg, panel_name);
+
+		mdss_node = of_parse_phandle(pdev->dev.of_node,
+					     "qcom,mdss-mdp", 0);
+		if (!mdss_node) {
+			pr_err("%s: %d: mdss_node null\n",
+			       __func__, __LINE__);
+			return NULL;
+		}
+
+		spi_pan_node = of_find_node_by_name(mdss_node,
+						    panel_name);
+		if (!spi_pan_node) {
+			pr_err("%s: invalid pan node, selecting prim panel\n",
+			       __func__);
+			goto end;
+		}
+		return spi_pan_node;
+	}
+end:
+	if (strcmp(panel_name, NONE_PANEL))
+		spi_pan_node = mdss_spi_pref_prim_panel(pdev);
+	of_node_put(mdss_node);
+	return spi_pan_node;
+}
+
+
+static int mdss_spi_panel_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct spi_panel_data	*ctrl_pdata;
+	struct mdss_panel_cfg *pan_cfg = NULL;
+	struct device_node *spi_pan_node = NULL;
+	bool cmd_cfg_cont_splash = true;
+	char panel_cfg[MDSS_MAX_PANEL_LEN];
+	struct mdss_util_intf *util;
+	const char *ctrl_name;
+
+	util = mdss_get_util_intf();
+	if (util == NULL) {
+		pr_err("Failed to get mdss utility functions\n");
+		return -ENODEV;
+	}
+
+	if (!util->mdp_probe_done) {
+		pr_err("%s: MDP not probed yet\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	if (!pdev->dev.of_node) {
+		pr_err("SPI driver only supports device tree probe\n");
+		return -ENOTSUPP;
+	}
+
+	pan_cfg = util->panel_intf_type(MDSS_PANEL_INTF_DSI);
+	if (IS_ERR(pan_cfg)) {
+		pr_err("%s: return MDSS_PANEL_INTF_DSI\n", __func__);
+		return PTR_ERR(pan_cfg);
+	} else if (pan_cfg) {
+		pr_err("%s: DSI is primary\n", __func__);
+		return -ENODEV;
+	}
+
+	ctrl_pdata = platform_get_drvdata(pdev);
+	if (!ctrl_pdata) {
+		ctrl_pdata = devm_kzalloc(&pdev->dev,
+					  sizeof(struct spi_panel_data),
+					  GFP_KERNEL);
+		if (!ctrl_pdata) {
+			pr_err("%s: FAILED: cannot alloc spi panel\n",
+			       __func__);
+			rc = -ENOMEM;
+			goto error_no_mem;
+		}
+		platform_set_drvdata(pdev, ctrl_pdata);
+	}
+
+	ctrl_pdata->mdss_util = util;
+
+	ctrl_name = of_get_property(pdev->dev.of_node, "label", NULL);
+	if (!ctrl_name)
+		pr_info("%s:%d, Ctrl name not specified\n",
+			__func__, __LINE__);
+	else
+		pr_debug("%s: Ctrl name = %s\n",
+			__func__, ctrl_name);
+
+
+	rc = of_platform_populate(pdev->dev.of_node,
+				  NULL, NULL, &pdev->dev);
+	if (rc) {
+		dev_err(&pdev->dev,
+			"%s: failed to add child nodes, rc=%d\n",
+			__func__, rc);
+		goto error_no_mem;
+	}
+
+	rc = mdss_spi_panel_pinctrl_init(pdev);
+	if (rc)
+		pr_warn("%s: failed to get pin resources\n", __func__);
+
+	rc = mdss_spi_get_panel_vreg_data(&pdev->dev,
+					&ctrl_pdata->panel_power_data);
+	if (rc) {
+		dev_err(&pdev->dev,
+			"%s: failed to get panel vreg data, rc=%d\n",
+			__func__, rc);
+		goto error_vreg;
+	}
+
+	/* SPI panels can be different between controllers */
+	rc = mdss_spi_get_panel_cfg(panel_cfg, ctrl_pdata);
+	if (!rc)
+		/* spi panel cfg not present */
+		pr_warn("%s:%d:spi specific cfg not present\n",
+			__func__, __LINE__);
+
+	/* find panel device node */
+	spi_pan_node = mdss_spi_find_panel_of_node(pdev, panel_cfg);
+	if (!spi_pan_node) {
+		pr_err("%s: can't find panel node %s\n", __func__, panel_cfg);
+		goto error_pan_node;
+	}
+
+	cmd_cfg_cont_splash = true;
+
+	rc = mdss_spi_panel_init(spi_pan_node, ctrl_pdata, cmd_cfg_cont_splash);
+	if (rc) {
+		pr_err("%s: spi panel init failed\n", __func__);
+		goto error_pan_node;
+	}
+
+	rc = spi_panel_device_register(spi_pan_node, ctrl_pdata);
+	if (rc) {
+		pr_err("%s: spi panel dev reg failed\n", __func__);
+		goto error_pan_node;
+	}
+
+	ctrl_pdata->panel_data.event_handler = mdss_spi_panel_event_handler;
+
+
+	init_completion(&ctrl_pdata->spi_panel_te);
+	mutex_init(&ctrl_pdata->spi_tx_mutex);
+
+	rc = devm_request_irq(&pdev->dev,
+		gpio_to_irq(ctrl_pdata->disp_te_gpio),
+		spi_panel_te_handler, IRQF_TRIGGER_RISING,
+		"TE_GPIO", ctrl_pdata);
+	if (rc) {
+		pr_err("TE request_irq failed.\n");
+		return rc;
+	}
+
+	pr_debug("%s: spi panel initialized\n", __func__);
+	return 0;
+
+error_pan_node:
+	of_node_put(spi_pan_node);
+error_vreg:
+	mdss_spi_put_dt_vreg_data(&pdev->dev,
+			&ctrl_pdata->panel_power_data);
+error_no_mem:
+	devm_kfree(&pdev->dev, ctrl_pdata);
+	return rc;
+}
+
+
+static const struct of_device_id mdss_spi_panel_match[] = {
+	{ .compatible = "qcom,mdss-spi-display" },
+	{},
+};
+
+static struct platform_driver this_driver = {
+	.probe = mdss_spi_panel_probe,
+	.driver = {
+		.name = "spi_panel",
+		.owner  = THIS_MODULE,
+		.of_match_table = mdss_spi_panel_match,
+	},
+};
+
+static int __init mdss_spi_display_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&this_driver);
+	return ret;
+}
+module_init(mdss_spi_display_init);
+
+MODULE_DEVICE_TABLE(of, mdss_spi_panel_match);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/fbdev/msm/mdss_spi_panel.h b/drivers/video/fbdev/msm/mdss_spi_panel.h
new file mode 100644
index 0000000..80b7ea8
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_spi_panel.h
@@ -0,0 +1,148 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MDSS_SPI_PANEL_H__
+#define __MDSS_SPI_PANEL_H__
+
+#if defined(CONFIG_FB_MSM_MDSS_SPI_PANEL) && defined(CONFIG_SPI_QUP)
+#include <linux/list.h>
+#include <linux/mdss_io_util.h>
+#include <linux/irqreturn.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/gpio.h>
+
+#include "mdss_panel.h"
+#include "mdp3_dma.h"
+
+#define MDSS_MAX_BL_BRIGHTNESS 255
+
+#define MDSS_SPI_RST_SEQ_LEN	10
+
+#define NONE_PANEL "none"
+
+#define CTRL_STATE_UNKNOWN		0x00
+#define CTRL_STATE_PANEL_INIT		BIT(0)
+#define CTRL_STATE_MDP_ACTIVE		BIT(1)
+
+#define MDSS_PINCTRL_STATE_DEFAULT "mdss_default"
+#define MDSS_PINCTRL_STATE_SLEEP  "mdss_sleep"
+#define SPI_PANEL_TE_TIMEOUT	400
+
+enum spi_panel_data_type {
+	panel_cmd,
+	panel_parameter,
+	panel_pixel,
+	UNKNOWN_FORMAT,
+};
+
+enum spi_panel_bl_ctrl {
+	SPI_BL_PWM,
+	SPI_BL_WLED,
+	SPI_BL_DCS_CMD,
+	SPI_UNKNOWN_CTRL,
+};
+
+struct spi_pinctrl_res {
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *gpio_state_active;
+	struct pinctrl_state *gpio_state_suspend;
+};
+#define SPI_PANEL_DST_FORMAT_RGB565		0
+
+struct spi_ctrl_hdr {
+	char wait;	/* ms */
+	char dlen;	/* 8 bits */
+};
+
+struct spi_cmd_desc {
+	struct spi_ctrl_hdr dchdr;
+	char *command;
+	char *parameter;
+};
+
+struct spi_panel_cmds {
+	char *buf;
+	int blen;
+	struct spi_cmd_desc *cmds;
+	int cmd_cnt;
+};
+
+enum spi_panel_status_mode {
+	SPI_ESD_REG,
+	SPI_SEND_PANEL_COMMAND,
+	SPI_ESD_MAX,
+};
+
+
+struct spi_panel_data {
+	struct mdss_panel_data panel_data;
+	struct mdss_util_intf *mdss_util;
+	struct spi_pinctrl_res pin_res;
+	struct mdss_module_power panel_power_data;
+	struct completion spi_panel_te;
+	struct mdp3_notification vsync_client;
+	unsigned int vsync_status;
+	int byte_pre_frame;
+	char *tx_buf;
+	u8 ctrl_state;
+	int disp_te_gpio;
+	int rst_gpio;
+	int disp_dc_gpio;	/* command or data */
+	struct spi_panel_cmds on_cmds;
+	struct spi_panel_cmds off_cmds;
+	bool (*check_status)(struct spi_panel_data *pdata);
+	int (*on)(struct mdss_panel_data *pdata);
+	int (*off)(struct mdss_panel_data *pdata);
+	struct mutex spi_tx_mutex;
+	struct pwm_device *pwm_bl;
+	int bklt_ctrl;	/* backlight ctrl */
+	bool pwm_pmi;
+	int pwm_period;
+	int pwm_pmic_gpio;
+	int pwm_lpg_chan;
+	int pwm_enabled;
+	int bklt_max;
+	int status_mode;
+	u32 status_cmds_rlen;
+	u8 panel_status_reg;
+	u8 *exp_status_value;
+	u8 *act_status_value;
+	unsigned char *return_buf;
+};
+
+int mdss_spi_panel_kickoff(struct mdss_panel_data *pdata,
+				char *buf, int len, int stride);
+int is_spi_panel_continuous_splash_on(struct mdss_panel_data *pdata);
+void mdp3_spi_vsync_enable(struct mdss_panel_data *pdata,
+				struct mdp3_notification *vsync_client);
+void mdp3_check_spi_panel_status(struct work_struct *work,
+				uint32_t interval);
+
+#else
+static inline int mdss_spi_panel_kickoff(struct mdss_panel_data *pdata,
+				char *buf, int len, int stride){
+	return 0;
+}
+static inline int is_spi_panel_continuous_splash_on(
+				struct mdss_panel_data *pdata)
+{
+	return 0;
+}
+static inline void mdp3_spi_vsync_enable(struct mdss_panel_data *pdata,
+			struct mdp3_notification *vsync_client)
+{
+}
+
+#endif /* End of CONFIG_FB_MSM_MDSS_SPI_PANEL && CONFIG_SPI_QUP */
+
+#endif /* End of __MDSS_SPI_PANEL_H__ */
diff --git a/drivers/video/fbdev/msm/mdss_util.c b/drivers/video/fbdev/msm/mdss_util.c
index 30fcf28..a5fb8b6 100644
--- a/drivers/video/fbdev/msm/mdss_util.c
+++ b/drivers/video/fbdev/msm/mdss_util.c
@@ -23,6 +23,7 @@
 {
 	unsigned long irq_flags;
 	u32 ndx_bit;
+	bool err = false;
 
 	if (!hw || hw->hw_ndx >= MDSS_MAX_HW_BLK)
 		return -EINVAL;
@@ -33,10 +34,12 @@
 	if (!mdss_irq_handlers[hw->hw_ndx])
 		mdss_irq_handlers[hw->hw_ndx] = hw;
 	else
-		pr_err("panel %d's irq at %pK is already registered\n",
-			hw->hw_ndx, hw->irq_handler);
+		err = true;
 	spin_unlock_irqrestore(&mdss_lock, irq_flags);
 
+	if (err)
+		pr_err("panel %d's irq at %pK is already registered\n",
+			hw->hw_ndx, hw->irq_handler);
 	return 0;
 }
 
@@ -76,6 +79,7 @@
 {
 	unsigned long irq_flags;
 	u32 ndx_bit;
+	bool err = false;
 
 	if (hw->hw_ndx >= MDSS_MAX_HW_BLK)
 		return;
@@ -87,7 +91,7 @@
 
 	spin_lock_irqsave(&mdss_lock, irq_flags);
 	if (!(hw->irq_info->irq_mask & ndx_bit)) {
-		pr_warn("MDSS HW ndx=%d is NOT set\n", hw->hw_ndx);
+		err = true;
 	} else {
 		hw->irq_info->irq_mask &= ~ndx_bit;
 		if (hw->irq_info->irq_mask == 0) {
@@ -96,12 +100,16 @@
 		}
 	}
 	spin_unlock_irqrestore(&mdss_lock, irq_flags);
+
+	if (err)
+		pr_warn("MDSS HW ndx=%d is NOT set\n", hw->hw_ndx);
 }
 
 /* called from interrupt context */
 void mdss_disable_irq_nosync(struct mdss_hw *hw)
 {
 	u32 ndx_bit;
+	bool err = false;
 
 	if (hw->hw_ndx >= MDSS_MAX_HW_BLK)
 		return;
@@ -113,7 +121,7 @@
 
 	spin_lock(&mdss_lock);
 	if (!(hw->irq_info->irq_mask & ndx_bit)) {
-		pr_warn("MDSS HW ndx=%d is NOT set\n", hw->hw_ndx);
+		err = true;
 	} else {
 		hw->irq_info->irq_mask &= ~ndx_bit;
 		if (hw->irq_info->irq_mask == 0) {
@@ -122,6 +130,9 @@
 		}
 	}
 	spin_unlock(&mdss_lock);
+
+	if (err)
+		pr_warn("MDSS HW ndx=%d is NOT set\n", hw->hw_ndx);
 }
 
 int mdss_irq_dispatch(u32 hw_ndx, int irq, void *ptr)
@@ -176,6 +187,7 @@
 {
 	unsigned long irq_flags;
 	u32 ndx_bit;
+	bool err = false;
 
 	if (hw->hw_ndx >= MDSS_MAX_HW_BLK)
 		return;
@@ -188,7 +200,7 @@
 
 	spin_lock_irqsave(&mdss_lock, irq_flags);
 	if (!(hw->irq_info->irq_wake_mask & ndx_bit)) {
-		pr_warn("MDSS HW ndx=%d is NOT set\n", hw->hw_ndx);
+		err = true;
 	} else {
 		hw->irq_info->irq_wake_mask &= ~ndx_bit;
 		if (hw->irq_info->irq_wake_ena) {
@@ -197,6 +209,9 @@
 		}
 	}
 	spin_unlock_irqrestore(&mdss_lock, irq_flags);
+
+	if (err)
+		pr_warn("MDSS HW ndx=%d is NOT set\n", hw->hw_ndx);
 }
 
 static bool check_display(char *param_string)
diff --git a/drivers/video/fbdev/msm/msm_mdss_io_8974.c b/drivers/video/fbdev/msm/msm_mdss_io_8974.c
index 153b39f..9ccd428 100644
--- a/drivers/video/fbdev/msm/msm_mdss_io_8974.c
+++ b/drivers/video/fbdev/msm/msm_mdss_io_8974.c
@@ -417,7 +417,7 @@
 	wmb(); /* make sure phy timings are updated*/
 }
 
-static void mdss_dsi_ctrl_phy_reset(struct mdss_dsi_ctrl_pdata *ctrl)
+void mdss_dsi_ctrl_phy_reset(struct mdss_dsi_ctrl_pdata *ctrl)
 {
 	/* start phy sw reset */
 	MIPI_OUTP(ctrl->ctrl_base + 0x12c, 0x0001);
@@ -519,8 +519,7 @@
 	 * is only done from the clock master. This will ensure that the PLL is
 	 * off when PHY reset is called.
 	 */
-	if (mdss_dsi_is_ctrl_clk_slave(ctrl) ||
-		(ctrl->shared_data->phy_rev == DSI_PHY_REV_12NM))
+	if (mdss_dsi_is_ctrl_clk_slave(ctrl))
 		return;
 
 	mdss_dsi_phy_sw_reset_sub(ctrl);
diff --git a/drivers/video/fbdev/sbuslib.c b/drivers/video/fbdev/sbuslib.c
index a350209..31c301d 100644
--- a/drivers/video/fbdev/sbuslib.c
+++ b/drivers/video/fbdev/sbuslib.c
@@ -121,7 +121,7 @@
 		unsigned char __user *ured;
 		unsigned char __user *ugreen;
 		unsigned char __user *ublue;
-		int index, count, i;
+		unsigned int index, count, i;
 
 		if (get_user(index, &c->index) ||
 		    __get_user(count, &c->count) ||
@@ -160,7 +160,7 @@
 		unsigned char __user *ugreen;
 		unsigned char __user *ublue;
 		struct fb_cmap *cmap = &info->cmap;
-		int index, count, i;
+		unsigned int index, count, i;
 		u8 red, green, blue;
 
 		if (get_user(index, &c->index) ||
diff --git a/drivers/video/fbdev/vfb.c b/drivers/video/fbdev/vfb.c
index a042329..026df9a 100644
--- a/drivers/video/fbdev/vfb.c
+++ b/drivers/video/fbdev/vfb.c
@@ -241,8 +241,23 @@
  */
 static int vfb_set_par(struct fb_info *info)
 {
+	switch (info->var.bits_per_pixel) {
+	case 1:
+		info->fix.visual = FB_VISUAL_MONO01;
+		break;
+	case 8:
+		info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
+		break;
+	case 16:
+	case 24:
+	case 32:
+		info->fix.visual = FB_VISUAL_TRUECOLOR;
+		break;
+	}
+
 	info->fix.line_length = get_line_length(info->var.xres_virtual,
 						info->var.bits_per_pixel);
+
 	return 0;
 }
 
@@ -454,6 +469,8 @@
 		goto err2;
 	platform_set_drvdata(dev, info);
 
+	vfb_set_par(info);
+
 	fb_info(info, "Virtual frame buffer device, using %ldK of video memory\n",
 		videomemorysize >> 10);
 	return 0;
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 3eb58cb..8f8909a 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -799,11 +799,12 @@
 	  the timeout module parameter.
 
 config F71808E_WDT
-	tristate "Fintek F71808E, F71862FG, F71869, F71882FG and F71889FG Watchdog"
+	tristate "Fintek F718xx, F818xx Super I/O Watchdog"
 	depends on X86
 	help
-	  This is the driver for the hardware watchdog on the Fintek
-	  F71808E, F71862FG, F71869, F71882FG and F71889FG Super I/O controllers.
+	  This is the driver for the hardware watchdog on the Fintek F71808E,
+	  F71862FG, F71868, F71869, F71882FG, F71889FG, F81865 and F81866
+	  Super I/O controllers.
 
 	  You can compile this driver directly into the kernel, or use
 	  it as a module.  The module will be called f71808e_wdt.
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index 1b7e916..88cd2a5 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -57,6 +57,7 @@
 #define SIO_F71808_ID		0x0901	/* Chipset ID */
 #define SIO_F71858_ID		0x0507	/* Chipset ID */
 #define SIO_F71862_ID		0x0601	/* Chipset ID */
+#define SIO_F71868_ID		0x1106	/* Chipset ID */
 #define SIO_F71869_ID		0x0814	/* Chipset ID */
 #define SIO_F71869A_ID		0x1007	/* Chipset ID */
 #define SIO_F71882_ID		0x0541	/* Chipset ID */
@@ -101,7 +102,7 @@
 static unsigned int pulse_width = WATCHDOG_PULSE_WIDTH;
 module_param(pulse_width, uint, 0);
 MODULE_PARM_DESC(pulse_width,
-	"Watchdog signal pulse width. 0(=level), 1 ms, 25 ms, 125 ms or 5000 ms"
+	"Watchdog signal pulse width. 0(=level), 1, 25, 30, 125, 150, 5000 or 6000 ms"
 			" (default=" __MODULE_STRING(WATCHDOG_PULSE_WIDTH) ")");
 
 static unsigned int f71862fg_pin = WATCHDOG_F71862FG_PIN;
@@ -119,13 +120,14 @@
 MODULE_PARM_DESC(start_withtimeout, "Start watchdog timer on module load with"
 	" given initial timeout. Zero (default) disables this feature.");
 
-enum chips { f71808fg, f71858fg, f71862fg, f71869, f71882fg, f71889fg, f81865,
-	     f81866};
+enum chips { f71808fg, f71858fg, f71862fg, f71868, f71869, f71882fg, f71889fg,
+	     f81865, f81866};
 
 static const char *f71808e_names[] = {
 	"f71808fg",
 	"f71858fg",
 	"f71862fg",
+	"f71868",
 	"f71869",
 	"f71882fg",
 	"f71889fg",
@@ -252,16 +254,23 @@
 static int watchdog_set_pulse_width(unsigned int pw)
 {
 	int err = 0;
+	unsigned int t1 = 25, t2 = 125, t3 = 5000;
+
+	if (watchdog.type == f71868) {
+		t1 = 30;
+		t2 = 150;
+		t3 = 6000;
+	}
 
 	mutex_lock(&watchdog.lock);
 
-	if        (pw <=    1) {
+	if        (pw <=  1) {
 		watchdog.pulse_val = 0;
-	} else if (pw <=   25) {
+	} else if (pw <= t1) {
 		watchdog.pulse_val = 1;
-	} else if (pw <=  125) {
+	} else if (pw <= t2) {
 		watchdog.pulse_val = 2;
-	} else if (pw <= 5000) {
+	} else if (pw <= t3) {
 		watchdog.pulse_val = 3;
 	} else {
 		pr_err("pulse width out of range\n");
@@ -354,6 +363,7 @@
 			goto exit_superio;
 		break;
 
+	case f71868:
 	case f71869:
 		/* GPIO14 --> WDTRST# */
 		superio_clear_bit(watchdog.sioaddr, SIO_REG_MFUNCT1, 4);
@@ -486,7 +496,7 @@
 
 	is_running = (superio_inb(watchdog.sioaddr, SIO_REG_ENABLE) & BIT(0))
 		&& (superio_inb(watchdog.sioaddr, F71808FG_REG_WDT_CONF)
-			& F71808FG_FLAG_WD_EN);
+			& BIT(F71808FG_FLAG_WD_EN));
 
 	superio_exit(watchdog.sioaddr);
 
@@ -556,7 +566,8 @@
 				char c;
 				if (get_user(c, buf + i))
 					return -EFAULT;
-				expect_close = (c == 'V');
+				if (c == 'V')
+					expect_close = true;
 			}
 
 			/* Properly order writes across fork()ed processes */
@@ -792,6 +803,9 @@
 		watchdog.type = f71862fg;
 		err = f71862fg_pin_configure(0); /* validate module parameter */
 		break;
+	case SIO_F71868_ID:
+		watchdog.type = f71868;
+		break;
 	case SIO_F71869_ID:
 	case SIO_F71869A_ID:
 		watchdog.type = f71869;
diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
index ce0c38b..37523f1 100644
--- a/drivers/watchdog/sbsa_gwdt.c
+++ b/drivers/watchdog/sbsa_gwdt.c
@@ -50,6 +50,7 @@
  */
 
 #include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -159,7 +160,7 @@
 	    !(readl(gwdt->control_base + SBSA_GWDT_WCS) & SBSA_GWDT_WCS_WS0))
 		timeleft += readl(gwdt->control_base + SBSA_GWDT_WOR);
 
-	timeleft += readq(gwdt->control_base + SBSA_GWDT_WCV) -
+	timeleft += lo_hi_readq(gwdt->control_base + SBSA_GWDT_WCV) -
 		    arch_counter_get_cntvct();
 
 	do_div(timeleft, gwdt->clk);
diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h
index 2b28c00..dfe20b8 100644
--- a/drivers/watchdog/sp5100_tco.h
+++ b/drivers/watchdog/sp5100_tco.h
@@ -54,7 +54,7 @@
 #define SB800_PM_WATCHDOG_CONFIG	0x4C
 
 #define SB800_PCI_WATCHDOG_DECODE_EN	(1 << 0)
-#define SB800_PM_WATCHDOG_DISABLE	(1 << 2)
+#define SB800_PM_WATCHDOG_DISABLE	(1 << 1)
 #define SB800_PM_WATCHDOG_SECOND_RES	(3 << 0)
 #define SB800_ACPI_MMIO_DECODE_EN	(1 << 0)
 #define SB800_ACPI_MMIO_SEL		(1 << 1)
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index d5dbdb9..6d3b32c 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -764,8 +764,8 @@
 	mutex_unlock(&irq_mapping_update_lock);
 	return irq;
 error_irq:
-	for (; i >= 0; i--)
-		__unbind_from_irq(irq + i);
+	while (nvec--)
+		__unbind_from_irq(irq + nvec);
 	mutex_unlock(&irq_mapping_update_lock);
 	return ret;
 }
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index bb36b1e..775d419 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -327,7 +327,7 @@
 			if (entry->page) {
 				pr_debug("freeing g.e. %#x (pfn %#lx)\n",
 					 entry->ref, page_to_pfn(entry->page));
-				__free_page(entry->page);
+				put_page(entry->page);
 			} else
 				pr_info("freeing g.e. %#x\n", entry->ref);
 			kfree(entry);
@@ -383,7 +383,7 @@
 	if (gnttab_end_foreign_access_ref(ref, readonly)) {
 		put_free_entry(ref);
 		if (page != 0)
-			free_page(page);
+			put_page(virt_to_page(page));
 	} else
 		gnttab_add_deferred(ref, readonly,
 				    page ? virt_to_page(page) : NULL);
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index b68ced5..2fe7353 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -359,7 +359,7 @@
 	 * physical address */
 	phys = xen_bus_to_phys(dev_addr);
 
-	if (((dev_addr + size - 1 > dma_mask)) ||
+	if (((dev_addr + size - 1 <= dma_mask)) ||
 	    range_straddles_page_boundary(phys, size))
 		xen_destroy_contiguous_region(phys, order);
 
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 4b85746..7ff9d25 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -362,9 +362,9 @@
 	}
 	/* There are more ACPI Processor objects than in x2APIC or MADT.
 	 * This can happen with incorrect ACPI SSDT declerations. */
-	if (acpi_id > nr_acpi_bits) {
-		pr_debug("We only have %u, trying to set %u\n",
-			 nr_acpi_bits, acpi_id);
+	if (acpi_id >= nr_acpi_bits) {
+		pr_debug("max acpi id %u, trying to set %u\n",
+			 nr_acpi_bits - 1, acpi_id);
 		return AE_OK;
 	}
 	/* OK, There is a ACPI Processor object */
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 33a31cf..c2d4476 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -470,8 +470,11 @@
 
 	/* Register with generic device framework. */
 	err = device_register(&xendev->dev);
-	if (err)
+	if (err) {
+		put_device(&xendev->dev);
+		xendev = NULL;
 		goto fail;
+	}
 
 	return 0;
 fail:
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
index d295d98..8ec7938 100644
--- a/drivers/zorro/zorro.c
+++ b/drivers/zorro/zorro.c
@@ -16,6 +16,7 @@
 #include <linux/bitops.h>
 #include <linux/string.h>
 #include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
 #include <linux/slab.h>
 
 #include <asm/byteorder.h>
@@ -185,6 +186,17 @@
 		z->dev.parent = &bus->dev;
 		z->dev.bus = &zorro_bus_type;
 		z->dev.id = i;
+		switch (z->rom.er_Type & ERT_TYPEMASK) {
+		case ERT_ZORROIII:
+			z->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+			break;
+
+		case ERT_ZORROII:
+		default:
+			z->dev.coherent_dma_mask = DMA_BIT_MASK(24);
+			break;
+		}
+		z->dev.dma_mask = &z->dev.coherent_dma_mask;
 	}
 
 	/* ... then register them */
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index 29186d2..2d4d495 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -224,9 +224,10 @@
 
 	affs_lock_dir(dir);
 	bh = affs_find_entry(dir, dentry);
-	affs_unlock_dir(dir);
-	if (IS_ERR(bh))
+	if (IS_ERR(bh)) {
+		affs_unlock_dir(dir);
 		return ERR_CAST(bh);
+	}
 	if (bh) {
 		u32 ino = bh->b_blocknr;
 
@@ -240,10 +241,13 @@
 		}
 		affs_brelse(bh);
 		inode = affs_iget(sb, ino);
-		if (IS_ERR(inode))
+		if (IS_ERR(inode)) {
+			affs_unlock_dir(dir);
 			return ERR_CAST(inode);
+		}
 	}
 	d_add(dentry, inode);
+	affs_unlock_dir(dir);
 	return NULL;
 }
 
diff --git a/fs/aio.c b/fs/aio.c
index 0606f03..42d8c09 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1074,8 +1074,8 @@
 
 	ctx = rcu_dereference(table->table[id]);
 	if (ctx && ctx->user_id == ctx_id) {
-		percpu_ref_get(&ctx->users);
-		ret = ctx;
+		if (percpu_ref_tryget_live(&ctx->users))
+			ret = ctx;
 	}
 out:
 	rcu_read_unlock();
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index a11f731..6182d69 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -746,7 +746,7 @@
 
 	autofs4_del_active(dentry);
 
-	inode = autofs4_get_inode(dir->i_sb, S_IFDIR | 0555);
+	inode = autofs4_get_inode(dir->i_sb, S_IFDIR | mode);
 	if (!inode)
 		return -ENOMEM;
 	d_add(dentry, inode);
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index f6ba165..c94d339 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -2486,10 +2486,8 @@
 	if (p->reada != READA_NONE)
 		reada_for_search(root, p, level, slot, key->objectid);
 
-	btrfs_release_path(p);
-
 	ret = -EAGAIN;
-	tmp = read_tree_block(root, blocknr, 0);
+	tmp = read_tree_block(root, blocknr, gen);
 	if (!IS_ERR(tmp)) {
 		/*
 		 * If the read above didn't mark this buffer up to date,
@@ -2503,6 +2501,8 @@
 	} else {
 		ret = PTR_ERR(tmp);
 	}
+
+	btrfs_release_path(p);
 	return ret;
 }
 
@@ -2760,6 +2760,8 @@
 		 * contention with the cow code
 		 */
 		if (cow) {
+			bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
+
 			/*
 			 * if we don't really need to cow this block
 			 * then we don't want to set the path blocking,
@@ -2784,9 +2786,13 @@
 			}
 
 			btrfs_set_path_blocking(p);
-			err = btrfs_cow_block(trans, root, b,
-					      p->nodes[level + 1],
-					      p->slots[level + 1], &b);
+			if (last_level)
+				err = btrfs_cow_block(trans, root, b, NULL, 0,
+						      &b);
+			else
+				err = btrfs_cow_block(trans, root, b,
+						      p->nodes[level + 1],
+						      p->slots[level + 1], &b);
 			if (err) {
 				ret = err;
 				goto done;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index c66054c..9557a31 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1281,7 +1281,7 @@
 	if (!writers)
 		return ERR_PTR(-ENOMEM);
 
-	ret = percpu_counter_init(&writers->counter, 0, GFP_KERNEL);
+	ret = percpu_counter_init(&writers->counter, 0, GFP_NOFS);
 	if (ret < 0) {
 		kfree(writers);
 		return ERR_PTR(ret);
@@ -4142,9 +4142,11 @@
 		btrfs_err(fs_info, "no valid FS found");
 		ret = -EINVAL;
 	}
-	if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP)
-		btrfs_warn(fs_info, "unrecognized super flag: %llu",
+	if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP) {
+		btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu",
 				btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
+		ret = -EINVAL;
+	}
 	if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
 		btrfs_err(fs_info, "tree_root level too big: %d >= %d",
 				btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index a29730c..44a4385 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4527,6 +4527,7 @@
 	if (wait_for_alloc) {
 		mutex_unlock(&fs_info->chunk_mutex);
 		wait_for_alloc = 0;
+		cond_resched();
 		goto again;
 	}
 
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 8ed05d9..03ac3ab 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2453,7 +2453,7 @@
 	if (!uptodate) {
 		ClearPageUptodate(page);
 		SetPageError(page);
-		ret = ret < 0 ? ret : -EIO;
+		ret = err < 0 ? err : -EIO;
 		mapping_set_error(page->mapping, ret);
 	}
 }
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index c95ff09..4375448 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1912,10 +1912,19 @@
 static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
 {
 	int ret;
+	struct blk_plug plug;
 
+	/*
+	 * This is only called in fsync, which would do synchronous writes, so
+	 * a plug can merge adjacent IOs as much as possible.  Esp. in case of
+	 * multiple disks using raid profile, a large IO can be split to
+	 * several segments of stripe length (currently 64K).
+	 */
+	blk_start_plug(&plug);
 	atomic_inc(&BTRFS_I(inode)->sync_writers);
 	ret = btrfs_fdatawrite_range(inode, start, end);
 	atomic_dec(&BTRFS_I(inode)->sync_writers);
+	blk_finish_plug(&plug);
 
 	return ret;
 }
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index ffd5831..f073de6 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6491,8 +6491,7 @@
 		goto out_unlock_inode;
 	} else {
 		btrfs_update_inode(trans, root, inode);
-		unlock_new_inode(inode);
-		d_instantiate(dentry, inode);
+		d_instantiate_new(dentry, inode);
 	}
 
 out_unlock:
@@ -6567,8 +6566,7 @@
 		goto out_unlock_inode;
 
 	BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 
 out_unlock:
 	btrfs_end_transaction(trans, root);
@@ -6711,12 +6709,7 @@
 	if (err)
 		goto out_fail_inode;
 
-	d_instantiate(dentry, inode);
-	/*
-	 * mkdir is special.  We're unlocking after we call d_instantiate
-	 * to avoid a race with nfsd calling d_instantiate.
-	 */
-	unlock_new_inode(inode);
+	d_instantiate_new(dentry, inode);
 	drop_on_err = 0;
 
 out_fail:
@@ -10354,8 +10347,7 @@
 		goto out_unlock_inode;
 	}
 
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 
 out_unlock:
 	btrfs_end_transaction(trans, root);
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index d016d4a..af6a776 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -2161,11 +2161,21 @@
 	}
 
 	/*
-	 * reconstruct from the q stripe if they are
-	 * asking for mirror 3
+	 * Loop retry:
+	 * for 'mirror == 2', reconstruct from all other stripes.
+	 * for 'mirror_num > 2', select a stripe to fail on every retry.
 	 */
-	if (mirror_num == 3)
-		rbio->failb = rbio->real_stripes - 2;
+	if (mirror_num > 2) {
+		/*
+		 * 'mirror == 3' is to fail the p stripe and
+		 * reconstruct from the q stripe.  'mirror > 3' is to
+		 * fail a data stripe and reconstruct from p+q stripe.
+		 */
+		rbio->failb = rbio->real_stripes - (mirror_num - 1);
+		ASSERT(rbio->failb > 0);
+		if (rbio->failb <= rbio->faila)
+			rbio->failb--;
+	}
 
 	ret = lock_stripe_add(rbio);
 
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index d040afc..c8d2eec 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -4822,6 +4822,9 @@
 	u64 len;
 	int ret = 0;
 
+	if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
+		return send_update_extent(sctx, offset, end - offset);
+
 	p = fs_path_alloc();
 	if (!p)
 		return -ENOMEM;
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index ca7cb5e..9c66666 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -63,7 +63,7 @@
 	btrfs_set_extent_generation(leaf, item, 1);
 	btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_TREE_BLOCK);
 	block_info = (struct btrfs_tree_block_info *)(item + 1);
-	btrfs_set_tree_block_level(leaf, block_info, 1);
+	btrfs_set_tree_block_level(leaf, block_info, 0);
 	iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
 	if (parent > 0) {
 		btrfs_set_extent_inline_ref_type(leaf, iref,
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 5539f0b..44d3492 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2241,8 +2241,10 @@
 			nritems = btrfs_header_nritems(path->nodes[0]);
 			if (path->slots[0] >= nritems) {
 				ret = btrfs_next_leaf(root, path);
-				if (ret)
+				if (ret == 1)
 					break;
+				else if (ret < 0)
+					goto out;
 			}
 			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
 					      path->slots[0]);
@@ -3397,8 +3399,11 @@
 		 * from this directory and from this transaction
 		 */
 		ret = btrfs_next_leaf(root, path);
-		if (ret == 1) {
-			last_offset = (u64)-1;
+		if (ret) {
+			if (ret == 1)
+				last_offset = (u64)-1;
+			else
+				err = ret;
 			goto done;
 		}
 		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
@@ -3664,7 +3669,7 @@
 
 		src_offset = btrfs_item_ptr_offset(src, start_slot + i);
 
-		if ((i == (nr - 1)))
+		if (i == nr - 1)
 			last_key = ins_keys[i];
 
 		if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
@@ -3849,6 +3854,7 @@
 			ASSERT(ret == 0);
 			src = src_path->nodes[0];
 			i = 0;
+			need_find_last_extent = true;
 		}
 
 		btrfs_item_key_to_cpu(src, &key, i);
@@ -4614,6 +4620,7 @@
 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
 	u64 logged_isize = 0;
 	bool need_log_inode_item = true;
+	bool xattrs_logged = false;
 
 	path = btrfs_alloc_path();
 	if (!path)
@@ -4918,6 +4925,7 @@
 	err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
 	if (err)
 		goto out_unlock;
+	xattrs_logged = true;
 	if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
 		btrfs_release_path(path);
 		btrfs_release_path(dst_path);
@@ -4930,6 +4938,11 @@
 	btrfs_release_path(dst_path);
 	if (need_log_inode_item) {
 		err = log_inode_item(trans, log, dst_path, inode);
+		if (!err && !xattrs_logged) {
+			err = btrfs_log_all_xattrs(trans, root, inode, path,
+						   dst_path);
+			btrfs_release_path(path);
+		}
 		if (err)
 			goto out_unlock;
 	}
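
Both tree-log hunks above adopt the same convention for btrfs_next_leaf(): 0 means the path now points into the next leaf, 1 means the end of the tree was reached, and a negative value is a real error that must be propagated instead of being treated as "done". A hedged sketch of that handling as a standalone helper; the wrapper itself is illustrative, and btrfs_next_leaf() is internal to fs/btrfs (declared in ctree.h).

#include "ctree.h"	/* fs/btrfs internal: btrfs_root, btrfs_path */

/* Illustrative: advance the search path, separating the three outcomes. */
static int example_advance_leaf(struct btrfs_root *root,
				struct btrfs_path *path, bool *done)
{
	int ret = btrfs_next_leaf(root, path);

	if (ret < 0)
		return ret;		/* real error: caller must bail out */

	*done = (ret > 0);		/* 1: no more leaves in this tree */
	return 0;			/* 0: path->slots[0] is on the next leaf */
}
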
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 39b917b7..64ab90d 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3966,6 +3966,15 @@
 		return 0;
 	}
 
+	/*
+	 * A ro->rw remount sequence should continue with the paused balance
+	 * regardless of who pauses it, system or the user as of now, so set
+	 * the resume flag.
+	 */
+	spin_lock(&fs_info->balance_lock);
+	fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
+	spin_unlock(&fs_info->balance_lock);
+
 	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
 	return PTR_ERR_OR_ZERO(tsk);
 }
@@ -5177,7 +5186,14 @@
 	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
 		ret = 2;
 	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
-		ret = 3;
+		/*
+		 * There could be two corrupted data stripes, we need
+		 * to loop retry in order to rebuild the correct data.
+		 *
+		 * Fail a stripe at a time on every retry except the
+		 * stripe under reconstruction.
+		 */
+		ret = map->num_stripes;
 	else
 		ret = 1;
 	free_extent_map(em);
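
The raid56.c and volumes.c hunks work together: RAID6 now reports map->num_stripes possible "mirrors" so the read-repair loop gets one retry per stripe, and every mirror_num above 2 selects one additional stripe to fail. A rough arithmetic sketch of the failb selection; the function name is illustrative, and faila is assumed to be the stripe already known bad.

/* Illustrative: which second stripe is failed for a given retry mirror. */
static int example_pick_failb(int mirror_num, int real_stripes, int faila)
{
	int failb;

	if (mirror_num <= 2)
		return -1;			/* no second failure injected */

	/* mirror 3 fails P (index real_stripes - 2), mirror 4 the last
	 * data stripe, and so on; skip the stripe already held in faila. */
	failb = real_stripes - (mirror_num - 1);
	if (failb <= faila)
		failb--;

	return failb;
}
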
diff --git a/fs/buffer.c b/fs/buffer.c
index 5d8f496..3a8064d 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1455,12 +1455,48 @@
 	return 0;
 }
 
+static void __evict_bh_lru(void *arg)
+{
+	struct bh_lru *b = &get_cpu_var(bh_lrus);
+	struct buffer_head *bh = arg;
+	int i;
+
+	for (i = 0; i < BH_LRU_SIZE; i++) {
+		if (b->bhs[i] == bh) {
+			brelse(b->bhs[i]);
+			b->bhs[i] = NULL;
+			goto out;
+		}
+	}
+out:
+	put_cpu_var(bh_lrus);
+}
+
+static bool bh_exists_in_lru(int cpu, void *arg)
+{
+	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
+	struct buffer_head *bh = arg;
+	int i;
+
+	for (i = 0; i < BH_LRU_SIZE; i++) {
+		if (b->bhs[i] == bh)
+			return 1;
+	}
+
+	return 0;
+
+}
 void invalidate_bh_lrus(void)
 {
 	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
 }
 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
 
+static void evict_bh_lrus(struct buffer_head *bh)
+{
+	on_each_cpu_cond(bh_exists_in_lru, __evict_bh_lru, bh, 1, GFP_ATOMIC);
+}
+
 void set_bh_page(struct buffer_head *bh,
 		struct page *page, unsigned long offset)
 {
@@ -3250,8 +3286,15 @@
 	do {
 		if (buffer_write_io_error(bh) && page->mapping)
 			mapping_set_error(page->mapping, -EIO);
-		if (buffer_busy(bh))
-			goto failed;
+		if (buffer_busy(bh)) {
+			/*
+			 * Check if the busy failure was due to an
+			 * outstanding LRU reference
+			 */
+			evict_bh_lrus(bh);
+			if (buffer_busy(bh))
+				goto failed;
+		}
 		bh = bh->b_this_page;
 	} while (bh != head);
 
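
evict_bh_lrus() above relies on on_each_cpu_cond() so that an IPI is only sent to CPUs whose bh LRU actually holds the buffer_head. A minimal sketch of the same conditional-IPI pattern over a hypothetical per-CPU cache; the per-CPU variable and helper names are illustrative.

#include <linux/gfp.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(void *, example_cached_obj);

/* Runs via IPI on each selected CPU: drop the local cached reference. */
static void example_drop_local(void *arg)
{
	if (this_cpu_read(example_cached_obj) == arg)
		this_cpu_write(example_cached_obj, NULL);
}

/* Runs on the calling CPU for every CPU: decide whether the IPI is needed. */
static bool example_cpu_caches_obj(int cpu, void *arg)
{
	return per_cpu(example_cached_obj, cpu) == arg;
}

static void example_evict(void *obj)
{
	/* wait=1 so eviction is complete on return; GFP_ATOMIC matches
	 * callers that may hold locks, as in the free-buffers path above. */
	on_each_cpu_cond(example_cpu_caches_obj, example_drop_local, obj,
			 1, GFP_ATOMIC);
}
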
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index ca3f630..e7ddb23 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -598,7 +598,8 @@
 struct ceph_aio_request {
 	struct kiocb *iocb;
 	size_t total_len;
-	int write;
+	bool write;
+	bool should_dirty;
 	int error;
 	struct list_head osd_reqs;
 	unsigned num_reqs;
@@ -708,7 +709,7 @@
 		}
 	}
 
-	ceph_put_page_vector(osd_data->pages, num_pages, !aio_req->write);
+	ceph_put_page_vector(osd_data->pages, num_pages, aio_req->should_dirty);
 	ceph_osdc_put_request(req);
 
 	if (rc < 0)
@@ -890,6 +891,7 @@
 	size_t count = iov_iter_count(iter);
 	loff_t pos = iocb->ki_pos;
 	bool write = iov_iter_rw(iter) == WRITE;
+	bool should_dirty = !write && iter_is_iovec(iter);
 
 	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
 		return -EROFS;
@@ -954,6 +956,7 @@
 			if (aio_req) {
 				aio_req->iocb = iocb;
 				aio_req->write = write;
+				aio_req->should_dirty = should_dirty;
 				INIT_LIST_HEAD(&aio_req->osd_reqs);
 				if (write) {
 					aio_req->mtime = mtime;
@@ -1012,7 +1015,7 @@
 				len = ret;
 		}
 
-		ceph_put_page_vector(pages, num_pages, !write);
+		ceph_put_page_vector(pages, num_pages, should_dirty);
 
 		ceph_osdc_put_request(req);
 		if (ret < 0)
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index b382e59..2a89030 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -816,7 +816,6 @@
 	int err;
 	unsigned long started = jiffies;  /* note the start time */
 	struct dentry *root;
-	int first = 0;   /* first vfsmount for this super_block */
 
 	dout("mount start %p\n", fsc);
 	mutex_lock(&fsc->client->mount_mutex);
@@ -834,17 +833,17 @@
 			path = fsc->mount_options->server_path + 1;
 			dout("mount opening path %s\n", path);
 		}
+
+		err = ceph_fs_debugfs_init(fsc);
+		if (err < 0)
+			goto out;
+
 		root = open_root_dentry(fsc, path, started);
 		if (IS_ERR(root)) {
 			err = PTR_ERR(root);
 			goto out;
 		}
 		fsc->sb->s_root = dget(root);
-		first = 1;
-
-		err = ceph_fs_debugfs_init(fsc);
-		if (err < 0)
-			goto fail;
 	} else {
 		root = dget(fsc->sb->s_root);
 	}
@@ -854,11 +853,6 @@
 	mutex_unlock(&fsc->client->mount_mutex);
 	return root;
 
-fail:
-	if (first) {
-		dput(fsc->sb->s_root);
-		fsc->sb->s_root = NULL;
-	}
 out:
 	mutex_unlock(&fsc->client->mount_mutex);
 	return ERR_PTR(err);
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 7b496a4..4ed4736 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1412,6 +1412,7 @@
 #define CIFS_FATTR_NEED_REVAL		0x4
 #define CIFS_FATTR_INO_COLLISION	0x8
 #define CIFS_FATTR_UNKNOWN_NLINK	0x10
+#define CIFS_FATTR_FAKE_ROOT_INO	0x20
 
 struct cifs_fattr {
 	u32		cf_flags;
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index cc420d6..d572228 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -6413,9 +6413,7 @@
 	pSMB->InformationLevel =
 		cpu_to_le16(SMB_SET_FILE_EA);
 
-	parm_data =
-		(struct fealist *) (((char *) &pSMB->hdr.Protocol) +
-				       offset);
+	parm_data = (void *)pSMB + offsetof(struct smb_hdr, Protocol) + offset;
 	pSMB->ParameterOffset = cpu_to_le16(param_offset);
 	pSMB->DataOffset = cpu_to_le16(offset);
 	pSMB->SetupCount = 1;
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index d9cbda2..331ddd0 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -673,6 +673,9 @@
 		goto mknod_out;
 	}
 
+	if (!S_ISCHR(mode) && !S_ISBLK(mode))
+		goto mknod_out;
+
 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
 		goto mknod_out;
 
@@ -681,10 +684,8 @@
 
 	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
 	if (buf == NULL) {
-		kfree(full_path);
 		rc = -ENOMEM;
-		free_xid(xid);
-		return rc;
+		goto mknod_out;
 	}
 
 	if (backup_cred(cifs_sb))
@@ -731,7 +732,7 @@
 		pdev->minor = cpu_to_le64(MINOR(device_number));
 		rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
 							&bytes_written, iov, 1);
-	} /* else if (S_ISFIFO) */
+	}
 	tcon->ses->server->ops->close(xid, tcon, &fid);
 	d_drop(direntry);
 
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 02e403a..49eeed2 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -589,7 +589,7 @@
 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
 	int rc = 0;
 
-	down_read(&cinode->lock_sem);
+	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
 	if (cinode->can_cache_brlcks) {
 		/* can cache locks - no need to relock */
 		up_read(&cinode->lock_sem);
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 7ab5be7..24c19eb 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -701,6 +701,18 @@
 	return rc;
 }
 
+/* Simple function to return a 64 bit hash of string.  Rarely called */
+static __u64 simple_hashstr(const char *str)
+{
+	const __u64 hash_mult =  1125899906842597L; /* a big enough prime */
+	__u64 hash = 0;
+
+	while (*str)
+		hash = (hash + (__u64) *str++) * hash_mult;
+
+	return hash;
+}
+
 int
 cifs_get_inode_info(struct inode **inode, const char *full_path,
 		    FILE_ALL_INFO *data, struct super_block *sb, int xid,
@@ -810,6 +822,14 @@
 						 tmprc);
 					fattr.cf_uniqueid = iunique(sb, ROOT_I);
 					cifs_autodisable_serverino(cifs_sb);
+				} else if ((fattr.cf_uniqueid == 0) &&
+						strlen(full_path) == 0) {
+					/* some servers ret bad root ino ie 0 */
+					cifs_dbg(FYI, "Invalid (0) inodenum\n");
+					fattr.cf_flags |=
+						CIFS_FATTR_FAKE_ROOT_INO;
+					fattr.cf_uniqueid =
+						simple_hashstr(tcon->treeName);
 				}
 			}
 		} else
@@ -826,6 +846,16 @@
 				&fattr.cf_uniqueid, data);
 			if (tmprc)
 				fattr.cf_uniqueid = CIFS_I(*inode)->uniqueid;
+			else if ((fattr.cf_uniqueid == 0) &&
+					strlen(full_path) == 0) {
+				/*
+				 * Reuse existing root inode num since
+				 * inum zero for root causes ls of . and .. to
+				 * not be returned
+				 */
+				cifs_dbg(FYI, "Srv ret 0 inode num for root\n");
+				fattr.cf_uniqueid = CIFS_I(*inode)->uniqueid;
+			}
 		} else
 			fattr.cf_uniqueid = CIFS_I(*inode)->uniqueid;
 	}
@@ -887,6 +917,9 @@
 	}
 
 cgii_exit:
+	if ((*inode) && ((*inode)->i_ino == 0))
+		cifs_dbg(FYI, "inode number of zero returned\n");
+
 	kfree(buf);
 	cifs_put_tlink(tlink);
 	return rc;
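
simple_hashstr() above exists only to synthesize a stable, non-zero root inode number when the server reports inode 0 for the share root. A standalone sketch of that fallback; the function name is illustrative and the constant is the same prime multiplier used in the hunk.

#include <linux/types.h>

/* Illustrative: derive a deterministic fake root inode number from a tree name. */
static __u64 example_fake_root_ino(const char *tree_name)
{
	const __u64 hash_mult = 1125899906842597L;	/* a big enough prime */
	__u64 hash = 0;

	while (*tree_name)
		hash = (hash + (__u64)*tree_name++) * hash_mult;

	return hash;	/* used in place of the bogus 0 the server returned */
}
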
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 7c26286..44b7ccb 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1151,15 +1151,19 @@
 		goto tcon_exit;
 	}
 
-	if (rsp->ShareType & SMB2_SHARE_TYPE_DISK)
+	switch (rsp->ShareType) {
+	case SMB2_SHARE_TYPE_DISK:
 		cifs_dbg(FYI, "connection to disk share\n");
-	else if (rsp->ShareType & SMB2_SHARE_TYPE_PIPE) {
+		break;
+	case SMB2_SHARE_TYPE_PIPE:
 		tcon->ipc = true;
 		cifs_dbg(FYI, "connection to pipe share\n");
-	} else if (rsp->ShareType & SMB2_SHARE_TYPE_PRINT) {
-		tcon->print = true;
+		break;
+	case SMB2_SHARE_TYPE_PRINT:
+		tcon->ipc = true;
 		cifs_dbg(FYI, "connection to printer\n");
-	} else {
+		break;
+	default:
 		cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
 		rc = -EOPNOTSUPP;
 		goto tcon_error_exit;
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index f2d7402..93c8e4a 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -833,7 +833,7 @@
  */
 #define XFORM(i) (((i) ^ ((i) << 27) ^ ((i) << 17)) & 0xffffffff)
 
-#define COMPATIBLE_IOCTL(cmd) XFORM(cmd),
+#define COMPATIBLE_IOCTL(cmd) XFORM((u32)cmd),
 /* ioctl should not be warned about even if it's not implemented.
    Valid reasons to use this:
    - It is implemented with ->compat_ioctl on some device, but programs
diff --git a/fs/crypto/Makefile b/fs/crypto/Makefile
index cb49698..cc42e5e 100644
--- a/fs/crypto/Makefile
+++ b/fs/crypto/Makefile
@@ -1,4 +1,7 @@
 obj-$(CONFIG_FS_ENCRYPTION)	+= fscrypto.o
 
-fscrypto-y := crypto.o fname.o hooks.o keyinfo.o policy.o
+ccflags-y += -Ifs/ext4
+ccflags-y += -Ifs/f2fs
+
+fscrypto-y := crypto.o fname.o hooks.o keyinfo.o policy.o fscrypt_ice.o
 fscrypto-$(CONFIG_BLOCK) += bio.o
diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index 2596c9a..c629e97 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -25,42 +25,55 @@
 #include <linux/namei.h>
 #include "fscrypt_private.h"
 
-/*
- * Call fscrypt_decrypt_page on every single page, reusing the encryption
- * context.
- */
-static void completion_pages(struct work_struct *work)
+static void __fscrypt_decrypt_bio(struct bio *bio, bool done)
 {
-	struct fscrypt_ctx *ctx =
-		container_of(work, struct fscrypt_ctx, r.work);
-	struct bio *bio = ctx->r.bio;
 	struct bio_vec *bv;
 	int i;
 
 	bio_for_each_segment_all(bv, bio, i) {
 		struct page *page = bv->bv_page;
-		int ret = fscrypt_decrypt_page(page->mapping->host, page,
-				PAGE_SIZE, 0, page->index);
 
-		if (ret) {
-			WARN_ON_ONCE(1);
-			SetPageError(page);
-		} else {
+		if (fscrypt_using_hardware_encryption(page->mapping->host)) {
 			SetPageUptodate(page);
+		} else {
+			int ret = fscrypt_decrypt_page(page->mapping->host,
+				page, PAGE_SIZE, 0, page->index);
+			if (ret) {
+				WARN_ON_ONCE(1);
+				SetPageError(page);
+			} else if (done) {
+				SetPageUptodate(page);
+			}
 		}
-		unlock_page(page);
+		if (done)
+			unlock_page(page);
 	}
+}
+
+void fscrypt_decrypt_bio(struct bio *bio)
+{
+	__fscrypt_decrypt_bio(bio, false);
+}
+EXPORT_SYMBOL(fscrypt_decrypt_bio);
+
+static void completion_pages(struct work_struct *work)
+{
+	struct fscrypt_ctx *ctx =
+		container_of(work, struct fscrypt_ctx, r.work);
+	struct bio *bio = ctx->r.bio;
+
+	__fscrypt_decrypt_bio(bio, true);
 	fscrypt_release_ctx(ctx);
 	bio_put(bio);
 }
 
-void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio)
+void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, struct bio *bio)
 {
 	INIT_WORK(&ctx->r.work, completion_pages);
 	ctx->r.bio = bio;
-	queue_work(fscrypt_read_workqueue, &ctx->r.work);
+	fscrypt_enqueue_decrypt_work(&ctx->r.work);
 }
-EXPORT_SYMBOL(fscrypt_decrypt_bio_pages);
+EXPORT_SYMBOL(fscrypt_enqueue_decrypt_bio);
 
 void fscrypt_pullback_bio_page(struct page **page, bool restore)
 {
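
After this rename, a filesystem's read-completion path hands decryption off with fscrypt_enqueue_decrypt_bio() rather than touching the fscrypt workqueue directly. A hedged sketch of a caller, loosely modelled on the ext4/f2fs mpage read paths; the helper name and the way the ctx is passed in are assumptions.

#include <linux/bio.h>
#include <linux/fscrypt.h>

/*
 * Illustrative completion hook: if the bio was read encrypted (ctx != NULL),
 * defer page decryption and unlocking to the fscrypt workqueue; otherwise
 * fall through to the usual plaintext completion.
 */
static void example_complete_read(struct fscrypt_ctx *ctx, struct bio *bio)
{
	if (ctx) {
		fscrypt_enqueue_decrypt_bio(ctx, bio);
		return;		/* the worker unlocks the pages and puts the bio */
	}

	/* ... SetPageUptodate()/unlock_page() loop for plaintext reads ... */
	bio_put(bio);
}
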
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 732a786..0758d32 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -27,6 +27,7 @@
 #include <linux/dcache.h>
 #include <linux/namei.h>
 #include <crypto/aes.h>
+#include <crypto/skcipher.h>
 #include "fscrypt_private.h"
 
 static unsigned int num_prealloc_crypto_pages = 32;
@@ -44,12 +45,18 @@
 static LIST_HEAD(fscrypt_free_ctxs);
 static DEFINE_SPINLOCK(fscrypt_ctx_lock);
 
-struct workqueue_struct *fscrypt_read_workqueue;
+static struct workqueue_struct *fscrypt_read_workqueue;
 static DEFINE_MUTEX(fscrypt_init_mutex);
 
 static struct kmem_cache *fscrypt_ctx_cachep;
 struct kmem_cache *fscrypt_info_cachep;
 
+void fscrypt_enqueue_decrypt_work(struct work_struct *work)
+{
+	queue_work(fscrypt_read_workqueue, work);
+}
+EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);
+
 /**
  * fscrypt_release_ctx() - Releases an encryption context
  * @ctx: The encryption context to release.
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index 6eb4343..b18fa32 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -12,42 +12,46 @@
 
 #include <linux/scatterlist.h>
 #include <linux/ratelimit.h>
+#include <crypto/skcipher.h>
 #include "fscrypt_private.h"
 
+static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
+{
+	if (str->len == 1 && str->name[0] == '.')
+		return true;
+
+	if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
+		return true;
+
+	return false;
+}
+
 /**
  * fname_encrypt() - encrypt a filename
  *
- * The caller must have allocated sufficient memory for the @oname string.
+ * The output buffer must be at least as large as the input buffer.
+ * Any extra space is filled with NUL padding before encryption.
  *
  * Return: 0 on success, -errno on failure
  */
-static int fname_encrypt(struct inode *inode,
-			const struct qstr *iname, struct fscrypt_str *oname)
+int fname_encrypt(struct inode *inode, const struct qstr *iname,
+		  u8 *out, unsigned int olen)
 {
 	struct skcipher_request *req = NULL;
 	DECLARE_CRYPTO_WAIT(wait);
-	struct fscrypt_info *ci = inode->i_crypt_info;
-	struct crypto_skcipher *tfm = ci->ci_ctfm;
+	struct crypto_skcipher *tfm = inode->i_crypt_info->ci_ctfm;
 	int res = 0;
 	char iv[FS_CRYPTO_BLOCK_SIZE];
 	struct scatterlist sg;
-	int padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK);
-	unsigned int lim;
-	unsigned int cryptlen;
-
-	lim = inode->i_sb->s_cop->max_namelen(inode);
-	if (iname->len <= 0 || iname->len > lim)
-		return -EIO;
 
 	/*
 	 * Copy the filename to the output buffer for encrypting in-place and
 	 * pad it with the needed number of NUL bytes.
 	 */
-	cryptlen = max_t(unsigned int, iname->len, FS_CRYPTO_BLOCK_SIZE);
-	cryptlen = round_up(cryptlen, padding);
-	cryptlen = min(cryptlen, lim);
-	memcpy(oname->name, iname->name, iname->len);
-	memset(oname->name + iname->len, 0, cryptlen - iname->len);
+	if (WARN_ON(olen < iname->len))
+		return -ENOBUFS;
+	memcpy(out, iname->name, iname->len);
+	memset(out + iname->len, 0, olen - iname->len);
 
 	/* Initialize the IV */
 	memset(iv, 0, FS_CRYPTO_BLOCK_SIZE);
@@ -62,8 +66,8 @@
 	skcipher_request_set_callback(req,
 			CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
 			crypto_req_done, &wait);
-	sg_init_one(&sg, oname->name, cryptlen);
-	skcipher_request_set_crypt(req, &sg, &sg, cryptlen, iv);
+	sg_init_one(&sg, out, olen);
+	skcipher_request_set_crypt(req, &sg, &sg, olen, iv);
 
 	/* Do the encryption */
 	res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
@@ -74,7 +78,6 @@
 		return res;
 	}
 
-	oname->len = cryptlen;
 	return 0;
 }
 
@@ -187,50 +190,52 @@
 	return cp - dst;
 }
 
-u32 fscrypt_fname_encrypted_size(const struct inode *inode, u32 ilen)
+bool fscrypt_fname_encrypted_size(const struct inode *inode, u32 orig_len,
+				  u32 max_len, u32 *encrypted_len_ret)
 {
-	int padding = 32;
-	struct fscrypt_info *ci = inode->i_crypt_info;
+	int padding = 4 << (inode->i_crypt_info->ci_flags &
+			    FS_POLICY_FLAGS_PAD_MASK);
+	u32 encrypted_len;
 
-	if (ci)
-		padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK);
-	ilen = max(ilen, (u32)FS_CRYPTO_BLOCK_SIZE);
-	return round_up(ilen, padding);
+	if (orig_len > max_len)
+		return false;
+	encrypted_len = max(orig_len, (u32)FS_CRYPTO_BLOCK_SIZE);
+	encrypted_len = round_up(encrypted_len, padding);
+	*encrypted_len_ret = min(encrypted_len, max_len);
+	return true;
 }
-EXPORT_SYMBOL(fscrypt_fname_encrypted_size);
 
 /**
- * fscrypt_fname_crypto_alloc_obuff() -
+ * fscrypt_fname_alloc_buffer - allocate a buffer for presented filenames
  *
- * Allocates an output buffer that is sufficient for the crypto operation
- * specified by the context and the direction.
+ * Allocate a buffer that is large enough to hold any decrypted or encoded
+ * filename (null-terminated), for the given maximum encrypted filename length.
+ *
+ * Return: 0 on success, -errno on failure
  */
 int fscrypt_fname_alloc_buffer(const struct inode *inode,
-				u32 ilen, struct fscrypt_str *crypto_str)
+			       u32 max_encrypted_len,
+			       struct fscrypt_str *crypto_str)
 {
-	u32 olen = fscrypt_fname_encrypted_size(inode, ilen);
 	const u32 max_encoded_len =
 		max_t(u32, BASE64_CHARS(FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE),
 		      1 + BASE64_CHARS(sizeof(struct fscrypt_digested_name)));
+	u32 max_presented_len;
 
-	crypto_str->len = olen;
-	olen = max(olen, max_encoded_len);
+	max_presented_len = max(max_encoded_len, max_encrypted_len);
 
-	/*
-	 * Allocated buffer can hold one more character to null-terminate the
-	 * string
-	 */
-	crypto_str->name = kmalloc(olen + 1, GFP_NOFS);
-	if (!(crypto_str->name))
+	crypto_str->name = kmalloc(max_presented_len + 1, GFP_NOFS);
+	if (!crypto_str->name)
 		return -ENOMEM;
+	crypto_str->len = max_presented_len;
 	return 0;
 }
 EXPORT_SYMBOL(fscrypt_fname_alloc_buffer);
 
 /**
- * fscrypt_fname_crypto_free_buffer() -
+ * fscrypt_fname_free_buffer - free the buffer for presented filenames
  *
- * Frees the buffer allocated for crypto operation.
+ * Free the buffer allocated by fscrypt_fname_alloc_buffer().
  */
 void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str)
 {
@@ -297,35 +302,6 @@
 EXPORT_SYMBOL(fscrypt_fname_disk_to_usr);
 
 /**
- * fscrypt_fname_usr_to_disk() - converts a filename from user space to disk
- * space
- *
- * The caller must have allocated sufficient memory for the @oname string.
- *
- * Return: 0 on success, -errno on failure
- */
-int fscrypt_fname_usr_to_disk(struct inode *inode,
-			const struct qstr *iname,
-			struct fscrypt_str *oname)
-{
-	if (fscrypt_is_dot_dotdot(iname)) {
-		oname->name[0] = '.';
-		oname->name[iname->len - 1] = '.';
-		oname->len = iname->len;
-		return 0;
-	}
-	if (inode->i_crypt_info)
-		return fname_encrypt(inode, iname, oname);
-	/*
-	 * Without a proper key, a user is not allowed to modify the filenames
-	 * in a directory. Consequently, a user space name cannot be mapped to
-	 * a disk-space name
-	 */
-	return -ENOKEY;
-}
-EXPORT_SYMBOL(fscrypt_fname_usr_to_disk);
-
-/**
  * fscrypt_setup_filename() - prepare to search a possibly encrypted directory
  * @dir: the directory that will be searched
  * @iname: the user-provided filename being searched for
@@ -368,11 +344,17 @@
 		return ret;
 
 	if (dir->i_crypt_info) {
-		ret = fscrypt_fname_alloc_buffer(dir, iname->len,
-							&fname->crypto_buf);
-		if (ret)
-			return ret;
-		ret = fname_encrypt(dir, iname, &fname->crypto_buf);
+		if (!fscrypt_fname_encrypted_size(dir, iname->len,
+						  dir->i_sb->s_cop->max_namelen(dir),
+						  &fname->crypto_buf.len))
+			return -ENAMETOOLONG;
+		fname->crypto_buf.name = kmalloc(fname->crypto_buf.len,
+						 GFP_NOFS);
+		if (!fname->crypto_buf.name)
+			return -ENOMEM;
+
+		ret = fname_encrypt(dir, iname, fname->crypto_buf.name,
+				    fname->crypto_buf.len);
 		if (ret)
 			goto errout;
 		fname->disk_name.name = fname->crypto_buf.name;
@@ -424,7 +406,7 @@
 	return 0;
 
 errout:
-	fscrypt_fname_free_buffer(&fname->crypto_buf);
+	kfree(fname->crypto_buf.name);
 	return ret;
 }
 EXPORT_SYMBOL(fscrypt_setup_filename);
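
With these changes, fscrypt_fname_alloc_buffer() takes the maximum on-disk (encrypted) name length, while plaintext sizing and the overflow check move into fscrypt_fname_encrypted_size(). A hedged sketch of the readdir-side usage that stays public; the wrapper is illustrative and error unwinding is trimmed.

#include <linux/fscrypt.h>

/* Illustrative: prepare a presentation buffer for encrypted dirents. */
static int example_prepare_name_buf(struct inode *dir, u32 max_disk_name_len,
				    struct fscrypt_str *pstr)
{
	int err;

	err = fscrypt_get_encryption_info(dir);
	if (err)
		return err;

	/* Large enough for any decrypted or base64-encoded presentation. */
	err = fscrypt_fname_alloc_buffer(dir, max_disk_name_len, pstr);
	if (err)
		return err;

	/*
	 * ... per entry: fscrypt_fname_disk_to_usr(dir, hash, minor_hash,
	 * &disk_name, pstr); ... then fscrypt_fname_free_buffer(pstr).
	 */
	return 0;
}
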
diff --git a/fs/crypto/fscrypt_ice.c b/fs/crypto/fscrypt_ice.c
new file mode 100644
index 0000000..62dae83
--- /dev/null
+++ b/fs/crypto/fscrypt_ice.c
@@ -0,0 +1,146 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "fscrypt_ice.h"
+
+int fscrypt_using_hardware_encryption(const struct inode *inode)
+{
+	struct fscrypt_info *ci = inode->i_crypt_info;
+
+	return S_ISREG(inode->i_mode) && ci &&
+		ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE;
+}
+EXPORT_SYMBOL(fscrypt_using_hardware_encryption);
+
+/*
+ * Retrieves encryption key from the inode
+ */
+char *fscrypt_get_ice_encryption_key(const struct inode *inode)
+{
+	struct fscrypt_info *ci = NULL;
+
+	if (!inode)
+		return NULL;
+
+	ci = inode->i_crypt_info;
+	if (!ci)
+		return NULL;
+
+	return &(ci->ci_raw_key[0]);
+}
+
+/*
+ * Retrieves encryption salt from the inode
+ */
+char *fscrypt_get_ice_encryption_salt(const struct inode *inode)
+{
+	struct fscrypt_info *ci = NULL;
+
+	if (!inode)
+		return NULL;
+
+	ci = inode->i_crypt_info;
+	if (!ci)
+		return NULL;
+
+	return &(ci->ci_raw_key[fscrypt_get_ice_encryption_key_size(inode)]);
+}
+
+/*
+ * returns true if the cipher mode in inode is AES XTS
+ */
+int fscrypt_is_aes_xts_cipher(const struct inode *inode)
+{
+	struct fscrypt_info *ci = inode->i_crypt_info;
+
+	if (!ci)
+		return 0;
+
+	return (ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE);
+}
+
+/*
+ * returns true if encryption info in both inodes is equal
+ */
+bool fscrypt_is_ice_encryption_info_equal(const struct inode *inode1,
+					const struct inode *inode2)
+{
+	char *key1 = NULL;
+	char *key2 = NULL;
+	char *salt1 = NULL;
+	char *salt2 = NULL;
+
+	if (!inode1 || !inode2)
+		return false;
+
+	if (inode1 == inode2)
+		return true;
+
+	/* Neither inode belongs to ICE, so we do not care: treat them
+	 * as equal for our purposes.
+	 */
+	if (!fscrypt_should_be_processed_by_ice(inode1) &&
+			!fscrypt_should_be_processed_by_ice(inode2))
+		return true;
+
+	/* one belongs to ice, the other does not -> not equal */
+	if (fscrypt_should_be_processed_by_ice(inode1) ^
+			fscrypt_should_be_processed_by_ice(inode2))
+		return false;
+
+	key1 = fscrypt_get_ice_encryption_key(inode1);
+	key2 = fscrypt_get_ice_encryption_key(inode2);
+	salt1 = fscrypt_get_ice_encryption_salt(inode1);
+	salt2 = fscrypt_get_ice_encryption_salt(inode2);
+
+	/* key and salt should not be null by this point */
+	if (!key1 || !key2 || !salt1 || !salt2 ||
+		(fscrypt_get_ice_encryption_key_size(inode1) !=
+		 fscrypt_get_ice_encryption_key_size(inode2)) ||
+		(fscrypt_get_ice_encryption_salt_size(inode1) !=
+		 fscrypt_get_ice_encryption_salt_size(inode2)))
+		return false;
+
+	if ((memcmp(key1, key2,
+			fscrypt_get_ice_encryption_key_size(inode1)) == 0) &&
+		(memcmp(salt1, salt2,
+			fscrypt_get_ice_encryption_salt_size(inode1)) == 0))
+		return true;
+
+	return false;
+}
+
+void fscrypt_set_ice_dun(const struct inode *inode, struct bio *bio, u64 dun)
+{
+	if (fscrypt_should_be_processed_by_ice(inode))
+		bio->bi_iter.bi_dun = dun;
+}
+EXPORT_SYMBOL(fscrypt_set_ice_dun);
+
+/*
+ * This function will be used for filesystem when deciding to merge bios.
+ * Basic assumption is, if inline_encryption is set, single bio has to
+ * guarantee consecutive LBAs as well as ino|pg->index.
+ */
+bool fscrypt_mergeable_bio(struct bio *bio, u64 dun, bool bio_encrypted)
+{
+	if (!bio)
+		return true;
+
+	/* if both of them are not encrypted, no further check is needed */
+	if (!bio_dun(bio) && !bio_encrypted)
+		return true;
+
+	/* ICE allows only consecutive iv_key stream. */
+	return bio_end_dun(bio) == dun;
+}
+EXPORT_SYMBOL(fscrypt_mergeable_bio);
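
These two exports are intended for the filesystem's bio-build path: fscrypt_set_ice_dun() seeds the inline-crypto IV for the first page, and fscrypt_mergeable_bio() decides whether the next page may share the bio. A rough sketch of a caller; the ino/index DUN packing shown is the convention used by the ICE-enabled filesystems and is stated here as an assumption.

#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

/* Assumed DUN packing: upper 32 bits inode number, lower 32 bits page index. */
static u64 example_page_dun(struct inode *inode, struct page *page)
{
	return ((u64)inode->i_ino << 32) | (page->index & 0xFFFFFFFF);
}

/* Illustrative: may this page be added to the bio under inline crypto? */
static bool example_can_merge(struct bio *bio, struct inode *inode,
			      struct page *page)
{
	bool encrypted = fscrypt_using_hardware_encryption(inode);

	return fscrypt_mergeable_bio(bio, example_page_dun(inode, page),
				     encrypted);
}

/* Illustrative: tag a freshly allocated bio with its starting DUN. */
static void example_init_dun(struct bio *bio, struct inode *inode,
			     struct page *first_page)
{
	fscrypt_set_ice_dun(inode, bio, example_page_dun(inode, first_page));
}
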
diff --git a/fs/crypto/fscrypt_ice.h b/fs/crypto/fscrypt_ice.h
new file mode 100644
index 0000000..c540506
--- /dev/null
+++ b/fs/crypto/fscrypt_ice.h
@@ -0,0 +1,106 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _FSCRYPT_ICE_H
+#define _FSCRYPT_ICE_H
+
+#include <linux/blkdev.h>
+#include "fscrypt_private.h"
+
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+static inline bool fscrypt_should_be_processed_by_ice(const struct inode *inode)
+{
+	if (!inode->i_sb->s_cop)
+		return 0;
+	if (!inode->i_sb->s_cop->is_encrypted((struct inode *)inode))
+		return 0;
+
+	return fscrypt_using_hardware_encryption(inode);
+}
+
+static inline int fscrypt_is_ice_capable(const struct super_block *sb)
+{
+	return blk_queue_inlinecrypt(bdev_get_queue(sb->s_bdev));
+}
+
+int fscrypt_is_aes_xts_cipher(const struct inode *inode);
+
+char *fscrypt_get_ice_encryption_key(const struct inode *inode);
+char *fscrypt_get_ice_encryption_salt(const struct inode *inode);
+
+bool fscrypt_is_ice_encryption_info_equal(const struct inode *inode1,
+					const struct inode *inode2);
+
+static inline size_t fscrypt_get_ice_encryption_key_size(
+					const struct inode *inode)
+{
+	return FS_AES_256_XTS_KEY_SIZE / 2;
+}
+
+static inline size_t fscrypt_get_ice_encryption_salt_size(
+					const struct inode *inode)
+{
+	return FS_AES_256_XTS_KEY_SIZE / 2;
+}
+#else
+static inline bool fscrypt_should_be_processed_by_ice(const struct inode *inode)
+{
+	return 0;
+}
+
+static inline int fscrypt_is_ice_capable(const struct super_block *sb)
+{
+	return 0;
+}
+
+static inline char *fscrypt_get_ice_encryption_key(const struct inode *inode)
+{
+	return NULL;
+}
+
+static inline char *fscrypt_get_ice_encryption_salt(const struct inode *inode)
+{
+	return NULL;
+}
+
+static inline size_t fscrypt_get_ice_encryption_key_size(
+					const struct inode *inode)
+{
+	return 0;
+}
+
+static inline size_t fscrypt_get_ice_encryption_salt_size(
+					const struct inode *inode)
+{
+	return 0;
+}
+
+static inline int fscrypt_is_xts_cipher(const struct inode *inode)
+{
+	return 0;
+}
+
+static inline bool fscrypt_is_ice_encryption_info_equal(
+					const struct inode *inode1,
+					const struct inode *inode2)
+{
+	return 0;
+}
+
+static inline int fscrypt_is_aes_xts_cipher(const struct inode *inode)
+{
+	return 0;
+}
+
+#endif
+
+#endif	/* _FSCRYPT_ICE_H */
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index 4688a64..03ff3aa 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -11,9 +11,12 @@
 #ifndef _FSCRYPT_PRIVATE_H
 #define _FSCRYPT_PRIVATE_H
 
+#ifndef __FS_HAS_ENCRYPTION
 #define __FS_HAS_ENCRYPTION 1
+#endif
 #include <linux/fscrypt.h>
 #include <crypto/hash.h>
+#include <linux/pfk.h>
 
 /* Encryption parameters */
 #define FS_IV_SIZE			16
@@ -49,17 +52,34 @@
 
 #define FS_ENCRYPTION_CONTEXT_FORMAT_V1		1
 
+/**
+ * For encrypted symlinks, the ciphertext length is stored at the beginning
+ * of the string in little-endian format.
+ */
+struct fscrypt_symlink_data {
+	__le16 len;
+	char encrypted_path[1];
+} __packed;
+
+enum ci_mode_info {
+	CI_NONE_MODE = 0,
+	CI_DATA_MODE,
+	CI_FNAME_MODE,
+};
+
 /*
  * A pointer to this structure is stored in the file system's in-core
  * representation of an inode.
  */
 struct fscrypt_info {
+	u8 ci_mode;
 	u8 ci_data_mode;
 	u8 ci_filename_mode;
 	u8 ci_flags;
 	struct crypto_skcipher *ci_ctfm;
 	struct crypto_cipher *ci_essiv_tfm;
 	u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE];
+	u8 ci_raw_key[FS_MAX_KEY_SIZE];
 };
 
 typedef enum {
@@ -70,9 +90,36 @@
 #define FS_CTX_REQUIRES_FREE_ENCRYPT_FL		0x00000001
 #define FS_CTX_HAS_BOUNCE_BUFFER_FL		0x00000002
 
+static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
+					   u32 filenames_mode)
+{
+	if (contents_mode == FS_ENCRYPTION_MODE_AES_128_CBC &&
+	    filenames_mode == FS_ENCRYPTION_MODE_AES_128_CTS)
+		return true;
+
+	if (contents_mode == FS_ENCRYPTION_MODE_AES_256_XTS &&
+	    filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
+		return true;
+
+	if (contents_mode == FS_ENCRYPTION_MODE_SPECK128_256_XTS &&
+	    filenames_mode == FS_ENCRYPTION_MODE_SPECK128_256_CTS)
+		return true;
+
+	if (contents_mode == FS_ENCRYPTION_MODE_PRIVATE)
+		return true;
+
+	return false;
+}
+
+static inline bool is_private_data_mode(struct fscrypt_info *ci)
+{
+	return ci->ci_mode == CI_DATA_MODE &&
+		ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE;
+}
+
 /* crypto.c */
+extern struct kmem_cache *fscrypt_info_cachep;
 extern int fscrypt_initialize(unsigned int cop_flags);
-extern struct workqueue_struct *fscrypt_read_workqueue;
 extern int fscrypt_do_page_crypto(const struct inode *inode,
 				  fscrypt_direction_t rw, u64 lblk_num,
 				  struct page *src_page,
@@ -82,6 +129,13 @@
 extern struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
 					      gfp_t gfp_flags);
 
+/* fname.c */
+extern int fname_encrypt(struct inode *inode, const struct qstr *iname,
+			 u8 *out, unsigned int olen);
+extern bool fscrypt_fname_encrypted_size(const struct inode *inode,
+					 u32 orig_len, u32 max_len,
+					 u32 *encrypted_len_ret);
+
 /* keyinfo.c */
 extern void __exit fscrypt_essiv_cleanup(void);
 
diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c
index 9f5fb2e..bec0649 100644
--- a/fs/crypto/hooks.c
+++ b/fs/crypto/hooks.c
@@ -110,3 +110,161 @@
 	return 0;
 }
 EXPORT_SYMBOL_GPL(__fscrypt_prepare_lookup);
+
+int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len,
+			      unsigned int max_len,
+			      struct fscrypt_str *disk_link)
+{
+	int err;
+
+	/*
+	 * To calculate the size of the encrypted symlink target we need to know
+	 * the amount of NUL padding, which is determined by the flags set in
+	 * the encryption policy which will be inherited from the directory.
+	 * The easiest way to get access to this is to just load the directory's
+	 * fscrypt_info, since we'll need it to create the dir_entry anyway.
+	 *
+	 * Note: in test_dummy_encryption mode, @dir may be unencrypted.
+	 */
+	err = fscrypt_get_encryption_info(dir);
+	if (err)
+		return err;
+	if (!fscrypt_has_encryption_key(dir))
+		return -ENOKEY;
+
+	/*
+	 * Calculate the size of the encrypted symlink and verify it won't
+	 * exceed max_len.  Note that for historical reasons, encrypted symlink
+	 * targets are prefixed with the ciphertext length, despite this
+	 * actually being redundant with i_size.  This decreases by 2 bytes the
+	 * longest symlink target we can accept.
+	 *
+	 * We could recover 1 byte by not counting a null terminator, but
+	 * counting it (even though it is meaningless for ciphertext) is simpler
+	 * for now since filesystems will assume it is there and subtract it.
+	 */
+	if (!fscrypt_fname_encrypted_size(dir, len,
+					  max_len - sizeof(struct fscrypt_symlink_data),
+					  &disk_link->len))
+		return -ENAMETOOLONG;
+	disk_link->len += sizeof(struct fscrypt_symlink_data);
+
+	disk_link->name = NULL;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__fscrypt_prepare_symlink);
+
+int __fscrypt_encrypt_symlink(struct inode *inode, const char *target,
+			      unsigned int len, struct fscrypt_str *disk_link)
+{
+	int err;
+	struct qstr iname = QSTR_INIT(target, len);
+	struct fscrypt_symlink_data *sd;
+	unsigned int ciphertext_len;
+
+	err = fscrypt_require_key(inode);
+	if (err)
+		return err;
+
+	if (disk_link->name) {
+		/* filesystem-provided buffer */
+		sd = (struct fscrypt_symlink_data *)disk_link->name;
+	} else {
+		sd = kmalloc(disk_link->len, GFP_NOFS);
+		if (!sd)
+			return -ENOMEM;
+	}
+	ciphertext_len = disk_link->len - sizeof(*sd);
+	sd->len = cpu_to_le16(ciphertext_len);
+
+	err = fname_encrypt(inode, &iname, sd->encrypted_path, ciphertext_len);
+	if (err) {
+		if (!disk_link->name)
+			kfree(sd);
+		return err;
+	}
+	/*
+	 * Null-terminating the ciphertext doesn't make sense, but we still
+	 * count the null terminator in the length, so we might as well
+	 * initialize it just in case the filesystem writes it out.
+	 */
+	sd->encrypted_path[ciphertext_len] = '\0';
+
+	if (!disk_link->name)
+		disk_link->name = (unsigned char *)sd;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__fscrypt_encrypt_symlink);
+
+/**
+ * fscrypt_get_symlink - get the target of an encrypted symlink
+ * @inode: the symlink inode
+ * @caddr: the on-disk contents of the symlink
+ * @max_size: size of @caddr buffer
+ * @done: if successful, will be set up to free the returned target
+ *
+ * If the symlink's encryption key is available, we decrypt its target.
+ * Otherwise, we encode its target for presentation.
+ *
+ * This may sleep, so the filesystem must have dropped out of RCU mode already.
+ *
+ * Return: the presentable symlink target or an ERR_PTR()
+ */
+const char *fscrypt_get_symlink(struct inode *inode, const void *caddr,
+				unsigned int max_size,
+				struct delayed_call *done)
+{
+	const struct fscrypt_symlink_data *sd;
+	struct fscrypt_str cstr, pstr;
+	int err;
+
+	/* This is for encrypted symlinks only */
+	if (WARN_ON(!IS_ENCRYPTED(inode)))
+		return ERR_PTR(-EINVAL);
+
+	/*
+	 * Try to set up the symlink's encryption key, but we can continue
+	 * regardless of whether the key is available or not.
+	 */
+	err = fscrypt_get_encryption_info(inode);
+	if (err)
+		return ERR_PTR(err);
+
+	/*
+	 * For historical reasons, encrypted symlink targets are prefixed with
+	 * the ciphertext length, even though this is redundant with i_size.
+	 */
+
+	if (max_size < sizeof(*sd))
+		return ERR_PTR(-EUCLEAN);
+	sd = caddr;
+	cstr.name = (unsigned char *)sd->encrypted_path;
+	cstr.len = le16_to_cpu(sd->len);
+
+	if (cstr.len == 0)
+		return ERR_PTR(-EUCLEAN);
+
+	if (cstr.len + sizeof(*sd) - 1 > max_size)
+		return ERR_PTR(-EUCLEAN);
+
+	err = fscrypt_fname_alloc_buffer(inode, cstr.len, &pstr);
+	if (err)
+		return ERR_PTR(err);
+
+	err = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr);
+	if (err)
+		goto err_kfree;
+
+	err = -EUCLEAN;
+	if (pstr.name[0] == '\0')
+		goto err_kfree;
+
+	pstr.name[pstr.len] = '\0';
+	set_delayed_call(done, kfree_link, pstr.name);
+	return pstr.name;
+
+err_kfree:
+	kfree(pstr.name);
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(fscrypt_get_symlink);
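
A filesystem's ->symlink() implementation is expected to call these hooks in two steps: size the ciphertext before committing to the name length, then encrypt into disk_link once the inode exists. A minimal sketch under that assumption; the wrapper name is illustrative and error unwinding is omitted.

#include <linux/fs.h>
#include <linux/fscrypt.h>
#include <linux/string.h>

/* Illustrative: produce the on-disk representation of a symlink target. */
static int example_encrypt_symlink_target(struct inode *dir, struct inode *inode,
					  const char *target, unsigned int max_len,
					  struct fscrypt_str *disk_link)
{
	unsigned int len = strlen(target);
	int err;

	/* Checks that the ciphertext (plus its u16 length prefix) fits max_len. */
	err = __fscrypt_prepare_symlink(dir, len, max_len, disk_link);
	if (err)
		return err;		/* e.g. -ENAMETOOLONG or -ENOKEY */

	/* Encrypts into disk_link->name, allocating it if the fs left it NULL. */
	return __fscrypt_encrypt_symlink(inode, target, len, disk_link);
}
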
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index c891d2e..737b6fc 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -13,7 +13,9 @@
 #include <linux/ratelimit.h>
 #include <crypto/aes.h>
 #include <crypto/sha.h>
+#include <crypto/skcipher.h>
 #include "fscrypt_private.h"
+#include "fscrypt_ice.h"
 
 static struct crypto_shash *essiv_hash_tfm;
 
@@ -66,7 +68,7 @@
 }
 
 static int validate_user_key(struct fscrypt_info *crypt_info,
-			struct fscrypt_context *ctx, u8 *raw_key,
+			struct fscrypt_context *ctx,
 			const char *prefix, int min_keysize)
 {
 	char *description;
@@ -114,7 +116,24 @@
 		res = -ENOKEY;
 		goto out;
 	}
-	res = derive_key_aes(ctx->nonce, master_key, raw_key);
+	res = derive_key_aes(ctx->nonce, master_key, crypt_info->ci_raw_key);
+	/* If we don't need to derive, we still want to do everything
+	 * up until now to validate the key. It's cleaner to fail now
+	 * than to fail in block I/O.
+	if (!is_private_data_mode(crypt_info)) {
+		res = derive_key_aes(ctx->nonce, master_key,
+				crypt_info->ci_raw_key);
+	} else {
+		 * Inline encryption: no key derivation required because IVs are
+		 * assigned based on iv_sector.
+
+		BUILD_BUG_ON(sizeof(crypt_info->ci_raw_key) !=
+				sizeof(master_key->raw));
+		memcpy(crypt_info->ci_raw_key,
+			master_key->raw, sizeof(crypt_info->ci_raw_key));
+		res = 0;
+	}
+	 */
 out:
 	up_read(&keyring_key->sem);
 	key_put(keyring_key);
@@ -133,10 +152,14 @@
 					     FS_AES_128_CBC_KEY_SIZE },
 	[FS_ENCRYPTION_MODE_AES_128_CTS] = { "cts(cbc(aes))",
 					     FS_AES_128_CTS_KEY_SIZE },
+	[FS_ENCRYPTION_MODE_SPECK128_256_XTS] = { "xts(speck128)",	64 },
+	[FS_ENCRYPTION_MODE_SPECK128_256_CTS] = { "cts(cbc(speck128))",	32 },
+	[FS_ENCRYPTION_MODE_PRIVATE]	 = { "bugon",
+					     FS_AES_256_XTS_KEY_SIZE },
 };
 
 static int determine_cipher_type(struct fscrypt_info *ci, struct inode *inode,
-				 const char **cipher_str_ret, int *keysize_ret)
+		const char **cipher_str_ret, int *keysize_ret, int *fname)
 {
 	u32 mode;
 
@@ -148,9 +171,12 @@
 	}
 
 	if (S_ISREG(inode->i_mode)) {
+		ci->ci_mode = CI_DATA_MODE;
 		mode = ci->ci_data_mode;
 	} else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) {
+		ci->ci_mode = CI_FNAME_MODE;
 		mode = ci->ci_filename_mode;
+		*fname = 1;
 	} else {
 		WARN_ONCE(1, "fscrypt: filesystem tried to load encryption info for inode %lu, which is not encryptable (file type %d)\n",
 			  inode->i_ino, (inode->i_mode & S_IFMT));
@@ -169,6 +195,7 @@
 
 	crypto_free_skcipher(ci->ci_ctfm);
 	crypto_free_cipher(ci->ci_essiv_tfm);
+	memset(ci, 0, sizeof(*ci)); /* sanitizes ->ci_raw_key */
 	kmem_cache_free(fscrypt_info_cachep, ci);
 }
 
@@ -238,6 +265,12 @@
 	crypto_free_shash(essiv_hash_tfm);
 }
 
+static int fscrypt_data_encryption_mode(struct inode *inode)
+{
+	return fscrypt_should_be_processed_by_ice(inode) ?
+	FS_ENCRYPTION_MODE_PRIVATE : FS_ENCRYPTION_MODE_AES_256_XTS;
+}
+
 int fscrypt_get_encryption_info(struct inode *inode)
 {
 	struct fscrypt_info *crypt_info;
@@ -245,8 +278,8 @@
 	struct crypto_skcipher *ctfm;
 	const char *cipher_str;
 	int keysize;
-	u8 *raw_key = NULL;
 	int res;
+	int fname = 0;
 
 	if (inode->i_crypt_info)
 		return 0;
@@ -263,7 +296,8 @@
 		/* Fake up a context for an unencrypted directory */
 		memset(&ctx, 0, sizeof(ctx));
 		ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
-		ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
+		ctx.contents_encryption_mode =
+			fscrypt_data_encryption_mode(inode);
 		ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
 		memset(ctx.master_key_descriptor, 0x42, FS_KEY_DESCRIPTOR_SIZE);
 	} else if (res != sizeof(ctx)) {
@@ -288,7 +322,8 @@
 	memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
 				sizeof(crypt_info->ci_master_key));
 
-	res = determine_cipher_type(crypt_info, inode, &cipher_str, &keysize);
+	res = determine_cipher_type(crypt_info, inode, &cipher_str, &keysize,
+				&fname);
 	if (res)
 		goto out;
 
@@ -297,14 +332,11 @@
 	 * crypto API as part of key derivation.
 	 */
 	res = -ENOMEM;
-	raw_key = kmalloc(FS_MAX_KEY_SIZE, GFP_NOFS);
-	if (!raw_key)
-		goto out;
 
-	res = validate_user_key(crypt_info, &ctx, raw_key, FS_KEY_DESC_PREFIX,
+	res = validate_user_key(crypt_info, &ctx, FS_KEY_DESC_PREFIX,
 				keysize);
 	if (res && inode->i_sb->s_cop->key_prefix) {
-		int res2 = validate_user_key(crypt_info, &ctx, raw_key,
+		int res2 = validate_user_key(crypt_info, &ctx,
 					     inode->i_sb->s_cop->key_prefix,
 					     keysize);
 		if (res2) {
@@ -312,9 +344,23 @@
 				res = -ENOKEY;
 			goto out;
 		}
+		res = 0;
 	} else if (res) {
 		goto out;
 	}
+
+	if (is_private_data_mode(crypt_info)) {
+		if (!fscrypt_is_ice_capable(inode->i_sb)) {
+			pr_warn("%s: ICE support not available\n",
+					__func__);
+			res = -EINVAL;
+			goto out;
+		}
+		/* Let's encrypt/decrypt by ICE */
+		goto do_ice;
+	}
+
+
 	ctfm = crypto_alloc_skcipher(cipher_str, 0, 0);
 	if (!ctfm || IS_ERR(ctfm)) {
 		res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
@@ -329,43 +375,36 @@
 	 * if the provided key is longer than keysize, we use the first
 	 * keysize bytes of the derived key only
 	 */
-	res = crypto_skcipher_setkey(ctfm, raw_key, keysize);
+	res = crypto_skcipher_setkey(ctfm, crypt_info->ci_raw_key, keysize);
 	if (res)
 		goto out;
 
 	if (S_ISREG(inode->i_mode) &&
 	    crypt_info->ci_data_mode == FS_ENCRYPTION_MODE_AES_128_CBC) {
-		res = init_essiv_generator(crypt_info, raw_key, keysize);
+		res = init_essiv_generator(crypt_info, crypt_info->ci_raw_key,
+						keysize);
 		if (res) {
 			pr_debug("%s: error %d (inode %lu) allocating essiv tfm\n",
 				 __func__, res, inode->i_ino);
 			goto out;
 		}
 	}
+	memzero_explicit(crypt_info->ci_raw_key,
+		sizeof(crypt_info->ci_raw_key));
+do_ice:
 	if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL)
 		crypt_info = NULL;
 out:
 	if (res == -ENOKEY)
 		res = 0;
 	put_crypt_info(crypt_info);
-	kzfree(raw_key);
 	return res;
 }
 EXPORT_SYMBOL(fscrypt_get_encryption_info);
 
-void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci)
+void fscrypt_put_encryption_info(struct inode *inode)
 {
-	struct fscrypt_info *prev;
-
-	if (ci == NULL)
-		ci = ACCESS_ONCE(inode->i_crypt_info);
-	if (ci == NULL)
-		return;
-
-	prev = cmpxchg(&inode->i_crypt_info, ci, NULL);
-	if (prev != ci)
-		return;
-
-	put_crypt_info(ci);
+	put_crypt_info(inode->i_crypt_info);
+	inode->i_crypt_info = NULL;
 }
 EXPORT_SYMBOL(fscrypt_put_encryption_info);
diff --git a/fs/dcache.c b/fs/dcache.c
index 210a135..5ff9b41 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -461,9 +461,11 @@
  * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
  * reason (NFS timeouts or autofs deletes).
  *
- * __d_drop requires dentry->d_lock.
+ * __d_drop requires dentry->d_lock
+ * ___d_drop doesn't mark dentry as "unhashed"
+ *   (dentry->d_hash.pprev will be LIST_POISON2, not NULL).
  */
-void __d_drop(struct dentry *dentry)
+static void ___d_drop(struct dentry *dentry)
 {
 	if (!d_unhashed(dentry)) {
 		struct hlist_bl_head *b;
@@ -479,12 +481,17 @@
 
 		hlist_bl_lock(b);
 		__hlist_bl_del(&dentry->d_hash);
-		dentry->d_hash.pprev = NULL;
 		hlist_bl_unlock(b);
 		/* After this call, in-progress rcu-walk path lookup will fail. */
 		write_seqcount_invalidate(&dentry->d_seq);
 	}
 }
+
+void __d_drop(struct dentry *dentry)
+{
+	___d_drop(dentry);
+	dentry->d_hash.pprev = NULL;
+}
 EXPORT_SYMBOL(__d_drop);
 
 void d_drop(struct dentry *dentry)
@@ -1852,6 +1859,28 @@
 }
 EXPORT_SYMBOL(d_instantiate);
 
+/*
+ * This should be equivalent to d_instantiate() + unlock_new_inode(),
+ * with lockdep-related part of unlock_new_inode() done before
+ * anything else.  Use that instead of open-coding d_instantiate()/
+ * unlock_new_inode() combinations.
+ */
+void d_instantiate_new(struct dentry *entry, struct inode *inode)
+{
+	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
+	BUG_ON(!inode);
+	lockdep_annotate_inode_mutex_key(inode);
+	security_d_instantiate(entry, inode);
+	spin_lock(&inode->i_lock);
+	__d_instantiate(entry, inode);
+	WARN_ON(!(inode->i_state & I_NEW));
+	inode->i_state &= ~I_NEW;
+	smp_mb();
+	wake_up_bit(&inode->i_state, __I_NEW);
+	spin_unlock(&inode->i_lock);
+}
+EXPORT_SYMBOL(d_instantiate_new);
+
 /**
  * d_instantiate_no_diralias - instantiate a non-aliased dentry
  * @entry: dentry to complete
@@ -2378,7 +2407,7 @@
 static void __d_rehash(struct dentry *entry)
 {
 	struct hlist_bl_head *b = d_hash(entry->d_name.hash);
-	BUG_ON(!d_unhashed(entry));
+
 	hlist_bl_lock(b);
 	hlist_bl_add_head_rcu(&entry->d_hash, b);
 	hlist_bl_unlock(b);
@@ -2445,7 +2474,7 @@
 
 retry:
 	rcu_read_lock();
-	seq = smp_load_acquire(&parent->d_inode->i_dir_seq) & ~1;
+	seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
 	r_seq = read_seqbegin(&rename_lock);
 	dentry = __d_lookup_rcu(parent, name, &d_seq);
 	if (unlikely(dentry)) {
@@ -2466,8 +2495,14 @@
 		rcu_read_unlock();
 		goto retry;
 	}
+
+	if (unlikely(seq & 1)) {
+		rcu_read_unlock();
+		goto retry;
+	}
+
 	hlist_bl_lock(b);
-	if (unlikely(parent->d_inode->i_dir_seq != seq)) {
+	if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) {
 		hlist_bl_unlock(b);
 		rcu_read_unlock();
 		goto retry;
@@ -2815,9 +2850,9 @@
 	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
 
 	/* unhash both */
-	/* __d_drop does write_seqcount_barrier, but they're OK to nest. */
-	__d_drop(dentry);
-	__d_drop(target);
+	/* ___d_drop does write_seqcount_barrier, but they're OK to nest. */
+	___d_drop(dentry);
+	___d_drop(target);
 
 	/* Switch the names.. */
 	if (exchange)
@@ -2829,6 +2864,8 @@
 	__d_rehash(dentry);
 	if (exchange)
 		__d_rehash(target);
+	else
+		target->d_hash.pprev = NULL;
 
 	/* ... and switch them in the tree */
 	if (IS_ROOT(dentry)) {
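
d_instantiate_new() above is the helper the btrfs hunks earlier in this series convert to; it replaces the open-coded unlock_new_inode() + d_instantiate() pairs. A minimal sketch of the create-path usage; the helper name is illustrative.

#include <linux/dcache.h>
#include <linux/fs.h>

/* Illustrative ->create()/->mkdir() tail: publish a freshly built inode. */
static void example_finish_create(struct dentry *dentry, struct inode *inode)
{
	/*
	 * inode came from new_inode() and still carries I_NEW; the helper
	 * attaches it to the dentry and clears I_NEW in one place, closing
	 * the nfsd race the open-coded sequence used to have.
	 */
	d_instantiate_new(dentry, inode);
}
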
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 354e2ab..0536072 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -22,7 +22,6 @@
 #include <linux/slab.h>
 #include <linux/atomic.h>
 #include <linux/device.h>
-#include <linux/srcu.h>
 #include <asm/poll.h>
 
 #include "internal.h"
@@ -48,66 +47,108 @@
 	.llseek =	noop_llseek,
 };
 
+#define F_DENTRY(filp) ((filp)->f_path.dentry)
+
+const struct file_operations *debugfs_real_fops(const struct file *filp)
+{
+	struct debugfs_fsdata *fsd = F_DENTRY(filp)->d_fsdata;
+
+	if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT) {
+		/*
+		 * Urgh, we've been called w/o a protecting
+		 * debugfs_file_get().
+		 */
+		WARN_ON(1);
+		return NULL;
+	}
+
+	return fsd->real_fops;
+}
+EXPORT_SYMBOL_GPL(debugfs_real_fops);
+
 /**
- * debugfs_use_file_start - mark the beginning of file data access
+ * debugfs_file_get - mark the beginning of file data access
  * @dentry: the dentry object whose data is being accessed.
- * @srcu_idx: a pointer to some memory to store a SRCU index in.
  *
- * Up to a matching call to debugfs_use_file_finish(), any
- * successive call into the file removing functions debugfs_remove()
- * and debugfs_remove_recursive() will block. Since associated private
+ * Up to a matching call to debugfs_file_put(), any successive call
+ * into the file removing functions debugfs_remove() and
+ * debugfs_remove_recursive() will block. Since associated private
  * file data may only get freed after a successful return of any of
  * the removal functions, you may safely access it after a successful
- * call to debugfs_use_file_start() without worrying about
- * lifetime issues.
+ * call to debugfs_file_get() without worrying about lifetime issues.
  *
  * If -%EIO is returned, the file has already been removed and thus,
  * it is not safe to access any of its data. If, on the other hand,
  * it is allowed to access the file data, zero is returned.
- *
- * Regardless of the return code, any call to
- * debugfs_use_file_start() must be followed by a matching call
- * to debugfs_use_file_finish().
  */
-int debugfs_use_file_start(const struct dentry *dentry, int *srcu_idx)
-	__acquires(&debugfs_srcu)
+int debugfs_file_get(struct dentry *dentry)
 {
-	*srcu_idx = srcu_read_lock(&debugfs_srcu);
-	barrier();
+	struct debugfs_fsdata *fsd;
+	void *d_fsd;
+
+	d_fsd = READ_ONCE(dentry->d_fsdata);
+	if (!((unsigned long)d_fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)) {
+		fsd = d_fsd;
+	} else {
+		fsd = kmalloc(sizeof(*fsd), GFP_KERNEL);
+		if (!fsd)
+			return -ENOMEM;
+
+		fsd->real_fops = (void *)((unsigned long)d_fsd &
+					~DEBUGFS_FSDATA_IS_REAL_FOPS_BIT);
+		refcount_set(&fsd->active_users, 1);
+		init_completion(&fsd->active_users_drained);
+		if (cmpxchg(&dentry->d_fsdata, d_fsd, fsd) != d_fsd) {
+			kfree(fsd);
+			fsd = READ_ONCE(dentry->d_fsdata);
+		}
+	}
+
+	/*
+	 * In case of a successful cmpxchg() above, this check is
+	 * strictly necessary and must follow it, see the comment in
+	 * __debugfs_remove_file().
+	 * OTOH, if the cmpxchg() hasn't been executed or wasn't
+	 * successful, this serves the purpose of not starving
+	 * removers.
+	 */
 	if (d_unlinked(dentry))
 		return -EIO;
+
+	if (!refcount_inc_not_zero(&fsd->active_users))
+		return -EIO;
+
 	return 0;
 }
-EXPORT_SYMBOL_GPL(debugfs_use_file_start);
+EXPORT_SYMBOL_GPL(debugfs_file_get);
 
 /**
- * debugfs_use_file_finish - mark the end of file data access
- * @srcu_idx: the SRCU index "created" by a former call to
- *            debugfs_use_file_start().
+ * debugfs_file_put - mark the end of file data access
+ * @dentry: the dentry object formerly passed to
+ *          debugfs_file_get().
  *
  * Allow any ongoing concurrent call into debugfs_remove() or
  * debugfs_remove_recursive() blocked by a former call to
- * debugfs_use_file_start() to proceed and return to its caller.
+ * debugfs_file_get() to proceed and return to its caller.
  */
-void debugfs_use_file_finish(int srcu_idx) __releases(&debugfs_srcu)
+void debugfs_file_put(struct dentry *dentry)
 {
-	srcu_read_unlock(&debugfs_srcu, srcu_idx);
-}
-EXPORT_SYMBOL_GPL(debugfs_use_file_finish);
+	struct debugfs_fsdata *fsd = READ_ONCE(dentry->d_fsdata);
 
-#define F_DENTRY(filp) ((filp)->f_path.dentry)
+	if (refcount_dec_and_test(&fsd->active_users))
+		complete(&fsd->active_users_drained);
+}
+EXPORT_SYMBOL_GPL(debugfs_file_put);
 
 static int open_proxy_open(struct inode *inode, struct file *filp)
 {
-	const struct dentry *dentry = F_DENTRY(filp);
+	struct dentry *dentry = F_DENTRY(filp);
 	const struct file_operations *real_fops = NULL;
-	int srcu_idx, r;
+	int r;
 
-	r = debugfs_use_file_start(dentry, &srcu_idx);
-	if (r) {
-		r = -ENOENT;
-		goto out;
-	}
+	r = debugfs_file_get(dentry);
+	if (r)
+		return r == -EIO ? -ENOENT : r;
 
 	real_fops = debugfs_real_fops(filp);
 	real_fops = fops_get(real_fops);
@@ -124,7 +165,7 @@
 		r = real_fops->open(inode, filp);
 
 out:
-	debugfs_use_file_finish(srcu_idx);
+	debugfs_file_put(dentry);
 	return r;
 }
 
@@ -138,16 +179,16 @@
 #define FULL_PROXY_FUNC(name, ret_type, filp, proto, args)		\
 static ret_type full_proxy_ ## name(proto)				\
 {									\
-	const struct dentry *dentry = F_DENTRY(filp);			\
-	const struct file_operations *real_fops =			\
-		debugfs_real_fops(filp);				\
-	int srcu_idx;							\
+	struct dentry *dentry = F_DENTRY(filp);			\
+	const struct file_operations *real_fops;			\
 	ret_type r;							\
 									\
-	r = debugfs_use_file_start(dentry, &srcu_idx);			\
-	if (likely(!r))						\
-		r = real_fops->name(args);				\
-	debugfs_use_file_finish(srcu_idx);				\
+	r = debugfs_file_get(dentry);					\
+	if (unlikely(r))						\
+		return r;						\
+	real_fops = debugfs_real_fops(filp);				\
+	r = real_fops->name(args);					\
+	debugfs_file_put(dentry);					\
 	return r;							\
 }
 
@@ -172,18 +213,16 @@
 static unsigned int full_proxy_poll(struct file *filp,
 				struct poll_table_struct *wait)
 {
-	const struct dentry *dentry = F_DENTRY(filp);
-	const struct file_operations *real_fops = debugfs_real_fops(filp);
-	int srcu_idx;
+	struct dentry *dentry = F_DENTRY(filp);
 	unsigned int r = 0;
+	const struct file_operations *real_fops;
 
-	if (debugfs_use_file_start(dentry, &srcu_idx)) {
-		debugfs_use_file_finish(srcu_idx);
+	if (debugfs_file_get(dentry))
 		return POLLHUP;
-	}
 
+	real_fops = debugfs_real_fops(filp);
 	r = real_fops->poll(filp, wait);
-	debugfs_use_file_finish(srcu_idx);
+	debugfs_file_put(dentry);
 	return r;
 }
 
@@ -227,16 +266,14 @@
 
 static int full_proxy_open(struct inode *inode, struct file *filp)
 {
-	const struct dentry *dentry = F_DENTRY(filp);
+	struct dentry *dentry = F_DENTRY(filp);
 	const struct file_operations *real_fops = NULL;
 	struct file_operations *proxy_fops = NULL;
-	int srcu_idx, r;
+	int r;
 
-	r = debugfs_use_file_start(dentry, &srcu_idx);
-	if (r) {
-		r = -ENOENT;
-		goto out;
-	}
+	r = debugfs_file_get(dentry);
+	if (r)
+		return r == -EIO ? -ENOENT : r;
 
 	real_fops = debugfs_real_fops(filp);
 	real_fops = fops_get(real_fops);
@@ -274,7 +311,7 @@
 	kfree(proxy_fops);
 	fops_put(real_fops);
 out:
-	debugfs_use_file_finish(srcu_idx);
+	debugfs_file_put(dentry);
 	return r;
 }
 
@@ -285,13 +322,14 @@
 ssize_t debugfs_attr_read(struct file *file, char __user *buf,
 			size_t len, loff_t *ppos)
 {
+	struct dentry *dentry = F_DENTRY(file);
 	ssize_t ret;
-	int srcu_idx;
 
-	ret = debugfs_use_file_start(F_DENTRY(file), &srcu_idx);
-	if (likely(!ret))
-		ret = simple_attr_read(file, buf, len, ppos);
-	debugfs_use_file_finish(srcu_idx);
+	ret = debugfs_file_get(dentry);
+	if (unlikely(ret))
+		return ret;
+	ret = simple_attr_read(file, buf, len, ppos);
+	debugfs_file_put(dentry);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(debugfs_attr_read);
@@ -299,13 +337,14 @@
 ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
 			 size_t len, loff_t *ppos)
 {
+	struct dentry *dentry = F_DENTRY(file);
 	ssize_t ret;
-	int srcu_idx;
 
-	ret = debugfs_use_file_start(F_DENTRY(file), &srcu_idx);
-	if (likely(!ret))
-		ret = simple_attr_write(file, buf, len, ppos);
-	debugfs_use_file_finish(srcu_idx);
+	ret = debugfs_file_get(dentry);
+	if (unlikely(ret))
+		return ret;
+	ret = simple_attr_write(file, buf, len, ppos);
+	debugfs_file_put(dentry);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(debugfs_attr_write);
@@ -739,14 +778,14 @@
 {
 	char buf[3];
 	bool val;
-	int r, srcu_idx;
+	int r;
+	struct dentry *dentry = F_DENTRY(file);
 
-	r = debugfs_use_file_start(F_DENTRY(file), &srcu_idx);
-	if (likely(!r))
-		val = *(bool *)file->private_data;
-	debugfs_use_file_finish(srcu_idx);
-	if (r)
+	r = debugfs_file_get(dentry);
+	if (unlikely(r))
 		return r;
+	val = *(bool *)file->private_data;
+	debugfs_file_put(dentry);
 
 	if (val)
 		buf[0] = 'Y';
@@ -764,8 +803,9 @@
 	char buf[32];
 	size_t buf_size;
 	bool bv;
-	int r, srcu_idx;
+	int r;
 	bool *val = file->private_data;
+	struct dentry *dentry = F_DENTRY(file);
 
 	buf_size = min(count, (sizeof(buf)-1));
 	if (copy_from_user(buf, user_buf, buf_size))
@@ -773,12 +813,11 @@
 
 	buf[buf_size] = '\0';
 	if (strtobool(buf, &bv) == 0) {
-		r = debugfs_use_file_start(F_DENTRY(file), &srcu_idx);
-		if (likely(!r))
-			*val = bv;
-		debugfs_use_file_finish(srcu_idx);
-		if (r)
+		r = debugfs_file_get(dentry);
+		if (unlikely(r))
 			return r;
+		*val = bv;
+		debugfs_file_put(dentry);
 	}
 
 	return count;
@@ -840,14 +879,15 @@
 			      size_t count, loff_t *ppos)
 {
 	struct debugfs_blob_wrapper *blob = file->private_data;
+	struct dentry *dentry = F_DENTRY(file);
 	ssize_t r;
-	int srcu_idx;
 
-	r = debugfs_use_file_start(F_DENTRY(file), &srcu_idx);
-	if (likely(!r))
-		r = simple_read_from_buffer(user_buf, count, ppos, blob->data,
-					blob->size);
-	debugfs_use_file_finish(srcu_idx);
+	r = debugfs_file_get(dentry);
+	if (unlikely(r))
+		return r;
+	r = simple_read_from_buffer(user_buf, count, ppos, blob->data,
+				blob->size);
+	debugfs_file_put(dentry);
 	return r;
 }
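
A minimal usage sketch of the refcount-based protection added above, assuming a hypothetical driver-defined read handler (my_read and my_buf are made-up names); the proxy wrappers in this file follow the same get/check/put pattern:

static char my_buf[] = "hello from debugfs\n";

static ssize_t my_read(struct file *file, char __user *ubuf,
		       size_t len, loff_t *ppos)
{
	struct dentry *dentry = file->f_path.dentry;
	ssize_t ret;

	ret = debugfs_file_get(dentry);	/* fails once removal has started */
	if (unlikely(ret))
		return ret;
	ret = simple_read_from_buffer(ubuf, len, ppos, my_buf,
				      sizeof(my_buf) - 1);
	debugfs_file_put(dentry);	/* last put lets debugfs_remove() proceed */
	return ret;
}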
 
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 3d7de9f..f7f672d 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -27,14 +27,11 @@
 #include <linux/parser.h>
 #include <linux/magic.h>
 #include <linux/slab.h>
-#include <linux/srcu.h>
 
 #include "internal.h"
 
 #define DEBUGFS_DEFAULT_MODE	0700
 
-DEFINE_SRCU(debugfs_srcu);
-
 static struct vfsmount *debugfs_mount;
 static int debugfs_mount_count;
 static bool debugfs_registered;
@@ -185,6 +182,14 @@
 	.evict_inode	= debugfs_evict_inode,
 };
 
+static void debugfs_release_dentry(struct dentry *dentry)
+{
+	void *fsd = dentry->d_fsdata;
+
+	if (!((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT))
+		kfree(dentry->d_fsdata);
+}
+
 static struct vfsmount *debugfs_automount(struct path *path)
 {
 	debugfs_automount_t f;
@@ -194,6 +199,7 @@
 
 static const struct dentry_operations debugfs_dops = {
 	.d_delete = always_delete_dentry,
+	.d_release = debugfs_release_dentry,
 	.d_automount = debugfs_automount,
 };
 
@@ -324,7 +330,8 @@
 	inode->i_private = data;
 
 	inode->i_fop = proxy_fops;
-	dentry->d_fsdata = (void *)real_fops;
+	dentry->d_fsdata = (void *)((unsigned long)real_fops |
+				DEBUGFS_FSDATA_IS_REAL_FOPS_BIT);
 
 	d_instantiate(dentry, inode);
 	fsnotify_create(d_inode(dentry->d_parent), dentry);
@@ -581,18 +588,43 @@
 }
 EXPORT_SYMBOL_GPL(debugfs_create_symlink);
 
+static void __debugfs_remove_file(struct dentry *dentry, struct dentry *parent)
+{
+	struct debugfs_fsdata *fsd;
+
+	simple_unlink(d_inode(parent), dentry);
+	d_delete(dentry);
+
+	/*
+	 * Paired with the closing smp_mb() implied by a successful
+	 * cmpxchg() in debugfs_file_get(): either
+	 * debugfs_file_get() must see a dead dentry or we must see a
+	 * debugfs_fsdata instance at ->d_fsdata here (or both).
+	 */
+	smp_mb();
+	fsd = READ_ONCE(dentry->d_fsdata);
+	if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)
+		return;
+	if (!refcount_dec_and_test(&fsd->active_users))
+		wait_for_completion(&fsd->active_users_drained);
+}
+
 static int __debugfs_remove(struct dentry *dentry, struct dentry *parent)
 {
 	int ret = 0;
 
 	if (simple_positive(dentry)) {
 		dget(dentry);
-		if (d_is_dir(dentry))
-			ret = simple_rmdir(d_inode(parent), dentry);
-		else
-			simple_unlink(d_inode(parent), dentry);
-		if (!ret)
-			d_delete(dentry);
+		if (!d_is_reg(dentry)) {
+			if (d_is_dir(dentry))
+				ret = simple_rmdir(d_inode(parent), dentry);
+			else
+				simple_unlink(d_inode(parent), dentry);
+			if (!ret)
+				d_delete(dentry);
+		} else {
+			__debugfs_remove_file(dentry, parent);
+		}
 		dput(dentry);
 	}
 	return ret;
@@ -626,8 +658,6 @@
 	inode_unlock(d_inode(parent));
 	if (!ret)
 		simple_release_fs(&debugfs_mount, &debugfs_mount_count);
-
-	synchronize_srcu(&debugfs_srcu);
 }
 EXPORT_SYMBOL_GPL(debugfs_remove);
 
@@ -701,8 +731,6 @@
 	if (!__debugfs_remove(child, parent))
 		simple_release_fs(&debugfs_mount, &debugfs_mount_count);
 	inode_unlock(d_inode(parent));
-
-	synchronize_srcu(&debugfs_srcu);
 }
 EXPORT_SYMBOL_GPL(debugfs_remove_recursive);
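
The removal path above relies on a small drain protocol; the following standalone sketch shows the same pattern in isolation, assuming <linux/refcount.h> and <linux/completion.h> and using made-up names:

struct drained_ref {
	refcount_t users;		/* refcount_set() to 1 for the owner */
	struct completion drained;	/* init_completion() before use */
};

static void drained_ref_put(struct drained_ref *r)
{
	/* mirrors debugfs_file_put(): the last user wakes the remover */
	if (refcount_dec_and_test(&r->users))
		complete(&r->drained);
}

static void drained_ref_kill(struct drained_ref *r)
{
	/* mirrors __debugfs_remove_file(): drop the owner reference and,
	 * if users are still active, sleep until the last put fires */
	if (!refcount_dec_and_test(&r->users))
		wait_for_completion(&r->drained);
}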
 
diff --git a/fs/debugfs/internal.h b/fs/debugfs/internal.h
index b3e8443..49ba8dd 100644
--- a/fs/debugfs/internal.h
+++ b/fs/debugfs/internal.h
@@ -12,6 +12,8 @@
 #ifndef _DEBUGFS_INTERNAL_H_
 #define _DEBUGFS_INTERNAL_H_
 
+#include <linux/refcount.h>
+
 struct file_operations;
 
 /* declared over in file.c */
@@ -19,4 +21,18 @@
 extern const struct file_operations debugfs_open_proxy_file_operations;
 extern const struct file_operations debugfs_full_proxy_file_operations;
 
+struct debugfs_fsdata {
+	const struct file_operations *real_fops;
+	refcount_t active_users;
+	struct completion active_users_drained;
+};
+
+/*
+ * A dentry's ->d_fsdata either points to the real fops or to a
+ * dynamically allocated debugfs_fsdata instance.
+ * In order to distinguish between these two cases, a real fops
+ * pointer gets its lowest bit set.
+ */
+#define DEBUGFS_FSDATA_IS_REAL_FOPS_BIT BIT(0)
+
 #endif /* _DEBUGFS_INTERNAL_H_ */
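
A hedged sketch of how the low-bit tag described above distinguishes the two ->d_fsdata encodings; the helper name is hypothetical and not part of the patch:

static const struct file_operations *fsdata_to_real_fops(void *d_fsdata)
{
	unsigned long v = (unsigned long)d_fsdata;

	if (v & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)
		/* tagged: d_fsdata is the raw fops pointer with bit 0 set */
		return (const struct file_operations *)
			(v & ~(unsigned long)DEBUGFS_FSDATA_IS_REAL_FOPS_BIT);

	/* untagged: d_fsdata points at a struct debugfs_fsdata */
	return ((struct debugfs_fsdata *)d_fsdata)->real_fops;
}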
diff --git a/fs/direct-io.c b/fs/direct-io.c
index c6220a2..bf03a92 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -411,6 +411,7 @@
 	if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty)
 		bio_set_pages_dirty(bio);
 
+	bio->bi_dio_inode = dio->inode;
 	dio->bio_bdev = bio->bi_bdev;
 
 	if (sdio->submit_io) {
@@ -424,6 +425,18 @@
 	sdio->logical_offset_in_bio = 0;
 }
 
+struct inode *dio_bio_get_inode(struct bio *bio)
+{
+	struct inode *inode = NULL;
+
+	if (bio == NULL)
+		return NULL;
+
+	inode = bio->bi_dio_inode;
+
+	return inode;
+}
+EXPORT_SYMBOL(dio_bio_get_inode);
 /*
  * Release any resources in case of a failure
  */
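
A minimal, hypothetical caller of the newly exported dio_bio_get_inode() above; my_inspect_bio() is made up and only shows that the owning inode can be recovered from a direct-I/O bio:

static void my_inspect_bio(struct bio *bio)
{
	struct inode *inode = dio_bio_get_inode(bio);

	if (inode)
		pr_debug("dio bio for inode %lu\n", inode->i_ino);
	/* e.g. a crypto layer could key its per-inode lookups off this */
}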
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index cf390dc..5c5ff9f 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -284,8 +284,7 @@
 		iget_failed(ecryptfs_inode);
 		goto out;
 	}
-	unlock_new_inode(ecryptfs_inode);
-	d_instantiate(ecryptfs_dentry, ecryptfs_inode);
+	d_instantiate_new(ecryptfs_dentry, ecryptfs_inode);
 out:
 	return rc;
 }
diff --git a/fs/exec.c b/fs/exec.c
index 3e2de29..d27f5e9 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -306,7 +306,7 @@
 	vma->vm_start = vma->vm_end - PAGE_SIZE;
 	vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-	INIT_LIST_HEAD(&vma->anon_vma_chain);
+	INIT_VMA(vma);
 
 	err = insert_vm_struct(mm, vma);
 	if (err)
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 41b8b44..85449a6 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -1258,21 +1258,11 @@
 
 static void ext2_truncate_blocks(struct inode *inode, loff_t offset)
 {
-	/*
-	 * XXX: it seems like a bug here that we don't allow
-	 * IS_APPEND inode to have blocks-past-i_size trimmed off.
-	 * review and fix this.
-	 *
-	 * Also would be nice to be able to handle IO errors and such,
-	 * but that's probably too much to ask.
-	 */
 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
 	    S_ISLNK(inode->i_mode)))
 		return;
 	if (ext2_inode_is_fast_symlink(inode))
 		return;
-	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
-		return;
 
 	dax_sem_down_write(EXT2_I(inode));
 	__ext2_truncate_blocks(inode, offset);
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 814e405..c8efc5e 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -40,8 +40,7 @@
 {
 	int err = ext2_add_link(dentry, inode);
 	if (!err) {
-		unlock_new_inode(inode);
-		d_instantiate(dentry, inode);
+		d_instantiate_new(dentry, inode);
 		return 0;
 	}
 	inode_dec_link_count(inode);
@@ -268,8 +267,7 @@
 	if (err)
 		goto out_fail;
 
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 out:
 	return err;
 
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index e38039f..e9232a0 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -109,10 +109,16 @@
 	  decrypted pages in the page cache.
 
 config EXT4_FS_ENCRYPTION
-	bool
-	default y
+	bool "Ext4 FS Encryption"
+	default n
 	depends on EXT4_ENCRYPTION
 
+config EXT4_FS_ICE_ENCRYPTION
+	bool "Ext4 Encryption with ICE support"
+	default n
+	depends on EXT4_FS_ENCRYPTION
+	depends on PFK
+
 config EXT4_DEBUG
 	bool "EXT4 debugging support"
 	depends on EXT4_FS
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
index 354103f..b9dfa0d 100644
--- a/fs/ext4/Makefile
+++ b/fs/ext4/Makefile
@@ -1,6 +1,7 @@
 #
 # Makefile for the linux ext4-filesystem routines.
 #
+ccflags-y += -Ifs/crypto
 
 obj-$(CONFIG_EXT4_FS) += ext4.o
 
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index e04ec86..6776f4a 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -242,8 +242,6 @@
 	 */
 	ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
 			     sb->s_blocksize * 8, bh->b_data);
-	ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
-	ext4_group_desc_csum_set(sb, block_group, gdp);
 	return 0;
 }
 
@@ -322,6 +320,7 @@
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	ext4_grpblk_t offset;
 	ext4_grpblk_t next_zero_bit;
+	ext4_grpblk_t max_bit = EXT4_CLUSTERS_PER_GROUP(sb);
 	ext4_fsblk_t blk;
 	ext4_fsblk_t group_first_block;
 
@@ -339,20 +338,25 @@
 	/* check whether block bitmap block number is set */
 	blk = ext4_block_bitmap(sb, desc);
 	offset = blk - group_first_block;
-	if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
+	if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
+	    !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
 		/* bad block bitmap */
 		return blk;
 
 	/* check whether the inode bitmap block number is set */
 	blk = ext4_inode_bitmap(sb, desc);
 	offset = blk - group_first_block;
-	if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
+	if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
+	    !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
 		/* bad block bitmap */
 		return blk;
 
 	/* check whether the inode table block number is set */
 	blk = ext4_inode_table(sb, desc);
 	offset = blk - group_first_block;
+	if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
+	    EXT4_B2C(sbi, offset + sbi->s_itb_per_group) >= max_bit)
+		return blk;
 	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
 			EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group),
 			EXT4_B2C(sbi, offset));
@@ -418,6 +422,7 @@
 ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
 {
 	struct ext4_group_desc *desc;
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct buffer_head *bh;
 	ext4_fsblk_t bitmap_blk;
 	int err;
@@ -426,6 +431,12 @@
 	if (!desc)
 		return ERR_PTR(-EFSCORRUPTED);
 	bitmap_blk = ext4_block_bitmap(sb, desc);
+	if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
+	    (bitmap_blk >= ext4_blocks_count(sbi->s_es))) {
+		ext4_error(sb, "Invalid block bitmap block %llu in "
+			   "block_group %u", bitmap_blk, block_group);
+		return ERR_PTR(-EFSCORRUPTED);
+	}
 	bh = sb_getblk(sb, bitmap_blk);
 	if (unlikely(!bh)) {
 		ext4_error(sb, "Cannot get buffer for block bitmap - "
@@ -447,6 +458,7 @@
 		err = ext4_init_block_bitmap(sb, bh, block_group, desc);
 		set_bitmap_uptodate(bh);
 		set_buffer_uptodate(bh);
+		set_buffer_verified(bh);
 		ext4_unlock_group(sb, block_group);
 		unlock_buffer(bh);
 		if (err) {
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index a8573fa..fb9ae76 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -5358,8 +5358,9 @@
 	stop = le32_to_cpu(extent->ee_block);
 
        /*
-	 * In case of left shift, Don't start shifting extents until we make
-	 * sure the hole is big enough to accommodate the shift.
+	* For left shifts, make sure the hole on the left is big enough to
+	* accommodate the shift.  For right shifts, make sure the last extent
+	* won't be shifted beyond EXT_MAX_BLOCKS.
 	*/
 	if (SHIFT == SHIFT_LEFT) {
 		path = ext4_find_extent(inode, start - 1, &path,
@@ -5379,9 +5380,14 @@
 
 		if ((start == ex_start && shift > ex_start) ||
 		    (shift > start - ex_end)) {
-			ext4_ext_drop_refs(path);
-			kfree(path);
-			return -EINVAL;
+			ret = -EINVAL;
+			goto out;
+		}
+	} else {
+		if (shift > EXT_MAX_BLOCKS -
+		    (stop + ext4_ext_get_actual_len(extent))) {
+			ret = -EINVAL;
+			goto out;
 		}
 	}
 
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 510e664..08fca4a 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -429,7 +429,7 @@
 		int i, num;
 		unsigned long nr_pages;
 
-		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
+		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
 		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
 					  (pgoff_t)num);
 		if (nr_pages == 0)
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 2cec605..1ee26da 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -63,44 +63,6 @@
 		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
 }
 
-/* Initializes an uninitialized inode bitmap */
-static int ext4_init_inode_bitmap(struct super_block *sb,
-				       struct buffer_head *bh,
-				       ext4_group_t block_group,
-				       struct ext4_group_desc *gdp)
-{
-	struct ext4_group_info *grp;
-	struct ext4_sb_info *sbi = EXT4_SB(sb);
-	J_ASSERT_BH(bh, buffer_locked(bh));
-
-	/* If checksum is bad mark all blocks and inodes use to prevent
-	 * allocation, essentially implementing a per-group read-only flag. */
-	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
-		grp = ext4_get_group_info(sb, block_group);
-		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
-			percpu_counter_sub(&sbi->s_freeclusters_counter,
-					   grp->bb_free);
-		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
-		if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
-			int count;
-			count = ext4_free_inodes_count(sb, gdp);
-			percpu_counter_sub(&sbi->s_freeinodes_counter,
-					   count);
-		}
-		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
-		return -EFSBADCRC;
-	}
-
-	memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
-	ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
-			bh->b_data);
-	ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
-				   EXT4_INODES_PER_GROUP(sb) / 8);
-	ext4_group_desc_csum_set(sb, block_group, gdp);
-
-	return 0;
-}
-
 void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
 {
 	if (uptodate) {
@@ -157,6 +119,7 @@
 ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
 {
 	struct ext4_group_desc *desc;
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct buffer_head *bh = NULL;
 	ext4_fsblk_t bitmap_blk;
 	int err;
@@ -166,6 +129,12 @@
 		return ERR_PTR(-EFSCORRUPTED);
 
 	bitmap_blk = ext4_inode_bitmap(sb, desc);
+	if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
+	    (bitmap_blk >= ext4_blocks_count(sbi->s_es))) {
+		ext4_error(sb, "Invalid inode bitmap blk %llu in "
+			   "block_group %u", bitmap_blk, block_group);
+		return ERR_PTR(-EFSCORRUPTED);
+	}
 	bh = sb_getblk(sb, bitmap_blk);
 	if (unlikely(!bh)) {
 		ext4_error(sb, "Cannot read inode bitmap - "
@@ -184,17 +153,14 @@
 
 	ext4_lock_group(sb, block_group);
 	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
-		err = ext4_init_inode_bitmap(sb, bh, block_group, desc);
+		memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
+		ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
+				     sb->s_blocksize * 8, bh->b_data);
 		set_bitmap_uptodate(bh);
 		set_buffer_uptodate(bh);
 		set_buffer_verified(bh);
 		ext4_unlock_group(sb, block_group);
 		unlock_buffer(bh);
-		if (err) {
-			ext4_error(sb, "Failed to init inode bitmap for group "
-				   "%u: %d", block_group, err);
-			goto out;
-		}
 		return bh;
 	}
 	ext4_unlock_group(sb, block_group);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 21c0670..ecee29a 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -42,6 +42,7 @@
 #include "xattr.h"
 #include "acl.h"
 #include "truncate.h"
+//#include "ext4_ice.h"
 
 #include <trace/events/ext4.h>
 #include <trace/events/android_fs.h>
@@ -1152,7 +1153,8 @@
 			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
 			*wait_bh++ = bh;
 			decrypt = ext4_encrypted_inode(inode) &&
-				S_ISREG(inode->i_mode);
+				S_ISREG(inode->i_mode) &&
+				!fscrypt_using_hardware_encryption(inode);
 		}
 	}
 	/*
@@ -3421,7 +3423,6 @@
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
-	struct ext4_inode_info *ei = EXT4_I(inode);
 	ssize_t ret;
 	loff_t offset = iocb->ki_pos;
 	size_t count = iov_iter_count(iter);
@@ -3445,7 +3446,7 @@
 			goto out;
 		}
 		orphan = 1;
-		ei->i_disksize = inode->i_size;
+		ext4_update_i_disksize(inode, inode->i_size);
 		ext4_journal_stop(handle);
 	}
 
@@ -3510,8 +3511,9 @@
 		get_block_func = ext4_dio_get_block_unwritten_async;
 		dio_flags = DIO_LOCKING;
 	}
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
-	BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
+#if defined(CONFIG_EXT4_FS_ENCRYPTION)
+	WARN_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)
+		&& !fscrypt_using_hardware_encryption(inode));
 #endif
 	if (IS_DAX(inode)) {
 		ret = dax_do_io(iocb, inode, iter, get_block_func,
@@ -3573,7 +3575,7 @@
 		if (ret > 0) {
 			loff_t end = offset + ret;
 			if (end > inode->i_size) {
-				ei->i_disksize = end;
+				ext4_update_i_disksize(inode, end);
 				i_size_write(inode, end);
 				/*
 				 * We're going to return a positive `ret'
@@ -3632,8 +3634,9 @@
 	ssize_t ret;
 	int rw = iov_iter_rw(iter);
 
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
-	if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode))
+#if defined(CONFIG_EXT4_FS_ENCRYPTION)
+	if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)
+		&& !fscrypt_using_hardware_encryption(inode))
 		return 0;
 #endif
 
@@ -3829,7 +3832,8 @@
 		if (!buffer_uptodate(bh))
 			goto unlock;
 		if (S_ISREG(inode->i_mode) &&
-		    ext4_encrypted_inode(inode)) {
+		    ext4_encrypted_inode(inode) &&
+		    !fscrypt_using_hardware_encryption(inode)) {
 			/* We expect the key to be set. */
 			BUG_ON(!fscrypt_has_encryption_key(inode));
 			BUG_ON(blocksize != PAGE_SIZE);
@@ -4561,6 +4565,12 @@
 		goto bad_inode;
 	raw_inode = ext4_raw_inode(&iloc);
 
+	if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) {
+		EXT4_ERROR_INODE(inode, "root inode unallocated");
+		ret = -EFSCORRUPTED;
+		goto bad_inode;
+	}
+
 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
 		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
 		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 15ca15c..3585e26 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -774,8 +774,8 @@
 		return ext4_ext_precache(inode);
 
 	case EXT4_IOC_SET_ENCRYPTION_POLICY:
-		if (!ext4_has_feature_encrypt(sb))
-			return -EOPNOTSUPP;
+//		if (!ext4_has_feature_encrypt(sb))
+//			return -EOPNOTSUPP;
 		return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
 
 	case EXT4_IOC_GET_ENCRYPTION_PWSALT: {
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index e5e99a7..4beca06 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3878,7 +3878,8 @@
 
 	err = ext4_mb_load_buddy(sb, group, &e4b);
 	if (err) {
-		ext4_error(sb, "Error loading buddy information for %u", group);
+		ext4_warning(sb, "Error %d loading buddy information for %u",
+			     err, group);
 		put_bh(bitmap_bh);
 		return 0;
 	}
@@ -4035,10 +4036,11 @@
 		BUG_ON(pa->pa_type != MB_INODE_PA);
 		group = ext4_get_group_number(sb, pa->pa_pstart);
 
-		err = ext4_mb_load_buddy(sb, group, &e4b);
+		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
+					     GFP_NOFS|__GFP_NOFAIL);
 		if (err) {
-			ext4_error(sb, "Error loading buddy information for %u",
-					group);
+			ext4_error(sb, "Error %d loading buddy information for %u",
+				   err, group);
 			continue;
 		}
 
@@ -4294,11 +4296,14 @@
 	spin_unlock(&lg->lg_prealloc_lock);
 
 	list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
+		int err;
 
 		group = ext4_get_group_number(sb, pa->pa_pstart);
-		if (ext4_mb_load_buddy(sb, group, &e4b)) {
-			ext4_error(sb, "Error loading buddy information for %u",
-					group);
+		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
+					     GFP_NOFS|__GFP_NOFAIL);
+		if (err) {
+			ext4_error(sb, "Error %d loading buddy information for %u",
+				   err, group);
 			continue;
 		}
 		ext4_lock_group(sb, group);
@@ -5122,8 +5127,8 @@
 
 	ret = ext4_mb_load_buddy(sb, group, &e4b);
 	if (ret) {
-		ext4_error(sb, "Error in loading buddy "
-				"information for %u", group);
+		ext4_warning(sb, "Error %d loading buddy information for %u",
+			     ret, group);
 		return ret;
 	}
 	bitmap = e4b.bd_bitmap;
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 8338cd4..d536e0a 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2387,8 +2387,7 @@
 	int err = ext4_add_entry(handle, dentry, inode);
 	if (!err) {
 		ext4_mark_inode_dirty(handle, inode);
-		unlock_new_inode(inode);
-		d_instantiate(dentry, inode);
+		d_instantiate_new(dentry, inode);
 		return 0;
 	}
 	drop_nlink(inode);
@@ -2627,8 +2626,7 @@
 	err = ext4_mark_inode_dirty(handle, dir);
 	if (err)
 		goto out_clear_inode;
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 	if (IS_DIRSYNC(dir))
 		ext4_handle_sync(handle);
 
@@ -3027,36 +3025,16 @@
 	struct inode *inode;
 	int err, len = strlen(symname);
 	int credits;
-	bool encryption_required;
 	struct fscrypt_str disk_link;
-	struct fscrypt_symlink_data *sd = NULL;
 
-	disk_link.len = len + 1;
-	disk_link.name = (char *) symname;
-
-	encryption_required = (ext4_encrypted_inode(dir) ||
-			       DUMMY_ENCRYPTION_ENABLED(EXT4_SB(dir->i_sb)));
-	if (encryption_required) {
-		err = fscrypt_get_encryption_info(dir);
-		if (err)
-			return err;
-		if (!fscrypt_has_encryption_key(dir))
-			return -ENOKEY;
-		disk_link.len = (fscrypt_fname_encrypted_size(dir, len) +
-				 sizeof(struct fscrypt_symlink_data));
-		sd = kzalloc(disk_link.len, GFP_KERNEL);
-		if (!sd)
-			return -ENOMEM;
-	}
-
-	if (disk_link.len > dir->i_sb->s_blocksize) {
-		err = -ENAMETOOLONG;
-		goto err_free_sd;
-	}
+	err = fscrypt_prepare_symlink(dir, symname, len, dir->i_sb->s_blocksize,
+				      &disk_link);
+	if (err)
+		return err;
 
 	err = dquot_initialize(dir);
 	if (err)
-		goto err_free_sd;
+		return err;
 
 	if ((disk_link.len > EXT4_N_BLOCKS * 4)) {
 		/*
@@ -3085,27 +3063,18 @@
 	if (IS_ERR(inode)) {
 		if (handle)
 			ext4_journal_stop(handle);
-		err = PTR_ERR(inode);
-		goto err_free_sd;
+		return PTR_ERR(inode);
 	}
 
-	if (encryption_required) {
-		struct qstr istr;
-		struct fscrypt_str ostr =
-			FSTR_INIT(sd->encrypted_path, disk_link.len);
-
-		istr.name = (const unsigned char *) symname;
-		istr.len = len;
-		err = fscrypt_fname_usr_to_disk(inode, &istr, &ostr);
+	if (IS_ENCRYPTED(inode)) {
+		err = fscrypt_encrypt_symlink(inode, symname, len, &disk_link);
 		if (err)
 			goto err_drop_inode;
-		sd->len = cpu_to_le16(ostr.len);
-		disk_link.name = (char *) sd;
 		inode->i_op = &ext4_encrypted_symlink_inode_operations;
 	}
 
 	if ((disk_link.len > EXT4_N_BLOCKS * 4)) {
-		if (!encryption_required)
+		if (!IS_ENCRYPTED(inode))
 			inode->i_op = &ext4_symlink_inode_operations;
 		inode_nohighmem(inode);
 		ext4_set_aops(inode);
@@ -3147,7 +3116,7 @@
 	} else {
 		/* clear the extent format for fast symlink */
 		ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
-		if (!encryption_required) {
+		if (!IS_ENCRYPTED(inode)) {
 			inode->i_op = &ext4_fast_symlink_inode_operations;
 			inode->i_link = (char *)&EXT4_I(inode)->i_data;
 		}
@@ -3162,16 +3131,17 @@
 
 	if (handle)
 		ext4_journal_stop(handle);
-	kfree(sd);
-	return err;
+	goto out_free_encrypted_link;
+
 err_drop_inode:
 	if (handle)
 		ext4_journal_stop(handle);
 	clear_nlink(inode);
 	unlock_new_inode(inode);
 	iput(inode);
-err_free_sd:
-	kfree(sd);
+out_free_encrypted_link:
+	if (disk_link.name != (unsigned char *)symname)
+		kfree(disk_link.name);
 	return err;
 }
 
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 0718a86..8d4ec1a 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -28,6 +28,7 @@
 #include "ext4_jbd2.h"
 #include "xattr.h"
 #include "acl.h"
+//#include "ext4_ice.h"
 
 static struct kmem_cache *io_end_cachep;
 
@@ -469,6 +470,7 @@
 		gfp_t gfp_flags = GFP_NOFS;
 
 	retry_encrypt:
+	if (!fscrypt_using_hardware_encryption(inode))
 		data_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0,
 						page->index, gfp_flags);
 		if (IS_ERR(data_page)) {
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index 2531cc1..c39a12d 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -91,7 +91,7 @@
 		if (bio->bi_error) {
 			fscrypt_release_ctx(bio->bi_private);
 		} else {
-			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
+			fscrypt_enqueue_decrypt_bio(bio->bi_private, bio);
 			return;
 		}
 	}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 43ce5fc..23c5716 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1028,9 +1028,7 @@
 		jbd2_free_inode(EXT4_I(inode)->jinode);
 		EXT4_I(inode)->jinode = NULL;
 	}
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
-	fscrypt_put_encryption_info(inode, NULL);
-#endif
+	fscrypt_put_encryption_info(inode);
 }
 
 static struct inode *ext4_nfs_get_inode(struct super_block *sb,
@@ -1182,6 +1180,11 @@
 		EXT4_NAME_LEN;
 }
 
+static inline bool ext4_is_encrypted(struct inode *inode)
+{
+	return ext4_encrypted_inode(inode);
+}
+
 static const struct fscrypt_operations ext4_cryptops = {
 	.key_prefix		= "ext4:",
 	.get_context		= ext4_get_context,
@@ -1189,6 +1192,7 @@
 	.dummy_context		= ext4_dummy_context,
 	.empty_dir		= ext4_empty_dir,
 	.max_namelen		= ext4_max_namelen,
+	.is_encrypted		= ext4_is_encrypted,
 };
 #endif
 
@@ -2272,6 +2276,8 @@
 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
 				 "Block bitmap for group %u overlaps "
 				 "superblock", i);
+			if (!(sb->s_flags & MS_RDONLY))
+				return 0;
 		}
 		if (block_bitmap < first_block || block_bitmap > last_block) {
 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
@@ -2284,6 +2290,8 @@
 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
 				 "Inode bitmap for group %u overlaps "
 				 "superblock", i);
+			if (!(sb->s_flags & MS_RDONLY))
+				return 0;
 		}
 		if (inode_bitmap < first_block || inode_bitmap > last_block) {
 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
@@ -2296,6 +2304,8 @@
 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
 				 "Inode table for group %u overlaps "
 				 "superblock", i);
+			if (!(sb->s_flags & MS_RDONLY))
+				return 0;
 		}
 		if (inode_table < first_block ||
 		    inode_table + sbi->s_itb_per_group - 1 > last_block) {
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index 557b3b0..d4ce3af 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -27,59 +27,28 @@
 					   struct delayed_call *done)
 {
 	struct page *cpage = NULL;
-	char *caddr, *paddr = NULL;
-	struct fscrypt_str cstr, pstr;
-	struct fscrypt_symlink_data *sd;
-	int res;
-	u32 max_size = inode->i_sb->s_blocksize;
+	const void *caddr;
+	unsigned int max_size;
+	const char *paddr;
 
 	if (!dentry)
 		return ERR_PTR(-ECHILD);
 
-	res = fscrypt_get_encryption_info(inode);
-	if (res)
-		return ERR_PTR(res);
-
 	if (ext4_inode_is_fast_symlink(inode)) {
-		caddr = (char *) EXT4_I(inode)->i_data;
+		caddr = EXT4_I(inode)->i_data;
 		max_size = sizeof(EXT4_I(inode)->i_data);
 	} else {
 		cpage = read_mapping_page(inode->i_mapping, 0, NULL);
 		if (IS_ERR(cpage))
 			return ERR_CAST(cpage);
 		caddr = page_address(cpage);
+		max_size = inode->i_sb->s_blocksize;
 	}
 
-	/* Symlink is encrypted */
-	sd = (struct fscrypt_symlink_data *)caddr;
-	cstr.name = sd->encrypted_path;
-	cstr.len  = le16_to_cpu(sd->len);
-	if ((cstr.len + sizeof(struct fscrypt_symlink_data) - 1) > max_size) {
-		/* Symlink data on the disk is corrupted */
-		res = -EFSCORRUPTED;
-		goto errout;
-	}
-
-	res = fscrypt_fname_alloc_buffer(inode, cstr.len, &pstr);
-	if (res)
-		goto errout;
-	paddr = pstr.name;
-
-	res = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr);
-	if (res)
-		goto errout;
-
-	/* Null-terminate the name */
-	paddr[pstr.len] = '\0';
+	paddr = fscrypt_get_symlink(inode, caddr, max_size, done);
 	if (cpage)
 		put_page(cpage);
-	set_delayed_call(done, kfree_link, paddr);
 	return paddr;
-errout:
-	if (cpage)
-		put_page(cpage);
-	kfree(paddr);
-	return ERR_PTR(res);
 }
 
 const struct inode_operations ext4_encrypted_symlink_inode_operations = {
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 701781a..91b2d00 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -68,6 +68,7 @@
 		.old_blkaddr = index,
 		.new_blkaddr = index,
 		.encrypted_page = NULL,
+		.is_meta = is_meta,
 	};
 
 	if (unlikely(!is_meta))
@@ -162,6 +163,7 @@
 		.op_flags = sync ? (REQ_META | REQ_PRIO) : REQ_RAHEAD,
 		.encrypted_page = NULL,
 		.in_list = false,
+		.is_meta = (type != META_POR),
 	};
 	struct blk_plug plug;
 
@@ -383,7 +385,7 @@
 	if (!PageUptodate(page))
 		SetPageUptodate(page);
 	if (!PageDirty(page)) {
-		f2fs_set_page_dirty_nobuffers(page);
+		__set_page_dirty_nobuffers(page);
 		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
 		SetPagePrivate(page);
 		f2fs_trace_pid(page);
@@ -572,13 +574,8 @@
 	struct node_info ni;
 	int err = acquire_orphan_inode(sbi);
 
-	if (err) {
-		set_sbi_flag(sbi, SBI_NEED_FSCK);
-		f2fs_msg(sbi->sb, KERN_WARNING,
-				"%s: orphan failed (ino=%x), run fsck to fix.",
-				__func__, ino);
-		return err;
-	}
+	if (err)
+		goto err_out;
 
 	__add_ino_entry(sbi, ino, 0, ORPHAN_INO);
 
@@ -592,6 +589,11 @@
 		return PTR_ERR(inode);
 	}
 
+	err = dquot_initialize(inode);
+	if (err)
+		goto err_out;
+
 	clear_nlink(inode);
 
 	/* truncate all the data during iput */
@@ -601,14 +603,18 @@
 
 	/* ENOMEM was fully retried in f2fs_evict_inode. */
 	if (ni.blk_addr != NULL_ADDR) {
-		set_sbi_flag(sbi, SBI_NEED_FSCK);
-		f2fs_msg(sbi->sb, KERN_WARNING,
-			"%s: orphan failed (ino=%x) by kernel, retry mount.",
-				__func__, ino);
-		return -EIO;
+		err = -EIO;
+		goto err_out;
 	}
 	__remove_ino_entry(sbi, ino, ORPHAN_INO);
 	return 0;
+
+err_out:
+	set_sbi_flag(sbi, SBI_NEED_FSCK);
+	f2fs_msg(sbi->sb, KERN_WARNING,
+			"%s: orphan failed (ino=%x), run fsck to fix.",
+			__func__, ino);
+	return err;
 }
 
 int recover_orphan_inodes(struct f2fs_sb_info *sbi)
@@ -1139,6 +1145,8 @@
 
 	if (cpc->reason & CP_TRIMMED)
 		__set_ckpt_flags(ckpt, CP_TRIMMED_FLAG);
+	else
+		__clear_ckpt_flags(ckpt, CP_TRIMMED_FLAG);
 
 	if (cpc->reason & CP_UMOUNT)
 		__set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
@@ -1165,6 +1173,39 @@
 	spin_unlock_irqrestore(&sbi->cp_lock, flags);
 }
 
+static void commit_checkpoint(struct f2fs_sb_info *sbi,
+	void *src, block_t blk_addr)
+{
+	struct writeback_control wbc = {
+		.for_reclaim = 0,
+	};
+
+	/*
+	 * pagevec_lookup_tag and lock_page again will take
+	 * some extra time. Therefore, update_meta_pages and
+	 * sync_meta_pages are combined in this function.
+	 */
+	struct page *page = grab_meta_page(sbi, blk_addr);
+	int err;
+
+	memcpy(page_address(page), src, PAGE_SIZE);
+	set_page_dirty(page);
+
+	f2fs_wait_on_page_writeback(page, META, true);
+	f2fs_bug_on(sbi, PageWriteback(page));
+	if (unlikely(!clear_page_dirty_for_io(page)))
+		f2fs_bug_on(sbi, 1);
+
+	/* writeout cp pack 2 page */
+	err = __f2fs_write_meta_page(page, &wbc, FS_CP_META_IO);
+	f2fs_bug_on(sbi, err);
+
+	f2fs_put_page(page, 0);
+
+	/* submit checkpoint (with barrier if NOBARRIER is not set) */
+	f2fs_submit_merged_write(sbi, META_FLUSH);
+}
+
 static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 {
 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
@@ -1267,16 +1308,6 @@
 		}
 	}
 
-	/* need to wait for end_io results */
-	wait_on_all_pages_writeback(sbi);
-	if (unlikely(f2fs_cp_error(sbi)))
-		return -EIO;
-
-	/* flush all device cache */
-	err = f2fs_flush_device_cache(sbi);
-	if (err)
-		return err;
-
 	/* write out checkpoint buffer at block 0 */
 	update_meta_page(sbi, ckpt, start_blk++);
 
@@ -1304,26 +1335,26 @@
 		start_blk += NR_CURSEG_NODE_TYPE;
 	}
 
-	/* writeout checkpoint block */
-	update_meta_page(sbi, ckpt, start_blk);
+	/* update user_block_counts */
+	sbi->last_valid_block_count = sbi->total_valid_block_count;
+	percpu_counter_set(&sbi->alloc_valid_block_count, 0);
 
-	/* wait for previous submitted node/meta pages writeback */
+	/* Here, we have one bio having CP pack except cp pack 2 page */
+	sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
+
+	/* wait for previously submitted meta pages writeback */
 	wait_on_all_pages_writeback(sbi);
 
 	if (unlikely(f2fs_cp_error(sbi)))
 		return -EIO;
 
-	filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LLONG_MAX);
-	filemap_fdatawait_range(META_MAPPING(sbi), 0, LLONG_MAX);
+	/* flush all device cache */
+	err = f2fs_flush_device_cache(sbi);
+	if (err)
+		return err;
 
-	/* update user_block_counts */
-	sbi->last_valid_block_count = sbi->total_valid_block_count;
-	percpu_counter_set(&sbi->alloc_valid_block_count, 0);
-
-	/* Here, we only have one bio having CP pack */
-	sync_meta_pages(sbi, META_FLUSH, LONG_MAX, FS_CP_META_IO);
-
-	/* wait for previous submitted meta pages writeback */
+	/* barrier and flush checkpoint cp pack 2 page if it can */
+	commit_checkpoint(sbi, ckpt, start_blk);
 	wait_on_all_pages_writeback(sbi);
 
 	release_ino_entry(sbi, false);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 6c13ee3..9efe77e 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -19,8 +19,6 @@
 #include <linux/bio.h>
 #include <linux/prefetch.h>
 #include <linux/uio.h>
-#include <linux/mm.h>
-#include <linux/memcontrol.h>
 #include <linux/cleancache.h>
 
 #include "f2fs.h"
@@ -30,6 +28,11 @@
 #include <trace/events/f2fs.h>
 #include <trace/events/android_fs.h>
 
+#define NUM_PREALLOC_POST_READ_CTXS	128
+
+static struct kmem_cache *bio_post_read_ctx_cache;
+static mempool_t *bio_post_read_ctx_pool;
+
 static bool __is_cp_guaranteed(struct page *page)
 {
 	struct address_space *mapping = page->mapping;
@@ -50,11 +53,77 @@
 	return false;
 }
 
-static void f2fs_read_end_io(struct bio *bio)
+/* postprocessing steps for read bios */
+enum bio_post_read_step {
+	STEP_INITIAL = 0,
+	STEP_DECRYPT,
+};
+
+struct bio_post_read_ctx {
+	struct bio *bio;
+	struct work_struct work;
+	unsigned int cur_step;
+	unsigned int enabled_steps;
+};
+
+static void __read_end_io(struct bio *bio)
 {
-	struct bio_vec *bvec;
+	struct page *page;
+	struct bio_vec *bv;
 	int i;
 
+	bio_for_each_segment_all(bv, bio, i) {
+		page = bv->bv_page;
+
+		/* PG_error was set if any post_read step failed */
+		if (bio->bi_error || PageError(page)) {
+			ClearPageUptodate(page);
+			SetPageError(page);
+		} else {
+			SetPageUptodate(page);
+		}
+		unlock_page(page);
+	}
+	if (bio->bi_private)
+		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
+	bio_put(bio);
+}
+
+static void bio_post_read_processing(struct bio_post_read_ctx *ctx);
+
+static void decrypt_work(struct work_struct *work)
+{
+	struct bio_post_read_ctx *ctx =
+		container_of(work, struct bio_post_read_ctx, work);
+
+	fscrypt_decrypt_bio(ctx->bio);
+
+	bio_post_read_processing(ctx);
+}
+
+static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
+{
+	switch (++ctx->cur_step) {
+	case STEP_DECRYPT:
+		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
+			INIT_WORK(&ctx->work, decrypt_work);
+			fscrypt_enqueue_decrypt_work(&ctx->work);
+			return;
+		}
+		ctx->cur_step++;
+		/* fall-through */
+	default:
+		__read_end_io(ctx->bio);
+	}
+}
+
+static bool f2fs_bio_post_read_required(struct bio *bio)
+{
+	return bio->bi_private && !bio->bi_error;
+}
+
+static void f2fs_read_end_io(struct bio *bio)
+{
 #ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO)) {
 		f2fs_show_injection_info(FAULT_IO);
@@ -62,28 +131,15 @@
 	}
 #endif
 
-	if (f2fs_bio_encrypted(bio)) {
-		if (bio->bi_error) {
-			fscrypt_release_ctx(bio->bi_private);
-		} else {
-			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
-			return;
-		}
+	if (f2fs_bio_post_read_required(bio)) {
+		struct bio_post_read_ctx *ctx = bio->bi_private;
+
+		ctx->cur_step = STEP_INITIAL;
+		bio_post_read_processing(ctx);
+		return;
 	}
 
-	bio_for_each_segment_all(bvec, bio, i) {
-		struct page *page = bvec->bv_page;
-
-		if (!bio->bi_error) {
-			if (!PageUptodate(page))
-				SetPageUptodate(page);
-		} else {
-			ClearPageUptodate(page);
-			SetPageError(page);
-		}
-		unlock_page(page);
-	}
-	bio_put(bio);
+	__read_end_io(bio);
 }
 
 static void f2fs_write_end_io(struct bio *bio)
@@ -174,15 +230,22 @@
  */
 static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
 				struct writeback_control *wbc,
-				int npages, bool is_read)
+				int npages, bool is_read,
+				enum page_type type, enum temp_type temp)
 {
 	struct bio *bio;
 
 	bio = f2fs_bio_alloc(sbi, npages, true);
 
 	f2fs_target_device(sbi, blk_addr, bio);
-	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
-	bio->bi_private = is_read ? NULL : sbi;
+	if (is_read) {
+		bio->bi_end_io = f2fs_read_end_io;
+		bio->bi_private = NULL;
+	} else {
+		bio->bi_end_io = f2fs_write_end_io;
+		bio->bi_private = sbi;
+		bio->bi_write_hint = io_type_to_rw_hint(sbi, type, temp);
+	}
 	if (wbc)
 		wbc_init_bio(wbc, bio);
 
@@ -195,13 +258,12 @@
 	if (!is_read_io(bio_op(bio))) {
 		unsigned int start;
 
-		if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
-			current->plug && (type == DATA || type == NODE))
-			blk_finish_plug(current->plug);
-
 		if (type != DATA && type != NODE)
 			goto submit_io;
 
+		if (f2fs_sb_has_blkzoned(sbi->sb) && current->plug)
+			blk_finish_plug(current->plug);
+
 		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
 		start %= F2FS_IO_SIZE(sbi);
 
@@ -375,13 +437,18 @@
 	struct bio *bio;
 	struct page *page = fio->encrypted_page ?
 			fio->encrypted_page : fio->page;
+	struct inode *inode = fio->page->mapping->host;
 
+	verify_block_addr(fio, fio->new_blkaddr);
 	trace_f2fs_submit_page_bio(page, fio);
 	f2fs_trace_ios(fio, 0);
 
 	/* Allocate a new bio */
 	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc,
-				1, is_read_io(fio->op));
+				1, is_read_io(fio->op), fio->type, fio->temp);
+
+	if (f2fs_may_encrypt_bio(inode, fio))
+		fscrypt_set_ice_dun(inode, bio, PG_DUN(inode, fio->page));
 
 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
 		bio_put(bio);
@@ -402,6 +469,9 @@
 	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
 	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
 	struct page *bio_page;
+	struct inode *inode;
+	bool bio_encrypted;
+	u64 dun;
 	int err = 0;
 
 	f2fs_bug_on(sbi, is_read_io(fio->op));
@@ -421,10 +491,13 @@
 	}
 
 	if (fio->old_blkaddr != NEW_ADDR)
-		verify_block_addr(sbi, fio->old_blkaddr);
-	verify_block_addr(sbi, fio->new_blkaddr);
+		verify_block_addr(fio, fio->old_blkaddr);
+	verify_block_addr(fio, fio->new_blkaddr);
 
 	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
+	inode = fio->page->mapping->host;
+	dun = PG_DUN(inode, fio->page);
+	bio_encrypted = f2fs_may_encrypt_bio(inode, fio);
 
 	/* set submitted = true as a return value */
 	fio->submitted = true;
@@ -435,6 +508,11 @@
 	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
 			!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
 		__submit_merged_bio(io);
+
+	/* ICE support */
+	if (!fscrypt_mergeable_bio(io->bio, dun, bio_encrypted))
+		__submit_merged_bio(io);
+
 alloc_new:
 	if (io->bio == NULL) {
 		if ((fio->type == DATA || fio->type == NODE) &&
@@ -444,7 +522,11 @@
 			goto out_fail;
 		}
 		io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc,
-						BIO_MAX_PAGES, false);
+						BIO_MAX_PAGES, false,
+						fio->type, fio->temp);
+		if (bio_encrypted)
+			fscrypt_set_ice_dun(inode, io->bio, dun);
+
 		io->fio = *fio;
 	}
 
@@ -472,29 +554,33 @@
 							 unsigned nr_pages)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct fscrypt_ctx *ctx = NULL;
 	struct bio *bio;
+	struct bio_post_read_ctx *ctx;
+	unsigned int post_read_steps = 0;
 
-	if (f2fs_encrypted_file(inode)) {
-		ctx = fscrypt_get_ctx(inode, GFP_NOFS);
-		if (IS_ERR(ctx))
-			return ERR_CAST(ctx);
+	bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
+	if (!bio)
+		return ERR_PTR(-ENOMEM);
+	f2fs_target_device(sbi, blkaddr, bio);
+	bio->bi_end_io = f2fs_read_end_io;
+	bio_set_op_attrs(bio, REQ_OP_READ, 0);
+
+	if (f2fs_encrypted_file(inode))
+		post_read_steps |= 1 << STEP_DECRYPT;
+	if (post_read_steps) {
+		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
+		if (!ctx) {
+			bio_put(bio);
+			return ERR_PTR(-ENOMEM);
+		}
+		ctx->bio = bio;
+		ctx->enabled_steps = post_read_steps;
+		bio->bi_private = ctx;
 
 		/* wait the page to be moved by cleaning */
 		f2fs_wait_on_block_writeback(sbi, blkaddr);
 	}
 
-	bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
-	if (!bio) {
-		if (ctx)
-			fscrypt_release_ctx(ctx);
-		return ERR_PTR(-ENOMEM);
-	}
-	f2fs_target_device(sbi, blkaddr, bio);
-	bio->bi_end_io = f2fs_read_end_io;
-	bio->bi_private = ctx;
-	bio_set_op_attrs(bio, REQ_OP_READ, 0);
-
 	return bio;
 }
 
@@ -507,6 +593,9 @@
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
+	if (f2fs_may_encrypt_bio(inode, NULL))
+		fscrypt_set_ice_dun(inode, bio, PG_DUN(inode, page));
+
 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
 		bio_put(bio);
 		return -EFAULT;
@@ -831,13 +920,6 @@
 	return 0;
 }
 
-static inline bool __force_buffered_io(struct inode *inode, int rw)
-{
-	return (f2fs_encrypted_file(inode) ||
-			(rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) ||
-			F2FS_I_SB(inode)->s_ndevs);
-}
-
 int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
 {
 	struct inode *inode = file_inode(iocb->ki_filp);
@@ -868,9 +950,8 @@
 	map.m_seg_type = NO_CHECK_TYPE;
 
 	if (direct_io) {
-		/* map.m_seg_type = rw_hint_to_seg_type(iocb->ki_hint); */
-		map.m_seg_type = rw_hint_to_seg_type(WRITE_LIFE_NOT_SET);
-		flag = __force_buffered_io(inode, WRITE) ?
+		map.m_seg_type = rw_hint_to_seg_type(iocb->ki_hint);
+		flag = f2fs_force_buffered_io(inode, WRITE) ?
 					F2FS_GET_BLOCK_PRE_AIO :
 					F2FS_GET_BLOCK_PRE_DIO;
 		goto map_blocks;
@@ -1114,6 +1195,31 @@
 	return err;
 }
 
+bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
+{
+	struct f2fs_map_blocks map;
+	block_t last_lblk;
+	int err;
+
+	if (pos + len > i_size_read(inode))
+		return false;
+
+	map.m_lblk = F2FS_BYTES_TO_BLK(pos);
+	map.m_next_pgofs = NULL;
+	map.m_next_extent = NULL;
+	map.m_seg_type = NO_CHECK_TYPE;
+	last_lblk = F2FS_BLK_ALIGN(pos + len);
+
+	while (map.m_lblk < last_lblk) {
+		map.m_len = last_lblk - map.m_lblk;
+		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
+		if (err || map.m_len == 0)
+			return false;
+		map.m_lblk += map.m_len;
+	}
+	return true;
+}
+
 static int __get_data_block(struct inode *inode, sector_t iblock,
 			struct buffer_head *bh, int create, int flag,
 			pgoff_t *next_pgofs, int seg_type)
@@ -1151,8 +1257,7 @@
 	return __get_data_block(inode, iblock, bh_result, create,
 						F2FS_GET_BLOCK_DEFAULT, NULL,
 						rw_hint_to_seg_type(
-							WRITE_LIFE_NOT_SET));
-						/* inode->i_write_hint)); */
+							inode->i_write_hint));
 }
 
 static int get_data_block_bmap(struct inode *inode, sector_t iblock,
@@ -1350,6 +1455,8 @@
 	sector_t last_block_in_file;
 	sector_t block_nr;
 	struct f2fs_map_blocks map;
+	bool bio_encrypted;
+	u64 dun;
 
 	map.m_pblk = 0;
 	map.m_lblk = 0;
@@ -1427,6 +1534,14 @@
 			__submit_bio(F2FS_I_SB(inode), bio, DATA);
 			bio = NULL;
 		}
+
+		dun = PG_DUN(inode, page);
+		bio_encrypted = f2fs_may_encrypt_bio(inode, NULL);
+		if (!fscrypt_mergeable_bio(bio, dun, bio_encrypted)) {
+			__submit_bio(F2FS_I_SB(inode), bio, DATA);
+			bio = NULL;
+		}
+
 		if (bio == NULL) {
 			bio = f2fs_grab_read_bio(inode, block_nr, nr_pages);
 			if (IS_ERR(bio)) {
@@ -1434,7 +1549,8 @@
 				goto set_error_page;
 			}
 		}
-
+		if (bio_encrypted)
+			fscrypt_set_ice_dun(inode, bio, dun);
 		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
 			goto submit_and_realloc;
 
@@ -1500,10 +1616,13 @@
 	if (!f2fs_encrypted_file(inode))
 		return 0;
 
-	/* wait for GCed encrypted page writeback */
+	/* wait for GCed page writeback via META_MAPPING */
 	f2fs_wait_on_block_writeback(fio->sbi, fio->old_blkaddr);
 
 retry_encrypt:
+	if (fscrypt_using_hardware_encryption(inode))
+		return 0;
+
 	fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
 			PAGE_SIZE, 0, fio->page->index, gfp_flags);
 	if (!IS_ERR(fio->encrypted_page))
@@ -1650,6 +1769,7 @@
 			goto out_writepage;
 
 		set_page_writeback(page);
+		ClearPageError(page);
 		f2fs_put_dnode(&dn);
 		if (fio->need_lock == LOCK_REQ)
 			f2fs_unlock_op(fio->sbi);
@@ -1672,6 +1792,7 @@
 		goto out_writepage;
 
 	set_page_writeback(page);
+	ClearPageError(page);
 
 	/* LFS mode write path */
 	write_data_page(&dn, fio);
@@ -1817,7 +1938,13 @@
 
 redirty_out:
 	redirty_page_for_writepage(wbc, page);
-	if (!err)
+	/*
+	 * pageout() in MM translates EAGAIN, so it calls handle_write_error()
+	 * -> mapping_set_error() -> set_bit(AS_EIO, ...).
+	 * file_write_and_wait_range() will see EIO error, which is critical
+	 * to return value of fsync() followed by atomic_write failure to user.
+	 */
+	if (!err || wbc->for_reclaim)
 		return AOP_WRITEPAGE_ACTIVATE;
 	unlock_page(page);
 	return err;
@@ -2212,8 +2339,8 @@
 
 	f2fs_wait_on_page_writeback(page, DATA, false);
 
-	/* wait for GCed encrypted page writeback */
-	if (f2fs_encrypted_file(inode))
+	/* wait for GCed page writeback via META_MAPPING */
+	if (f2fs_post_read_required(inode))
 		f2fs_wait_on_block_writeback(sbi, blkaddr);
 
 	if (len == PAGE_SIZE || PageUptodate(page))
@@ -2304,16 +2431,19 @@
 {
 	struct address_space *mapping = iocb->ki_filp->f_mapping;
 	struct inode *inode = mapping->host;
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	size_t count = iov_iter_count(iter);
 	loff_t offset = iocb->ki_pos;
 	int rw = iov_iter_rw(iter);
 	int err;
+	enum rw_hint hint = iocb->ki_hint;
+	int whint_mode = F2FS_OPTION(sbi).whint_mode;
 
 	err = check_direct_IO(inode, iter, offset);
 	if (err)
 		return err;
 
-	if (__force_buffered_io(inode, rw))
+	if (f2fs_force_buffered_io(inode, rw))
 		return 0;
 
 	trace_f2fs_direct_IO_enter(inode, offset, count, rw);
@@ -2340,12 +2470,24 @@
 						 current->pid, path,
 						 current->comm);
 	}
+	if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
+		iocb->ki_hint = WRITE_LIFE_NOT_SET;
 
-	down_read(&F2FS_I(inode)->dio_rwsem[rw]);
+	if (!down_read_trylock(&F2FS_I(inode)->dio_rwsem[rw])) {
+		if (iocb->ki_flags & IOCB_NOWAIT) {
+			iocb->ki_hint = hint;
+			err = -EAGAIN;
+			goto out;
+		}
+		down_read(&F2FS_I(inode)->dio_rwsem[rw]);
+	}
+
 	err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
 	up_read(&F2FS_I(inode)->dio_rwsem[rw]);
 
 	if (rw == WRITE) {
+		if (whint_mode == WHINT_MODE_OFF)
+			iocb->ki_hint = hint;
 		if (err > 0) {
 			f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
 									err);
@@ -2354,7 +2496,7 @@
 			f2fs_write_failed(mapping, offset + count);
 		}
 	}
-
+out:
 	if (trace_android_fs_dataread_start_enabled() &&
 	    (rw == READ))
 		trace_android_fs_dataread_end(inode, offset, count);
@@ -2411,35 +2553,6 @@
 	return 1;
 }
 
-/*
- * This was copied from __set_page_dirty_buffers which gives higher performance
- * in very high speed storages. (e.g., pmem)
- */
-void f2fs_set_page_dirty_nobuffers(struct page *page)
-{
-	struct address_space *mapping = page->mapping;
-	unsigned long flags;
-
-	if (unlikely(!mapping))
-		return;
-
-	spin_lock(&mapping->private_lock);
-	lock_page_memcg(page);
-	SetPageDirty(page);
-	spin_unlock(&mapping->private_lock);
-
-	spin_lock_irqsave(&mapping->tree_lock, flags);
-	WARN_ON_ONCE(!PageUptodate(page));
-	account_page_dirtied(page, mapping);
-	radix_tree_tag_set(&mapping->page_tree,
-			page_index(page), PAGECACHE_TAG_DIRTY);
-	spin_unlock_irqrestore(&mapping->tree_lock, flags);
-	unlock_page_memcg(page);
-
-	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
-	return;
-}
-
 static int f2fs_set_data_page_dirty(struct page *page)
 {
 	struct address_space *mapping = page->mapping;
@@ -2463,7 +2576,7 @@
 	}
 
 	if (!PageDirty(page)) {
-		f2fs_set_page_dirty_nobuffers(page);
+		__set_page_dirty_nobuffers(page);
 		update_dirty_page(inode, page);
 		return 1;
 	}
@@ -2556,3 +2669,27 @@
 	.migratepage    = f2fs_migrate_page,
 #endif
 };
+
+int __init f2fs_init_post_read_processing(void)
+{
+	bio_post_read_ctx_cache = KMEM_CACHE(bio_post_read_ctx, 0);
+	if (!bio_post_read_ctx_cache)
+		goto fail;
+	bio_post_read_ctx_pool =
+		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
+					 bio_post_read_ctx_cache);
+	if (!bio_post_read_ctx_pool)
+		goto fail_free_cache;
+	return 0;
+
+fail_free_cache:
+	kmem_cache_destroy(bio_post_read_ctx_cache);
+fail:
+	return -ENOMEM;
+}
+
+void __exit f2fs_destroy_post_read_processing(void)
+{
+	mempool_destroy(bio_post_read_ctx_pool);
+	kmem_cache_destroy(bio_post_read_ctx_cache);
+}
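
A hedged sketch of how the two hooks above would be wired into module init/exit; the wrapper names are illustrative, since the corresponding init_f2fs_fs()/exit_f2fs_fs() changes are not part of this hunk:

static int __init example_f2fs_init(void)
{
	int err = f2fs_init_post_read_processing();

	if (err)
		return err;	/* -ENOMEM: ctx cache or mempool allocation failed */
	/* ... the rest of filesystem registration would follow ... */
	return 0;
}

static void __exit example_f2fs_exit(void)
{
	/* teardown mirrors init in reverse order */
	f2fs_destroy_post_read_processing();
}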
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index a352ace5..f9a1e18 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -361,6 +361,7 @@
 			struct page *dpage)
 {
 	struct page *page;
+	int dummy_encrypt = DUMMY_ENCRYPTION_ENABLED(F2FS_I_SB(dir));
 	int err;
 
 	if (is_inode_flag_set(inode, FI_NEW_INODE)) {
@@ -387,7 +388,8 @@
 		if (err)
 			goto put_error;
 
-		if (f2fs_encrypted_inode(dir) && f2fs_may_encrypt(inode)) {
+		if ((f2fs_encrypted_inode(dir) || dummy_encrypt) &&
+					f2fs_may_encrypt(inode)) {
 			err = fscrypt_inherit_context(dir, inode, page, false);
 			if (err)
 				goto put_error;
@@ -396,8 +398,6 @@
 		page = get_node_page(F2FS_I_SB(dir), inode->i_ino);
 		if (IS_ERR(page))
 			return page;
-
-		set_cold_node(inode, page);
 	}
 
 	if (new_name) {
@@ -704,7 +704,8 @@
 
 	f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);
 
-	add_ino_entry(F2FS_I_SB(dir), dir->i_ino, TRANS_DIR_INO);
+	if (F2FS_OPTION(F2FS_I_SB(dir)).fsync_mode == FSYNC_MODE_STRICT)
+		add_ino_entry(F2FS_I_SB(dir), dir->i_ino, TRANS_DIR_INO);
 
 	if (f2fs_has_inline_dentry(dir))
 		return f2fs_delete_inline_entry(dentry, page, dir, inode);
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index ff2352a..d5a861b 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -460,7 +460,7 @@
 				struct rb_node *insert_parent)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct rb_node **p = &et->root.rb_node;
+	struct rb_node **p;
 	struct rb_node *parent = NULL;
 	struct extent_node *en = NULL;
 
@@ -706,6 +706,9 @@
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct extent_tree *et = F2FS_I(inode)->extent_tree;
 
+	if (!f2fs_may_extent_tree(inode))
+		return;
+
 	set_inode_flag(inode, FI_NO_EXTENT);
 
 	write_lock(&et->lock);
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index e5d3e22..65e8963 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -99,9 +99,10 @@
 #define F2FS_MOUNT_INLINE_XATTR_SIZE	0x00800000
 #define F2FS_MOUNT_RESERVE_ROOT		0x01000000
 
-#define clear_opt(sbi, option)	((sbi)->mount_opt.opt &= ~F2FS_MOUNT_##option)
-#define set_opt(sbi, option)	((sbi)->mount_opt.opt |= F2FS_MOUNT_##option)
-#define test_opt(sbi, option)	((sbi)->mount_opt.opt & F2FS_MOUNT_##option)
+#define F2FS_OPTION(sbi)	((sbi)->mount_opt)
+#define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
+#define set_opt(sbi, option)	(F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
+#define test_opt(sbi, option)	(F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)
 
 #define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
 		typecheck(unsigned long long, b) &&			\
@@ -114,7 +115,26 @@
 typedef u32 nid_t;
 
 struct f2fs_mount_info {
-	unsigned int	opt;
+	unsigned int opt;
+	int write_io_size_bits;		/* Write IO size bits */
+	block_t root_reserved_blocks;	/* root reserved blocks */
+	kuid_t s_resuid;		/* reserved blocks for uid */
+	kgid_t s_resgid;		/* reserved blocks for gid */
+	int active_logs;		/* # of active logs */
+	int inline_xattr_size;		/* inline xattr size */
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+	struct f2fs_fault_info fault_info;	/* For fault injection */
+#endif
+#ifdef CONFIG_QUOTA
+	/* Names of quota files with journalled quota */
+	char *s_qf_names[MAXQUOTAS];
+	int s_jquota_fmt;			/* Format of quota to use */
+#endif
+	/* For which write hints are passed down to block layer */
+	int whint_mode;
+	int alloc_mode;			/* segment allocation policy */
+	int fsync_mode;			/* fsync policy */
+	bool test_dummy_encryption;	/* test dummy encryption */
 };
 
 #define F2FS_FEATURE_ENCRYPT		0x0001
@@ -126,6 +146,8 @@
 #define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR	0x0040
 #define F2FS_FEATURE_QUOTA_INO		0x0080
 #define F2FS_FEATURE_INODE_CRTIME	0x0100
+#define F2FS_FEATURE_LOST_FOUND		0x0200
+#define F2FS_FEATURE_VERITY		0x0400	/* reserved */
 
 #define F2FS_HAS_FEATURE(sb, mask)					\
 	((F2FS_SB(sb)->raw_super->feature & cpu_to_le32(mask)) != 0)
@@ -155,15 +177,12 @@
 #define	CP_DISCARD	0x00000010
 #define CP_TRIMMED	0x00000020
 
-#define DEF_BATCHED_TRIM_SECTIONS	2048
-#define BATCHED_TRIM_SEGMENTS(sbi)	\
-		(GET_SEG_FROM_SEC(sbi, SM_I(sbi)->trim_sections))
-#define BATCHED_TRIM_BLOCKS(sbi)	\
-		(BATCHED_TRIM_SEGMENTS(sbi) << (sbi)->log_blocks_per_seg)
 #define MAX_DISCARD_BLOCKS(sbi)		BLKS_PER_SEC(sbi)
 #define DEF_MAX_DISCARD_REQUEST		8	/* issue 8 discards per round */
+#define DEF_MAX_DISCARD_LEN		512	/* Max. 2MB per discard */
 #define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if exists */
 #define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
+#define DEF_DISCARD_URGENT_UTIL		80	/* do more discard over 80% */
 #define DEF_CP_INTERVAL			60	/* 60 secs */
 #define DEF_IDLE_INTERVAL		5	/* 5 secs */
 
@@ -448,7 +467,7 @@
 	d->inode = inode;
 	d->max = NR_DENTRY_IN_BLOCK;
 	d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
-	d->bitmap = &t->dentry_bitmap;
+	d->bitmap = t->dentry_bitmap;
 	d->dentry = t->dentry;
 	d->filename = t->filename;
 }
@@ -574,6 +593,8 @@
 #define FADVISE_ENCRYPT_BIT	0x04
 #define FADVISE_ENC_NAME_BIT	0x08
 #define FADVISE_KEEP_SIZE_BIT	0x10
+#define FADVISE_HOT_BIT		0x20
+#define FADVISE_VERITY_BIT	0x40	/* reserved */
 
 #define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
 #define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
@@ -588,6 +609,9 @@
 #define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)
 #define file_keep_isize(inode)	is_file(inode, FADVISE_KEEP_SIZE_BIT)
 #define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)
+#define file_is_hot(inode)	is_file(inode, FADVISE_HOT_BIT)
+#define file_set_hot(inode)	set_file(inode, FADVISE_HOT_BIT)
+#define file_clear_hot(inode)	clear_file(inode, FADVISE_HOT_BIT)
 
 #define DEF_DIR_LEVEL		0
 
@@ -635,6 +659,7 @@
 	kprojid_t i_projid;		/* id for project quota */
 	int i_inline_xattr_size;	/* inline xattr size */
 	struct timespec i_crtime;	/* inode creation time */
+	struct timespec i_disk_time[4];	/* inode disk times */
 };
 
 static inline void get_extent_info(struct extent_info *ext,
@@ -664,7 +689,8 @@
 static inline bool __is_discard_mergeable(struct discard_info *back,
 						struct discard_info *front)
 {
-	return back->lstart + back->len == front->lstart;
+	return (back->lstart + back->len == front->lstart) &&
+		(back->len + front->len < DEF_MAX_DISCARD_LEN);
 }
 
 static inline bool __is_discard_back_mergeable(struct discard_info *cur,
@@ -741,7 +767,7 @@
 	unsigned int nid_cnt[MAX_NID_STATE];	/* the number of free node id */
 	spinlock_t nid_list_lock;	/* protect nid lists ops */
 	struct mutex build_lock;	/* lock for build free nids */
-	unsigned char (*free_nid_bitmap)[NAT_ENTRY_BITMAP_SIZE];
+	unsigned char **free_nid_bitmap;
 	unsigned char *nat_block_bitmap;
 	unsigned short *free_nid_count;	/* free nid count of NAT block */
 
@@ -974,6 +1000,7 @@
 	bool submitted;		/* indicate IO submission */
 	int need_lock;		/* indicate we need to lock cp_rwsem */
 	bool in_list;		/* indicate fio is in io_list */
+	bool is_meta;		/* indicate whether we borrow the meta inode mapping */
 	enum iostat_type io_type;	/* io type */
 	struct writeback_control *io_wbc; /* writeback control */
 };
@@ -1035,10 +1062,35 @@
 	MAX_TIME,
 };
 
+enum {
+	WHINT_MODE_OFF,		/* not pass down write hints */
+	WHINT_MODE_USER,	/* try to pass down hints given by users */
+	WHINT_MODE_FS,		/* pass down hints with F2FS policy */
+};
+
+enum {
+	ALLOC_MODE_DEFAULT,	/* stay default */
+	ALLOC_MODE_REUSE,	/* reuse segments as much as possible */
+};
+
+enum fsync_mode {
+	FSYNC_MODE_POSIX,	/* fsync follows posix semantics */
+	FSYNC_MODE_STRICT,	/* fsync behaves in line with ext4 */
+	FSYNC_MODE_NOBARRIER,	/* fsync behaves nobarrier based on posix */
+};
+
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#define DUMMY_ENCRYPTION_ENABLED(sbi) \
+			(unlikely(F2FS_OPTION(sbi).test_dummy_encryption))
+#else
+#define DUMMY_ENCRYPTION_ENABLED(sbi) (0)
+#endif
+
 struct f2fs_sb_info {
 	struct super_block *sb;			/* pointer to VFS super block */
 	struct proc_dir_entry *s_proc;		/* proc entry */
 	struct f2fs_super_block *raw_super;	/* raw super block pointer */
+	struct rw_semaphore sb_lock;		/* lock for raw super block */
 	int valid_super_block;			/* valid super block no */
 	unsigned long s_flag;				/* flags for sbi */
 
@@ -1058,7 +1110,6 @@
 	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
 	struct mutex wio_mutex[NR_PAGE_TYPE - 1][NR_TEMP_TYPE];
 						/* bio ordering for NODE/DATA */
-	int write_io_size_bits;			/* Write IO size bits */
 	mempool_t *write_io_dummy;		/* Dummy pages */
 
 	/* for checkpoint */
@@ -1108,9 +1159,7 @@
 	unsigned int total_node_count;		/* total node block count */
 	unsigned int total_valid_node_count;	/* valid node block count */
 	loff_t max_file_blocks;			/* max block index of file */
-	int active_logs;			/* # of active logs */
 	int dir_level;				/* directory level */
-	int inline_xattr_size;			/* inline xattr size */
 	unsigned int trigger_ssr_threshold;	/* threshold to trigger ssr */
 	int readdir_ra;				/* readahead inode in readdir */
 
@@ -1120,9 +1169,6 @@
 	block_t last_valid_block_count;		/* for recovery */
 	block_t reserved_blocks;		/* configurable reserved blocks */
 	block_t current_reserved_blocks;	/* current reserved blocks */
-	block_t root_reserved_blocks;		/* root reserved blocks */
-	kuid_t s_resuid;			/* reserved blocks for uid */
-	kgid_t s_resgid;			/* reserved blocks for gid */
 
 	unsigned int nquota_files;		/* # of quota sysfile */
 
@@ -1207,17 +1253,6 @@
 
 	/* Precomputed FS UUID checksum for seeding other checksums */
 	__u32 s_chksum_seed;
-
-	/* For fault injection */
-#ifdef CONFIG_F2FS_FAULT_INJECTION
-	struct f2fs_fault_info fault_info;
-#endif
-
-#ifdef CONFIG_QUOTA
-	/* Names of quota files with journalled quota */
-	char *s_qf_names[MAXQUOTAS];
-	int s_jquota_fmt;			/* Format of quota to use */
-#endif
 };
 
 #ifdef CONFIG_F2FS_FAULT_INJECTION
@@ -1227,7 +1262,7 @@
 		__func__, __builtin_return_address(0))
 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
 {
-	struct f2fs_fault_info *ffi = &sbi->fault_info;
+	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
 
 	if (!ffi->inject_rate)
 		return false;
@@ -1576,7 +1611,7 @@
 }
 
 static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
-					struct inode *inode)
+					struct inode *inode, bool cap)
 {
 	if (!inode)
 		return true;
@@ -1584,12 +1619,12 @@
 		return false;
 	if (IS_NOQUOTA(inode))
 		return true;
-	if (capable(CAP_SYS_RESOURCE))
+	if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid()))
 		return true;
-	if (uid_eq(sbi->s_resuid, current_fsuid()))
+	if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) &&
+					in_group_p(F2FS_OPTION(sbi).s_resgid))
 		return true;
-	if (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) &&
-					in_group_p(sbi->s_resgid))
+	if (cap && capable(CAP_SYS_RESOURCE))
 		return true;
 	return false;
 }
@@ -1624,8 +1659,8 @@
 	avail_user_block_count = sbi->user_block_count -
 					sbi->current_reserved_blocks;
 
-	if (!__allow_reserved_blocks(sbi, inode))
-		avail_user_block_count -= sbi->root_reserved_blocks;
+	if (!__allow_reserved_blocks(sbi, inode, true))
+		avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
 
 	if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
 		diff = sbi->total_valid_block_count - avail_user_block_count;
@@ -1760,6 +1795,12 @@
 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
 	int offset;
 
+	if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) {
+		offset = (flag == SIT_BITMAP) ?
+			le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0;
+		return &ckpt->sit_nat_version_bitmap + offset;
+	}
+
 	if (__cp_payload(sbi) > 0) {
 		if (flag == NAT_BITMAP)
 			return &ckpt->sit_nat_version_bitmap;
@@ -1825,8 +1866,8 @@
 	valid_block_count = sbi->total_valid_block_count +
 					sbi->current_reserved_blocks + 1;
 
-	if (!__allow_reserved_blocks(sbi, inode))
-		valid_block_count += sbi->root_reserved_blocks;
+	if (!__allow_reserved_blocks(sbi, inode, false))
+		valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks;
 
 	if (unlikely(valid_block_count > sbi->user_block_count)) {
 		spin_unlock(&sbi->stat_lock);
@@ -2428,7 +2469,17 @@
 	}
 	if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) ||
 			file_keep_isize(inode) ||
-			i_size_read(inode) & PAGE_MASK)
+			i_size_read(inode) & ~PAGE_MASK)
+		return false;
+
+	if (!timespec_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
+		return false;
+	if (!timespec_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime))
+		return false;
+	if (!timespec_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
+		return false;
+	if (!timespec_equal(F2FS_I(inode)->i_disk_time + 3,
+						&F2FS_I(inode)->i_crtime))
 		return false;
 
 	down_read(&F2FS_I(inode)->i_sem);
@@ -2438,8 +2489,7 @@
 	return ret;
 }
 
-#define sb_rdonly	f2fs_readonly
-static inline int f2fs_readonly(struct super_block *sb)
+static inline bool f2fs_readonly(struct super_block *sb)
 {
 	return sb->s_flags & MS_RDONLY;
 }
@@ -2501,15 +2551,6 @@
 	return ret;
 }
 
-enum rw_hint {
-	WRITE_LIFE_NOT_SET	= 0,
-	WRITE_LIFE_NONE		= 1, /* RWH_WRITE_LIFE_NONE */
-	WRITE_LIFE_SHORT	= 2, /* RWH_WRITE_LIFE_SHORT */
-	WRITE_LIFE_MEDIUM	= 3, /* RWH_WRITE_LIFE_MEDIUM */
-	WRITE_LIFE_LONG		= 4, /* RWH_WRITE_LIFE_LONG */
-	WRITE_LIFE_EXTREME	= 5, /* RWH_WRITE_LIFE_EXTREME */
-};
-
 static inline int wbc_to_write_flags(struct writeback_control *wbc)
 {
 	if (wbc->sync_mode == WB_SYNC_ALL)
@@ -2628,6 +2669,8 @@
 /*
  * namei.c
  */
+int update_extension_list(struct f2fs_sb_info *sbi, const char *name,
+							bool hot, bool set);
 struct dentry *f2fs_get_parent(struct dentry *child);
 
 /*
@@ -2758,8 +2801,6 @@
 void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
 void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
 bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
-void init_discard_policy(struct discard_policy *dpolicy, int discard_type,
-						unsigned int granularity);
 void drop_discard_cmd(struct f2fs_sb_info *sbi);
 void stop_discard_thread(struct f2fs_sb_info *sbi);
 bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi);
@@ -2800,6 +2841,8 @@
 int __init create_segment_manager_caches(void);
 void destroy_segment_manager_caches(void);
 int rw_hint_to_seg_type(enum rw_hint hint);
+enum rw_hint io_type_to_rw_hint(struct f2fs_sb_info *sbi, enum page_type type,
+				enum temp_type temp);
 
 /*
  * checkpoint.c
@@ -2840,6 +2883,8 @@
 /*
  * data.c
  */
+int f2fs_init_post_read_processing(void);
+void f2fs_destroy_post_read_processing(void);
 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
 				struct inode *inode, nid_t ino, pgoff_t idx,
@@ -2871,7 +2916,6 @@
 			u64 start, u64 len);
 bool should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
 bool should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
-void f2fs_set_page_dirty_nobuffers(struct page *page);
 int __f2fs_write_data_pages(struct address_space *mapping,
 						struct writeback_control *wbc,
 						enum iostat_type io_type);
@@ -2882,6 +2926,7 @@
 int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
 			struct page *page, enum migrate_mode mode);
 #endif
+bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
 
 /*
  * gc.c
@@ -3199,50 +3244,30 @@
 #endif
 }
 
-static inline bool f2fs_bio_encrypted(struct bio *bio)
+/*
+ * Returns true if the reads of the inode's data need to undergo some
+ * postprocessing step, like decryption or authenticity verification.
+ */
+static inline bool f2fs_post_read_required(struct inode *inode)
 {
-	return bio->bi_private != NULL;
+	return f2fs_encrypted_file(inode);
 }
 
-static inline int f2fs_sb_has_crypto(struct super_block *sb)
-{
-	return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_ENCRYPT);
+#define F2FS_FEATURE_FUNCS(name, flagname) \
+static inline int f2fs_sb_has_##name(struct super_block *sb) \
+{ \
+	return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_##flagname); \
 }
 
-static inline int f2fs_sb_mounted_blkzoned(struct super_block *sb)
-{
-	return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_BLKZONED);
-}
-
-static inline int f2fs_sb_has_extra_attr(struct super_block *sb)
-{
-	return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_EXTRA_ATTR);
-}
-
-static inline int f2fs_sb_has_project_quota(struct super_block *sb)
-{
-	return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_PRJQUOTA);
-}
-
-static inline int f2fs_sb_has_inode_chksum(struct super_block *sb)
-{
-	return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_INODE_CHKSUM);
-}
-
-static inline int f2fs_sb_has_flexible_inline_xattr(struct super_block *sb)
-{
-	return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_FLEXIBLE_INLINE_XATTR);
-}
-
-static inline int f2fs_sb_has_quota_ino(struct super_block *sb)
-{
-	return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_QUOTA_INO);
-}
-
-static inline int f2fs_sb_has_inode_crtime(struct super_block *sb)
-{
-	return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_INODE_CRTIME);
-}
+F2FS_FEATURE_FUNCS(encrypt, ENCRYPT);
+F2FS_FEATURE_FUNCS(blkzoned, BLKZONED);
+F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR);
+F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA);
+F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM);
+F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
+F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO);
+F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME);
+F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
 
 #ifdef CONFIG_BLK_DEV_ZONED
 static inline int get_blkz_type(struct f2fs_sb_info *sbi,
@@ -3262,7 +3287,7 @@
 {
 	struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev);
 
-	return blk_queue_discard(q) || f2fs_sb_mounted_blkzoned(sbi->sb);
+	return blk_queue_discard(q) || f2fs_sb_has_blkzoned(sbi->sb);
 }
 
 static inline void set_opt_mode(struct f2fs_sb_info *sbi, unsigned int mt)
@@ -3291,4 +3316,22 @@
 #endif
 }
 
+static inline bool f2fs_force_buffered_io(struct inode *inode, int rw)
+{
+	return ((f2fs_post_read_required(inode) &&
+		!fscrypt_using_hardware_encryption(inode)) ||
+			(rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) ||
+			F2FS_I_SB(inode)->s_ndevs);
+}
+
+static inline bool f2fs_may_encrypt_bio(struct inode *inode,
+		struct f2fs_io_info *fio)
+{
+	if (fio && (fio->type != DATA || fio->encrypted_page))
+		return false;
+
+	return (f2fs_encrypted_file(inode) &&
+			fscrypt_using_hardware_encryption(inode));
+}
+
 #endif
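
The f2fs.h hunk above collapses eight hand-written feature predicates into a
single F2FS_FEATURE_FUNCS() macro that stamps out one inline helper per
feature flag. Below is a minimal stand-alone sketch of the same
code-generation pattern; every demo_* name is illustrative and not part of
the patch.

#include <stdio.h>

/* illustrative feature bits, mirroring the style of the patch */
#define DEMO_FEATURE_ENCRYPT	0x0001
#define DEMO_FEATURE_BLKZONED	0x0002
#define DEMO_FEATURE_QUOTA_INO	0x0080

struct demo_sb { unsigned int feature; };

/* one macro invocation generates one has_<name>() predicate per flag */
#define DEMO_FEATURE_FUNCS(name, flagname) \
static inline int demo_sb_has_##name(struct demo_sb *sb) \
{ \
	return (sb->feature & DEMO_FEATURE_##flagname) != 0; \
}

DEMO_FEATURE_FUNCS(encrypt, ENCRYPT);
DEMO_FEATURE_FUNCS(blkzoned, BLKZONED);
DEMO_FEATURE_FUNCS(quota_ino, QUOTA_INO);

int main(void)
{
	struct demo_sb sb = {
		.feature = DEMO_FEATURE_ENCRYPT | DEMO_FEATURE_QUOTA_INO,
	};

	printf("encrypt=%d blkzoned=%d quota_ino=%d\n",
	       demo_sb_has_encrypt(&sb),
	       demo_sb_has_blkzoned(&sb),
	       demo_sb_has_quota_ino(&sb));
	return 0;
}

With this pattern, adding a feature costs one flag define plus one macro
invocation, which is how F2FS_FEATURE_LOST_FOUND and the reserved VERITY
flag are wired up in the hunk above.
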
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index b926df7..44a2e32 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -112,8 +112,8 @@
 	/* fill the page */
 	f2fs_wait_on_page_writeback(page, DATA, false);
 
-	/* wait for GCed encrypted page writeback */
-	if (f2fs_encrypted_file(inode))
+	/* wait for GCed page writeback via META_MAPPING */
+	if (f2fs_post_read_required(inode))
 		f2fs_wait_on_block_writeback(sbi, dn.data_blkaddr);
 
 out_sem:
@@ -165,9 +165,10 @@
 		cp_reason = CP_NODE_NEED_CP;
 	else if (test_opt(sbi, FASTBOOT))
 		cp_reason = CP_FASTBOOT_MODE;
-	else if (sbi->active_logs == 2)
+	else if (F2FS_OPTION(sbi).active_logs == 2)
 		cp_reason = CP_SPEC_LOG_NUM;
-	else if (need_dentry_mark(sbi, inode->i_ino) &&
+	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
+		need_dentry_mark(sbi, inode->i_ino) &&
 		exist_written_data(sbi, F2FS_I(inode)->i_pino, TRANS_DIR_INO))
 		cp_reason = CP_RECOVER_DIR;
 
@@ -307,7 +308,7 @@
 	remove_ino_entry(sbi, ino, APPEND_INO);
 	clear_inode_flag(inode, FI_APPEND_WRITE);
 flush_out:
-	if (!atomic)
+	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
 		ret = f2fs_issue_flush(sbi, inode->i_ino);
 	if (!ret) {
 		remove_ino_entry(sbi, ino, UPDATE_INO);
@@ -480,6 +481,9 @@
 
 	if (err)
 		return err;
+
+	filp->f_mode |= FMODE_NOWAIT;
+
 	return dquot_file_open(inode, filp);
 }
 
@@ -570,7 +574,6 @@
 int truncate_blocks(struct inode *inode, u64 from, bool lock)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	unsigned int blocksize = inode->i_sb->s_blocksize;
 	struct dnode_of_data dn;
 	pgoff_t free_from;
 	int count = 0, err = 0;
@@ -579,7 +582,7 @@
 
 	trace_f2fs_truncate_blocks_enter(inode, from);
 
-	free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1);
+	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);
 
 	if (free_from >= sbi->max_file_blocks)
 		goto free_partial;
@@ -1350,8 +1353,12 @@
 	}
 
 out:
-	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
-		f2fs_i_size_write(inode, new_size);
+	if (new_size > i_size_read(inode)) {
+		if (mode & FALLOC_FL_KEEP_SIZE)
+			file_set_keep_isize(inode);
+		else
+			f2fs_i_size_write(inode, new_size);
+	}
 out_sem:
 	up_write(&F2FS_I(inode)->i_mmap_sem);
 
@@ -1705,6 +1712,8 @@
 
 	inode_lock(inode);
 
+	down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
+
 	if (f2fs_is_volatile_file(inode))
 		goto err_out;
 
@@ -1723,6 +1732,7 @@
 		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
 	}
 err_out:
+	up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
 	inode_unlock(inode);
 	mnt_drop_write_file(filp);
 	return ret;
@@ -1932,7 +1942,7 @@
 {
 	struct inode *inode = file_inode(filp);
 
-	if (!f2fs_sb_has_crypto(inode->i_sb))
+	if (!f2fs_sb_has_encrypt(inode->i_sb))
 		return -EOPNOTSUPP;
 
 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
@@ -1942,7 +1952,7 @@
 
 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
 {
-	if (!f2fs_sb_has_crypto(file_inode(filp)->i_sb))
+	if (!f2fs_sb_has_encrypt(file_inode(filp)->i_sb))
 		return -EOPNOTSUPP;
 	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
 }
@@ -1953,16 +1963,18 @@
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	int err;
 
-	if (!f2fs_sb_has_crypto(inode->i_sb))
+	if (!f2fs_sb_has_encrypt(inode->i_sb))
 		return -EOPNOTSUPP;
 
-	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
-		goto got_it;
-
 	err = mnt_want_write_file(filp);
 	if (err)
 		return err;
 
+	down_write(&sbi->sb_lock);
+
+	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
+		goto got_it;
+
 	/* update superblock with uuid */
 	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
 
@@ -1970,15 +1982,16 @@
 	if (err) {
 		/* undo new data */
 		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
-		mnt_drop_write_file(filp);
-		return err;
+		goto out_err;
 	}
-	mnt_drop_write_file(filp);
 got_it:
 	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
 									16))
-		return -EFAULT;
-	return 0;
+		err = -EFAULT;
+out_err:
+	up_write(&sbi->sb_lock);
+	mnt_drop_write_file(filp);
+	return err;
 }
 
 static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
@@ -2039,8 +2052,10 @@
 		return ret;
 
 	end = range.start + range.len;
-	if (range.start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi))
-		return -EINVAL;
+	if (range.start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi)) {
+		ret = -EINVAL;
+		goto out;
+	}
 do_more:
 	if (!range.sync) {
 		if (!mutex_trylock(&sbi->gc_mutex)) {
@@ -2681,25 +2696,54 @@
 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
 		return -EIO;
 
-	inode_lock(inode);
+	if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
+		return -EINVAL;
+
+	if (!inode_trylock(inode)) {
+		if (iocb->ki_flags & IOCB_NOWAIT)
+			return -EAGAIN;
+		inode_lock(inode);
+	}
+
 	ret = generic_write_checks(iocb, from);
 	if (ret > 0) {
+		bool preallocated = false;
+		size_t target_size = 0;
 		int err;
 
 		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
 			set_inode_flag(inode, FI_NO_PREALLOC);
 
-		err = f2fs_preallocate_blocks(iocb, from);
-		if (err) {
-			clear_inode_flag(inode, FI_NO_PREALLOC);
-			inode_unlock(inode);
-			return err;
+		if ((iocb->ki_flags & IOCB_NOWAIT) &&
+			(iocb->ki_flags & IOCB_DIRECT)) {
+				if (!f2fs_overwrite_io(inode, iocb->ki_pos,
+						iov_iter_count(from)) ||
+					f2fs_has_inline_data(inode) ||
+					f2fs_force_buffered_io(inode, WRITE)) {
+						inode_unlock(inode);
+						return -EAGAIN;
+				}
+
+		} else {
+			preallocated = true;
+			target_size = iocb->ki_pos + iov_iter_count(from);
+
+			err = f2fs_preallocate_blocks(iocb, from);
+			if (err) {
+				clear_inode_flag(inode, FI_NO_PREALLOC);
+				inode_unlock(inode);
+				return err;
+			}
 		}
 		blk_start_plug(&plug);
 		ret = __generic_file_write_iter(iocb, from);
 		blk_finish_plug(&plug);
 		clear_inode_flag(inode, FI_NO_PREALLOC);
 
+		/* if we couldn't write data, we should deallocate blocks. */
+		if (preallocated && i_size_read(inode) < target_size)
+			f2fs_truncate(inode);
+
 		if (ret > 0)
 			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
 	}
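
With FMODE_NOWAIT now set at open time and the write_iter path above
returning -EAGAIN instead of sleeping on locks or preallocation,
applications can attempt non-blocking direct writes. A user-space sketch
using pwritev2() with RWF_NOWAIT follows; it assumes a kernel and libc that
expose that flag, and the file name and retry handling are illustrative.

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	/* O_DIRECT, because the patch honours IOCB_NOWAIT only for direct I/O */
	int fd = open("testfile", O_WRONLY | O_CREAT | O_DIRECT, 0644);
	/* O_DIRECT needs an aligned buffer; 4096 matches the f2fs block size */
	static char buf[4096] __attribute__((aligned(4096)));
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	ssize_t n;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(buf, 'x', sizeof(buf));

	n = pwritev2(fd, &iov, 1, 0, RWF_NOWAIT);
	if (n < 0 && errno == EAGAIN)
		/* lock contention, inline data, a non-overwrite region, ... */
		printf("write would block; retry later or without RWF_NOWAIT\n");
	else if (n < 0)
		perror("pwritev2");
	else
		printf("wrote %zd bytes without blocking\n", n);

	close(fd);
	return 0;
}
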
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 3b26aa1..66044fa 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -76,14 +76,15 @@
 		 * invalidated soon after by user update or deletion.
 		 * So, I'd like to wait some time to collect dirty segments.
 		 */
-		if (!mutex_trylock(&sbi->gc_mutex))
-			goto next;
-
 		if (gc_th->gc_urgent) {
 			wait_ms = gc_th->urgent_sleep_time;
+			mutex_lock(&sbi->gc_mutex);
 			goto do_gc;
 		}
 
+		if (!mutex_trylock(&sbi->gc_mutex))
+			goto next;
+
 		if (!is_idle(sbi)) {
 			increase_sleep_time(gc_th, &wait_ms);
 			mutex_unlock(&sbi->gc_mutex);
@@ -161,12 +162,17 @@
 {
 	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;
 
-	if (gc_th && gc_th->gc_idle) {
+	if (!gc_th)
+		return gc_mode;
+
+	if (gc_th->gc_idle) {
 		if (gc_th->gc_idle == 1)
 			gc_mode = GC_CB;
 		else if (gc_th->gc_idle == 2)
 			gc_mode = GC_GREEDY;
 	}
+	if (gc_th->gc_urgent)
+		gc_mode = GC_GREEDY;
 	return gc_mode;
 }
 
@@ -188,11 +194,14 @@
 	}
 
 	/* we need to check every dirty segments in the FG_GC case */
-	if (gc_type != FG_GC && p->max_search > sbi->max_victim_search)
+	if (gc_type != FG_GC &&
+			(sbi->gc_thread && !sbi->gc_thread->gc_urgent) &&
+			p->max_search > sbi->max_victim_search)
 		p->max_search = sbi->max_victim_search;
 
-	/* let's select beginning hot/small space first */
-	if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
+	/* let's select beginning hot/small space first in no_heap mode */
+	if (test_opt(sbi, NOHEAP) &&
+		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
 		p->offset = 0;
 	else
 		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
@@ -684,6 +693,7 @@
 		dec_page_count(fio.sbi, F2FS_DIRTY_META);
 
 	set_page_writeback(fio.encrypted_page);
+	ClearPageError(page);
 
 	/* allocate block address */
 	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
@@ -841,8 +851,8 @@
 			if (IS_ERR(inode) || is_bad_inode(inode))
 				continue;
 
-			/* if encrypted inode, let's go phase 3 */
-			if (f2fs_encrypted_file(inode)) {
+			/* if inode uses special I/O path, let's go phase 3 */
+			if (f2fs_post_read_required(inode)) {
 				add_gc_inode(gc_list, inode);
 				continue;
 			}
@@ -890,7 +900,7 @@
 
 			start_bidx = start_bidx_of_node(nofs, inode)
 								+ ofs_in_node;
-			if (f2fs_encrypted_file(inode))
+			if (f2fs_post_read_required(inode))
 				move_data_block(inode, start_bidx, segno, off);
 			else
 				move_data_page(inode, start_bidx, gc_type,
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 1925814..156ac4f 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -26,7 +26,7 @@
 	if (i_size_read(inode) > MAX_INLINE_DATA(inode))
 		return false;
 
-	if (f2fs_encrypted_file(inode))
+	if (f2fs_post_read_required(inode))
 		return false;
 
 	return true;
@@ -157,6 +157,7 @@
 
 	/* write data page to try to make data consistent */
 	set_page_writeback(page);
+	ClearPageError(page);
 	fio.old_blkaddr = dn->data_blkaddr;
 	set_inode_flag(dn->inode, FI_HOT_DATA);
 	write_data_page(dn, &fio);
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 10be247..e0d9e8f 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -284,6 +284,10 @@
 		fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
 	}
 
+	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
+	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
+	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
+	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
 	f2fs_put_page(node_page, 1);
 
 	stat_inc_inline_xattr(inode);
@@ -439,12 +443,15 @@
 	}
 
 	__set_inode_rdev(inode, ri);
-	set_cold_node(inode, node_page);
 
 	/* deleted inode */
 	if (inode->i_nlink == 0)
 		clear_inline_node(node_page);
 
+	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
+	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
+	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
+	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
 }
 
 void update_inode_page(struct inode *inode)
@@ -585,7 +592,7 @@
 			!exist_written_data(sbi, inode->i_ino, ORPHAN_INO));
 	}
 out_clear:
-	fscrypt_put_encryption_info(inode, NULL);
+	fscrypt_put_encryption_info(inode);
 	clear_inode(inode);
 }
 
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 59e8dca..f1e1ff1 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -78,7 +78,8 @@
 	set_inode_flag(inode, FI_NEW_INODE);
 
 	/* If the directory encrypted, then we should encrypt the inode. */
-	if (f2fs_encrypted_inode(dir) && f2fs_may_encrypt(inode))
+	if ((f2fs_encrypted_inode(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) &&
+				f2fs_may_encrypt(inode))
 		f2fs_set_encrypted_inode(inode);
 
 	if (f2fs_sb_has_extra_attr(sbi->sb)) {
@@ -97,7 +98,7 @@
 	if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)) {
 		f2fs_bug_on(sbi, !f2fs_has_extra_attr(inode));
 		if (f2fs_has_inline_xattr(inode))
-			xattr_size = sbi->inline_xattr_size;
+			xattr_size = F2FS_OPTION(sbi).inline_xattr_size;
 		/* Otherwise, will be 0 */
 	} else if (f2fs_has_inline_xattr(inode) ||
 				f2fs_has_inline_dentry(inode)) {
@@ -142,7 +143,7 @@
 	return ERR_PTR(err);
 }
 
-static int is_multimedia_file(const unsigned char *s, const char *sub)
+static int is_extension_exist(const unsigned char *s, const char *sub)
 {
 	size_t slen = strlen(s);
 	size_t sublen = strlen(sub);
@@ -168,19 +169,94 @@
 /*
  * Set multimedia files as cold files for hot/cold data separation
  */
-static inline void set_cold_files(struct f2fs_sb_info *sbi, struct inode *inode,
+static inline void set_file_temperature(struct f2fs_sb_info *sbi, struct inode *inode,
 		const unsigned char *name)
 {
-	int i;
-	__u8 (*extlist)[8] = sbi->raw_super->extension_list;
+	__u8 (*extlist)[F2FS_EXTENSION_LEN] = sbi->raw_super->extension_list;
+	int i, cold_count, hot_count;
 
-	int count = le32_to_cpu(sbi->raw_super->extension_count);
-	for (i = 0; i < count; i++) {
-		if (is_multimedia_file(name, extlist[i])) {
+	down_read(&sbi->sb_lock);
+
+	cold_count = le32_to_cpu(sbi->raw_super->extension_count);
+	hot_count = sbi->raw_super->hot_ext_count;
+
+	for (i = 0; i < cold_count + hot_count; i++) {
+		if (!is_extension_exist(name, extlist[i]))
+			continue;
+		if (i < cold_count)
 			file_set_cold(inode);
-			break;
-		}
+		else
+			file_set_hot(inode);
+		break;
 	}
+
+	up_read(&sbi->sb_lock);
+}
+
+int update_extension_list(struct f2fs_sb_info *sbi, const char *name,
+							bool hot, bool set)
+{
+	__u8 (*extlist)[F2FS_EXTENSION_LEN] = sbi->raw_super->extension_list;
+	int cold_count = le32_to_cpu(sbi->raw_super->extension_count);
+	int hot_count = sbi->raw_super->hot_ext_count;
+	int total_count = cold_count + hot_count;
+	int start, count;
+	int i;
+
+	if (set) {
+		if (total_count == F2FS_MAX_EXTENSION)
+			return -EINVAL;
+	} else {
+		if (!hot && !cold_count)
+			return -EINVAL;
+		if (hot && !hot_count)
+			return -EINVAL;
+	}
+
+	if (hot) {
+		start = cold_count;
+		count = total_count;
+	} else {
+		start = 0;
+		count = cold_count;
+	}
+
+	for (i = start; i < count; i++) {
+		if (strcmp(name, extlist[i]))
+			continue;
+
+		if (set)
+			return -EINVAL;
+
+		memcpy(extlist[i], extlist[i + 1],
+				F2FS_EXTENSION_LEN * (total_count - i - 1));
+		memset(extlist[total_count - 1], 0, F2FS_EXTENSION_LEN);
+		if (hot)
+			sbi->raw_super->hot_ext_count = hot_count - 1;
+		else
+			sbi->raw_super->extension_count =
+						cpu_to_le32(cold_count - 1);
+		return 0;
+	}
+
+	if (!set)
+		return -EINVAL;
+
+	if (hot) {
+		strncpy(extlist[count], name, strlen(name));
+		sbi->raw_super->hot_ext_count = hot_count + 1;
+	} else {
+		char buf[F2FS_MAX_EXTENSION][F2FS_EXTENSION_LEN];
+
+		memcpy(buf, &extlist[cold_count],
+				F2FS_EXTENSION_LEN * hot_count);
+		memset(extlist[cold_count], 0, F2FS_EXTENSION_LEN);
+		strncpy(extlist[cold_count], name, strlen(name));
+		memcpy(&extlist[cold_count + 1], buf,
+				F2FS_EXTENSION_LEN * hot_count);
+		sbi->raw_super->extension_count = cpu_to_le32(cold_count + 1);
+	}
+	return 0;
 }
 
 static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
@@ -203,7 +279,7 @@
 		return PTR_ERR(inode);
 
 	if (!test_opt(sbi, DISABLE_EXT_IDENTIFY))
-		set_cold_files(sbi, inode, dentry->d_name.name);
+		set_file_temperature(sbi, inode, dentry->d_name.name);
 
 	inode->i_op = &f2fs_file_inode_operations;
 	inode->i_fop = &f2fs_file_operations;
@@ -218,8 +294,7 @@
 
 	alloc_nid_done(sbi, ino);
 
-	d_instantiate(dentry, inode);
-	unlock_new_inode(inode);
+	d_instantiate_new(dentry, inode);
 
 	if (IS_DIRSYNC(dir))
 		f2fs_sync_fs(sbi->sb, 1);
@@ -481,27 +556,16 @@
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
 	struct inode *inode;
 	size_t len = strlen(symname);
-	struct fscrypt_str disk_link = FSTR_INIT((char *)symname, len + 1);
-	struct fscrypt_symlink_data *sd = NULL;
+	struct fscrypt_str disk_link;
 	int err;
 
 	if (unlikely(f2fs_cp_error(sbi)))
 		return -EIO;
 
-	if (f2fs_encrypted_inode(dir)) {
-		err = fscrypt_get_encryption_info(dir);
-		if (err)
-			return err;
-
-		if (!fscrypt_has_encryption_key(dir))
-			return -ENOKEY;
-
-		disk_link.len = (fscrypt_fname_encrypted_size(dir, len) +
-				sizeof(struct fscrypt_symlink_data));
-	}
-
-	if (disk_link.len > dir->i_sb->s_blocksize)
-		return -ENAMETOOLONG;
+	err = fscrypt_prepare_symlink(dir, symname, len, dir->i_sb->s_blocksize,
+				      &disk_link);
+	if (err)
+		return err;
 
 	err = dquot_initialize(dir);
 	if (err)
@@ -511,7 +575,7 @@
 	if (IS_ERR(inode))
 		return PTR_ERR(inode);
 
-	if (f2fs_encrypted_inode(inode))
+	if (IS_ENCRYPTED(inode))
 		inode->i_op = &f2fs_encrypted_symlink_inode_operations;
 	else
 		inode->i_op = &f2fs_symlink_inode_operations;
@@ -521,44 +585,18 @@
 	f2fs_lock_op(sbi);
 	err = f2fs_add_link(dentry, inode);
 	if (err)
-		goto out;
+		goto out_handle_failed_inode;
 	f2fs_unlock_op(sbi);
 	alloc_nid_done(sbi, inode->i_ino);
 
-	if (f2fs_encrypted_inode(inode)) {
-		struct qstr istr = QSTR_INIT(symname, len);
-		struct fscrypt_str ostr;
-
-		sd = f2fs_kzalloc(sbi, disk_link.len, GFP_NOFS);
-		if (!sd) {
-			err = -ENOMEM;
-			goto err_out;
-		}
-
-		err = fscrypt_get_encryption_info(inode);
-		if (err)
-			goto err_out;
-
-		if (!fscrypt_has_encryption_key(inode)) {
-			err = -ENOKEY;
-			goto err_out;
-		}
-
-		ostr.name = sd->encrypted_path;
-		ostr.len = disk_link.len;
-		err = fscrypt_fname_usr_to_disk(inode, &istr, &ostr);
-		if (err)
-			goto err_out;
-
-		sd->len = cpu_to_le16(ostr.len);
-		disk_link.name = (char *)sd;
-	}
+	err = fscrypt_encrypt_symlink(inode, symname, len, &disk_link);
+	if (err)
+		goto err_out;
 
 	err = page_symlink(inode, disk_link.name, disk_link.len);
 
 err_out:
-	d_instantiate(dentry, inode);
-	unlock_new_inode(inode);
+	d_instantiate_new(dentry, inode);
 
 	/*
 	 * Let's flush symlink data in order to avoid broken symlink as much as
@@ -579,12 +617,14 @@
 		f2fs_unlink(dir, dentry);
 	}
 
-	kfree(sd);
-
 	f2fs_balance_fs(sbi, true);
-	return err;
-out:
+	goto out_free_encrypted_link;
+
+out_handle_failed_inode:
 	handle_failed_inode(inode);
+out_free_encrypted_link:
+	if (disk_link.name != (unsigned char *)symname)
+		kfree(disk_link.name);
 	return err;
 }
 
@@ -619,8 +659,7 @@
 
 	alloc_nid_done(sbi, inode->i_ino);
 
-	d_instantiate(dentry, inode);
-	unlock_new_inode(inode);
+	d_instantiate_new(dentry, inode);
 
 	if (IS_DIRSYNC(dir))
 		f2fs_sync_fs(sbi->sb, 1);
@@ -671,8 +710,7 @@
 
 	alloc_nid_done(sbi, inode->i_ino);
 
-	d_instantiate(dentry, inode);
-	unlock_new_inode(inode);
+	d_instantiate_new(dentry, inode);
 
 	if (IS_DIRSYNC(dir))
 		f2fs_sync_fs(sbi->sb, 1);
@@ -746,10 +784,12 @@
 
 static int f2fs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
-	if (unlikely(f2fs_cp_error(F2FS_I_SB(dir))))
+	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
+
+	if (unlikely(f2fs_cp_error(sbi)))
 		return -EIO;
 
-	if (f2fs_encrypted_inode(dir)) {
+	if (f2fs_encrypted_inode(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) {
 		int err = fscrypt_get_encryption_info(dir);
 		if (err)
 			return err;
@@ -929,7 +969,8 @@
 			f2fs_put_page(old_dir_page, 0);
 		f2fs_i_links_write(old_dir, false);
 	}
-	add_ino_entry(sbi, new_dir->i_ino, TRANS_DIR_INO);
+	if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
+		add_ino_entry(sbi, new_dir->i_ino, TRANS_DIR_INO);
 
 	f2fs_unlock_op(sbi);
 
@@ -1079,8 +1120,10 @@
 	}
 	f2fs_mark_inode_dirty_sync(new_dir, false);
 
-	add_ino_entry(sbi, old_dir->i_ino, TRANS_DIR_INO);
-	add_ino_entry(sbi, new_dir->i_ino, TRANS_DIR_INO);
+	if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT) {
+		add_ino_entry(sbi, old_dir->i_ino, TRANS_DIR_INO);
+		add_ino_entry(sbi, new_dir->i_ino, TRANS_DIR_INO);
+	}
 
 	f2fs_unlock_op(sbi);
 
@@ -1132,68 +1175,20 @@
 					   struct inode *inode,
 					   struct delayed_call *done)
 {
-	struct page *cpage = NULL;
-	char *caddr, *paddr = NULL;
-	struct fscrypt_str cstr = FSTR_INIT(NULL, 0);
-	struct fscrypt_str pstr = FSTR_INIT(NULL, 0);
-	struct fscrypt_symlink_data *sd;
-	u32 max_size = inode->i_sb->s_blocksize;
-	int res;
+	struct page *page;
+	const char *target;
 
 	if (!dentry)
 		return ERR_PTR(-ECHILD);
 
-	res = fscrypt_get_encryption_info(inode);
-	if (res)
-		return ERR_PTR(res);
+	page = read_mapping_page(inode->i_mapping, 0, NULL);
+	if (IS_ERR(page))
+		return ERR_CAST(page);
 
-	cpage = read_mapping_page(inode->i_mapping, 0, NULL);
-	if (IS_ERR(cpage))
-		return ERR_CAST(cpage);
-	caddr = page_address(cpage);
-
-	/* Symlink is encrypted */
-	sd = (struct fscrypt_symlink_data *)caddr;
-	cstr.name = sd->encrypted_path;
-	cstr.len = le16_to_cpu(sd->len);
-
-	/* this is broken symlink case */
-	if (unlikely(cstr.len == 0)) {
-		res = -ENOENT;
-		goto errout;
-	}
-
-	if ((cstr.len + sizeof(struct fscrypt_symlink_data) - 1) > max_size) {
-		/* Symlink data on the disk is corrupted */
-		res = -EIO;
-		goto errout;
-	}
-	res = fscrypt_fname_alloc_buffer(inode, cstr.len, &pstr);
-	if (res)
-		goto errout;
-
-	res = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr);
-	if (res)
-		goto errout;
-
-	/* this is broken symlink case */
-	if (unlikely(pstr.name[0] == 0)) {
-		res = -ENOENT;
-		goto errout;
-	}
-
-	paddr = pstr.name;
-
-	/* Null-terminate the name */
-	paddr[pstr.len] = '\0';
-
-	put_page(cpage);
-	set_delayed_call(done, kfree_link, paddr);
-	return paddr;
-errout:
-	fscrypt_fname_free_buffer(&pstr);
-	put_page(cpage);
-	return ERR_PTR(res);
+	target = fscrypt_get_symlink(inode, page_address(page),
+				     inode->i_sb->s_blocksize, done);
+	put_page(page);
+	return target;
 }
 
 const struct inode_operations f2fs_encrypted_symlink_inode_operations = {
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 7cded84..803a010 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -193,8 +193,8 @@
 	__free_nat_entry(e);
 }
 
-static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
-						struct nat_entry *ne)
+static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
+							struct nat_entry *ne)
 {
 	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
 	struct nat_entry_set *head;
@@ -209,15 +209,36 @@
 		head->entry_cnt = 0;
 		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
 	}
+	return head;
+}
+
+static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
+						struct nat_entry *ne)
+{
+	struct nat_entry_set *head;
+	bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;
+
+	if (!new_ne)
+		head = __grab_nat_entry_set(nm_i, ne);
+
+	/*
+	 * update entry_cnt in the following conditions:
+	 * 1. update NEW_ADDR to valid block address;
+	 * 2. update old block address to new one;
+	 */
+	if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
+				!get_nat_flag(ne, IS_DIRTY)))
+		head->entry_cnt++;
+
+	set_nat_flag(ne, IS_PREALLOC, new_ne);
 
 	if (get_nat_flag(ne, IS_DIRTY))
 		goto refresh_list;
 
 	nm_i->dirty_nat_cnt++;
-	head->entry_cnt++;
 	set_nat_flag(ne, IS_DIRTY, true);
 refresh_list:
-	if (nat_get_blkaddr(ne) == NEW_ADDR)
+	if (new_ne)
 		list_del_init(&ne->list);
 	else
 		list_move_tail(&ne->list, &head->entry_list);
@@ -1076,7 +1097,7 @@
 
 	f2fs_wait_on_page_writeback(page, NODE, true);
 	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
-	set_cold_node(dn->inode, page);
+	set_cold_node(page, S_ISDIR(dn->inode->i_mode));
 	if (!PageUptodate(page))
 		SetPageUptodate(page);
 	if (set_page_dirty(page))
@@ -1377,6 +1398,7 @@
 		fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
 
 	set_page_writeback(page);
+	ClearPageError(page);
 	fio.old_blkaddr = ni.blk_addr;
 	write_node_page(nid, &fio);
 	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
@@ -1751,7 +1773,7 @@
 	if (!PageUptodate(page))
 		SetPageUptodate(page);
 	if (!PageDirty(page)) {
-		f2fs_set_page_dirty_nobuffers(page);
+		__set_page_dirty_nobuffers(page);
 		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
 		SetPagePrivate(page);
 		f2fs_trace_pid(page);
@@ -2310,6 +2332,7 @@
 	if (!PageUptodate(ipage))
 		SetPageUptodate(ipage);
 	fill_node_footer(ipage, ino, ino, 0, true);
+	set_cold_node(page, false);
 
 	src = F2FS_INODE(page);
 	dst = F2FS_INODE(ipage);
@@ -2599,8 +2622,7 @@
 	if (!enabled_nat_bits(sbi, NULL))
 		return 0;
 
-	nm_i->nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) + 8 +
-						F2FS_BLKSIZE - 1);
+	nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
 	nm_i->nat_bits = f2fs_kzalloc(sbi,
 			nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
 	if (!nm_i->nat_bits)
@@ -2726,12 +2748,20 @@
 static int init_free_nid_cache(struct f2fs_sb_info *sbi)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
+	int i;
 
-	nm_i->free_nid_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks *
-					NAT_ENTRY_BITMAP_SIZE, GFP_KERNEL);
+	nm_i->free_nid_bitmap = f2fs_kzalloc(sbi, nm_i->nat_blocks *
+				sizeof(unsigned char *), GFP_KERNEL);
 	if (!nm_i->free_nid_bitmap)
 		return -ENOMEM;
 
+	for (i = 0; i < nm_i->nat_blocks; i++) {
+		nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
+				NAT_ENTRY_BITMAP_SIZE_ALIGNED, GFP_KERNEL);
+		if (!nm_i->free_nid_bitmap[i])
+			return -ENOMEM;
+	}
+
 	nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
 								GFP_KERNEL);
 	if (!nm_i->nat_block_bitmap)
@@ -2822,7 +2852,13 @@
 	up_write(&nm_i->nat_tree_lock);
 
 	kvfree(nm_i->nat_block_bitmap);
-	kvfree(nm_i->free_nid_bitmap);
+	if (nm_i->free_nid_bitmap) {
+		int i;
+
+		for (i = 0; i < nm_i->nat_blocks; i++)
+			kvfree(nm_i->free_nid_bitmap[i]);
+		kfree(nm_i->free_nid_bitmap);
+	}
 	kvfree(nm_i->free_nid_count);
 
 	kfree(nm_i->nat_bitmap);
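
The init_free_nid_cache() change above replaces one large flat bitmap with
an array of small per-NAT-block bitmaps, so no single allocation has to
cover the whole NAT. A condensed user-space sketch of the allocate/tear-down
pattern follows; the sizes and names are illustrative stand-ins for
nat_blocks and NAT_ENTRY_BITMAP_SIZE_ALIGNED.

#include <stdio.h>
#include <stdlib.h>

#define NAT_BLOCKS		1024	/* illustrative */
#define BITMAP_BYTES_PER_BLOCK	64	/* illustrative */

/* allocate one small bitmap per NAT block instead of one huge array */
static unsigned char **alloc_free_nid_bitmap(unsigned int nat_blocks)
{
	unsigned char **bitmap = calloc(nat_blocks, sizeof(*bitmap));
	unsigned int i;

	if (!bitmap)
		return NULL;

	for (i = 0; i < nat_blocks; i++) {
		bitmap[i] = calloc(1, BITMAP_BYTES_PER_BLOCK);
		if (!bitmap[i])	/* check the element, not the outer pointer */
			goto free_all;
	}
	return bitmap;

free_all:
	while (i--)
		free(bitmap[i]);
	free(bitmap);
	return NULL;
}

static void free_free_nid_bitmap(unsigned char **bitmap, unsigned int nat_blocks)
{
	unsigned int i;

	if (!bitmap)
		return;
	for (i = 0; i < nat_blocks; i++)
		free(bitmap[i]);
	free(bitmap);
}

int main(void)
{
	unsigned char **bitmap = alloc_free_nid_bitmap(NAT_BLOCKS);

	if (!bitmap) {
		fprintf(stderr, "allocation failed\n");
		return 1;
	}
	bitmap[0][0] |= 1;	/* mark the first nid of NAT block 0 as free */
	printf("allocated %u bitmaps of %u bytes each\n",
	       NAT_BLOCKS, BITMAP_BYTES_PER_BLOCK);
	free_free_nid_bitmap(bitmap, NAT_BLOCKS);
	return 0;
}
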
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index 081ef0d..b95e49e 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -44,6 +44,7 @@
 	HAS_FSYNCED_INODE,	/* is the inode fsynced before? */
 	HAS_LAST_FSYNC,		/* has the latest node fsync mark? */
 	IS_DIRTY,		/* this nat entry is dirty? */
+	IS_PREALLOC,		/* nat entry is preallocated */
 };
 
 /*
@@ -422,12 +423,12 @@
 	ClearPageChecked(page);
 }
 
-static inline void set_cold_node(struct inode *inode, struct page *page)
+static inline void set_cold_node(struct page *page, bool is_dir)
 {
 	struct f2fs_node *rn = F2FS_NODE(page);
 	unsigned int flag = le32_to_cpu(rn->footer.flag);
 
-	if (S_ISDIR(inode->i_mode))
+	if (is_dir)
 		flag &= ~(0x1 << COLD_BIT_SHIFT);
 	else
 		flag |= (0x1 << COLD_BIT_SHIFT);
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 210de28..4ddc226 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -242,6 +242,9 @@
 	struct curseg_info *curseg;
 	struct page *page = NULL;
 	block_t blkaddr;
+	unsigned int loop_cnt = 0;
+	unsigned int free_blocks = sbi->user_block_count -
+					valid_user_blocks(sbi);
 	int err = 0;
 
 	/* get node pages in the current segment */
@@ -294,6 +297,17 @@
 		if (IS_INODE(page) && is_dent_dnode(page))
 			entry->last_dentry = blkaddr;
 next:
+		/* sanity check in order to detect looped node chain */
+		if (++loop_cnt >= free_blocks ||
+			blkaddr == next_blkaddr_of_node(page)) {
+			f2fs_msg(sbi->sb, KERN_NOTICE,
+				"%s: detect looped node chain, "
+				"blkaddr:%u, next:%u",
+				__func__, blkaddr, next_blkaddr_of_node(page));
+			err = -EINVAL;
+			break;
+		}
+
 		/* check next segment */
 		blkaddr = next_blkaddr_of_node(page);
 		f2fs_put_page(page, 1);
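
The recovery-time node-chain walk above is hardened against a corrupted,
looped chain by bounding the number of steps with the current free block
count and by rejecting a block whose next pointer refers back to itself.
The same bounded-walk idea in a stand-alone sketch; the list layout and the
limit are illustrative.

#include <stdbool.h>
#include <stdio.h>

struct node {
	unsigned int blkaddr;
	struct node *next;	/* stands in for next_blkaddr_of_node() */
};

/*
 * Walk the chain, but never take more steps than there are blocks that
 * could legitimately belong to it; a longer walk implies a loop.
 */
static bool chain_is_looped(struct node *head, unsigned int free_blocks)
{
	unsigned int loop_cnt = 0;
	struct node *cur;

	for (cur = head; cur; cur = cur->next) {
		if (++loop_cnt >= free_blocks ||
		    (cur->next && cur->next->blkaddr == cur->blkaddr))
			return true;	/* looped or self-referencing chain */
	}
	return false;
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

	printf("straight chain looped: %d\n", chain_is_looped(&a, 16));

	c.next = &a;	/* introduce a cycle */
	printf("cyclic chain looped:   %d\n", chain_is_looped(&a, 16));
	return 0;
}
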
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index cc34d88..98fe1ed 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -915,6 +915,39 @@
 #endif
 }
 
+static void __init_discard_policy(struct f2fs_sb_info *sbi,
+				struct discard_policy *dpolicy,
+				int discard_type, unsigned int granularity)
+{
+	/* common policy */
+	dpolicy->type = discard_type;
+	dpolicy->sync = true;
+	dpolicy->granularity = granularity;
+
+	dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
+	dpolicy->io_aware_gran = MAX_PLIST_NUM;
+
+	if (discard_type == DPOLICY_BG) {
+		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
+		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
+		dpolicy->io_aware = true;
+		dpolicy->sync = false;
+		if (utilization(sbi) > DEF_DISCARD_URGENT_UTIL) {
+			dpolicy->granularity = 1;
+			dpolicy->max_interval = DEF_MIN_DISCARD_ISSUE_TIME;
+		}
+	} else if (discard_type == DPOLICY_FORCE) {
+		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
+		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
+		dpolicy->io_aware = false;
+	} else if (discard_type == DPOLICY_FSTRIM) {
+		dpolicy->io_aware = false;
+	} else if (discard_type == DPOLICY_UMOUNT) {
+		dpolicy->io_aware = false;
+	}
+}
+
+
 /* this function is copied from blkdev_issue_discard from block/blk-lib.c */
 static void __submit_discard_cmd(struct f2fs_sb_info *sbi,
 						struct discard_policy *dpolicy,
@@ -1130,68 +1163,6 @@
 	return 0;
 }
 
-static void __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
-					struct discard_policy *dpolicy,
-					unsigned int start, unsigned int end)
-{
-	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
-	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
-	struct rb_node **insert_p = NULL, *insert_parent = NULL;
-	struct discard_cmd *dc;
-	struct blk_plug plug;
-	int issued;
-
-next:
-	issued = 0;
-
-	mutex_lock(&dcc->cmd_lock);
-	f2fs_bug_on(sbi, !__check_rb_tree_consistence(sbi, &dcc->root));
-
-	dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root,
-					NULL, start,
-					(struct rb_entry **)&prev_dc,
-					(struct rb_entry **)&next_dc,
-					&insert_p, &insert_parent, true);
-	if (!dc)
-		dc = next_dc;
-
-	blk_start_plug(&plug);
-
-	while (dc && dc->lstart <= end) {
-		struct rb_node *node;
-
-		if (dc->len < dpolicy->granularity)
-			goto skip;
-
-		if (dc->state != D_PREP) {
-			list_move_tail(&dc->list, &dcc->fstrim_list);
-			goto skip;
-		}
-
-		__submit_discard_cmd(sbi, dpolicy, dc);
-
-		if (++issued >= dpolicy->max_requests) {
-			start = dc->lstart + dc->len;
-
-			blk_finish_plug(&plug);
-			mutex_unlock(&dcc->cmd_lock);
-
-			schedule();
-
-			goto next;
-		}
-skip:
-		node = rb_next(&dc->rb_node);
-		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
-
-		if (fatal_signal_pending(current))
-			break;
-	}
-
-	blk_finish_plug(&plug);
-	mutex_unlock(&dcc->cmd_lock);
-}
-
 static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
 					struct discard_policy *dpolicy)
 {
@@ -1332,7 +1303,18 @@
 static void __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
 						struct discard_policy *dpolicy)
 {
-	__wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
+	struct discard_policy dp;
+
+	if (dpolicy) {
+		__wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
+		return;
+	}
+
+	/* wait all */
+	__init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, 1);
+	__wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
+	__init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, 1);
+	__wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
 }
 
 /* This should be covered by global mutex, &sit_i->sentry_lock */
@@ -1377,11 +1359,13 @@
 	struct discard_policy dpolicy;
 	bool dropped;
 
-	init_discard_policy(&dpolicy, DPOLICY_UMOUNT, dcc->discard_granularity);
+	__init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
+					dcc->discard_granularity);
 	__issue_discard_cmd(sbi, &dpolicy);
 	dropped = __drop_discard_cmd(sbi);
-	__wait_all_discard_cmd(sbi, &dpolicy);
 
+	/* just to make sure there are no pending discard commands */
+	__wait_all_discard_cmd(sbi, NULL);
 	return dropped;
 }
 
@@ -1397,7 +1381,7 @@
 	set_freezable();
 
 	do {
-		init_discard_policy(&dpolicy, DPOLICY_BG,
+		__init_discard_policy(sbi, &dpolicy, DPOLICY_BG,
 					dcc->discard_granularity);
 
 		wait_event_interruptible_timeout(*q,
@@ -1411,12 +1395,11 @@
 		if (kthread_should_stop())
 			return 0;
 
-		if (dcc->discard_wake) {
+		if (dcc->discard_wake)
 			dcc->discard_wake = 0;
-			if (sbi->gc_thread && sbi->gc_thread->gc_urgent)
-				init_discard_policy(&dpolicy,
-							DPOLICY_FORCE, 1);
-		}
+
+		if (sbi->gc_thread && sbi->gc_thread->gc_urgent)
+			__init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, 1);
 
 		sb_start_intwrite(sbi->sb);
 
@@ -1485,7 +1468,7 @@
 		struct block_device *bdev, block_t blkstart, block_t blklen)
 {
 #ifdef CONFIG_BLK_DEV_ZONED
-	if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
+	if (f2fs_sb_has_blkzoned(sbi->sb) &&
 				bdev_zoned_model(bdev) != BLK_ZONED_NONE)
 		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
 #endif
@@ -1683,7 +1666,7 @@
 					sbi->blocks_per_seg, cur_pos);
 			len = next_pos - cur_pos;
 
-			if (f2fs_sb_mounted_blkzoned(sbi->sb) ||
+			if (f2fs_sb_has_blkzoned(sbi->sb) ||
 			    (force && len < cpc->trim_minlen))
 				goto skip;
 
@@ -1709,32 +1692,6 @@
 	wake_up_discard_thread(sbi, false);
 }
 
-void init_discard_policy(struct discard_policy *dpolicy,
-				int discard_type, unsigned int granularity)
-{
-	/* common policy */
-	dpolicy->type = discard_type;
-	dpolicy->sync = true;
-	dpolicy->granularity = granularity;
-
-	dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
-	dpolicy->io_aware_gran = MAX_PLIST_NUM;
-
-	if (discard_type == DPOLICY_BG) {
-		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
-		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
-		dpolicy->io_aware = true;
-	} else if (discard_type == DPOLICY_FORCE) {
-		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
-		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
-		dpolicy->io_aware = true;
-	} else if (discard_type == DPOLICY_FSTRIM) {
-		dpolicy->io_aware = false;
-	} else if (discard_type == DPOLICY_UMOUNT) {
-		dpolicy->io_aware = false;
-	}
-}
-
 static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
 {
 	dev_t dev = sbi->sb->s_bdev->bd_dev;
@@ -1863,7 +1820,7 @@
 			sbi->discard_blks--;
 
 		/* don't overwrite by SSR to keep node chain */
-		if (se->type == CURSEG_WARM_NODE) {
+		if (IS_NODESEG(se->type)) {
 			if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
 				se->ckpt_valid_blocks++;
 		}
@@ -2164,11 +2121,17 @@
 	if (sbi->segs_per_sec != 1)
 		return CURSEG_I(sbi, type)->segno;
 
-	if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
+	if (test_opt(sbi, NOHEAP) &&
+		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
 		return 0;
 
 	if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
 		return SIT_I(sbi)->last_victim[ALLOC_NEXT];
+
+	/* find segments from 0 to reuse freed segments */
+	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
+		return 0;
+
 	return CURSEG_I(sbi, type)->segno;
 }
 
@@ -2368,11 +2331,72 @@
 	return has_candidate;
 }
 
+static void __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
+					struct discard_policy *dpolicy,
+					unsigned int start, unsigned int end)
+{
+	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
+	struct rb_node **insert_p = NULL, *insert_parent = NULL;
+	struct discard_cmd *dc;
+	struct blk_plug plug;
+	int issued;
+
+next:
+	issued = 0;
+
+	mutex_lock(&dcc->cmd_lock);
+	f2fs_bug_on(sbi, !__check_rb_tree_consistence(sbi, &dcc->root));
+
+	dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root,
+					NULL, start,
+					(struct rb_entry **)&prev_dc,
+					(struct rb_entry **)&next_dc,
+					&insert_p, &insert_parent, true);
+	if (!dc)
+		dc = next_dc;
+
+	blk_start_plug(&plug);
+
+	while (dc && dc->lstart <= end) {
+		struct rb_node *node;
+
+		if (dc->len < dpolicy->granularity)
+			goto skip;
+
+		if (dc->state != D_PREP) {
+			list_move_tail(&dc->list, &dcc->fstrim_list);
+			goto skip;
+		}
+
+		__submit_discard_cmd(sbi, dpolicy, dc);
+
+		if (++issued >= dpolicy->max_requests) {
+			start = dc->lstart + dc->len;
+
+			blk_finish_plug(&plug);
+			mutex_unlock(&dcc->cmd_lock);
+			__wait_all_discard_cmd(sbi, NULL);
+			congestion_wait(BLK_RW_ASYNC, HZ/50);
+			goto next;
+		}
+skip:
+		node = rb_next(&dc->rb_node);
+		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
+
+		if (fatal_signal_pending(current))
+			break;
+	}
+
+	blk_finish_plug(&plug);
+	mutex_unlock(&dcc->cmd_lock);
+}
+
 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
 {
 	__u64 start = F2FS_BYTES_TO_BLK(range->start);
 	__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
-	unsigned int start_segno, end_segno, cur_segno;
+	unsigned int start_segno, end_segno;
 	block_t start_block, end_block;
 	struct cp_control cpc;
 	struct discard_policy dpolicy;
@@ -2398,40 +2422,27 @@
 
 	cpc.reason = CP_DISCARD;
 	cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
+	cpc.trim_start = start_segno;
+	cpc.trim_end = end_segno;
 
-	/* do checkpoint to issue discard commands safely */
-	for (cur_segno = start_segno; cur_segno <= end_segno;
-					cur_segno = cpc.trim_end + 1) {
-		cpc.trim_start = cur_segno;
+	if (sbi->discard_blks == 0)
+		goto out;
 
-		if (sbi->discard_blks == 0)
-			break;
-		else if (sbi->discard_blks < BATCHED_TRIM_BLOCKS(sbi))
-			cpc.trim_end = end_segno;
-		else
-			cpc.trim_end = min_t(unsigned int,
-				rounddown(cur_segno +
-				BATCHED_TRIM_SEGMENTS(sbi),
-				sbi->segs_per_sec) - 1, end_segno);
-
-		mutex_lock(&sbi->gc_mutex);
-		err = write_checkpoint(sbi, &cpc);
-		mutex_unlock(&sbi->gc_mutex);
-		if (err)
-			break;
-
-		schedule();
-	}
+	mutex_lock(&sbi->gc_mutex);
+	err = write_checkpoint(sbi, &cpc);
+	mutex_unlock(&sbi->gc_mutex);
+	if (err)
+		goto out;
 
 	start_block = START_BLOCK(sbi, start_segno);
-	end_block = START_BLOCK(sbi, min(cur_segno, end_segno) + 1);
+	end_block = START_BLOCK(sbi, end_segno + 1);
 
-	init_discard_policy(&dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
+	__init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
 	__issue_discard_cmd_range(sbi, &dpolicy, start_block, end_block);
 	trimmed = __wait_discard_cmd_range(sbi, &dpolicy,
 					start_block, end_block);
-out:
 	range->len = F2FS_BLK_TO_BYTES(trimmed);
+out:
 	return err;
 }
 
@@ -2455,6 +2466,101 @@
 	}
 }
 
+/* This returns write hints for each segment type. These hints will be
+ * passed down to block layer. There are mapping tables which depend on
+ * the mount option 'whint_mode'.
+ *
+ * 1) whint_mode=off. F2FS only passes down WRITE_LIFE_NOT_SET.
+ *
+ * 2) whint_mode=user-based. F2FS tries to pass down hints given by users.
+ *
+ * User                  F2FS                     Block
+ * ----                  ----                     -----
+ *                       META                     WRITE_LIFE_NOT_SET
+ *                       HOT_NODE                 "
+ *                       WARM_NODE                "
+ *                       COLD_NODE                "
+ * ioctl(COLD)           COLD_DATA                WRITE_LIFE_EXTREME
+ * extension list        "                        "
+ *
+ * -- buffered io
+ * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
+ * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
+ * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_NOT_SET
+ * WRITE_LIFE_NONE       "                        "
+ * WRITE_LIFE_MEDIUM     "                        "
+ * WRITE_LIFE_LONG       "                        "
+ *
+ * -- direct io
+ * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
+ * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
+ * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_NOT_SET
+ * WRITE_LIFE_NONE       "                        WRITE_LIFE_NONE
+ * WRITE_LIFE_MEDIUM     "                        WRITE_LIFE_MEDIUM
+ * WRITE_LIFE_LONG       "                        WRITE_LIFE_LONG
+ *
+ * 3) whint_mode=fs-based. F2FS passes down hints with its policy.
+ *
+ * User                  F2FS                     Block
+ * ----                  ----                     -----
+ *                       META                     WRITE_LIFE_MEDIUM
+ *                       HOT_NODE                 WRITE_LIFE_NOT_SET
+ *                       WARM_NODE                "
+ *                       COLD_NODE                WRITE_LIFE_NONE
+ * ioctl(COLD)           COLD_DATA                WRITE_LIFE_EXTREME
+ * extension list        "                        "
+ *
+ * -- buffered io
+ * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
+ * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
+ * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_LONG
+ * WRITE_LIFE_NONE       "                        "
+ * WRITE_LIFE_MEDIUM     "                        "
+ * WRITE_LIFE_LONG       "                        "
+ *
+ * -- direct io
+ * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
+ * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
+ * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_NOT_SET
+ * WRITE_LIFE_NONE       "                        WRITE_LIFE_NONE
+ * WRITE_LIFE_MEDIUM     "                        WRITE_LIFE_MEDIUM
+ * WRITE_LIFE_LONG       "                        WRITE_LIFE_LONG
+ */
+
+enum rw_hint io_type_to_rw_hint(struct f2fs_sb_info *sbi,
+				enum page_type type, enum temp_type temp)
+{
+	if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER) {
+		if (type == DATA) {
+			if (temp == WARM)
+				return WRITE_LIFE_NOT_SET;
+			else if (temp == HOT)
+				return WRITE_LIFE_SHORT;
+			else if (temp == COLD)
+				return WRITE_LIFE_EXTREME;
+		} else {
+			return WRITE_LIFE_NOT_SET;
+		}
+	} else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS) {
+		if (type == DATA) {
+			if (temp == WARM)
+				return WRITE_LIFE_LONG;
+			else if (temp == HOT)
+				return WRITE_LIFE_SHORT;
+			else if (temp == COLD)
+				return WRITE_LIFE_EXTREME;
+		} else if (type == NODE) {
+			if (temp == WARM || temp == HOT)
+				return WRITE_LIFE_NOT_SET;
+			else if (temp == COLD)
+				return WRITE_LIFE_NONE;
+		} else if (type == META) {
+			return WRITE_LIFE_MEDIUM;
+		}
+	}
+	return WRITE_LIFE_NOT_SET;
+}
+
 static int __get_segment_type_2(struct f2fs_io_info *fio)
 {
 	if (fio->type == DATA)
@@ -2487,7 +2593,8 @@
 
 		if (is_cold_data(fio->page) || file_is_cold(inode))
 			return CURSEG_COLD_DATA;
-		if (is_inode_flag_set(inode, FI_HOT_DATA))
+		if (file_is_hot(inode) ||
+				is_inode_flag_set(inode, FI_HOT_DATA))
 			return CURSEG_HOT_DATA;
 		/* rw_hint_to_seg_type(inode->i_write_hint); */
 		return CURSEG_WARM_DATA;
@@ -2503,7 +2610,7 @@
 {
 	int type = 0;
 
-	switch (fio->sbi->active_logs) {
+	switch (F2FS_OPTION(fio->sbi).active_logs) {
 	case 2:
 		type = __get_segment_type_2(fio);
 		break;
@@ -2643,6 +2750,7 @@
 	struct f2fs_io_info fio = {
 		.sbi = sbi,
 		.type = META,
+		.temp = HOT,
 		.op = REQ_OP_WRITE,
 		.op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
 		.old_blkaddr = page->index,
@@ -2656,6 +2764,7 @@
 		fio.op_flags &= ~REQ_META;
 
 	set_page_writeback(page);
+	ClearPageError(page);
 	f2fs_submit_page_write(&fio);
 
 	f2fs_update_iostat(sbi, io_type, F2FS_BLKSIZE);
@@ -2689,8 +2798,15 @@
 int rewrite_data_page(struct f2fs_io_info *fio)
 {
 	int err;
+	struct f2fs_sb_info *sbi = fio->sbi;
 
 	fio->new_blkaddr = fio->old_blkaddr;
+	/* i/o temperature is needed for passing down write hints */
+	__get_segment_type(fio);
+
+	f2fs_bug_on(sbi, !IS_DATASEG(get_seg_entry(sbi,
+			GET_SEGNO(sbi, fio->new_blkaddr))->type));
+
 	stat_inc_inplace_blocks(fio->sbi);
 
 	err = f2fs_submit_page_bio(fio);
@@ -3714,8 +3830,6 @@
 	sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
 	sm_info->min_ssr_sections = reserved_sections(sbi);
 
-	sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;
-
 	INIT_LIST_HEAD(&sm_info->sit_entry_set);
 
 	init_rwsem(&sm_info->curseg_lock);
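
The whint_mode tables and io_type_to_rw_hint() above only take effect when a
lifetime hint reaches f2fs, either from its own policy (fs-based) or from the
application (user-based). A user-space sketch that tags a file with a
write-life hint via fcntl() follows; the fallback constants mirror the
rw_hint enum removed from f2fs.h earlier, and older kernels simply fail the
fcntl() call.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* uapi values; defined here in case the libc headers predate them */
#ifndef F_SET_RW_HINT
#define F_SET_RW_HINT		1036	/* F_LINUX_SPECIFIC_BASE + 12 */
#endif
#ifndef RWH_WRITE_LIFE_EXTREME
#define RWH_WRITE_LIFE_EXTREME	5	/* matches WRITE_LIFE_EXTREME above */
#endif

int main(void)
{
	uint64_t hint = RWH_WRITE_LIFE_EXTREME;
	int fd = open("coldfile", O_WRONLY | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/*
	 * Per the user-based mapping table in segment.c, this hint lines up
	 * with COLD_DATA and is forwarded to the block layer unchanged when
	 * the filesystem is mounted with whint_mode=user-based.
	 */
	if (fcntl(fd, F_SET_RW_HINT, &hint) < 0)
		perror("F_SET_RW_HINT");	/* e.g. EINVAL on an older kernel */

	if (write(fd, "cold data\n", 10) != 10)
		perror("write");

	close(fd);
	return 0;
}
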
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index f11c4bc..3325d07 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -53,13 +53,19 @@
 	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno /		\
 	  (sbi)->segs_per_sec))	\
 
-#define MAIN_BLKADDR(sbi)	(SM_I(sbi)->main_blkaddr)
-#define SEG0_BLKADDR(sbi)	(SM_I(sbi)->seg0_blkaddr)
+#define MAIN_BLKADDR(sbi)						\
+	(SM_I(sbi) ? SM_I(sbi)->main_blkaddr : 				\
+		le32_to_cpu(F2FS_RAW_SUPER(sbi)->main_blkaddr))
+#define SEG0_BLKADDR(sbi)						\
+	(SM_I(sbi) ? SM_I(sbi)->seg0_blkaddr : 				\
+		le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment0_blkaddr))
 
 #define MAIN_SEGS(sbi)	(SM_I(sbi)->main_segments)
 #define MAIN_SECS(sbi)	((sbi)->total_sections)
 
-#define TOTAL_SEGS(sbi)	(SM_I(sbi)->segment_count)
+#define TOTAL_SEGS(sbi)							\
+	(SM_I(sbi) ? SM_I(sbi)->segment_count : 				\
+		le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count))
 #define TOTAL_BLKS(sbi)	(TOTAL_SEGS(sbi) << (sbi)->log_blocks_per_seg)
 
 #define MAX_BLKADDR(sbi)	(SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
@@ -596,6 +602,8 @@
 #define DEF_MIN_FSYNC_BLOCKS	8
 #define DEF_MIN_HOT_BLOCKS	16
 
+#define SMALL_VOLUME_SEGMENTS	(16 * 512)	/* 16GB */
+
 enum {
 	F2FS_IPU_FORCE,
 	F2FS_IPU_SSR,
@@ -630,10 +638,17 @@
 	f2fs_bug_on(sbi, segno > TOTAL_SEGS(sbi) - 1);
 }
 
-static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
+static inline void verify_block_addr(struct f2fs_io_info *fio, block_t blk_addr)
 {
-	BUG_ON(blk_addr < SEG0_BLKADDR(sbi)
-			|| blk_addr >= MAX_BLKADDR(sbi));
+	struct f2fs_sb_info *sbi = fio->sbi;
+
+	if (PAGE_TYPE_OF_BIO(fio->type) == META &&
+				(!is_read_io(fio->op) || fio->is_meta))
+		BUG_ON(blk_addr < SEG0_BLKADDR(sbi) ||
+				blk_addr >= MAIN_BLKADDR(sbi));
+	else
+		BUG_ON(blk_addr < MAIN_BLKADDR(sbi) ||
+				blk_addr >= MAX_BLKADDR(sbi));
 }
 
 /*
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 78b763c..a7711d7 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -60,7 +60,7 @@
 static void f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
 						unsigned int rate)
 {
-	struct f2fs_fault_info *ffi = &sbi->fault_info;
+	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
 
 	if (rate) {
 		atomic_set(&ffi->inject_ops, 0);
@@ -129,6 +129,10 @@
 	Opt_jqfmt_vfsold,
 	Opt_jqfmt_vfsv0,
 	Opt_jqfmt_vfsv1,
+	Opt_whint,
+	Opt_alloc,
+	Opt_fsync,
+	Opt_test_dummy_encryption,
 	Opt_err,
 };
 
@@ -182,6 +186,10 @@
 	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
 	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
 	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
+	{Opt_whint, "whint_mode=%s"},
+	{Opt_alloc, "alloc_mode=%s"},
+	{Opt_fsync, "fsync_mode=%s"},
+	{Opt_test_dummy_encryption, "test_dummy_encryption"},
 	{Opt_err, NULL},
 };
 
@@ -202,21 +210,24 @@
 	block_t limit = (sbi->user_block_count << 1) / 1000;
 
 	/* limit is 0.2% */
-	if (test_opt(sbi, RESERVE_ROOT) && sbi->root_reserved_blocks > limit) {
-		sbi->root_reserved_blocks = limit;
+	if (test_opt(sbi, RESERVE_ROOT) &&
+			F2FS_OPTION(sbi).root_reserved_blocks > limit) {
+		F2FS_OPTION(sbi).root_reserved_blocks = limit;
 		f2fs_msg(sbi->sb, KERN_INFO,
 			"Reduce reserved blocks for root = %u",
-				sbi->root_reserved_blocks);
+			F2FS_OPTION(sbi).root_reserved_blocks);
 	}
 	if (!test_opt(sbi, RESERVE_ROOT) &&
-		(!uid_eq(sbi->s_resuid,
+		(!uid_eq(F2FS_OPTION(sbi).s_resuid,
 				make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
-		!gid_eq(sbi->s_resgid,
+		!gid_eq(F2FS_OPTION(sbi).s_resgid,
 				make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
 		f2fs_msg(sbi->sb, KERN_INFO,
 			"Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
-				from_kuid_munged(&init_user_ns, sbi->s_resuid),
-				from_kgid_munged(&init_user_ns, sbi->s_resgid));
+				from_kuid_munged(&init_user_ns,
+					F2FS_OPTION(sbi).s_resuid),
+				from_kgid_munged(&init_user_ns,
+					F2FS_OPTION(sbi).s_resgid));
 }
 
 static void init_once(void *foo)
@@ -236,7 +247,7 @@
 	char *qname;
 	int ret = -EINVAL;
 
-	if (sb_any_quota_loaded(sb) && !sbi->s_qf_names[qtype]) {
+	if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
 		f2fs_msg(sb, KERN_ERR,
 			"Cannot change journaled "
 			"quota options when quota turned on");
@@ -254,8 +265,8 @@
 			"Not enough memory for storing quotafile name");
 		return -EINVAL;
 	}
-	if (sbi->s_qf_names[qtype]) {
-		if (strcmp(sbi->s_qf_names[qtype], qname) == 0)
+	if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
+		if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
 			ret = 0;
 		else
 			f2fs_msg(sb, KERN_ERR,
@@ -268,7 +279,7 @@
 			"quotafile must be on filesystem root");
 		goto errout;
 	}
-	sbi->s_qf_names[qtype] = qname;
+	F2FS_OPTION(sbi).s_qf_names[qtype] = qname;
 	set_opt(sbi, QUOTA);
 	return 0;
 errout:
@@ -280,13 +291,13 @@
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
 
-	if (sb_any_quota_loaded(sb) && sbi->s_qf_names[qtype]) {
+	if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
 		f2fs_msg(sb, KERN_ERR, "Cannot change journaled quota options"
 			" when quota turned on");
 		return -EINVAL;
 	}
-	kfree(sbi->s_qf_names[qtype]);
-	sbi->s_qf_names[qtype] = NULL;
+	kfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
+	F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
 	return 0;
 }
 
@@ -302,15 +313,19 @@
 			 "Cannot enable project quota enforcement.");
 		return -1;
 	}
-	if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA] ||
-			sbi->s_qf_names[PRJQUOTA]) {
-		if (test_opt(sbi, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
+	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
+			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
+			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) {
+		if (test_opt(sbi, USRQUOTA) &&
+				F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
 			clear_opt(sbi, USRQUOTA);
 
-		if (test_opt(sbi, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
+		if (test_opt(sbi, GRPQUOTA) &&
+				F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
 			clear_opt(sbi, GRPQUOTA);
 
-		if (test_opt(sbi, PRJQUOTA) && sbi->s_qf_names[PRJQUOTA])
+		if (test_opt(sbi, PRJQUOTA) &&
+				F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
 			clear_opt(sbi, PRJQUOTA);
 
 		if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
@@ -320,19 +335,19 @@
 			return -1;
 		}
 
-		if (!sbi->s_jquota_fmt) {
+		if (!F2FS_OPTION(sbi).s_jquota_fmt) {
 			f2fs_msg(sbi->sb, KERN_ERR, "journaled quota format "
 					"not specified");
 			return -1;
 		}
 	}
 
-	if (f2fs_sb_has_quota_ino(sbi->sb) && sbi->s_jquota_fmt) {
+	if (f2fs_sb_has_quota_ino(sbi->sb) && F2FS_OPTION(sbi).s_jquota_fmt) {
 		f2fs_msg(sbi->sb, KERN_INFO,
 			"QUOTA feature is enabled, so ignore jquota_fmt");
-		sbi->s_jquota_fmt = 0;
+		F2FS_OPTION(sbi).s_jquota_fmt = 0;
 	}
-	if (f2fs_sb_has_quota_ino(sbi->sb) && sb_rdonly(sbi->sb)) {
+	if (f2fs_sb_has_quota_ino(sbi->sb) && f2fs_readonly(sbi->sb)) {
 		f2fs_msg(sbi->sb, KERN_INFO,
 			 "Filesystem with quota feature cannot be mounted RDWR "
 			 "without CONFIG_QUOTA");
@@ -403,14 +418,14 @@
 			q = bdev_get_queue(sb->s_bdev);
 			if (blk_queue_discard(q)) {
 				set_opt(sbi, DISCARD);
-			} else if (!f2fs_sb_mounted_blkzoned(sb)) {
+			} else if (!f2fs_sb_has_blkzoned(sb)) {
 				f2fs_msg(sb, KERN_WARNING,
 					"mounting with \"discard\" option, but "
 					"the device does not support discard");
 			}
 			break;
 		case Opt_nodiscard:
-			if (f2fs_sb_mounted_blkzoned(sb)) {
+			if (f2fs_sb_has_blkzoned(sb)) {
 				f2fs_msg(sb, KERN_WARNING,
 					"discard is required for zoned block devices");
 				return -EINVAL;
@@ -440,7 +455,7 @@
 			if (args->from && match_int(args, &arg))
 				return -EINVAL;
 			set_opt(sbi, INLINE_XATTR_SIZE);
-			sbi->inline_xattr_size = arg;
+			F2FS_OPTION(sbi).inline_xattr_size = arg;
 			break;
 #else
 		case Opt_user_xattr:
@@ -480,7 +495,7 @@
 				return -EINVAL;
 			if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
 				return -EINVAL;
-			sbi->active_logs = arg;
+			F2FS_OPTION(sbi).active_logs = arg;
 			break;
 		case Opt_disable_ext_identify:
 			set_opt(sbi, DISABLE_EXT_IDENTIFY);
@@ -524,9 +539,9 @@
 			if (test_opt(sbi, RESERVE_ROOT)) {
 				f2fs_msg(sb, KERN_INFO,
 					"Preserve previous reserve_root=%u",
-					sbi->root_reserved_blocks);
+					F2FS_OPTION(sbi).root_reserved_blocks);
 			} else {
-				sbi->root_reserved_blocks = arg;
+				F2FS_OPTION(sbi).root_reserved_blocks = arg;
 				set_opt(sbi, RESERVE_ROOT);
 			}
 			break;
@@ -539,7 +554,7 @@
 					"Invalid uid value %d", arg);
 				return -EINVAL;
 			}
-			sbi->s_resuid = uid;
+			F2FS_OPTION(sbi).s_resuid = uid;
 			break;
 		case Opt_resgid:
 			if (args->from && match_int(args, &arg))
@@ -550,7 +565,7 @@
 					"Invalid gid value %d", arg);
 				return -EINVAL;
 			}
-			sbi->s_resgid = gid;
+			F2FS_OPTION(sbi).s_resgid = gid;
 			break;
 		case Opt_mode:
 			name = match_strdup(&args[0]);
@@ -559,7 +574,7 @@
 				return -ENOMEM;
 			if (strlen(name) == 8 &&
 					!strncmp(name, "adaptive", 8)) {
-				if (f2fs_sb_mounted_blkzoned(sb)) {
+				if (f2fs_sb_has_blkzoned(sb)) {
 					f2fs_msg(sb, KERN_WARNING,
 						 "adaptive mode is not allowed with "
 						 "zoned block device feature");
@@ -585,7 +600,7 @@
 					1 << arg, BIO_MAX_PAGES);
 				return -EINVAL;
 			}
-			sbi->write_io_size_bits = arg;
+			F2FS_OPTION(sbi).write_io_size_bits = arg;
 			break;
 		case Opt_fault_injection:
 			if (args->from && match_int(args, &arg))
@@ -646,13 +661,13 @@
 				return ret;
 			break;
 		case Opt_jqfmt_vfsold:
-			sbi->s_jquota_fmt = QFMT_VFS_OLD;
+			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD;
 			break;
 		case Opt_jqfmt_vfsv0:
-			sbi->s_jquota_fmt = QFMT_VFS_V0;
+			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0;
 			break;
 		case Opt_jqfmt_vfsv1:
-			sbi->s_jquota_fmt = QFMT_VFS_V1;
+			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1;
 			break;
 		case Opt_noquota:
 			clear_opt(sbi, QUOTA);
@@ -679,6 +694,77 @@
 					"quota operations not supported");
 			break;
 #endif
+		case Opt_whint:
+			name = match_strdup(&args[0]);
+			if (!name)
+				return -ENOMEM;
+			if (strlen(name) == 10 &&
+					!strncmp(name, "user-based", 10)) {
+				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_USER;
+			} else if (strlen(name) == 3 &&
+					!strncmp(name, "off", 3)) {
+				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
+			} else if (strlen(name) == 8 &&
+					!strncmp(name, "fs-based", 8)) {
+				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS;
+			} else {
+				kfree(name);
+				return -EINVAL;
+			}
+			kfree(name);
+			break;
+		case Opt_alloc:
+			name = match_strdup(&args[0]);
+			if (!name)
+				return -ENOMEM;
+
+			if (strlen(name) == 7 &&
+					!strncmp(name, "default", 7)) {
+				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
+			} else if (strlen(name) == 5 &&
+					!strncmp(name, "reuse", 5)) {
+				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
+			} else {
+				kfree(name);
+				return -EINVAL;
+			}
+			kfree(name);
+			break;
+		case Opt_fsync:
+			name = match_strdup(&args[0]);
+			if (!name)
+				return -ENOMEM;
+			if (strlen(name) == 5 &&
+					!strncmp(name, "posix", 5)) {
+				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
+			} else if (strlen(name) == 6 &&
+					!strncmp(name, "strict", 6)) {
+				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
+			} else if (strlen(name) == 9 &&
+					!strncmp(name, "nobarrier", 9)) {
+				F2FS_OPTION(sbi).fsync_mode =
+							FSYNC_MODE_NOBARRIER;
+			} else {
+				kfree(name);
+				return -EINVAL;
+			}
+			kfree(name);
+			break;
+		case Opt_test_dummy_encryption:
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+			if (!f2fs_sb_has_encrypt(sb)) {
+				f2fs_msg(sb, KERN_ERR, "Encrypt feature is off");
+				return -EINVAL;
+			}
+
+			F2FS_OPTION(sbi).test_dummy_encryption = true;
+			f2fs_msg(sb, KERN_INFO,
+					"Test dummy encryption mode enabled");
+#else
+			f2fs_msg(sb, KERN_INFO,
+					"Test dummy encryption mount option ignored");
+#endif
+			break;
 		default:
 			f2fs_msg(sb, KERN_ERR,
 				"Unrecognized mount option \"%s\" or missing value",
@@ -699,14 +785,22 @@
 	}
 
 	if (test_opt(sbi, INLINE_XATTR_SIZE)) {
+		if (!f2fs_sb_has_extra_attr(sb) ||
+			!f2fs_sb_has_flexible_inline_xattr(sb)) {
+			f2fs_msg(sb, KERN_ERR,
+					"extra_attr or flexible_inline_xattr "
+					"feature is off");
+			return -EINVAL;
+		}
 		if (!test_opt(sbi, INLINE_XATTR)) {
 			f2fs_msg(sb, KERN_ERR,
 					"inline_xattr_size option should be "
 					"set with inline_xattr option");
 			return -EINVAL;
 		}
-		if (!sbi->inline_xattr_size ||
-			sbi->inline_xattr_size >= DEF_ADDRS_PER_INODE -
+		if (!F2FS_OPTION(sbi).inline_xattr_size ||
+			F2FS_OPTION(sbi).inline_xattr_size >=
+					DEF_ADDRS_PER_INODE -
 					F2FS_TOTAL_EXTRA_ATTR_SIZE -
 					DEF_INLINE_RESERVED_SIZE -
 					DEF_MIN_INLINE_SIZE) {
@@ -715,6 +809,12 @@
 			return -EINVAL;
 		}
 	}
+
+	/* Don't pass down write hints if the number of active logs is
+	 * less than NR_CURSEG_TYPE.
+	 */
+	if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_TYPE)
+		F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
 	return 0;
 }
 
@@ -731,7 +831,6 @@
 	/* Initialize f2fs-specific inode info */
 	atomic_set(&fi->dirty_pages, 0);
 	fi->i_current_depth = 1;
-	fi->i_advise = 0;
 	init_rwsem(&fi->i_sem);
 	INIT_LIST_HEAD(&fi->dirty_list);
 	INIT_LIST_HEAD(&fi->gdirty_list);
@@ -743,10 +842,6 @@
 	init_rwsem(&fi->i_mmap_sem);
 	init_rwsem(&fi->i_xattr_sem);
 
-#ifdef CONFIG_QUOTA
-	memset(&fi->i_dquot, 0, sizeof(fi->i_dquot));
-	fi->i_reserved_quota = 0;
-#endif
 	/* Will be used by directory only */
 	fi->i_dir_level = F2FS_SB(sb)->dir_level;
 
@@ -956,7 +1051,7 @@
 	mempool_destroy(sbi->write_io_dummy);
 #ifdef CONFIG_QUOTA
 	for (i = 0; i < MAXQUOTAS; i++)
-		kfree(sbi->s_qf_names[i]);
+		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
 #endif
 	destroy_percpu_info(sbi);
 	for (i = 0; i < NR_PAGE_TYPE; i++)
@@ -1070,8 +1165,9 @@
 	buf->f_blocks = total_count - start_count;
 	buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
 						sbi->current_reserved_blocks;
-	if (buf->f_bfree > sbi->root_reserved_blocks)
-		buf->f_bavail = buf->f_bfree - sbi->root_reserved_blocks;
+	if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
+		buf->f_bavail = buf->f_bfree -
+				F2FS_OPTION(sbi).root_reserved_blocks;
 	else
 		buf->f_bavail = 0;
 
@@ -1106,10 +1202,10 @@
 #ifdef CONFIG_QUOTA
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
 
-	if (sbi->s_jquota_fmt) {
+	if (F2FS_OPTION(sbi).s_jquota_fmt) {
 		char *fmtname = "";
 
-		switch (sbi->s_jquota_fmt) {
+		switch (F2FS_OPTION(sbi).s_jquota_fmt) {
 		case QFMT_VFS_OLD:
 			fmtname = "vfsold";
 			break;
@@ -1123,14 +1219,17 @@
 		seq_printf(seq, ",jqfmt=%s", fmtname);
 	}
 
-	if (sbi->s_qf_names[USRQUOTA])
-		seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]);
+	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
+		seq_show_option(seq, "usrjquota",
+			F2FS_OPTION(sbi).s_qf_names[USRQUOTA]);
 
-	if (sbi->s_qf_names[GRPQUOTA])
-		seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]);
+	if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
+		seq_show_option(seq, "grpjquota",
+			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]);
 
-	if (sbi->s_qf_names[PRJQUOTA])
-		seq_show_option(seq, "prjjquota", sbi->s_qf_names[PRJQUOTA]);
+	if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
+		seq_show_option(seq, "prjjquota",
+			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]);
 #endif
 }
 
@@ -1165,7 +1264,7 @@
 		seq_puts(seq, ",noinline_xattr");
 	if (test_opt(sbi, INLINE_XATTR_SIZE))
 		seq_printf(seq, ",inline_xattr_size=%u",
-					sbi->inline_xattr_size);
+					F2FS_OPTION(sbi).inline_xattr_size);
 #endif
 #ifdef CONFIG_F2FS_FS_POSIX_ACL
 	if (test_opt(sbi, POSIX_ACL))
@@ -1201,18 +1300,20 @@
 		seq_puts(seq, "adaptive");
 	else if (test_opt(sbi, LFS))
 		seq_puts(seq, "lfs");
-	seq_printf(seq, ",active_logs=%u", sbi->active_logs);
+	seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
 	if (test_opt(sbi, RESERVE_ROOT))
 		seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
-				sbi->root_reserved_blocks,
-				from_kuid_munged(&init_user_ns, sbi->s_resuid),
-				from_kgid_munged(&init_user_ns, sbi->s_resgid));
+				F2FS_OPTION(sbi).root_reserved_blocks,
+				from_kuid_munged(&init_user_ns,
+					F2FS_OPTION(sbi).s_resuid),
+				from_kgid_munged(&init_user_ns,
+					F2FS_OPTION(sbi).s_resgid));
 	if (F2FS_IO_SIZE_BITS(sbi))
 		seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi));
 #ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (test_opt(sbi, FAULT_INJECTION))
 		seq_printf(seq, ",fault_injection=%u",
-				sbi->fault_info.inject_rate);
+				F2FS_OPTION(sbi).fault_info.inject_rate);
 #endif
 #ifdef CONFIG_QUOTA
 	if (test_opt(sbi, QUOTA))
@@ -1225,15 +1326,37 @@
 		seq_puts(seq, ",prjquota");
 #endif
 	f2fs_show_quota_options(seq, sbi->sb);
+	if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER)
+		seq_printf(seq, ",whint_mode=%s", "user-based");
+	else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS)
+		seq_printf(seq, ",whint_mode=%s", "fs-based");
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+	if (F2FS_OPTION(sbi).test_dummy_encryption)
+		seq_puts(seq, ",test_dummy_encryption");
+#endif
 
+	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT)
+		seq_printf(seq, ",alloc_mode=%s", "default");
+	else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
+		seq_printf(seq, ",alloc_mode=%s", "reuse");
+
+	if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
+		seq_printf(seq, ",fsync_mode=%s", "posix");
+	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
+		seq_printf(seq, ",fsync_mode=%s", "strict");
 	return 0;
 }
 
 static void default_options(struct f2fs_sb_info *sbi)
 {
 	/* init some FS parameters */
-	sbi->active_logs = NR_CURSEG_TYPE;
-	sbi->inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
+	F2FS_OPTION(sbi).active_logs = NR_CURSEG_TYPE;
+	F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
+	F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
+	F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
+	F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
+	F2FS_OPTION(sbi).test_dummy_encryption = false;
+	sbi->readdir_ra = 1;
 
 	set_opt(sbi, BG_GC);
 	set_opt(sbi, INLINE_XATTR);
@@ -1243,7 +1366,7 @@
 	set_opt(sbi, NOHEAP);
 	sbi->sb->s_flags |= MS_LAZYTIME;
 	set_opt(sbi, FLUSH_MERGE);
-	if (f2fs_sb_mounted_blkzoned(sbi->sb)) {
+	if (f2fs_sb_has_blkzoned(sbi->sb)) {
 		set_opt_mode(sbi, F2FS_MOUNT_LFS);
 		set_opt(sbi, DISCARD);
 	} else {
@@ -1270,16 +1393,11 @@
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
 	struct f2fs_mount_info org_mount_opt;
 	unsigned long old_sb_flags;
-	int err, active_logs;
+	int err;
 	bool need_restart_gc = false;
 	bool need_stop_gc = false;
 	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
-#ifdef CONFIG_F2FS_FAULT_INJECTION
-	struct f2fs_fault_info ffi = sbi->fault_info;
-#endif
 #ifdef CONFIG_QUOTA
-	int s_jquota_fmt;
-	char *s_qf_names[MAXQUOTAS];
 	int i, j;
 #endif
 
@@ -1289,21 +1407,21 @@
 	 */
 	org_mount_opt = sbi->mount_opt;
 	old_sb_flags = sb->s_flags;
-	active_logs = sbi->active_logs;
 
 #ifdef CONFIG_QUOTA
-	s_jquota_fmt = sbi->s_jquota_fmt;
+	org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
 	for (i = 0; i < MAXQUOTAS; i++) {
-		if (sbi->s_qf_names[i]) {
-			s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
-							 GFP_KERNEL);
-			if (!s_qf_names[i]) {
+		if (F2FS_OPTION(sbi).s_qf_names[i]) {
+			org_mount_opt.s_qf_names[i] =
+				kstrdup(F2FS_OPTION(sbi).s_qf_names[i],
+				GFP_KERNEL);
+			if (!org_mount_opt.s_qf_names[i]) {
 				for (j = 0; j < i; j++)
-					kfree(s_qf_names[j]);
+					kfree(org_mount_opt.s_qf_names[j]);
 				return -ENOMEM;
 			}
 		} else {
-			s_qf_names[i] = NULL;
+			org_mount_opt.s_qf_names[i] = NULL;
 		}
 	}
 #endif
@@ -1373,7 +1491,8 @@
 		need_stop_gc = true;
 	}
 
-	if (*flags & MS_RDONLY) {
+	if (*flags & MS_RDONLY ||
+		F2FS_OPTION(sbi).whint_mode != org_mount_opt.whint_mode) {
 		writeback_inodes_sb(sb, WB_REASON_SYNC);
 		sync_inodes_sb(sb);
 
@@ -1399,7 +1518,7 @@
 #ifdef CONFIG_QUOTA
 	/* Release old quota file names */
 	for (i = 0; i < MAXQUOTAS; i++)
-		kfree(s_qf_names[i]);
+		kfree(org_mount_opt.s_qf_names[i]);
 #endif
 	/* Update the POSIXACL Flag */
 	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
@@ -1417,18 +1536,14 @@
 	}
 restore_opts:
 #ifdef CONFIG_QUOTA
-	sbi->s_jquota_fmt = s_jquota_fmt;
+	F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
 	for (i = 0; i < MAXQUOTAS; i++) {
-		kfree(sbi->s_qf_names[i]);
-		sbi->s_qf_names[i] = s_qf_names[i];
+		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
+		F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
 	}
 #endif
 	sbi->mount_opt = org_mount_opt;
-	sbi->active_logs = active_logs;
 	sb->s_flags = old_sb_flags;
-#ifdef CONFIG_F2FS_FAULT_INJECTION
-	sbi->fault_info = ffi;
-#endif
 	return err;
 }
 
@@ -1550,8 +1665,8 @@
 
 static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
 {
-	return dquot_quota_on_mount(sbi->sb, sbi->s_qf_names[type],
-						sbi->s_jquota_fmt, type);
+	return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
+					F2FS_OPTION(sbi).s_jquota_fmt, type);
 }
 
 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
@@ -1570,7 +1685,7 @@
 	}
 
 	for (i = 0; i < MAXQUOTAS; i++) {
-		if (sbi->s_qf_names[i]) {
+		if (F2FS_OPTION(sbi).s_qf_names[i]) {
 			err = f2fs_quota_on_mount(sbi, i);
 			if (!err) {
 				enabled = 1;
@@ -1797,23 +1912,47 @@
 static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
 							void *fs_data)
 {
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+	/*
+	 * Encrypting the root directory is not allowed because fsck
+	 * expects the lost+found directory to exist and remain unencrypted
+	 * if the LOST_FOUND feature is enabled.
+	 *
+	 */
+	if (f2fs_sb_has_lost_found(sbi->sb) &&
+			inode->i_ino == F2FS_ROOT_INO(sbi))
+		return -EPERM;
+
 	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
 				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
 				ctx, len, fs_data, XATTR_CREATE);
 }
 
+static bool f2fs_dummy_context(struct inode *inode)
+{
+	return DUMMY_ENCRYPTION_ENABLED(F2FS_I_SB(inode));
+}
+
 static unsigned f2fs_max_namelen(struct inode *inode)
 {
 	return S_ISLNK(inode->i_mode) ?
 			inode->i_sb->s_blocksize : F2FS_NAME_LEN;
 }
 
+static inline bool f2fs_is_encrypted(struct inode *inode)
+{
+	return f2fs_encrypted_file(inode);
+}
+
 static const struct fscrypt_operations f2fs_cryptops = {
 	.key_prefix	= "f2fs:",
 	.get_context	= f2fs_get_context,
 	.set_context	= f2fs_set_context,
+	.dummy_context	= f2fs_dummy_context,
 	.empty_dir	= f2fs_empty_dir,
 	.max_namelen	= f2fs_max_namelen,
+	.is_encrypted = f2fs_is_encrypted,
 };
 #endif
 
@@ -1894,7 +2033,6 @@
 	lock_buffer(bh);
 	if (super)
 		memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
-	set_buffer_uptodate(bh);
 	set_buffer_dirty(bh);
 	unlock_buffer(bh);
 
@@ -2181,6 +2319,8 @@
 
 	sbi->dirty_device = 0;
 	spin_lock_init(&sbi->dev_lock);
+
+	init_rwsem(&sbi->sb_lock);
 }
 
 static int init_percpu_info(struct f2fs_sb_info *sbi)
@@ -2206,7 +2346,7 @@
 	unsigned int n = 0;
 	int err = -EIO;
 
-	if (!f2fs_sb_mounted_blkzoned(sbi->sb))
+	if (!f2fs_sb_has_blkzoned(sbi->sb))
 		return 0;
 
 	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
@@ -2334,7 +2474,7 @@
 	}
 
 	/* write back-up superblock first */
-	bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0: 1);
+	bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
 	if (!bh)
 		return -EIO;
 	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
@@ -2345,7 +2485,7 @@
 		return err;
 
 	/* write current valid superblock */
-	bh = sb_getblk(sbi->sb, sbi->valid_super_block);
+	bh = sb_bread(sbi->sb, sbi->valid_super_block);
 	if (!bh)
 		return -EIO;
 	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
@@ -2417,7 +2557,7 @@
 
 #ifdef CONFIG_BLK_DEV_ZONED
 		if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
-				!f2fs_sb_mounted_blkzoned(sbi->sb)) {
+				!f2fs_sb_has_blkzoned(sbi->sb)) {
 			f2fs_msg(sbi->sb, KERN_ERR,
 				"Zoned block device feature not enabled\n");
 			return -EINVAL;
@@ -2451,6 +2591,18 @@
 	return 0;
 }
 
+static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
+{
+	struct f2fs_sm_info *sm_i = SM_I(sbi);
+
+	/* adjust parameters according to the volume size */
+	if (sm_i->main_segments <= SMALL_VOLUME_SEGMENTS) {
+		F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
+		sm_i->dcc_info->discard_granularity = 1;
+		sm_i->ipu_policy = 1 << F2FS_IPU_FORCE;
+	}
+}
+
 static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 {
 	struct f2fs_sb_info *sbi;
@@ -2498,8 +2650,8 @@
 	sb->s_fs_info = sbi;
 	sbi->raw_super = raw_super;
 
-	sbi->s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
-	sbi->s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
+	F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
+	F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
 
 	/* precompute checksum seed for metadata */
 	if (f2fs_sb_has_inode_chksum(sb))
@@ -2512,7 +2664,7 @@
 	 * devices, but mandatory for host-managed zoned block devices.
 	 */
 #ifndef CONFIG_BLK_DEV_ZONED
-	if (f2fs_sb_mounted_blkzoned(sb)) {
+	if (f2fs_sb_has_blkzoned(sb)) {
 		f2fs_msg(sb, KERN_ERR,
 			 "Zoned block device support is not enabled\n");
 		err = -EOPNOTSUPP;
@@ -2728,7 +2880,7 @@
 	 * Turn on quotas which were not enabled for read-only mounts if
 	 * filesystem has quota feature, so that they are updated correctly.
 	 */
-	if (f2fs_sb_has_quota_ino(sb) && !sb_rdonly(sb)) {
+	if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb)) {
 		err = f2fs_enable_quotas(sb);
 		if (err) {
 			f2fs_msg(sb, KERN_ERR,
@@ -2803,6 +2955,8 @@
 
 	f2fs_join_shrinker(sbi);
 
+	f2fs_tuning_parameters(sbi);
+
 	f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx",
 				cur_cp_version(F2FS_CKPT(sbi)));
 	f2fs_update_time(sbi, CP_TIME);
@@ -2811,7 +2965,7 @@
 
 free_meta:
 #ifdef CONFIG_QUOTA
-	if (f2fs_sb_has_quota_ino(sb) && !sb_rdonly(sb))
+	if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb))
 		f2fs_quota_off_umount(sbi->sb);
 #endif
 	f2fs_sync_inode_meta(sbi);
@@ -2855,7 +3009,7 @@
 free_options:
 #ifdef CONFIG_QUOTA
 	for (i = 0; i < MAXQUOTAS; i++)
-		kfree(sbi->s_qf_names[i]);
+		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
 #endif
 	kfree(options);
 free_sb_buf:
@@ -2952,8 +3106,13 @@
 	err = f2fs_create_root_stats();
 	if (err)
 		goto free_filesystem;
+	err = f2fs_init_post_read_processing();
+	if (err)
+		goto free_root_stats;
 	return 0;
 
+free_root_stats:
+	f2fs_destroy_root_stats();
 free_filesystem:
 	unregister_filesystem(&f2fs_fs_type);
 free_shrinker:
@@ -2976,6 +3135,7 @@
 
 static void __exit exit_f2fs_fs(void)
 {
+	f2fs_destroy_post_read_processing();
 	f2fs_destroy_root_stats();
 	unregister_filesystem(&f2fs_fs_type);
 	unregister_shrinker(&f2fs_shrinker_info);
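Note: most of the super.c churn above comes from moving per-mount tunables behind F2FS_OPTION(sbi), so the remount path can save and restore the whole option block in one assignment instead of tracking fields one by one. A minimal sketch of that pattern with illustrative names, not the real f2fs_mount_info layout:

struct mount_opts_example {
	unsigned int	active_logs;
	int		whint_mode;
	int		alloc_mode;
	int		fsync_mode;
};

struct sbi_example {
	struct mount_opts_example mount_opts;
};

#define OPTS_EX(sbi)	((sbi)->mount_opts)

static int remount_example(struct sbi_example *sbi, bool parse_failed)
{
	struct mount_opts_example saved = OPTS_EX(sbi);	/* one-shot snapshot */

	/* ... parse the new option string into OPTS_EX(sbi) here ... */
	if (parse_failed) {
		OPTS_EX(sbi) = saved;			/* one-shot restore */
		return -1;
	}
	return 0;
}

This is why the remount hunk above only needs org_mount_opt plus the separately kstrdup'd quota file names.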
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index d978c7b..2c53de9 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -58,7 +58,7 @@
 #ifdef CONFIG_F2FS_FAULT_INJECTION
 	else if (struct_type == FAULT_INFO_RATE ||
 					struct_type == FAULT_INFO_TYPE)
-		return (unsigned char *)&sbi->fault_info;
+		return (unsigned char *)&F2FS_OPTION(sbi).fault_info;
 #endif
 	return NULL;
 }
@@ -92,10 +92,10 @@
 	if (!sb->s_bdev->bd_part)
 		return snprintf(buf, PAGE_SIZE, "0\n");
 
-	if (f2fs_sb_has_crypto(sb))
+	if (f2fs_sb_has_encrypt(sb))
 		len += snprintf(buf, PAGE_SIZE - len, "%s",
 						"encryption");
-	if (f2fs_sb_mounted_blkzoned(sb))
+	if (f2fs_sb_has_blkzoned(sb))
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "blkzoned");
 	if (f2fs_sb_has_extra_attr(sb))
@@ -116,6 +116,9 @@
 	if (f2fs_sb_has_inode_crtime(sb))
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "inode_crtime");
+	if (f2fs_sb_has_lost_found(sb))
+		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+				len ? ", " : "", "lost_found");
 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
 	return len;
 }
@@ -136,6 +139,27 @@
 	if (!ptr)
 		return -EINVAL;
 
+	if (!strcmp(a->attr.name, "extension_list")) {
+		__u8 (*extlist)[F2FS_EXTENSION_LEN] =
+					sbi->raw_super->extension_list;
+		int cold_count = le32_to_cpu(sbi->raw_super->extension_count);
+		int hot_count = sbi->raw_super->hot_ext_count;
+		int len = 0, i;
+
+		len += snprintf(buf + len, PAGE_SIZE - len,
+						"cold file extension:\n");
+		for (i = 0; i < cold_count; i++)
+			len += snprintf(buf + len, PAGE_SIZE - len, "%s\n",
+								extlist[i]);
+
+		len += snprintf(buf + len, PAGE_SIZE - len,
+						"hot file extension:\n");
+		for (i = cold_count; i < cold_count + hot_count; i++)
+			len += snprintf(buf + len, PAGE_SIZE - len, "%s\n",
+								extlist[i]);
+		return len;
+	}
+
 	ui = (unsigned int *)(ptr + a->offset);
 
 	return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
@@ -154,6 +178,41 @@
 	if (!ptr)
 		return -EINVAL;
 
+	if (!strcmp(a->attr.name, "extension_list")) {
+		const char *name = strim((char *)buf);
+		bool set = true, hot;
+
+		if (!strncmp(name, "[h]", 3))
+			hot = true;
+		else if (!strncmp(name, "[c]", 3))
+			hot = false;
+		else
+			return -EINVAL;
+
+		name += 3;
+
+		if (*name == '!') {
+			name++;
+			set = false;
+		}
+
+		if (strlen(name) >= F2FS_EXTENSION_LEN)
+			return -EINVAL;
+
+		down_write(&sbi->sb_lock);
+
+		ret = update_extension_list(sbi, name, hot, set);
+		if (ret)
+			goto out;
+
+		ret = f2fs_commit_super(sbi, false);
+		if (ret)
+			update_extension_list(sbi, name, hot, !set);
+out:
+		up_write(&sbi->sb_lock);
+		return ret ? ret : count;
+	}
+
 	ui = (unsigned int *)(ptr + a->offset);
 
 	ret = kstrtoul(skip_spaces(buf), 0, &t);
@@ -166,7 +225,7 @@
 	if (a->struct_type == RESERVED_BLOCKS) {
 		spin_lock(&sbi->stat_lock);
 		if (t > (unsigned long)(sbi->user_block_count -
-					sbi->root_reserved_blocks)) {
+				F2FS_OPTION(sbi).root_reserved_blocks)) {
 			spin_unlock(&sbi->stat_lock);
 			return -EINVAL;
 		}
@@ -186,6 +245,9 @@
 		return count;
 	}
 
+	if (!strcmp(a->attr.name, "trim_sections"))
+		return -EINVAL;
+
 	*ui = t;
 
 	if (!strcmp(a->attr.name, "iostat_enable") && *ui == 0)
@@ -236,6 +298,7 @@
 	FEAT_FLEXIBLE_INLINE_XATTR,
 	FEAT_QUOTA_INO,
 	FEAT_INODE_CRTIME,
+	FEAT_LOST_FOUND,
 };
 
 static ssize_t f2fs_feature_show(struct f2fs_attr *a,
@@ -251,6 +314,7 @@
 	case FEAT_FLEXIBLE_INLINE_XATTR:
 	case FEAT_QUOTA_INO:
 	case FEAT_INODE_CRTIME:
+	case FEAT_LOST_FOUND:
 		return snprintf(buf, PAGE_SIZE, "supported\n");
 	}
 	return 0;
@@ -307,6 +371,7 @@
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, iostat_enable, iostat_enable);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, readdir_ra, readdir_ra);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_pin_file_thresh, gc_pin_file_threshold);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_super_block, extension_list, extension_list);
 #ifdef CONFIG_F2FS_FAULT_INJECTION
 F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate);
 F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type);
@@ -329,6 +394,7 @@
 F2FS_FEATURE_RO_ATTR(flexible_inline_xattr, FEAT_FLEXIBLE_INLINE_XATTR);
 F2FS_FEATURE_RO_ATTR(quota_ino, FEAT_QUOTA_INO);
 F2FS_FEATURE_RO_ATTR(inode_crtime, FEAT_INODE_CRTIME);
+F2FS_FEATURE_RO_ATTR(lost_found, FEAT_LOST_FOUND);
 
 #define ATTR_LIST(name) (&f2fs_attr_##name.attr)
 static struct attribute *f2fs_attrs[] = {
@@ -357,6 +423,7 @@
 	ATTR_LIST(iostat_enable),
 	ATTR_LIST(readdir_ra),
 	ATTR_LIST(gc_pin_file_thresh),
+	ATTR_LIST(extension_list),
 #ifdef CONFIG_F2FS_FAULT_INJECTION
 	ATTR_LIST(inject_rate),
 	ATTR_LIST(inject_type),
@@ -383,6 +450,7 @@
 	ATTR_LIST(flexible_inline_xattr),
 	ATTR_LIST(quota_ino),
 	ATTR_LIST(inode_crtime),
+	ATTR_LIST(lost_found),
 	NULL,
 };
 
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index ad2e55d..17ad41d 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -745,11 +745,12 @@
 	 */
 	if (inode && inode_to_wb_is_valid(inode)) {
 		struct bdi_writeback *wb;
-		bool locked, congested;
+		struct wb_lock_cookie lock_cookie = {};
+		bool congested;
 
-		wb = unlocked_inode_to_wb_begin(inode, &locked);
+		wb = unlocked_inode_to_wb_begin(inode, &lock_cookie);
 		congested = wb_congested(wb, cong_bits);
-		unlocked_inode_to_wb_end(inode, locked);
+		unlocked_inode_to_wb_end(inode, &lock_cookie);
 		return congested;
 	}
 
@@ -1941,7 +1942,7 @@
 	}
 
 	if (!list_empty(&wb->work_list))
-		mod_delayed_work(bdi_wq, &wb->dwork, 0);
+		wb_wakeup(wb);
 	else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
 		wb_wakeup_delayed(wb);
 
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index c8c4f79..8a7923a 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -776,6 +776,7 @@
 
 	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));
 
+again:
 	spin_lock(&object->lock);
 	cookie = object->cookie;
 
@@ -816,10 +817,6 @@
 		goto superseded;
 	page = results[0];
 	_debug("gang %d [%lx]", n, page->index);
-	if (page->index >= op->store_limit) {
-		fscache_stat(&fscache_n_store_pages_over_limit);
-		goto superseded;
-	}
 
 	radix_tree_tag_set(&cookie->stores, page->index,
 			   FSCACHE_COOKIE_STORING_TAG);
@@ -829,6 +826,9 @@
 	spin_unlock(&cookie->stores_lock);
 	spin_unlock(&object->lock);
 
+	if (page->index >= op->store_limit)
+		goto discard_page;
+
 	fscache_stat(&fscache_n_store_pages);
 	fscache_stat(&fscache_n_cop_write_page);
 	ret = object->cache->ops->write_page(op, page);
@@ -844,6 +844,11 @@
 	_leave("");
 	return;
 
+discard_page:
+	fscache_stat(&fscache_n_store_pages_over_limit);
+	fscache_end_page_write(object, page);
+	goto again;
+
 superseded:
 	/* this writer is going away and there aren't any more things to
 	 * write */
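Note: the fscache change above moves the store_limit test to after the page has been claimed and the locks dropped; an over-limit page is then released with fscache_end_page_write() and the scan restarts instead of silently stopping. A rough sketch of that control flow with illustrative types (the declared helpers stand in for the locking and radix-tree work):

struct page_ex   { unsigned long index; };
struct object_ex { unsigned long store_limit; };

struct page_ex *claim_next_page(struct object_ex *obj);	/* done under locks */
void end_page_write(struct object_ex *obj, struct page_ex *page);
void do_write_page(struct object_ex *obj, struct page_ex *page);

static void write_pages_example(struct object_ex *obj)
{
	struct page_ex *page;

again:
	page = claim_next_page(obj);		/* marks the page as "storing" */
	if (!page)
		return;				/* nothing left to write */

	if (page->index >= obj->store_limit) {
		end_page_write(obj, page);	/* undo the claim cleanly */
		goto again;			/* look at the next candidate */
	}

	do_write_page(obj, page);
}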
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 658fa9e..a0b0683 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1891,8 +1891,10 @@
 
 	err = copy_out_args(cs, &req->out, nbytes);
 	if (req->in.h.opcode == FUSE_CANONICAL_PATH) {
-		req->out.h.error = kern_path((char *)req->out.args[0].value, 0,
-							req->canonical_path);
+		char *path = (char *)req->out.args[0].value;
+
+		path[req->out.args[0].size - 1] = 0;
+		req->out.h.error = kern_path(path, 0, req->canonical_path);
 	}
 	fuse_copy_finish(cs);
 
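Note: the fuse change above treats the length-delimited reply buffer defensively: it is NUL-terminated in place before kern_path() parses it as a C string. The same rule in isolation (hypothetical helper, not fuse code):

#include <stddef.h>

static void terminate_reply_string(char *buf, size_t size)
{
	if (size == 0)
		return;
	buf[size - 1] = '\0';	/* parser can never run past the buffer */
}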
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 39c382f..ff93e96 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -801,7 +801,7 @@
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_alloc_parms ap = { .aflags = 0, };
 	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
-	loff_t bytes, max_bytes, max_blks = UINT_MAX;
+	loff_t bytes, max_bytes, max_blks;
 	int error;
 	const loff_t pos = offset;
 	const loff_t count = len;
@@ -853,7 +853,8 @@
 			return error;
 		/* ap.allowed tells us how many blocks quota will allow
 		 * us to write. Check if this reduces max_blks */
-		if (ap.allowed && ap.allowed < max_blks)
+		max_blks = UINT_MAX;
+		if (ap.allowed)
 			max_blks = ap.allowed;
 
 		error = gfs2_inplace_reserve(ip, &ap);
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index 5e47c93..836f294 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -45,6 +45,8 @@
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	int ret;
+
+	ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
 	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
 		return 0;
 	ret = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
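Note: gfs2_quota_lock_check() now initialises ap->allowed to UINT_MAX before any early return, so callers such as the fallocate path above can clamp against it unconditionally. The sentinel pattern in miniature (names are illustrative):

#include <limits.h>

struct alloc_parms_ex { unsigned int allowed; };

static int quota_check_example(struct alloc_parms_ex *ap, int quota_enabled,
			       unsigned int quota_budget)
{
	ap->allowed = UINT_MAX;		/* assume no quota limit */
	if (!quota_enabled)
		return 0;		/* early return still leaves a usable value */

	ap->allowed = quota_budget;
	return 0;
}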
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 11854dd..b9563cd 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -588,6 +588,7 @@
 	return 0;
 
 out_put_hidden_dir:
+	cancel_delayed_work_sync(&sbi->sync_work);
 	iput(sbi->hidden_dir);
 out_put_root:
 	dput(sb->s_root);
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index f06c39c..d10bb2c 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -951,7 +951,7 @@
 }
 
 /*
- * This is a variaon of __jbd2_update_log_tail which checks for validity of
+ * This is a variation of __jbd2_update_log_tail which checks for validity of
  * provided log tail and locks j_checkpoint_mutex. So it is safe against races
  * with other threads updating log tail.
  */
@@ -1394,6 +1394,9 @@
 	journal_superblock_t *sb = journal->j_superblock;
 	int ret;
 
+	if (is_journal_aborted(journal))
+		return -EIO;
+
 	BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
 	jbd_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n",
 		  tail_block, tail_tid);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 4e5c610..9e9e093 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -528,6 +528,7 @@
 	 */
 	ret = start_this_handle(journal, handle, GFP_NOFS);
 	if (ret < 0) {
+		handle->h_journal = journal;
 		jbd2_journal_free_reserved(handle);
 		return ret;
 	}
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 0a754f3..e5a6deb 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -209,8 +209,7 @@
 		  __func__, inode->i_ino, inode->i_mode, inode->i_nlink,
 		  f->inocache->pino_nlink, inode->i_mapping->nrpages);
 
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 	return 0;
 
  fail:
@@ -430,8 +429,7 @@
 	mutex_unlock(&dir_f->sem);
 	jffs2_complete_reservation(c);
 
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 	return 0;
 
  fail:
@@ -575,8 +573,7 @@
 	mutex_unlock(&dir_f->sem);
 	jffs2_complete_reservation(c);
 
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 	return 0;
 
  fail:
@@ -747,8 +744,7 @@
 	mutex_unlock(&dir_f->sem);
 	jffs2_complete_reservation(c);
 
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 	return 0;
 
  fail:
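Note: this hunk, and the jfs, nilfs2 and orangefs hunks further down, replace the unlock_new_inode()/d_instantiate() pair with d_instantiate_new(), which performs the instantiation before the I_NEW state is cleared and so avoids exposing a dentry on a half-initialised inode. A sketch of the call-site change in a hypothetical filesystem's create path:

#include <linux/dcache.h>
#include <linux/fs.h>

static int example_create_tail(struct dentry *dentry, struct inode *inode)
{
	/*
	 * Old, racy order:
	 *	unlock_new_inode(inode);
	 *	d_instantiate(dentry, inode);
	 */
	d_instantiate_new(dentry, inode);	/* instantiate, then clear I_NEW */
	return 0;
}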
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 567653f..c9c47d0 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -361,7 +361,6 @@
 	ret = -EIO;
 error:
 	mutex_unlock(&f->sem);
-	jffs2_do_clear_inode(c, f);
 	iget_failed(inode);
 	return ERR_PTR(ret);
 }
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 5ef21f4..59c019a 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -342,7 +342,7 @@
 static void jffs2_kill_sb(struct super_block *sb)
 {
 	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
-	if (!(sb->s_flags & MS_RDONLY))
+	if (c && !(sb->s_flags & MS_RDONLY))
 		jffs2_stop_garbage_collect_thread(c);
 	kill_mtd_super(sb);
 	kfree(c);
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index b41596d..56c3fcb 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -178,8 +178,7 @@
 		unlock_new_inode(ip);
 		iput(ip);
 	} else {
-		unlock_new_inode(ip);
-		d_instantiate(dentry, ip);
+		d_instantiate_new(dentry, ip);
 	}
 
       out2:
@@ -313,8 +312,7 @@
 		unlock_new_inode(ip);
 		iput(ip);
 	} else {
-		unlock_new_inode(ip);
-		d_instantiate(dentry, ip);
+		d_instantiate_new(dentry, ip);
 	}
 
       out2:
@@ -1059,8 +1057,7 @@
 		unlock_new_inode(ip);
 		iput(ip);
 	} else {
-		unlock_new_inode(ip);
-		d_instantiate(dentry, ip);
+		d_instantiate_new(dentry, ip);
 	}
 
       out2:
@@ -1447,8 +1444,7 @@
 		unlock_new_inode(ip);
 		iput(ip);
 	} else {
-		unlock_new_inode(ip);
-		d_instantiate(dentry, ip);
+		d_instantiate_new(dentry, ip);
 	}
 
       out1:
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index a770da3..d484c63 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -132,6 +132,8 @@
 {
 	int		err = 0;
 	struct svc_rqst *rqstp = vrqstp;
+	struct net *net = &init_net;
+	struct lockd_net *ln = net_generic(net, lockd_net_id);
 
 	/* try_to_freeze() is called from svc_recv() */
 	set_freezable();
@@ -176,6 +178,8 @@
 	if (nlmsvc_ops)
 		nlmsvc_invalidate_all();
 	nlm_shutdown_hosts();
+	cancel_delayed_work_sync(&ln->grace_period_end);
+	locks_end_grace(&ln->lockd_manager);
 	return 0;
 }
 
diff --git a/fs/namei.c b/fs/namei.c
index 02fc285..c138ab1 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -221,9 +221,10 @@
 	if (len <= EMBEDDED_NAME_MAX) {
 		result->name = (char *)result->iname;
 	} else if (len <= PATH_MAX) {
+		const size_t size = offsetof(struct filename, iname[1]);
 		struct filename *tmp;
 
-		tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
+		tmp = kmalloc(size, GFP_KERNEL);
 		if (unlikely(!tmp)) {
 			__putname(result);
 			return ERR_PTR(-ENOMEM);
@@ -2904,6 +2905,11 @@
 	if (error)
 		return error;
 	error = dir->i_op->create(dir, dentry, mode, want_excl);
+	if (error)
+		return error;
+	error = security_inode_post_create(dir, dentry, mode);
+	if (error)
+		return error;
 	if (!error)
 		fsnotify_create(dir, dentry);
 	return error;
@@ -3719,6 +3725,13 @@
 		return error;
 
 	error = dir->i_op->mknod(dir, dentry, mode, dev);
+	if (error)
+		return error;
+
+	error = security_inode_post_create(dir, dentry, mode);
+	if (error)
+		return error;
+
 	if (!error)
 		fsnotify_create(dir, dentry);
 	return error;
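Note: when a name is too long for the embedded buffer, getname_flags() above now sizes the separate header allocation as offsetof(struct filename, iname[1]) — the fields plus a single byte of the name array — because the text itself lives in the page-sized buffer. The sizing idiom on its own (generic example, not VFS code):

#include <stddef.h>
#include <stdlib.h>

struct record_example {
	const char *name;	/* points at externally stored text */
	int refcnt;
	char iname[1];		/* embedded buffer, unused in this mode */
};

static struct record_example *alloc_header_only(void)
{
	/* header plus one byte of iname, nothing more */
	return malloc(offsetof(struct record_example, iname[1]));
}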
diff --git a/fs/namespace.c b/fs/namespace.c
index 2160bb9..4628d08c 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1051,7 +1051,8 @@
 			goto out_free;
 	}
 
-	mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
+	mnt->mnt.mnt_flags = old->mnt.mnt_flags;
+	mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);
 	/* Don't allow unprivileged users to change mount flags */
 	if (flag & CL_UNPRIVILEGED) {
 		mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 13abd60..4539008 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -475,6 +475,7 @@
 			goto out_err_free;
 
 		/* fh */
+		rc = -EIO;
 		p = xdr_inline_decode(&stream, 4);
 		if (!p)
 			goto out_err_free;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 4638654..91e017c 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1934,7 +1934,7 @@
 	return ret;
 }
 
-static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err)
+static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err)
 {
 	switch (err) {
 		default:
@@ -1981,7 +1981,11 @@
 			return -EAGAIN;
 		case -ENOMEM:
 		case -NFS4ERR_DENIED:
-			/* kill_proc(fl->fl_pid, SIGLOST, 1); */
+			if (fl) {
+				struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner;
+				if (lsp)
+					set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
+			}
 			return 0;
 	}
 	return err;
@@ -2017,7 +2021,7 @@
 		err = nfs4_open_recover_helper(opendata, FMODE_READ);
 	}
 	nfs4_opendata_put(opendata);
-	return nfs4_handle_delegation_recall_error(server, state, stateid, err);
+	return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
 }
 
 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
@@ -3300,6 +3304,7 @@
 		.rpc_resp = &res,
 	};
 	int status;
+	int i;
 
 	bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS |
 		     FATTR4_WORD0_FH_EXPIRE_TYPE |
@@ -3365,8 +3370,13 @@
 		server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
 		server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
 		server->cache_consistency_bitmask[2] = 0;
+
+		/* Avoid a regression due to buggy server */
+		for (i = 0; i < ARRAY_SIZE(res.exclcreat_bitmask); i++)
+			res.exclcreat_bitmask[i] &= res.attr_bitmask[i];
 		memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask,
 			sizeof(server->exclcreat_bitmask));
+
 		server->acl_bitmask = res.acl_bitmask;
 		server->fh_expire_type = res.fh_expire_type;
 	}
@@ -6493,7 +6503,7 @@
 	if (err != 0)
 		return err;
 	err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
-	return nfs4_handle_delegation_recall_error(server, state, stateid, err);
+	return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err);
 }
 
 struct nfs_release_lockowner_data {
@@ -8173,6 +8183,12 @@
 		/* fall through */
 	case -NFS4ERR_RETRY_UNCACHED_REP:
 		return -EAGAIN;
+	case -NFS4ERR_BADSESSION:
+	case -NFS4ERR_DEADSESSION:
+	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+		nfs4_schedule_session_recovery(clp->cl_session,
+				task->tk_status);
+		break;
 	default:
 		nfs4_schedule_lease_recovery(clp);
 	}
@@ -8251,7 +8267,6 @@
 	if (status == 0)
 		status = task->tk_status;
 	rpc_put_task(task);
-	return 0;
 out:
 	dprintk("<-- %s status=%d\n", __func__, status);
 	return status;
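Note: the server-setup hunk above ANDs the exclusive-create attribute mask reported by the server with the supported-attribute mask before caching it, so a buggy server cannot make the client request attributes it never advertised. The defensive masking in generic form:

#include <stdint.h>
#include <stddef.h>

static void clamp_mask(uint32_t *mask, const uint32_t *supported, size_t words)
{
	size_t i;

	for (i = 0; i < words; i++)
		mask[i] &= supported[i];	/* drop unadvertised bits */
}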
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 71deeae..3536913 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1429,6 +1429,7 @@
 	struct inode *inode = state->inode;
 	struct nfs_inode *nfsi = NFS_I(inode);
 	struct file_lock *fl;
+	struct nfs4_lock_state *lsp;
 	int status = 0;
 	struct file_lock_context *flctx = inode->i_flctx;
 	struct list_head *list;
@@ -1469,7 +1470,9 @@
 		case -NFS4ERR_DENIED:
 		case -NFS4ERR_RECLAIM_BAD:
 		case -NFS4ERR_RECLAIM_CONFLICT:
-			/* kill_proc(fl->fl_pid, SIGLOST, 1); */
+			lsp = fl->fl_u.nfs4_fl.owner;
+			if (lsp)
+				set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
 			status = 0;
 		}
 		spin_lock(&flctx->flc_lock);
@@ -1637,13 +1640,14 @@
 	nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
 }
 
-static void nfs4_reclaim_complete(struct nfs_client *clp,
+static int nfs4_reclaim_complete(struct nfs_client *clp,
 				 const struct nfs4_state_recovery_ops *ops,
 				 struct rpc_cred *cred)
 {
 	/* Notify the server we're done reclaiming our state */
 	if (ops->reclaim_complete)
-		(void)ops->reclaim_complete(clp, cred);
+		return ops->reclaim_complete(clp, cred);
+	return 0;
 }
 
 static void nfs4_clear_reclaim_server(struct nfs_server *server)
@@ -1690,13 +1694,16 @@
 {
 	const struct nfs4_state_recovery_ops *ops;
 	struct rpc_cred *cred;
+	int err;
 
 	if (!nfs4_state_clear_reclaim_reboot(clp))
 		return;
 	ops = clp->cl_mvops->reboot_recovery_ops;
 	cred = nfs4_get_clid_cred(clp);
-	nfs4_reclaim_complete(clp, ops, cred);
+	err = nfs4_reclaim_complete(clp, ops, cred);
 	put_rpccred(cred);
+	if (err == -NFS4ERR_CONN_NOT_BOUND_TO_SESSION)
+		set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
 }
 
 static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp)
diff --git a/fs/nfs/nfs4sysctl.c b/fs/nfs/nfs4sysctl.c
index 8693d77..76241aa 100644
--- a/fs/nfs/nfs4sysctl.c
+++ b/fs/nfs/nfs4sysctl.c
@@ -31,7 +31,7 @@
 		.data = &nfs_idmap_cache_timeout,
 		.maxlen = sizeof(int),
 		.mode = 0644,
-		.proc_handler = proc_dointvec_jiffies,
+		.proc_handler = proc_dointvec,
 	},
 	{ }
 };
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 2b71c60..1631318 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -46,8 +46,7 @@
 	int err = nilfs_add_link(dentry, inode);
 
 	if (!err) {
-		d_instantiate(dentry, inode);
-		unlock_new_inode(inode);
+		d_instantiate_new(dentry, inode);
 		return 0;
 	}
 	inode_dec_link_count(inode);
@@ -243,8 +242,7 @@
 		goto out_fail;
 
 	nilfs_mark_inode_dirty(inode);
-	d_instantiate(dentry, inode);
-	unlock_new_inode(inode);
+	d_instantiate_new(dentry, inode);
 out:
 	if (!err)
 		err = nilfs_transaction_commit(dir->i_sb);
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index e0e5f7c..8a459b1 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -92,7 +92,7 @@
 				       u32 event_mask,
 				       void *data, int data_type)
 {
-	__u32 marks_mask, marks_ignored_mask;
+	__u32 marks_mask = 0, marks_ignored_mask = 0;
 	struct path *path = data;
 
 	pr_debug("%s: inode_mark=%p vfsmnt_mark=%p mask=%x data=%p"
@@ -108,24 +108,20 @@
 	    !d_can_lookup(path->dentry))
 		return false;
 
-	if (inode_mark && vfsmnt_mark) {
-		marks_mask = (vfsmnt_mark->mask | inode_mark->mask);
-		marks_ignored_mask = (vfsmnt_mark->ignored_mask | inode_mark->ignored_mask);
-	} else if (inode_mark) {
-		/*
-		 * if the event is for a child and this inode doesn't care about
-		 * events on the child, don't send it!
-		 */
-		if ((event_mask & FS_EVENT_ON_CHILD) &&
-		    !(inode_mark->mask & FS_EVENT_ON_CHILD))
-			return false;
-		marks_mask = inode_mark->mask;
-		marks_ignored_mask = inode_mark->ignored_mask;
-	} else if (vfsmnt_mark) {
-		marks_mask = vfsmnt_mark->mask;
-		marks_ignored_mask = vfsmnt_mark->ignored_mask;
-	} else {
-		BUG();
+	/*
+	 * if the event is for a child and this inode doesn't care about
+	 * events on the child, don't send it!
+	 */
+	if (inode_mark &&
+	    (!(event_mask & FS_EVENT_ON_CHILD) ||
+	     (inode_mark->mask & FS_EVENT_ON_CHILD))) {
+		marks_mask |= inode_mark->mask;
+		marks_ignored_mask |= inode_mark->ignored_mask;
+	}
+
+	if (vfsmnt_mark) {
+		marks_mask |= vfsmnt_mark->mask;
+		marks_ignored_mask |= vfsmnt_mark->ignored_mask;
 	}
 
 	if (d_is_dir(path->dentry) &&
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index bed1fcb..ee8dbba 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -314,7 +314,9 @@
 		return ERR_PTR(ret);
 	}
 
+	down_read(&OCFS2_I(inode)->ip_xattr_sem);
 	acl = ocfs2_get_acl_nolock(inode, type, di_bh);
+	up_read(&OCFS2_I(inode)->ip_xattr_sem);
 
 	ocfs2_inode_unlock(inode, 0);
 	brelse(di_bh);
@@ -333,7 +335,9 @@
 	if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
 		return 0;
 
+	down_read(&OCFS2_I(inode)->ip_xattr_sem);
 	acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh);
+	up_read(&OCFS2_I(inode)->ip_xattr_sem);
 	if (IS_ERR(acl) || !acl)
 		return PTR_ERR(acl);
 	ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
@@ -364,8 +368,10 @@
 
 	if (!S_ISLNK(inode->i_mode)) {
 		if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
+			down_read(&OCFS2_I(dir)->ip_xattr_sem);
 			acl = ocfs2_get_acl_nolock(dir, ACL_TYPE_DEFAULT,
 						   dir_bh);
+			up_read(&OCFS2_I(dir)->ip_xattr_sem);
 			if (IS_ERR(acl))
 				return PTR_ERR(acl);
 		}
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 733e4e7..73be0c6 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -675,20 +675,6 @@
 	spin_unlock(&dlm->spinlock);
 }
 
-int dlm_shutting_down(struct dlm_ctxt *dlm)
-{
-	int ret = 0;
-
-	spin_lock(&dlm_domain_lock);
-
-	if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN)
-		ret = 1;
-
-	spin_unlock(&dlm_domain_lock);
-
-	return ret;
-}
-
 void dlm_unregister_domain(struct dlm_ctxt *dlm)
 {
 	int leave = 0;
diff --git a/fs/ocfs2/dlm/dlmdomain.h b/fs/ocfs2/dlm/dlmdomain.h
index fd6122a..8a92814 100644
--- a/fs/ocfs2/dlm/dlmdomain.h
+++ b/fs/ocfs2/dlm/dlmdomain.h
@@ -28,7 +28,30 @@
 extern spinlock_t dlm_domain_lock;
 extern struct list_head dlm_domains;
 
-int dlm_shutting_down(struct dlm_ctxt *dlm);
+static inline int dlm_joined(struct dlm_ctxt *dlm)
+{
+	int ret = 0;
+
+	spin_lock(&dlm_domain_lock);
+	if (dlm->dlm_state == DLM_CTXT_JOINED)
+		ret = 1;
+	spin_unlock(&dlm_domain_lock);
+
+	return ret;
+}
+
+static inline int dlm_shutting_down(struct dlm_ctxt *dlm)
+{
+	int ret = 0;
+
+	spin_lock(&dlm_domain_lock);
+	if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN)
+		ret = 1;
+	spin_unlock(&dlm_domain_lock);
+
+	return ret;
+}
+
 void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm,
 					int node_num);
 
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index eef3248..844dc8d 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1378,6 +1378,15 @@
 	if (!dlm_grab(dlm))
 		return -EINVAL;
 
+	if (!dlm_joined(dlm)) {
+		mlog(ML_ERROR, "Domain %s not joined! "
+			  "lockres %.*s, master %u\n",
+			  dlm->name, mres->lockname_len,
+			  mres->lockname, mres->master);
+		dlm_put(dlm);
+		return -EINVAL;
+	}
+
 	BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
 
 	real_master = mres->master;
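Note: the migrate-lockres handler above now refuses lock resources until the local node has fully joined the domain; dlm_joined(), like dlm_shutting_down(), samples the state under dlm_domain_lock. A user-space flavoured sketch of the same guard (a pthread mutex standing in for the spinlock, all names illustrative):

#include <stdbool.h>
#include <pthread.h>

enum domain_state_ex { CTX_JOINING, CTX_JOINED, CTX_IN_SHUTDOWN };

struct domain_ex {
	pthread_mutex_t lock;
	enum domain_state_ex state;
};

static bool domain_joined(struct domain_ex *d)
{
	bool ret;

	pthread_mutex_lock(&d->lock);
	ret = (d->state == CTX_JOINED);
	pthread_mutex_unlock(&d->lock);
	return ret;
}

static int handle_migrate_msg(struct domain_ex *d)
{
	if (!domain_joined(d))
		return -1;	/* reject: not ready to take over lock resources */
	/* ... process the message ... */
	return 0;
}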
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index a244f14..fa947d3 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -666,23 +666,24 @@
 	/* we can safely remove this assertion after testing. */
 	if (!buffer_uptodate(bh)) {
 		mlog(ML_ERROR, "giving me a buffer that's not uptodate!\n");
-		mlog(ML_ERROR, "b_blocknr=%llu\n",
-		     (unsigned long long)bh->b_blocknr);
+		mlog(ML_ERROR, "b_blocknr=%llu, b_state=0x%lx\n",
+		     (unsigned long long)bh->b_blocknr, bh->b_state);
 
 		lock_buffer(bh);
 		/*
-		 * A previous attempt to write this buffer head failed.
-		 * Nothing we can do but to retry the write and hope for
-		 * the best.
+		 * A previous transaction with a couple of buffer heads fail
+		 * to checkpoint, so all the bhs are marked as BH_Write_EIO.
+		 * For current transaction, the bh is just among those error
+		 * bhs which previous transaction handle. We can't just clear
+		 * its BH_Write_EIO and reuse directly, since other bhs are
+		 * not written to disk yet and that will cause metadata
+		 * inconsistency. So we should set fs read-only to avoid
+		 * further damage.
 		 */
 		if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) {
-			clear_buffer_write_io_error(bh);
-			set_buffer_uptodate(bh);
-		}
-
-		if (!buffer_uptodate(bh)) {
 			unlock_buffer(bh);
-			return -EIO;
+			return ocfs2_error(osb->sb, "A previous attempt to "
+					"write this buffer head failed\n");
 		}
 		unlock_buffer(bh);
 	}
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index f56fe39..64dfbe5 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -473,9 +473,8 @@
 		new = ocfs2_get_system_file_inode(osb, i, osb->slot_num);
 		if (!new) {
 			ocfs2_release_system_inodes(osb);
-			status = -EINVAL;
+			status = ocfs2_is_soft_readonly(osb) ? -EROFS : -EINVAL;
 			mlog_errno(status);
-			/* FIXME: Should ERROR_RO_FS */
 			mlog(ML_ERROR, "Unable to load system inode %d, "
 			     "possibly corrupt fs?", i);
 			goto bail;
@@ -504,7 +503,7 @@
 		new = ocfs2_get_system_file_inode(osb, i, osb->slot_num);
 		if (!new) {
 			ocfs2_release_system_inodes(osb);
-			status = -EINVAL;
+			status = ocfs2_is_soft_readonly(osb) ? -EROFS : -EINVAL;
 			mlog(ML_ERROR, "status=%d, sysfile=%d, slot=%d\n",
 			     status, i, osb->slot_num);
 			goto bail;
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index cb157a3..03f6ff2 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -638,9 +638,11 @@
 						     si->value_len);
 
 	if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
+		down_read(&OCFS2_I(dir)->ip_xattr_sem);
 		acl_len = ocfs2_xattr_get_nolock(dir, dir_bh,
 					OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT,
 					"", NULL, 0);
+		up_read(&OCFS2_I(dir)->ip_xattr_sem);
 		if (acl_len > 0) {
 			a_size = ocfs2_xattr_entry_real_size(0, acl_len);
 			if (S_ISDIR(mode))
diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
index 7c31593..561497a 100644
--- a/fs/orangefs/namei.c
+++ b/fs/orangefs/namei.c
@@ -70,8 +70,7 @@
 		     get_khandle_from_ino(inode),
 		     dentry);
 
-	d_instantiate(dentry, inode);
-	unlock_new_inode(inode);
+	d_instantiate_new(dentry, inode);
 	orangefs_set_timeout(dentry);
 	ORANGEFS_I(inode)->getattr_time = jiffies - 1;
 
@@ -318,8 +317,7 @@
 		     "Assigned symlink inode new number of %pU\n",
 		     get_khandle_from_ino(inode));
 
-	d_instantiate(dentry, inode);
-	unlock_new_inode(inode);
+	d_instantiate_new(dentry, inode);
 	orangefs_set_timeout(dentry);
 	ORANGEFS_I(inode)->getattr_time = jiffies - 1;
 
@@ -382,8 +380,7 @@
 		     "Assigned dir inode new number of %pU\n",
 		     get_khandle_from_ino(inode));
 
-	d_instantiate(dentry, inode);
-	unlock_new_inode(inode);
+	d_instantiate_new(dentry, inode);
 	orangefs_set_timeout(dentry);
 	ORANGEFS_I(inode)->getattr_time = jiffies - 1;
 
diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
index 629d8c9..6e35ef6 100644
--- a/fs/orangefs/super.c
+++ b/fs/orangefs/super.c
@@ -559,6 +559,11 @@
 	/* provided sb cleanup */
 	kill_anon_super(sb);
 
+	if (!ORANGEFS_SB(sb)) {
+		mutex_lock(&orangefs_request_mutex);
+		mutex_unlock(&orangefs_request_mutex);
+		return;
+	}
 	/*
 	 * issue the unmount to userspace to tell it to remove the
 	 * dynamic mount info it has for this superblock
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 306b6c1..8546384 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -180,6 +180,9 @@
 		inc_nlink(inode);
 	}
 	d_instantiate(dentry, inode);
+	/* Force lookup of new upper hardlink to find its lower */
+	if (hardlink)
+		d_drop(dentry);
 }
 
 static int ovl_create_upper(struct dentry *dentry, struct inode *inode,
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 7fb53d0..16f6db8 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -227,6 +227,16 @@
 	return res;
 }
 
+static bool ovl_can_list(const char *s)
+{
+	/* List all non-trusted xatts */
+	if (strncmp(s, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) != 0)
+		return true;
+
+	/* Never list trusted.overlay, list other trusted for superuser only */
+	return !ovl_is_private_xattr(s) && capable(CAP_SYS_ADMIN);
+}
+
 ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
 {
 	struct dentry *realdentry = ovl_dentry_real(dentry);
@@ -250,7 +260,7 @@
 			return -EIO;
 
 		len -= slen;
-		if (ovl_is_private_xattr(s)) {
+		if (!ovl_can_list(s)) {
 			res -= slen;
 			memmove(s, s + slen, len);
 		} else {
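Note: ovl_listxattr() now filters names through ovl_can_list(): anything outside the trusted.* namespace is listed, names under the overlay's private prefix are always hidden, and the remaining trusted.* names are shown only to CAP_SYS_ADMIN. A stand-alone restatement of that policy, assuming the private prefix is "trusted.overlay." as in mainline overlayfs:

#include <stdbool.h>
#include <string.h>

#define TRUSTED_PREFIX		"trusted."
#define OVL_PRIVATE_PREFIX	"trusted.overlay."

static bool can_list_example(const char *name, bool is_superuser)
{
	/* user., security., system. etc. are always listed */
	if (strncmp(name, TRUSTED_PREFIX, strlen(TRUSTED_PREFIX)) != 0)
		return true;

	/* the overlay's own xattrs never show; other trusted.* need privilege */
	if (strncmp(name, OVL_PRIVATE_PREFIX, strlen(OVL_PRIVATE_PREFIX)) == 0)
		return false;

	return is_superuser;
}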
diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
index 1ade120..08dce22 100644
--- a/fs/proc/Kconfig
+++ b/fs/proc/Kconfig
@@ -81,3 +81,10 @@
 
 	  Say Y if you are running any user-space software which takes benefit from
 	  this interface. For example, rkt is such a piece of software.
+
+config PROC_UID
+	bool "Include /proc/uid/ files"
+	default y
+	depends on PROC_FS && RT_MUTEXES
+	help
+	  Provides aggregated per-uid information under /proc/uid.
diff --git a/fs/proc/Makefile b/fs/proc/Makefile
index 12c6922..dea53ba 100644
--- a/fs/proc/Makefile
+++ b/fs/proc/Makefile
@@ -25,6 +25,7 @@
 proc-y	+= namespaces.o
 proc-y	+= self.o
 proc-y	+= thread_self.o
+proc-$(CONFIG_PROC_UID)  += uid.o
 proc-$(CONFIG_PROC_SYSCTL)	+= proc_sysctl.o
 proc-$(CONFIG_NET)		+= proc_net.o
 proc-$(CONFIG_PROC_KCORE)	+= kcore.o
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 794b52a..94f83e7 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -80,6 +80,7 @@
 #include <linux/delayacct.h>
 #include <linux/seq_file.h>
 #include <linux/pid_namespace.h>
+#include <linux/prctl.h>
 #include <linux/ptrace.h>
 #include <linux/tracehook.h>
 #include <linux/string_helpers.h>
@@ -345,8 +346,32 @@
 {
 #ifdef CONFIG_SECCOMP
 	seq_put_decimal_ull(m, "Seccomp:\t", p->seccomp.mode);
-	seq_putc(m, '\n');
 #endif
+	seq_printf(m, "\nSpeculation_Store_Bypass:\t");
+	switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
+	case -EINVAL:
+		seq_printf(m, "unknown");
+		break;
+	case PR_SPEC_NOT_AFFECTED:
+		seq_printf(m, "not vulnerable");
+		break;
+	case PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE:
+		seq_printf(m, "thread force mitigated");
+		break;
+	case PR_SPEC_PRCTL | PR_SPEC_DISABLE:
+		seq_printf(m, "thread mitigated");
+		break;
+	case PR_SPEC_PRCTL | PR_SPEC_ENABLE:
+		seq_printf(m, "thread vulnerable");
+		break;
+	case PR_SPEC_DISABLE:
+		seq_printf(m, "globally mitigated");
+		break;
+	default:
+		seq_printf(m, "vulnerable");
+		break;
+	}
+	seq_putc(m, '\n');
 }
 
 static inline void task_context_switch_counts(struct seq_file *m,
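Note: the new Speculation_Store_Bypass line in /proc/<pid>/status decodes the same state a process can query for itself with the speculation-control prctl. A small user-space sketch, assuming the PR_SPEC_* constants decoded in the switch above are present in the installed <linux/prctl.h>:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	int ret = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	if (ret < 0)
		perror("prctl");			/* kernel or headers too old */
	else if (ret == PR_SPEC_NOT_AFFECTED)
		puts("not vulnerable");
	else if (ret & PR_SPEC_DISABLE)
		puts("store bypass disabled (mitigated) for this thread");
	else if (ret & PR_SPEC_ENABLE)
		puts("store bypass enabled (vulnerable) for this thread");
	else
		puts("vulnerable");
	return 0;
}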
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 1370a4e..0f23b3b 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -87,6 +87,7 @@
 #include <linux/slab.h>
 #include <linux/flex_array.h>
 #include <linux/posix-timers.h>
+#include <linux/cpufreq_times.h>
 #ifdef CONFIG_HARDWALL
 #include <asm/hardwall.h>
 #endif
@@ -94,6 +95,8 @@
 #include "internal.h"
 #include "fd.h"
 
+#include "../../lib/kstrtox.h"
+
 /* NOTE:
  *	Implementing inode permission operations in /proc is almost
  *	certainly an error.  Permission checks need to happen during
@@ -252,7 +255,7 @@
 	 * Inherently racy -- command line shares address space
 	 * with code and data.
 	 */
-	rv = access_remote_vm(mm, arg_end - 1, &c, 1, 0);
+	rv = access_remote_vm(mm, arg_end - 1, &c, 1, FOLL_ANON);
 	if (rv <= 0)
 		goto out_free_page;
 
@@ -270,7 +273,7 @@
 			int nr_read;
 
 			_count = min3(count, len, PAGE_SIZE);
-			nr_read = access_remote_vm(mm, p, page, _count, 0);
+			nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
 			if (nr_read < 0)
 				rv = nr_read;
 			if (nr_read <= 0)
@@ -305,7 +308,7 @@
 			bool final;
 
 			_count = min3(count, len, PAGE_SIZE);
-			nr_read = access_remote_vm(mm, p, page, _count, 0);
+			nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
 			if (nr_read < 0)
 				rv = nr_read;
 			if (nr_read <= 0)
@@ -354,7 +357,7 @@
 			bool final;
 
 			_count = min3(count, len, PAGE_SIZE);
-			nr_read = access_remote_vm(mm, p, page, _count, 0);
+			nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
 			if (nr_read < 0)
 				rv = nr_read;
 			if (nr_read <= 0)
@@ -970,7 +973,7 @@
 		max_len = min_t(size_t, PAGE_SIZE, count);
 		this_len = min(max_len, this_len);
 
-		retval = access_remote_vm(mm, (env_start + src), page, this_len, 0);
+		retval = access_remote_vm(mm, (env_start + src), page, this_len, FOLL_ANON);
 
 		if (retval <= 0) {
 			ret = retval;
@@ -2062,8 +2065,33 @@
 static int dname_to_vma_addr(struct dentry *dentry,
 			     unsigned long *start, unsigned long *end)
 {
-	if (sscanf(dentry->d_name.name, "%lx-%lx", start, end) != 2)
+	const char *str = dentry->d_name.name;
+	unsigned long long sval, eval;
+	unsigned int len;
+
+	len = _parse_integer(str, 16, &sval);
+	if (len & KSTRTOX_OVERFLOW)
 		return -EINVAL;
+	if (sval != (unsigned long)sval)
+		return -EINVAL;
+	str += len;
+
+	if (*str != '-')
+		return -EINVAL;
+	str++;
+
+	len = _parse_integer(str, 16, &eval);
+	if (len & KSTRTOX_OVERFLOW)
+		return -EINVAL;
+	if (eval != (unsigned long)eval)
+		return -EINVAL;
+	str += len;
+
+	if (*str != '\0')
+		return -EINVAL;
+
+	*start = sval;
+	*end = eval;
 
 	return 0;
 }
@@ -3166,8 +3194,8 @@
 	ONE("cgroup",  S_IRUGO, proc_cgroup_show),
 #endif
 	ONE("oom_score",  S_IRUGO, proc_oom_score),
-	REG("oom_adj",    S_IRUSR, proc_oom_adj_operations),
-	REG("oom_score_adj", S_IRUSR, proc_oom_score_adj_operations),
+	REG("oom_adj",    S_IRUGO|S_IWUSR, proc_oom_adj_operations),
+	REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
 #ifdef CONFIG_AUDITSYSCALL
 	REG("loginuid",   S_IWUSR|S_IRUGO, proc_loginuid_operations),
 	REG("sessionid",  S_IRUGO, proc_sessionid_operations),
@@ -3198,6 +3226,9 @@
 	REG("timers",	  S_IRUGO, proc_timers_operations),
 #endif
 	REG("timerslack_ns", S_IRUGO|S_IWUGO, proc_pid_set_timerslack_ns_operations),
+#ifdef CONFIG_CPU_FREQ_TIMES
+	ONE("time_in_state", 0444, proc_time_in_state_show),
+#endif
 };
 
 static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx)
@@ -3561,8 +3592,8 @@
 	ONE("cgroup",  S_IRUGO, proc_cgroup_show),
 #endif
 	ONE("oom_score", S_IRUGO, proc_oom_score),
-	REG("oom_adj",   S_IRUSR, proc_oom_adj_operations),
-	REG("oom_score_adj", S_IRUSR, proc_oom_score_adj_operations),
+	REG("oom_adj",   S_IRUGO|S_IWUSR, proc_oom_adj_operations),
+	REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
 #ifdef CONFIG_AUDITSYSCALL
 	REG("loginuid",  S_IWUSR|S_IRUGO, proc_loginuid_operations),
 	REG("sessionid",  S_IRUGO, proc_sessionid_operations),
@@ -3586,6 +3617,9 @@
 	REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations),
 	REG("setgroups",  S_IRUGO|S_IWUSR, proc_setgroups_operations),
 #endif
+#ifdef CONFIG_CPU_FREQ_TIMES
+	ONE("time_in_state", 0444, proc_time_in_state_show),
+#endif
 };
 
 static int proc_tid_base_readdir(struct file *file, struct dir_context *ctx)
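
The dname_to_vma_addr() rewrite above replaces sscanf("%lx-%lx") with _parse_integer() so that names containing leading whitespace, a sign, an "0x" prefix or trailing junk no longer alias a valid map_files entry. A userspace analogue of the stricter parse, sketched with strtoul (hypothetical helper names; the explicit "0x" rejection compensates for strtoul being more permissive than the kernel helper):

#include <ctype.h>
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>

/* Parse a bare hex number: no sign, no whitespace, no "0x" prefix. */
static bool parse_hex(const char *s, char **end, unsigned long *val)
{
	if (!isxdigit((unsigned char)s[0]))
		return false;
	if (s[0] == '0' && (s[1] == 'x' || s[1] == 'X'))
		return false;	/* strtoul would accept this; _parse_integer does not skip it */
	errno = 0;
	*val = strtoul(s, end, 16);
	return errno == 0;
}

/* Accept exactly "<hex>-<hex>", the canonical /proc/<pid>/map_files name. */
static bool parse_hex_range(const char *name, unsigned long *start,
			    unsigned long *end)
{
	char *p;

	if (!parse_hex(name, &p, start) || *p != '-')
		return false;
	if (!parse_hex(p + 1, &p, end) || *p != '\0')
		return false;
	return true;
}
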
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 6dfb414..d8105cd 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -257,6 +257,15 @@
 #endif
 
 /*
+ * uid.c
+ */
+#ifdef CONFIG_PROC_UID
+extern int proc_uid_init(void);
+#else
+static inline void proc_uid_init(void) { }
+#endif
+
+/*
  * proc_tty.c
  */
 #ifdef CONFIG_TTY
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index df7e079..7ed961c 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -505,6 +505,10 @@
 			/* we have to zero-fill user buffer even if no read */
 			if (copy_to_user(buffer, buf, tsz))
 				return -EFAULT;
+		} else if (m->type == KCORE_USER) {
+			/* User page is handled prior to normal kernel page: */
+			if (copy_to_user(buffer, (char *)start, tsz))
+				return -EFAULT;
 		} else {
 			if (kern_addr_valid(start)) {
 				/*
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index d4e37ac..847f234 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -660,7 +660,10 @@
 				    struct ctl_table *table)
 {
 	bool ret = true;
+
 	head = sysctl_head_grab(head);
+	if (IS_ERR(head))
+		return false;
 
 	if (S_ISLNK(table->mode)) {
 		/* It is not an error if we can not follow the link ignore it */
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 8d3e484..c2f5014 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -131,7 +131,7 @@
 	proc_symlink("mounts", NULL, "self/mounts");
 
 	proc_net_init();
-
+	proc_uid_init();
 #ifdef CONFIG_SYSVIPC
 	proc_mkdir("sysvipc", NULL);
 #endif
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index c585e7e..4fc0895 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1123,8 +1123,11 @@
 					goto out_mm;
 				}
 				for (vma = mm->mmap; vma; vma = vma->vm_next) {
-					vma->vm_flags &= ~VM_SOFTDIRTY;
+					vm_write_begin(vma);
+					WRITE_ONCE(vma->vm_flags,
+						vma->vm_flags & ~VM_SOFTDIRTY);
 					vma_set_page_prot(vma);
+					vm_write_end(vma);
 				}
 				downgrade_write(&mm->mmap_sem);
 				break;
diff --git a/fs/proc/uid.c b/fs/proc/uid.c
new file mode 100644
index 0000000..b2bb085
--- /dev/null
+++ b/fs/proc/uid.c
@@ -0,0 +1,295 @@
+/*
+ * /proc/uid support
+ */
+
+#include <linux/cpufreq_times.h>
+#include <linux/fs.h>
+#include <linux/hashtable.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/rtmutex.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+static struct proc_dir_entry *proc_uid;
+
+#define UID_HASH_BITS 10
+
+static DECLARE_HASHTABLE(proc_uid_hash_table, UID_HASH_BITS);
+
+/*
+ * use rt_mutex here to avoid priority inversion between high-priority readers
+ * of these files and tasks calling proc_register_uid().
+ */
+static DEFINE_RT_MUTEX(proc_uid_lock); /* proc_uid_hash_table */
+
+struct uid_hash_entry {
+	uid_t uid;
+	struct hlist_node hash;
+};
+
+/* Caller must hold proc_uid_lock */
+static bool uid_hash_entry_exists_locked(uid_t uid)
+{
+	struct uid_hash_entry *entry;
+
+	hash_for_each_possible(proc_uid_hash_table, entry, hash, uid) {
+		if (entry->uid == uid)
+			return true;
+	}
+	return false;
+}
+
+void proc_register_uid(kuid_t kuid)
+{
+	struct uid_hash_entry *entry;
+	bool exists;
+	uid_t uid = from_kuid_munged(current_user_ns(), kuid);
+
+	rt_mutex_lock(&proc_uid_lock);
+	exists = uid_hash_entry_exists_locked(uid);
+	rt_mutex_unlock(&proc_uid_lock);
+	if (exists)
+		return;
+
+	entry = kzalloc(sizeof(struct uid_hash_entry), GFP_KERNEL);
+	if (!entry)
+		return;
+	entry->uid = uid;
+
+	rt_mutex_lock(&proc_uid_lock);
+	if (uid_hash_entry_exists_locked(uid))
+		kfree(entry);
+	else
+		hash_add(proc_uid_hash_table, &entry->hash, uid);
+	rt_mutex_unlock(&proc_uid_lock);
+}
+
+struct uid_entry {
+	const char *name;
+	int len;
+	umode_t mode;
+	const struct inode_operations *iop;
+	const struct file_operations *fop;
+};
+
+#define NOD(NAME, MODE, IOP, FOP) {			\
+	.name	= (NAME),				\
+	.len	= sizeof(NAME) - 1,			\
+	.mode	= MODE,					\
+	.iop	= IOP,					\
+	.fop	= FOP,					\
+}
+
+#ifdef CONFIG_CPU_FREQ_TIMES
+static const struct file_operations proc_uid_time_in_state_operations = {
+	.open		= single_uid_time_in_state_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+#endif
+
+static const struct uid_entry uid_base_stuff[] = {
+#ifdef CONFIG_CPU_FREQ_TIMES
+	NOD("time_in_state", 0444, NULL, &proc_uid_time_in_state_operations),
+#endif
+};
+
+static const struct inode_operations proc_uid_def_inode_operations = {
+	.setattr	= proc_setattr,
+};
+
+static struct inode *proc_uid_make_inode(struct super_block *sb, kuid_t kuid)
+{
+	struct inode *inode;
+
+	inode = new_inode(sb);
+	if (!inode)
+		return NULL;
+
+	inode->i_ino = get_next_ino();
+	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+	inode->i_op = &proc_uid_def_inode_operations;
+	inode->i_uid = kuid;
+
+	return inode;
+}
+
+static int proc_uident_instantiate(struct inode *dir, struct dentry *dentry,
+				   struct task_struct *unused, const void *ptr)
+{
+	const struct uid_entry *u = ptr;
+	struct inode *inode;
+
+	inode = proc_uid_make_inode(dir->i_sb, dir->i_uid);
+	if (!inode)
+		return -ENOENT;
+
+	inode->i_mode = u->mode;
+	if (S_ISDIR(inode->i_mode))
+		set_nlink(inode, 2);
+	if (u->iop)
+		inode->i_op = u->iop;
+	if (u->fop)
+		inode->i_fop = u->fop;
+	d_add(dentry, inode);
+	return 0;
+}
+
+static struct dentry *proc_uid_base_lookup(struct inode *dir,
+					   struct dentry *dentry,
+					   unsigned int flags)
+{
+	const struct uid_entry *u, *last;
+	unsigned int nents = ARRAY_SIZE(uid_base_stuff);
+
+	if (nents == 0)
+		return ERR_PTR(-ENOENT);
+
+	last = &uid_base_stuff[nents - 1];
+	for (u = uid_base_stuff; u <= last; u++) {
+		if (u->len != dentry->d_name.len)
+			continue;
+		if (!memcmp(dentry->d_name.name, u->name, u->len))
+			break;
+	}
+	if (u > last)
+		return ERR_PTR(-ENOENT);
+
+	return ERR_PTR(proc_uident_instantiate(dir, dentry, NULL, u));
+}
+
+static int proc_uid_base_readdir(struct file *file, struct dir_context *ctx)
+{
+	unsigned int nents = ARRAY_SIZE(uid_base_stuff);
+	const struct uid_entry *u;
+
+	if (!dir_emit_dots(file, ctx))
+		return 0;
+
+	if (ctx->pos >= nents + 2)
+		return 0;
+
+	for (u = uid_base_stuff + (ctx->pos - 2);
+	     u < uid_base_stuff + nents; u++) {
+		if (!proc_fill_cache(file, ctx, u->name, u->len,
+				     proc_uident_instantiate, NULL, u))
+			break;
+		ctx->pos++;
+	}
+
+	return 0;
+}
+
+static const struct inode_operations proc_uid_base_inode_operations = {
+	.lookup		= proc_uid_base_lookup,
+	.setattr	= proc_setattr,
+};
+
+static const struct file_operations proc_uid_base_operations = {
+	.read		= generic_read_dir,
+	.iterate	= proc_uid_base_readdir,
+	.llseek		= default_llseek,
+};
+
+static int proc_uid_instantiate(struct inode *dir, struct dentry *dentry,
+				struct task_struct *unused, const void *ptr)
+{
+	unsigned int i, len;
+	nlink_t nlinks;
+	kuid_t *kuid = (kuid_t *)ptr;
+	struct inode *inode = proc_uid_make_inode(dir->i_sb, *kuid);
+
+	if (!inode)
+		return -ENOENT;
+
+	inode->i_mode = S_IFDIR | 0555;
+	inode->i_op = &proc_uid_base_inode_operations;
+	inode->i_fop = &proc_uid_base_operations;
+	inode->i_flags |= S_IMMUTABLE;
+
+	nlinks = 2;
+	len = ARRAY_SIZE(uid_base_stuff);
+	for (i = 0; i < len; ++i) {
+		if (S_ISDIR(uid_base_stuff[i].mode))
+			++nlinks;
+	}
+	set_nlink(inode, nlinks);
+
+	d_add(dentry, inode);
+
+	return 0;
+}
+
+static int proc_uid_readdir(struct file *file, struct dir_context *ctx)
+{
+	int last_shown, i;
+	unsigned long bkt;
+	struct uid_hash_entry *entry;
+
+	if (!dir_emit_dots(file, ctx))
+		return 0;
+
+	i = 0;
+	last_shown = ctx->pos - 2;
+	rt_mutex_lock(&proc_uid_lock);
+	hash_for_each(proc_uid_hash_table, bkt, entry, hash) {
+		int len;
+		char buf[PROC_NUMBUF];
+
+		if (i < last_shown)
+			continue;
+		len = snprintf(buf, sizeof(buf), "%u", entry->uid);
+		if (!proc_fill_cache(file, ctx, buf, len,
+				     proc_uid_instantiate, NULL, &entry->uid))
+			break;
+		i++;
+		ctx->pos++;
+	}
+	rt_mutex_unlock(&proc_uid_lock);
+	return 0;
+}
+
+static struct dentry *proc_uid_lookup(struct inode *dir, struct dentry *dentry,
+				      unsigned int flags)
+{
+	int result = -ENOENT;
+
+	uid_t uid = name_to_int(&dentry->d_name);
+	bool uid_exists;
+
+	rt_mutex_lock(&proc_uid_lock);
+	uid_exists = uid_hash_entry_exists_locked(uid);
+	rt_mutex_unlock(&proc_uid_lock);
+	if (uid_exists) {
+		kuid_t kuid = make_kuid(current_user_ns(), uid);
+
+		result = proc_uid_instantiate(dir, dentry, NULL, &kuid);
+	}
+	return ERR_PTR(result);
+}
+
+static const struct file_operations proc_uid_operations = {
+	.read		= generic_read_dir,
+	.iterate	= proc_uid_readdir,
+	.llseek		= default_llseek,
+};
+
+static const struct inode_operations proc_uid_inode_operations = {
+	.lookup		= proc_uid_lookup,
+	.setattr	= proc_setattr,
+};
+
+int __init proc_uid_init(void)
+{
+	proc_uid = proc_mkdir("uid", NULL);
+	if (!proc_uid)
+		return -ENOMEM;
+	proc_uid->proc_iops = &proc_uid_inode_operations;
+	proc_uid->proc_fops = &proc_uid_operations;
+
+	return 0;
+}
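
proc_register_uid() above checks for an existing entry, drops the lock to allocate, then re-checks under the lock before inserting and frees the duplicate if it lost the race. That keeps the GFP_KERNEL allocation out of the rt_mutex critical section. A generic pthread sketch of the same check/allocate/re-check pattern (a toy linked list stands in for the kernel hashtable; all names are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct entry { unsigned int key; struct entry *next; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *table;	/* toy stand-in for the hashtable */

static bool key_exists_locked(unsigned int key)
{
	for (struct entry *e = table; e; e = e->next)
		if (e->key == key)
			return true;
	return false;
}

/* Insert key if absent; the allocation happens outside the lock. */
static void register_key(unsigned int key)
{
	struct entry *e;
	bool exists;

	pthread_mutex_lock(&table_lock);
	exists = key_exists_locked(key);
	pthread_mutex_unlock(&table_lock);
	if (exists)
		return;

	e = calloc(1, sizeof(*e));
	if (!e)
		return;
	e->key = key;

	pthread_mutex_lock(&table_lock);
	if (key_exists_locked(key)) {
		free(e);		/* somebody beat us to it */
	} else {
		e->next = table;
		table = e;
	}
	pthread_mutex_unlock(&table_lock);
}
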
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 7610818..2a5c481 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -2640,7 +2640,7 @@
 	if (IS_ERR(journal->j_dev_bd)) {
 		result = PTR_ERR(journal->j_dev_bd);
 		journal->j_dev_bd = NULL;
-		reiserfs_warning(super,
+		reiserfs_warning(super, "sh-457",
 				 "journal_init_dev: Cannot open '%s': %i",
 				 jdev_name, result);
 		return result;
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index e6a2b40..1ec728c 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -687,8 +687,7 @@
 	reiserfs_update_inode_transaction(inode);
 	reiserfs_update_inode_transaction(dir);
 
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 	retval = journal_end(&th);
 
 out_failed:
@@ -771,8 +770,7 @@
 		goto out_failed;
 	}
 
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 	retval = journal_end(&th);
 
 out_failed:
@@ -871,8 +869,7 @@
 	/* the above add_entry did not update dir's stat data */
 	reiserfs_update_sd(&th, dir);
 
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 	retval = journal_end(&th);
 out_failed:
 	reiserfs_write_unlock(dir->i_sb);
@@ -1187,8 +1184,7 @@
 		goto out_failed;
 	}
 
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 	retval = journal_end(&th);
 out_failed:
 	reiserfs_write_unlock(parent_dir->i_sb);
diff --git a/fs/sdcardfs/dentry.c b/fs/sdcardfs/dentry.c
index e9426a6..776d549 100644
--- a/fs/sdcardfs/dentry.c
+++ b/fs/sdcardfs/dentry.c
@@ -51,7 +51,6 @@
 	 * whether the base obbpath has been changed or not
 	 */
 	if (is_obbpath_invalid(dentry)) {
-		d_drop(dentry);
 		return 0;
 	}
 
@@ -65,7 +64,6 @@
 	if ((lower_dentry->d_flags & DCACHE_OP_REVALIDATE)) {
 		err = lower_dentry->d_op->d_revalidate(lower_dentry, flags);
 		if (err == 0) {
-			d_drop(dentry);
 			goto out;
 		}
 	}
@@ -73,14 +71,12 @@
 	spin_lock(&lower_dentry->d_lock);
 	if (d_unhashed(lower_dentry)) {
 		spin_unlock(&lower_dentry->d_lock);
-		d_drop(dentry);
 		err = 0;
 		goto out;
 	}
 	spin_unlock(&lower_dentry->d_lock);
 
 	if (parent_lower_dentry != lower_cur_parent_dentry) {
-		d_drop(dentry);
 		err = 0;
 		goto out;
 	}
@@ -94,7 +90,6 @@
 	}
 
 	if (!qstr_case_eq(&dentry->d_name, &lower_dentry->d_name)) {
-		__d_drop(dentry);
 		err = 0;
 	}
 
@@ -113,7 +108,6 @@
 	if (inode) {
 		data = top_data_get(SDCARDFS_I(inode));
 		if (!data || data->abandoned) {
-			d_drop(dentry);
 			err = 0;
 		}
 		if (data)
@@ -131,6 +125,8 @@
 
 static void sdcardfs_d_release(struct dentry *dentry)
 {
+	if (!dentry || !dentry->d_fsdata)
+		return;
 	/* release and reset the lower paths */
 	if (has_graft_path(dentry))
 		sdcardfs_put_reset_orig_path(dentry);
diff --git a/fs/sdcardfs/lookup.c b/fs/sdcardfs/lookup.c
index 843fcd2..98051996 100644
--- a/fs/sdcardfs/lookup.c
+++ b/fs/sdcardfs/lookup.c
@@ -41,8 +41,6 @@
 
 void free_dentry_private_data(struct dentry *dentry)
 {
-	if (!dentry || !dentry->d_fsdata)
-		return;
 	kmem_cache_free(sdcardfs_dentry_cachep, dentry->d_fsdata);
 	dentry->d_fsdata = NULL;
 }
diff --git a/fs/sdcardfs/main.c b/fs/sdcardfs/main.c
index e4fd3fb..30e0c43 100644
--- a/fs/sdcardfs/main.c
+++ b/fs/sdcardfs/main.c
@@ -316,7 +316,7 @@
 	sb->s_root = d_make_root(inode);
 	if (!sb->s_root) {
 		err = -ENOMEM;
-		goto out_iput;
+		goto out_sput;
 	}
 	d_set_d_op(sb->s_root, &sdcardfs_ci_dops);
 
@@ -361,8 +361,7 @@
 	/* no longer needed: free_dentry_private_data(sb->s_root); */
 out_freeroot:
 	dput(sb->s_root);
-out_iput:
-	iput(inode);
+	sb->s_root = NULL;
 out_sput:
 	/* drop refs we took earlier */
 	atomic_dec(&lower_sb->s_active);
@@ -422,7 +421,7 @@
 {
 	struct sdcardfs_sb_info *sbi;
 
-	if (sb->s_magic == SDCARDFS_SUPER_MAGIC) {
+	if (sb->s_magic == SDCARDFS_SUPER_MAGIC && sb->s_fs_info) {
 		sbi = SDCARDFS_SB(sb);
 		mutex_lock(&sdcardfs_super_list_lock);
 		list_del(&sbi->list);
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 4ec05108..03dda1c 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1728,8 +1728,11 @@
 
 	dbg_save_space_info(c);
 
-	for (i = 0; i < c->jhead_cnt; i++)
-		ubifs_wbuf_sync(&c->jheads[i].wbuf);
+	for (i = 0; i < c->jhead_cnt; i++) {
+		err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
+		if (err)
+			ubifs_ro_mode(c, err);
+	}
 
 	c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY);
 	c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
@@ -1795,8 +1798,11 @@
 			int err;
 
 			/* Synchronize write-buffers */
-			for (i = 0; i < c->jhead_cnt; i++)
-				ubifs_wbuf_sync(&c->jheads[i].wbuf);
+			for (i = 0; i < c->jhead_cnt; i++) {
+				err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
+				if (err)
+					ubifs_ro_mode(c, err);
+			}
 
 			/*
 			 * We are being cleanly unmounted which means the
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 2d65e28..348b922 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -621,8 +621,7 @@
 	if (fibh.sbh != fibh.ebh)
 		brelse(fibh.ebh);
 	brelse(fibh.sbh);
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 
 	return 0;
 }
@@ -732,8 +731,7 @@
 	inc_nlink(dir);
 	dir->i_ctime = dir->i_mtime = current_time(dir);
 	mark_inode_dirty(dir);
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 	if (fibh.sbh != fibh.ebh)
 		brelse(fibh.ebh);
 	brelse(fibh.sbh);
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 4b1f6d5..12467ad 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -2094,8 +2094,9 @@
 	bool lvid_open = false;
 
 	uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
-	uopt.uid = INVALID_UID;
-	uopt.gid = INVALID_GID;
+	/* By default we'll use overflow[ug]id when UDF inode [ug]id == -1 */
+	uopt.uid = make_kuid(current_user_ns(), overflowuid);
+	uopt.gid = make_kgid(current_user_ns(), overflowgid);
 	uopt.umask = 0;
 	uopt.fmode = UDF_INVALID_MODE;
 	uopt.dmode = UDF_INVALID_MODE;
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
index 695389a..3a3be23 100644
--- a/fs/udf/unicode.c
+++ b/fs/udf/unicode.c
@@ -28,6 +28,9 @@
 
 #include "udf_sb.h"
 
+#define SURROGATE_MASK 0xfffff800
+#define SURROGATE_PAIR 0x0000d800
+
 static int udf_uni2char_utf8(wchar_t uni,
 			     unsigned char *out,
 			     int boundlen)
@@ -37,6 +40,9 @@
 	if (boundlen <= 0)
 		return -ENAMETOOLONG;
 
+	if ((uni & SURROGATE_MASK) == SURROGATE_PAIR)
+		return -EINVAL;
+
 	if (uni < 0x80) {
 		out[u_len++] = (unsigned char)uni;
 	} else if (uni < 0x800) {
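
The new test rejects UTF-16 surrogate code points (U+D800 through U+DFFF), which may only appear as halves of a surrogate pair and must never be emitted as standalone UTF-8. The mask check in isolation, as a small standalone helper:

#include <stdbool.h>
#include <stdint.h>

#define SURROGATE_MASK 0xfffff800
#define SURROGATE_PAIR 0x0000d800

/* True for U+D800..U+DFFF: not a valid character on its own. */
static bool is_utf16_surrogate(uint32_t cp)
{
	return (cp & SURROGATE_MASK) == SURROGATE_PAIR;
}

For example, is_utf16_surrogate(0xD83D) is true (a high surrogate), while is_utf16_surrogate(0x00E9) and is_utf16_surrogate(0x1F600) are false.
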
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index 8eca4ed..2109c07 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -38,8 +38,7 @@
 {
 	int err = ufs_add_link(dentry, inode);
 	if (!err) {
-		unlock_new_inode(inode);
-		d_instantiate(dentry, inode);
+		d_instantiate_new(dentry, inode);
 		return 0;
 	}
 	inode_dec_link_count(inode);
@@ -192,8 +191,7 @@
 	if (err)
 		goto out_fail;
 
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 	return 0;
 
 out_fail:
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 9d9c032..00b661d 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -499,8 +499,10 @@
 			vma = prev;
 		else
 			prev = vma;
-		vma->vm_flags = new_flags;
+		vm_write_begin(vma);
+		WRITE_ONCE(vma->vm_flags, new_flags);
 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+		vm_write_end(vma);
 	}
 	up_write(&mm->mmap_sem);
 	mmput(mm);
@@ -895,8 +897,10 @@
 		 * the next vma was merged into the current one and
 		 * the current one has not been updated yet.
 		 */
-		vma->vm_flags = new_flags;
+		vm_write_begin(vma);
+		WRITE_ONCE(vma->vm_flags, new_flags);
 		vma->vm_userfaultfd_ctx.ctx = ctx;
+		vm_write_end(vma);
 
 	skip:
 		prev = vma;
@@ -1033,8 +1037,10 @@
 		 * the next vma was merged into the current one and
 		 * the current one has not been updated yet.
 		 */
-		vma->vm_flags = new_flags;
+		vm_write_begin(vma);
+		WRITE_ONCE(vma->vm_flags, new_flags);
 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+		vm_write_end(vma);
 
 	skip:
 		prev = vma;
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 4ff499a..b2ab123 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -50,6 +50,13 @@
 
 	pag = xfs_perag_get(mp, agno);
 
+	/*
+	 * Force out the log.  This means any transactions that might have freed
+	 * space before we take the AGF buffer lock are now on disk, and the
+	 * volatile disk cache is flushed.
+	 */
+	xfs_log_force(mp, XFS_LOG_SYNC);
+
 	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
 	if (error || !agbp)
 		goto out_put_perag;
@@ -57,13 +64,6 @@
 	cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT);
 
 	/*
-	 * Force out the log.  This means any transactions that might have freed
-	 * space before we took the AGF buffer lock are now on disk, and the
-	 * volatile disk cache is flushed.
-	 */
-	xfs_log_force(mp, XFS_LOG_SYNC);
-
-	/*
 	 * Look up the longest btree in the AGF and start with it.
 	 */
 	error = xfs_alloc_lookup_ge(cur, 0,
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 362c6b4..1410835 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -846,22 +846,26 @@
 		if (error)
 			goto out_unlock;
 	} else if (mode & FALLOC_FL_INSERT_RANGE) {
-		unsigned int blksize_mask = i_blocksize(inode) - 1;
+		unsigned int	blksize_mask = i_blocksize(inode) - 1;
+		loff_t		isize = i_size_read(inode);
 
-		new_size = i_size_read(inode) + len;
 		if (offset & blksize_mask || len & blksize_mask) {
 			error = -EINVAL;
 			goto out_unlock;
 		}
 
-		/* check the new inode size does not wrap through zero */
-		if (new_size > inode->i_sb->s_maxbytes) {
+		/*
+		 * New inode size must not exceed ->s_maxbytes, accounting for
+		 * possible signed overflow.
+		 */
+		if (inode->i_sb->s_maxbytes - isize < len) {
 			error = -EFBIG;
 			goto out_unlock;
 		}
+		new_size = isize + len;
 
 		/* Offset should be less than i_size */
-		if (offset >= i_size_read(inode)) {
+		if (offset >= isize) {
 			error = -EINVAL;
 			goto out_unlock;
 		}
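
The insert-range check above is rewritten as "s_maxbytes - isize < len" because forming "isize + len" first can wrap past the maximum loff_t for a huge len, letting the old comparison pass and the file end up with a nonsensical size; the subtraction cannot overflow since isize never exceeds s_maxbytes. The same idiom in isolation, as a hedged sketch:

#include <stdbool.h>
#include <stdint.h>

/*
 * Return true if cur + extra would exceed limit, without ever forming the
 * possibly-overflowing sum. Assumes 0 <= cur <= limit.
 */
static bool would_exceed(int64_t cur, int64_t extra, int64_t limit)
{
	return limit - cur < extra;
}
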
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
index 8f66aaa..9e3f761 100644
--- a/include/acpi/platform/acgcc.h
+++ b/include/acpi/platform/acgcc.h
@@ -48,7 +48,17 @@
  * Using the compiler specific <stdarg.h> is good practice even when
  * -nostdinc is specified (i.e., ACPI_USE_STANDARD_HEADERS is undefined).
  */
+#ifndef va_arg
+#ifdef ACPI_USE_BUILTIN_STDARG
+typedef __builtin_va_list va_list;
+#define va_start(v, l)          __builtin_va_start(v, l)
+#define va_end(v)               __builtin_va_end(v)
+#define va_arg(v, l)            __builtin_va_arg(v, l)
+#define va_copy(d, s)           __builtin_va_copy(d, s)
+#else
 #include <stdarg.h>
+#endif
+#endif
 
 #define ACPI_INLINE             __inline__
 
diff --git a/include/acpi/platform/acintel.h b/include/acpi/platform/acintel.h
index 17bd3b7..bdb6858 100644
--- a/include/acpi/platform/acintel.h
+++ b/include/acpi/platform/acintel.h
@@ -48,7 +48,9 @@
  * Using the compiler specific <stdarg.h> is good practice even when
  * -nostdinc is specified (i.e., ACPI_USE_STANDARD_HEADERS is undefined).
  */
+#ifndef va_arg
 #include <stdarg.h>
+#endif
 
 /* Configuration specific to Intel 64-bit C compiler */
 
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
index 288cc9e..f2d97b7 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -243,4 +243,7 @@
 #define atomic_long_inc_not_zero(l) \
 	ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l))
 
+#define atomic_long_cond_read_acquire(v, c) \
+	ATOMIC_LONG_PFX(_cond_read_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (c))
+
 #endif  /*  _ASM_GENERIC_ATOMIC_LONG_H  */
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index bf2d34c..f0d8b1c 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -13,7 +13,7 @@
  */
 
 /**
- * futex_atomic_op_inuser() - Atomic arithmetic operation with constant
+ * arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant
  *			  argument and comparison of the previous
  *			  futex value with another constant.
  *
@@ -25,18 +25,11 @@
  * <0 - On error
  */
 static inline int
-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval, ret;
 	u32 tmp;
 
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
 	preempt_disable();
 	pagefault_disable();
 
@@ -74,17 +67,9 @@
 	pagefault_enable();
 	preempt_enable();
 
-	if (ret == 0) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
-		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
-		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
-		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
-		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
-		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
-		default: ret = -ENOSYS;
-		}
-	}
+	if (ret == 0)
+		*oval = oldval;
+
 	return ret;
 }
 
@@ -126,18 +111,9 @@
 
 #else
 static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, ret;
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
-		return -EFAULT;
 
 	pagefault_disable();
 
@@ -153,17 +129,9 @@
 
 	pagefault_enable();
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
-		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
-		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
-		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
-		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
-		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
-		default: ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
+
 	return ret;
 }
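
With this change the arch helper only performs the arithmetic and reports the old value through *oval; decoding encoded_op and evaluating the comparison now happen once in the generic futex code. For reference, the decode and comparison that every architecture used to duplicate, reconstructed from the removed lines into a standalone sketch (FUTEX_OP_* constants come from <linux/futex.h>):

#include <linux/futex.h>
#include <stdbool.h>

struct futex_decoded {
	int op;		/* bits 31..28, minus the OPARG_SHIFT flag */
	int cmp;	/* bits 27..24 */
	int oparg;	/* bits 23..12, sign extended */
	int cmparg;	/* bits 11..0, sign extended */
};

static struct futex_decoded decode_futex_op(int encoded_op)
{
	struct futex_decoded d = {
		.op	= (encoded_op >> 28) & 7,
		.cmp	= (encoded_op >> 24) & 15,
		.oparg	= (encoded_op << 8) >> 20,	/* arithmetic shift, as before */
		.cmparg	= (encoded_op << 20) >> 20,
	};

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		d.oparg = 1 << d.oparg;
	return d;
}

/* The comparison that used to end every arch implementation. */
static bool futex_cmp_ok(int cmp, int oldval, int cmparg)
{
	switch (cmp) {
	case FUTEX_OP_CMP_EQ: return oldval == cmparg;
	case FUTEX_OP_CMP_NE: return oldval != cmparg;
	case FUTEX_OP_CMP_LT: return oldval <  cmparg;
	case FUTEX_OP_CMP_GE: return oldval >= cmparg;
	case FUTEX_OP_CMP_LE: return oldval <= cmparg;
	case FUTEX_OP_CMP_GT: return oldval >  cmparg;
	default:              return false;	/* the kernel returns -ENOSYS here */
	}
}
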
 
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index f6ea0f3..4e8551c 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -234,6 +234,21 @@
 extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
 #endif
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * This is an implementation of pmdp_establish() that is only suitable for an
+ * architecture that doesn't have hardware dirty/accessed bits. In that case we
+ * can't race with a CPU setting these bits, so a non-atomic approach is fine.
+ */
+static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
+		unsigned long address, pmd_t *pmdp, pmd_t pmd)
+{
+	pmd_t old_pmd = *pmdp;
+	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
+	return old_pmd;
+}
+#endif
+
 #ifndef __HAVE_ARCH_PMDP_INVALIDATE
 extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 			    pmd_t *pmdp);
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 7d026bf..c39a93a 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -26,30 +26,17 @@
 
 /*
  * Writer states & reader shift and bias.
- *
- *       | +0 | +1 | +2 | +3 |
- *   ----+----+----+----+----+
- *    LE | 78 | 56 | 34 | 12 | 0x12345678
- *   ----+----+----+----+----+
- *       | wr |      rd      |
- *       +----+----+----+----+
- *
- *   ----+----+----+----+----+
- *    BE | 12 | 34 | 56 | 78 | 0x12345678
- *   ----+----+----+----+----+
- *       |      rd      | wr |
- *       +----+----+----+----+
  */
-#define	_QW_WAITING	1		/* A writer is waiting	   */
-#define	_QW_LOCKED	0xff		/* A writer holds the lock */
-#define	_QW_WMASK	0xff		/* Writer mask		   */
-#define	_QR_SHIFT	8		/* Reader count shift	   */
+#define	_QW_WAITING	0x100		/* A writer is waiting	   */
+#define	_QW_LOCKED	0x0ff		/* A writer holds the lock */
+#define	_QW_WMASK	0x1ff		/* Writer mask		   */
+#define	_QR_SHIFT	9		/* Reader count shift	   */
 #define _QR_BIAS	(1U << _QR_SHIFT)
 
 /*
  * External function declarations
  */
-extern void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts);
+extern void queued_read_lock_slowpath(struct qrwlock *lock);
 extern void queued_write_lock_slowpath(struct qrwlock *lock);
 
 /**
@@ -118,7 +105,7 @@
 		return;
 
 	/* The slowpath will decrement the reader count, if necessary. */
-	queued_read_lock_slowpath(lock, cnts);
+	queued_read_lock_slowpath(lock);
 }
 
 /**
@@ -147,22 +134,12 @@
 }
 
 /**
- * __qrwlock_write_byte - retrieve the write byte address of a queue rwlock
- * @lock : Pointer to queue rwlock structure
- * Return: the write byte address of a queue rwlock
- */
-static inline u8 *__qrwlock_write_byte(struct qrwlock *lock)
-{
-	return (u8 *)lock + 3 * IS_BUILTIN(CONFIG_CPU_BIG_ENDIAN);
-}
-
-/**
  * queued_write_unlock - release write lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  */
 static inline void queued_write_unlock(struct qrwlock *lock)
 {
-	smp_store_release(__qrwlock_write_byte(lock), 0);
+	smp_store_release(&lock->wlocked, 0);
 }
 
 /*
diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h
index 0abc6b6..8af752a 100644
--- a/include/asm-generic/qrwlock_types.h
+++ b/include/asm-generic/qrwlock_types.h
@@ -9,12 +9,23 @@
  */
 
 typedef struct qrwlock {
-	atomic_t		cnts;
+	union {
+		atomic_t cnts;
+		struct {
+#ifdef __LITTLE_ENDIAN
+			u8 wlocked;	/* Locked for write? */
+			u8 __lstate[3];
+#else
+			u8 __lstate[3];
+			u8 wlocked;	/* Locked for write? */
+#endif
+		};
+	};
 	arch_spinlock_t		wait_lock;
 } arch_rwlock_t;
 
 #define	__ARCH_RW_LOCK_UNLOCKED {		\
-	.cnts = ATOMIC_INIT(0),			\
+	{ .cnts = ATOMIC_INIT(0), },		\
 	.wait_lock = __ARCH_SPIN_LOCK_UNLOCKED,	\
 }
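
Overlaying the atomic count with a byte-addressable struct lets queued_write_unlock() release the lock with a plain release store to wlocked, instead of computing the byte's offset from the build-time endianness as the removed __qrwlock_write_byte() did; the writer now owns the low 9 bits (_QW_WMASK 0x1ff) and readers count from bit 9. A toy little-endian illustration of the overlay (not the kernel type; the assertion would fail on big-endian, where wlocked sits in the high byte as in the #else branch above):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define _QW_LOCKED	0x0ff
#define _QR_BIAS	(1u << 9)

/* Little-endian layout: the low byte aliases the writer-owned byte. */
union rwcnts {
	uint32_t cnts;
	struct {
		uint8_t wlocked;	/* _QW_LOCKED lives here */
		uint8_t rest[3];
	};
};

int main(void)
{
	union rwcnts l = { .cnts = _QW_LOCKED + 2 * _QR_BIAS };

	l.wlocked = 0;			/* "write unlock": one byte store */
	assert(l.cnts == 2 * _QR_BIAS);	/* reader count untouched (LE only) */
	printf("cnts after write unlock: 0x%x\n", l.cnts);
	return 0;
}
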
 
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 8236dbd..3c3519b 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -172,7 +172,7 @@
 #endif
 
 #ifdef CONFIG_SERIAL_EARLYCON
-#define EARLYCON_TABLE() STRUCT_ALIGN();			\
+#define EARLYCON_TABLE() . = ALIGN(8);				\
 			 VMLINUX_SYMBOL(__earlycon_table) = .;	\
 			 *(__earlycon_table)			\
 			 VMLINUX_SYMBOL(__earlycon_table_end) = .;
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 3c2024d..328f232 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -32,6 +32,7 @@
  * @type: payload data type
  * @flags: flags controlling this message transmission
  * @ctrl: ctrl index to transmit on
+ * @wait_ms: duration in ms to wait after message transmission
  * @tx_len: length of @tx_buf
  * @tx_buf: data to be written
  * @rx_len: length of @rx_buf
@@ -42,6 +43,7 @@
 	u8 type;
 	u16 flags;
 	u32 ctrl;
+	u32 wait_ms;
 
 	size_t tx_len;
 	const void *tx_buf;
diff --git a/include/dt-bindings/arm/arm-smmu.h b/include/dt-bindings/arm/arm-smmu.h
index 1de45a9..3a1dbd3 100644
--- a/include/dt-bindings/arm/arm-smmu.h
+++ b/include/dt-bindings/arm/arm-smmu.h
@@ -23,6 +23,5 @@
 #define ARM_SMMU_OPT_MMU500_ERRATA1	(1 << 7)
 #define ARM_SMMU_OPT_STATIC_CB          (1 << 8)
 #define ARM_SMMU_OPT_HALT               (1 << 9)
-#define ARM_SMMU_OPT_HIBERNATION	(1 << 10)
 
 #endif
diff --git a/include/dt-bindings/clock/msm-clocks-8952.h b/include/dt-bindings/clock/msm-clocks-8952.h
index 3190d4f..e66c5ed 100644
--- a/include/dt-bindings/clock/msm-clocks-8952.h
+++ b/include/dt-bindings/clock/msm-clocks-8952.h
@@ -237,8 +237,10 @@
 
 #define clk_dsi0pll_byte_clk_src		0xbbaa30be
 #define clk_dsi0pll_pixel_clk_src		0x45b3260f
+#define clk_dsi0pll_vco_clk			0x15940d40
 #define clk_dsi1pll_byte_clk_src		0x63930a8f
 #define clk_dsi1pll_pixel_clk_src		0x0e4c9b56
+#define clk_dsi1pll_vco_clk			0x99797b50
 
 #define clk_dsi_pll0_byte_clk_src		0x44539836
 #define clk_dsi_pll0_pixel_clk_src		0x5767c287
diff --git a/include/dt-bindings/clock/mt2701-clk.h b/include/dt-bindings/clock/mt2701-clk.h
index 2062c67..a72db8d 100644
--- a/include/dt-bindings/clock/mt2701-clk.h
+++ b/include/dt-bindings/clock/mt2701-clk.h
@@ -176,7 +176,8 @@
 #define CLK_TOP_AUD_EXT1			156
 #define CLK_TOP_AUD_EXT2			157
 #define CLK_TOP_NFI1X_PAD			158
-#define CLK_TOP_NR				159
+#define CLK_TOP_AXISEL_D4			159
+#define CLK_TOP_NR				160
 
 /* APMIXEDSYS */
 
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index c8696df..6da90d0 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,205 +14,213 @@
 #ifndef _DT_BINDINGS_CLK_MSM_GCC_SDM845_H
 #define _DT_BINDINGS_CLK_MSM_GCC_SDM845_H
 
+/* Dummy clocks for rate measurement */
+#define MEASURE_ONLY_SNOC_CLK					0
+#define MEASURE_ONLY_CNOC_CLK					1
+#define MEASURE_ONLY_BIMC_CLK					2
+#define MEASURE_ONLY_IPA_2X_CLK					3
+#define UFS_PHY_AXI_EMMC_VOTE_CLK				4
+#define UFS_PHY_AXI_UFS_VOTE_CLK				5
+
 /* GCC clock registers */
-#define GCC_AGGRE_NOC_PCIE_TBU_CLK				0
-#define GCC_AGGRE_UFS_CARD_AXI_CLK				1
-#define GCC_AGGRE_UFS_PHY_AXI_CLK				2
-#define GCC_AGGRE_USB3_PRIM_AXI_CLK				3
-#define GCC_AGGRE_USB3_SEC_AXI_CLK				4
-#define GCC_BOOT_ROM_AHB_CLK					5
-#define GCC_CAMERA_AHB_CLK					6
-#define GCC_CAMERA_AXI_CLK					7
-#define GCC_CAMERA_XO_CLK					8
-#define GCC_CE1_AHB_CLK						9
-#define GCC_CE1_AXI_CLK						10
-#define GCC_CE1_CLK						11
-#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK				12
-#define GCC_CFG_NOC_USB3_SEC_AXI_CLK				13
-#define GCC_CPUSS_AHB_CLK					14
-#define GCC_CPUSS_AHB_CLK_SRC					15
-#define GCC_CPUSS_DVM_BUS_CLK					16
-#define GCC_CPUSS_GNOC_CLK					17
-#define GCC_CPUSS_RBCPR_CLK					18
-#define GCC_CPUSS_RBCPR_CLK_SRC					19
-#define GCC_DDRSS_GPU_AXI_CLK					20
-#define GCC_DISP_AHB_CLK					21
-#define GCC_DISP_AXI_CLK					22
-#define GCC_DISP_GPLL0_CLK_SRC					23
-#define GCC_DISP_GPLL0_DIV_CLK_SRC				24
-#define GCC_DISP_XO_CLK						25
-#define GCC_GP1_CLK						26
-#define GCC_GP1_CLK_SRC						27
-#define GCC_GP2_CLK						28
-#define GCC_GP2_CLK_SRC						29
-#define GCC_GP3_CLK						30
-#define GCC_GP3_CLK_SRC						31
-#define GCC_GPU_CFG_AHB_CLK					32
-#define GCC_GPU_GPLL0_CLK_SRC					33
-#define GCC_GPU_GPLL0_DIV_CLK_SRC				34
-#define GCC_GPU_MEMNOC_GFX_CLK					35
-#define GCC_GPU_SNOC_DVM_GFX_CLK				36
-#define GCC_MSS_AXIS2_CLK					37
-#define GCC_MSS_CFG_AHB_CLK					38
-#define GCC_MSS_GPLL0_DIV_CLK_SRC				39
-#define GCC_MSS_MFAB_AXIS_CLK					40
-#define GCC_MSS_Q6_MEMNOC_AXI_CLK				41
-#define GCC_MSS_SNOC_AXI_CLK					42
-#define GCC_PCIE_0_AUX_CLK					43
-#define GCC_PCIE_0_AUX_CLK_SRC					44
-#define GCC_PCIE_0_CFG_AHB_CLK					45
-#define GCC_PCIE_0_CLKREF_CLK					46
-#define GCC_PCIE_0_MSTR_AXI_CLK					47
-#define GCC_PCIE_0_PIPE_CLK					48
-#define GCC_PCIE_0_SLV_AXI_CLK					49
-#define GCC_PCIE_0_SLV_Q2A_AXI_CLK				50
-#define GCC_PCIE_1_AUX_CLK					51
-#define GCC_PCIE_1_AUX_CLK_SRC					52
-#define GCC_PCIE_1_CFG_AHB_CLK					53
-#define GCC_PCIE_1_CLKREF_CLK					54
-#define GCC_PCIE_1_MSTR_AXI_CLK					55
-#define GCC_PCIE_1_PIPE_CLK					56
-#define GCC_PCIE_1_SLV_AXI_CLK					57
-#define GCC_PCIE_1_SLV_Q2A_AXI_CLK				58
-#define GCC_PCIE_PHY_AUX_CLK					59
-#define GCC_PCIE_PHY_REFGEN_CLK					60
-#define GCC_PCIE_PHY_REFGEN_CLK_SRC				61
-#define GCC_PDM2_CLK						62
-#define GCC_PDM2_CLK_SRC					63
-#define GCC_PDM_AHB_CLK						64
-#define GCC_PDM_XO4_CLK						65
-#define GCC_PRNG_AHB_CLK					66
-#define GCC_QMIP_CAMERA_AHB_CLK					67
-#define GCC_QMIP_DISP_AHB_CLK					68
-#define GCC_QMIP_VIDEO_AHB_CLK					69
-#define GCC_QUPV3_WRAP0_S0_CLK					70
-#define GCC_QUPV3_WRAP0_S0_CLK_SRC				71
-#define GCC_QUPV3_WRAP0_S1_CLK					72
-#define GCC_QUPV3_WRAP0_S1_CLK_SRC				73
-#define GCC_QUPV3_WRAP0_S2_CLK					74
-#define GCC_QUPV3_WRAP0_S2_CLK_SRC				75
-#define GCC_QUPV3_WRAP0_S3_CLK					76
-#define GCC_QUPV3_WRAP0_S3_CLK_SRC				77
-#define GCC_QUPV3_WRAP0_S4_CLK					78
-#define GCC_QUPV3_WRAP0_S4_CLK_SRC				79
-#define GCC_QUPV3_WRAP0_S5_CLK					80
-#define GCC_QUPV3_WRAP0_S5_CLK_SRC				81
-#define GCC_QUPV3_WRAP0_S6_CLK					82
-#define GCC_QUPV3_WRAP0_S6_CLK_SRC				83
-#define GCC_QUPV3_WRAP0_S7_CLK					84
-#define GCC_QUPV3_WRAP0_S7_CLK_SRC				85
-#define GCC_QUPV3_WRAP1_S0_CLK					86
-#define GCC_QUPV3_WRAP1_S0_CLK_SRC				87
-#define GCC_QUPV3_WRAP1_S1_CLK					88
-#define GCC_QUPV3_WRAP1_S1_CLK_SRC				89
-#define GCC_QUPV3_WRAP1_S2_CLK					90
-#define GCC_QUPV3_WRAP1_S2_CLK_SRC				91
-#define GCC_QUPV3_WRAP1_S3_CLK					92
-#define GCC_QUPV3_WRAP1_S3_CLK_SRC				93
-#define GCC_QUPV3_WRAP1_S4_CLK					94
-#define GCC_QUPV3_WRAP1_S4_CLK_SRC				95
-#define GCC_QUPV3_WRAP1_S5_CLK					96
-#define GCC_QUPV3_WRAP1_S5_CLK_SRC				97
-#define GCC_QUPV3_WRAP1_S6_CLK					98
-#define GCC_QUPV3_WRAP1_S6_CLK_SRC				99
-#define GCC_QUPV3_WRAP1_S7_CLK					100
-#define GCC_QUPV3_WRAP1_S7_CLK_SRC				101
-#define GCC_QUPV3_WRAP_0_M_AHB_CLK				102
-#define GCC_QUPV3_WRAP_0_S_AHB_CLK				103
-#define GCC_QUPV3_WRAP_1_M_AHB_CLK				104
-#define GCC_QUPV3_WRAP_1_S_AHB_CLK				105
-#define GCC_SDCC2_AHB_CLK					106
-#define GCC_SDCC2_APPS_CLK					107
-#define GCC_SDCC2_APPS_CLK_SRC					108
-#define GCC_SDCC4_AHB_CLK					109
-#define GCC_SDCC4_APPS_CLK					110
-#define GCC_SDCC4_APPS_CLK_SRC					111
-#define GCC_SYS_NOC_CPUSS_AHB_CLK				112
-#define GCC_TSIF_AHB_CLK					113
-#define GCC_TSIF_INACTIVITY_TIMERS_CLK				114
-#define GCC_TSIF_REF_CLK					115
-#define GCC_TSIF_REF_CLK_SRC					116
-#define GCC_UFS_CARD_AHB_CLK					117
-#define GCC_UFS_CARD_AXI_CLK					118
-#define GCC_UFS_CARD_AXI_CLK_SRC				119
-#define GCC_UFS_CARD_CLKREF_CLK					120
-#define GCC_UFS_CARD_ICE_CORE_CLK				121
-#define GCC_UFS_CARD_ICE_CORE_CLK_SRC				122
-#define GCC_UFS_CARD_PHY_AUX_CLK				123
-#define GCC_UFS_CARD_PHY_AUX_CLK_SRC				124
-#define GCC_UFS_CARD_RX_SYMBOL_0_CLK				125
-#define GCC_UFS_CARD_RX_SYMBOL_1_CLK				126
-#define GCC_UFS_CARD_TX_SYMBOL_0_CLK				127
-#define GCC_UFS_CARD_UNIPRO_CORE_CLK				128
-#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC			129
-#define GCC_UFS_MEM_CLKREF_CLK					130
-#define GCC_UFS_PHY_AHB_CLK					131
-#define GCC_UFS_PHY_AXI_CLK					132
-#define GCC_UFS_PHY_AXI_CLK_SRC					133
-#define GCC_UFS_PHY_ICE_CORE_CLK				134
-#define GCC_UFS_PHY_ICE_CORE_CLK_SRC				135
-#define GCC_UFS_PHY_PHY_AUX_CLK					136
-#define GCC_UFS_PHY_PHY_AUX_CLK_SRC				137
-#define GCC_UFS_PHY_RX_SYMBOL_0_CLK				138
-#define GCC_UFS_PHY_RX_SYMBOL_1_CLK				139
-#define GCC_UFS_PHY_TX_SYMBOL_0_CLK				140
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK				141
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC				142
-#define GCC_USB30_PRIM_MASTER_CLK				143
-#define GCC_USB30_PRIM_MASTER_CLK_SRC				144
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK				145
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC			146
-#define GCC_USB30_PRIM_SLEEP_CLK				147
-#define GCC_USB30_SEC_MASTER_CLK				148
-#define GCC_USB30_SEC_MASTER_CLK_SRC				149
-#define GCC_USB30_SEC_MOCK_UTMI_CLK				150
-#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC				151
-#define GCC_USB30_SEC_SLEEP_CLK					152
-#define GCC_USB3_PRIM_CLKREF_CLK				153
-#define GCC_USB3_PRIM_PHY_AUX_CLK				154
-#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC				155
-#define GCC_USB3_PRIM_PHY_COM_AUX_CLK				156
-#define GCC_USB3_PRIM_PHY_PIPE_CLK				157
-#define GCC_USB3_SEC_CLKREF_CLK					158
-#define GCC_USB3_SEC_PHY_AUX_CLK				159
-#define GCC_USB3_SEC_PHY_AUX_CLK_SRC				160
-#define GCC_USB3_SEC_PHY_COM_AUX_CLK				161
-#define GCC_USB3_SEC_PHY_PIPE_CLK				162
-#define GCC_USB_PHY_CFG_AHB2PHY_CLK				163
-#define GCC_VIDEO_AHB_CLK					164
-#define GCC_VIDEO_AXI_CLK					165
-#define GCC_VIDEO_XO_CLK					166
-#define GPLL0							167
-#define GPLL0_OUT_EVEN						168
-#define GPLL0_OUT_MAIN						169
-#define GCC_UFS_CARD_AXI_HW_CTL_CLK				170
-#define GCC_UFS_PHY_AXI_HW_CTL_CLK				171
-#define GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK			172
-#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK			173
-#define GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK			174
-#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK				175
-#define GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK			176
-#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK			177
-#define GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK				178
-#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK				179
-#define GCC_GPU_IREF_CLK					180
-#define GCC_SDCC1_AHB_CLK					181
-#define GCC_SDCC1_APPS_CLK					182
-#define GCC_SDCC1_ICE_CORE_CLK					183
-#define GCC_SDCC1_APPS_CLK_SRC					184
-#define GCC_SDCC1_ICE_CORE_CLK_SRC				185
-#define GCC_APC_VS_CLK						186
-#define GCC_GPU_VS_CLK						187
-#define GCC_MSS_VS_CLK						188
-#define GCC_VDDA_VS_CLK						189
-#define GCC_VDDCX_VS_CLK					190
-#define GCC_VDDMX_VS_CLK					191
-#define GCC_VS_CTRL_AHB_CLK					192
-#define GCC_VS_CTRL_CLK						193
-#define GCC_VS_CTRL_CLK_SRC					194
-#define GCC_VSENSOR_CLK_SRC					195
-#define GPLL4							196
-#define GPLL6							197
+#define GCC_AGGRE_NOC_PCIE_TBU_CLK				6
+#define GCC_AGGRE_UFS_CARD_AXI_CLK				7
+#define GCC_AGGRE_UFS_PHY_AXI_CLK				8
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK				9
+#define GCC_AGGRE_USB3_SEC_AXI_CLK				10
+#define GCC_BOOT_ROM_AHB_CLK					11
+#define GCC_CAMERA_AHB_CLK					12
+#define GCC_CAMERA_AXI_CLK					13
+#define GCC_CAMERA_XO_CLK					14
+#define GCC_CE1_AHB_CLK						15
+#define GCC_CE1_AXI_CLK						16
+#define GCC_CE1_CLK						17
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK				18
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK				19
+#define GCC_CPUSS_AHB_CLK					20
+#define GCC_CPUSS_AHB_CLK_SRC					21
+#define GCC_CPUSS_DVM_BUS_CLK					22
+#define GCC_CPUSS_GNOC_CLK					23
+#define GCC_CPUSS_RBCPR_CLK					24
+#define GCC_CPUSS_RBCPR_CLK_SRC					25
+#define GCC_DDRSS_GPU_AXI_CLK					26
+#define GCC_DISP_AHB_CLK					27
+#define GCC_DISP_AXI_CLK					28
+#define GCC_DISP_GPLL0_CLK_SRC					29
+#define GCC_DISP_GPLL0_DIV_CLK_SRC				30
+#define GCC_DISP_XO_CLK						31
+#define GCC_GP1_CLK						32
+#define GCC_GP1_CLK_SRC						33
+#define GCC_GP2_CLK						34
+#define GCC_GP2_CLK_SRC						35
+#define GCC_GP3_CLK						36
+#define GCC_GP3_CLK_SRC						37
+#define GCC_GPU_CFG_AHB_CLK					38
+#define GCC_GPU_GPLL0_CLK_SRC					39
+#define GCC_GPU_GPLL0_DIV_CLK_SRC				40
+#define GCC_GPU_MEMNOC_GFX_CLK					41
+#define GCC_GPU_SNOC_DVM_GFX_CLK				42
+#define GCC_MSS_AXIS2_CLK					43
+#define GCC_MSS_CFG_AHB_CLK					44
+#define GCC_MSS_GPLL0_DIV_CLK_SRC				45
+#define GCC_MSS_MFAB_AXIS_CLK					46
+#define GCC_MSS_Q6_MEMNOC_AXI_CLK				47
+#define GCC_MSS_SNOC_AXI_CLK					48
+#define GCC_PCIE_0_AUX_CLK					49
+#define GCC_PCIE_0_AUX_CLK_SRC					50
+#define GCC_PCIE_0_CFG_AHB_CLK					51
+#define GCC_PCIE_0_CLKREF_CLK					52
+#define GCC_PCIE_0_MSTR_AXI_CLK					53
+#define GCC_PCIE_0_PIPE_CLK					54
+#define GCC_PCIE_0_SLV_AXI_CLK					55
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK				56
+#define GCC_PCIE_1_AUX_CLK					57
+#define GCC_PCIE_1_AUX_CLK_SRC					58
+#define GCC_PCIE_1_CFG_AHB_CLK					59
+#define GCC_PCIE_1_CLKREF_CLK					60
+#define GCC_PCIE_1_MSTR_AXI_CLK					61
+#define GCC_PCIE_1_PIPE_CLK					62
+#define GCC_PCIE_1_SLV_AXI_CLK					63
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK				64
+#define GCC_PCIE_PHY_AUX_CLK					65
+#define GCC_PCIE_PHY_REFGEN_CLK					66
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC				67
+#define GCC_PDM2_CLK						68
+#define GCC_PDM2_CLK_SRC					69
+#define GCC_PDM_AHB_CLK						70
+#define GCC_PDM_XO4_CLK						71
+#define GCC_PRNG_AHB_CLK					72
+#define GCC_QMIP_CAMERA_AHB_CLK					73
+#define GCC_QMIP_DISP_AHB_CLK					74
+#define GCC_QMIP_VIDEO_AHB_CLK					75
+#define GCC_QUPV3_WRAP0_S0_CLK					76
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC				77
+#define GCC_QUPV3_WRAP0_S1_CLK					78
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC				79
+#define GCC_QUPV3_WRAP0_S2_CLK					80
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC				81
+#define GCC_QUPV3_WRAP0_S3_CLK					82
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC				83
+#define GCC_QUPV3_WRAP0_S4_CLK					84
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC				85
+#define GCC_QUPV3_WRAP0_S5_CLK					86
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC				87
+#define GCC_QUPV3_WRAP0_S6_CLK					88
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC				89
+#define GCC_QUPV3_WRAP0_S7_CLK					90
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC				91
+#define GCC_QUPV3_WRAP1_S0_CLK					92
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC				93
+#define GCC_QUPV3_WRAP1_S1_CLK					94
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC				95
+#define GCC_QUPV3_WRAP1_S2_CLK					96
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC				97
+#define GCC_QUPV3_WRAP1_S3_CLK					98
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC				99
+#define GCC_QUPV3_WRAP1_S4_CLK					100
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC				101
+#define GCC_QUPV3_WRAP1_S5_CLK					102
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC				103
+#define GCC_QUPV3_WRAP1_S6_CLK					104
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC				105
+#define GCC_QUPV3_WRAP1_S7_CLK					106
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC				107
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK				108
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK				109
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK				110
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK				111
+#define GCC_SDCC2_AHB_CLK					112
+#define GCC_SDCC2_APPS_CLK					113
+#define GCC_SDCC2_APPS_CLK_SRC					114
+#define GCC_SDCC4_AHB_CLK					115
+#define GCC_SDCC4_APPS_CLK					116
+#define GCC_SDCC4_APPS_CLK_SRC					117
+#define GCC_SYS_NOC_CPUSS_AHB_CLK				118
+#define GCC_TSIF_AHB_CLK					119
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK				120
+#define GCC_TSIF_REF_CLK					121
+#define GCC_TSIF_REF_CLK_SRC					122
+#define GCC_UFS_CARD_AHB_CLK					123
+#define GCC_UFS_CARD_AXI_CLK					124
+#define GCC_UFS_CARD_AXI_CLK_SRC				125
+#define GCC_UFS_CARD_CLKREF_CLK					126
+#define GCC_UFS_CARD_ICE_CORE_CLK				127
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC				128
+#define GCC_UFS_CARD_PHY_AUX_CLK				129
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC				130
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK				131
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK				132
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK				133
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK				134
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC			135
+#define GCC_UFS_MEM_CLKREF_CLK					136
+#define GCC_UFS_PHY_AHB_CLK					137
+#define GCC_UFS_PHY_AXI_CLK					138
+#define GCC_UFS_PHY_AXI_CLK_SRC					139
+#define GCC_UFS_PHY_ICE_CORE_CLK				140
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC				141
+#define GCC_UFS_PHY_PHY_AUX_CLK					142
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC				143
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK				144
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK				145
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK				146
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK				147
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC				148
+#define GCC_USB30_PRIM_MASTER_CLK				149
+#define GCC_USB30_PRIM_MASTER_CLK_SRC				150
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK				151
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC			152
+#define GCC_USB30_PRIM_SLEEP_CLK				153
+#define GCC_USB30_SEC_MASTER_CLK				154
+#define GCC_USB30_SEC_MASTER_CLK_SRC				155
+#define GCC_USB30_SEC_MOCK_UTMI_CLK				156
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC				157
+#define GCC_USB30_SEC_SLEEP_CLK					158
+#define GCC_USB3_PRIM_CLKREF_CLK				159
+#define GCC_USB3_PRIM_PHY_AUX_CLK				160
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC				161
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK				162
+#define GCC_USB3_PRIM_PHY_PIPE_CLK				163
+#define GCC_USB3_SEC_CLKREF_CLK					164
+#define GCC_USB3_SEC_PHY_AUX_CLK				165
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC				166
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK				167
+#define GCC_USB3_SEC_PHY_PIPE_CLK				168
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK				169
+#define GCC_VIDEO_AHB_CLK					170
+#define GCC_VIDEO_AXI_CLK					171
+#define GCC_VIDEO_XO_CLK					172
+#define GPLL0							173
+#define GPLL0_OUT_EVEN						174
+#define GPLL0_OUT_MAIN						175
+#define GCC_UFS_CARD_AXI_HW_CTL_CLK				176
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK				177
+#define GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK			178
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK			179
+#define GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK			180
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK				181
+#define GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK			182
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK			183
+#define GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK				184
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK				185
+#define GCC_GPU_IREF_CLK					186
+#define GCC_SDCC1_AHB_CLK					187
+#define GCC_SDCC1_APPS_CLK					188
+#define GCC_SDCC1_ICE_CORE_CLK					189
+#define GCC_SDCC1_APPS_CLK_SRC					190
+#define GCC_SDCC1_ICE_CORE_CLK_SRC				191
+#define GCC_APC_VS_CLK						192
+#define GCC_GPU_VS_CLK						193
+#define GCC_MSS_VS_CLK						194
+#define GCC_VDDA_VS_CLK						195
+#define GCC_VDDCX_VS_CLK					196
+#define GCC_VDDMX_VS_CLK					197
+#define GCC_VS_CTRL_AHB_CLK					198
+#define GCC_VS_CTRL_CLK						199
+#define GCC_VS_CTRL_CLK_SRC					200
+#define GCC_VSENSOR_CLK_SRC					201
+#define GPLL4							202
+#define GPLL6							203
 
 /* GCC reset clocks */
 #define GCC_MMSS_BCR						0
@@ -243,10 +251,4 @@
 #define GCC_PCIE_1_PHY_BCR					25
 #define GCC_SDCC1_BCR						26
 
-/* Dummy clocks for rate measurement */
-#define MEASURE_ONLY_SNOC_CLK					0
-#define MEASURE_ONLY_CNOC_CLK					1
-#define MEASURE_ONLY_BIMC_CLK					2
-#define MEASURE_ONLY_IPA_2X_CLK					3
-
 #endif
diff --git a/include/dt-bindings/pinctrl/qcom,pmic-gpio.h b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
index 64e2dc7..7ac6f16 100644
--- a/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
+++ b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
@@ -11,9 +11,14 @@
 #define PMIC_GPIO_PULL_UP_1P5_30	3
 
 #define PMIC_GPIO_STRENGTH_NO		0
-#define PMIC_GPIO_STRENGTH_HIGH		1
+#define PMIC_GPIO_STRENGTH_LOW		1
 #define PMIC_GPIO_STRENGTH_MED		2
-#define PMIC_GPIO_STRENGTH_LOW		3
+#define PMIC_GPIO_STRENGTH_HIGH		3
+
+#define PM8921_GPIO_STRENGTH_NO		0
+#define PM8921_GPIO_STRENGTH_HIGH	1
+#define PM8921_GPIO_STRENGTH_MED	2
+#define PM8921_GPIO_STRENGTH_LOW	3
 
 /*
  * Note: PM8018 GPIO3 and GPIO4 are supporting
diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h
new file mode 100644
index 0000000..4b15481
--- /dev/null
+++ b/include/kvm/arm_psci.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __KVM_ARM_PSCI_H__
+#define __KVM_ARM_PSCI_H__
+
+#include <linux/kvm_host.h>
+#include <uapi/linux/psci.h>
+
+#define KVM_ARM_PSCI_0_1	PSCI_VERSION(0, 1)
+#define KVM_ARM_PSCI_0_2	PSCI_VERSION(0, 2)
+#define KVM_ARM_PSCI_1_0	PSCI_VERSION(1, 0)
+
+#define KVM_ARM_PSCI_LATEST	KVM_ARM_PSCI_1_0
+
+/*
+ * We need the KVM pointer independently from the vcpu as we can call
+ * this from HYP, and need to apply kern_hyp_va on it...
+ */
+static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
+{
+	/*
+	 * Our PSCI implementation stays the same across versions from
+	 * v0.2 onward, only adding the few mandatory functions (such
+	 * as FEATURES with 1.0) that are required by newer
+	 * revisions. It is thus safe to return the latest, unless
+	 * userspace has instructed us otherwise.
+	 */
+	if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) {
+		if (vcpu->kvm->arch.psci_version)
+			return vcpu->kvm->arch.psci_version;
+
+		return KVM_ARM_PSCI_LATEST;
+	}
+
+	return KVM_ARM_PSCI_0_1;
+}
+
+
+int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
+
+struct kvm_one_reg;
+
+int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu);
+int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
+int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+
+#endif /* __KVM_ARM_PSCI_H__ */
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index 4c5bca38..43690f5 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -14,14 +14,16 @@
 #ifndef __LINUX_ARM_SMCCC_H
 #define __LINUX_ARM_SMCCC_H
 
+#include <uapi/linux/const.h>
+
 /*
  * This file provides common defines for ARM SMC Calling Convention as
  * specified in
  * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
  */
 
-#define ARM_SMCCC_STD_CALL		0
-#define ARM_SMCCC_FAST_CALL		1
+#define ARM_SMCCC_STD_CALL	        _AC(0,U)
+#define ARM_SMCCC_FAST_CALL	        _AC(1,U)
 #define ARM_SMCCC_TYPE_SHIFT		31
 
 #define ARM_SMCCC_SMC_32		0
@@ -60,6 +62,24 @@
 #define ARM_SMCCC_QUIRK_NONE		0
 #define ARM_SMCCC_QUIRK_QCOM_A6		1 /* Save/restore register a6 */
 
+#define ARM_SMCCC_VERSION_1_0		0x10000
+#define ARM_SMCCC_VERSION_1_1		0x10001
+
+#define ARM_SMCCC_VERSION_FUNC_ID					\
+	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
+			   ARM_SMCCC_SMC_32,				\
+			   0, 0)
+
+#define ARM_SMCCC_ARCH_FEATURES_FUNC_ID					\
+	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
+			   ARM_SMCCC_SMC_32,				\
+			   0, 1)
+
+#define ARM_SMCCC_ARCH_WORKAROUND_1					\
+	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
+			   ARM_SMCCC_SMC_32,				\
+			   0, 0x8000)
+
 #ifndef __ASSEMBLY__
 
 #include <linux/linkage.h>
@@ -130,5 +150,148 @@
 
 #define arm_smccc_hvc_quirk(...) __arm_smccc_hvc(__VA_ARGS__)
 
+/* SMCCC v1.1 implementation madness follows */
+#ifdef CONFIG_ARM64
+
+#define SMCCC_SMC_INST	"smc	#0"
+#define SMCCC_HVC_INST	"hvc	#0"
+#define SMCCC_REG(n)	asm("x" # n)
+
+#elif defined(CONFIG_ARM)
+#include <asm/opcodes-sec.h>
+#include <asm/opcodes-virt.h>
+
+#define SMCCC_SMC_INST	__SMC(0)
+#define SMCCC_HVC_INST	__HVC(0)
+#define SMCCC_REG(n)	asm("r" # n)
+
+#endif
+
+#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x
+
+#define __count_args(...)						\
+	___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0)
+
+#define __constraint_write_0						\
+	"+r" (r0), "=&r" (r1), "=&r" (r2), "=&r" (r3)
+#define __constraint_write_1						\
+	"+r" (r0), "+r" (r1), "=&r" (r2), "=&r" (r3)
+#define __constraint_write_2						\
+	"+r" (r0), "+r" (r1), "+r" (r2), "=&r" (r3)
+#define __constraint_write_3						\
+	"+r" (r0), "+r" (r1), "+r" (r2), "+r" (r3)
+#define __constraint_write_4	__constraint_write_3
+#define __constraint_write_5	__constraint_write_4
+#define __constraint_write_6	__constraint_write_5
+#define __constraint_write_7	__constraint_write_6
+
+#define __constraint_read_0
+#define __constraint_read_1
+#define __constraint_read_2
+#define __constraint_read_3
+#define __constraint_read_4	"r" (r4)
+#define __constraint_read_5	__constraint_read_4, "r" (r5)
+#define __constraint_read_6	__constraint_read_5, "r" (r6)
+#define __constraint_read_7	__constraint_read_6, "r" (r7)
+
+#define __declare_arg_0(a0, res)					\
+	struct arm_smccc_res   *___res = res;				\
+	register u32           r0 SMCCC_REG(0) = a0;			\
+	register unsigned long r1 SMCCC_REG(1);				\
+	register unsigned long r2 SMCCC_REG(2);				\
+	register unsigned long r3 SMCCC_REG(3)
+
+#define __declare_arg_1(a0, a1, res)					\
+	struct arm_smccc_res   *___res = res;				\
+	register u32           r0 SMCCC_REG(0) = a0;			\
+	register typeof(a1)    r1 SMCCC_REG(1) = a1;			\
+	register unsigned long r2 SMCCC_REG(2);				\
+	register unsigned long r3 SMCCC_REG(3)
+
+#define __declare_arg_2(a0, a1, a2, res)				\
+	struct arm_smccc_res   *___res = res;				\
+	register u32           r0 SMCCC_REG(0) = a0;			\
+	register typeof(a1)    r1 SMCCC_REG(1) = a1;			\
+	register typeof(a2)    r2 SMCCC_REG(2) = a2;			\
+	register unsigned long r3 SMCCC_REG(3)
+
+#define __declare_arg_3(a0, a1, a2, a3, res)				\
+	struct arm_smccc_res   *___res = res;				\
+	register u32           r0 SMCCC_REG(0) = a0;			\
+	register typeof(a1)    r1 SMCCC_REG(1) = a1;			\
+	register typeof(a2)    r2 SMCCC_REG(2) = a2;			\
+	register typeof(a3)    r3 SMCCC_REG(3) = a3
+
+#define __declare_arg_4(a0, a1, a2, a3, a4, res)			\
+	__declare_arg_3(a0, a1, a2, a3, res);				\
+	register typeof(a4) r4 SMCCC_REG(4) = a4
+
+#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res)			\
+	__declare_arg_4(a0, a1, a2, a3, a4, res);			\
+	register typeof(a5) r5 SMCCC_REG(5) = a5
+
+#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res)		\
+	__declare_arg_5(a0, a1, a2, a3, a4, a5, res);			\
+	register typeof(a6) r6 SMCCC_REG(6) = a6
+
+#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res)		\
+	__declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res);		\
+	register typeof(a7) r7 SMCCC_REG(7) = a7
+
+#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
+#define __declare_args(count, ...)  ___declare_args(count, __VA_ARGS__)
+
+#define ___constraints(count)						\
+	: __constraint_write_ ## count					\
+	: __constraint_read_ ## count					\
+	: "memory"
+#define __constraints(count)	___constraints(count)
+
+/*
+ * We have an output list that is not necessarily used, and GCC feels
+ * entitled to optimise the whole sequence away. "volatile" is what
+ * makes it stick.
+ */
+#define __arm_smccc_1_1(inst, ...)					\
+	do {								\
+		__declare_args(__count_args(__VA_ARGS__), __VA_ARGS__);	\
+		asm volatile(inst "\n"					\
+			     __constraints(__count_args(__VA_ARGS__)));	\
+		if (___res)						\
+			*___res = (typeof(*___res)){r0, r1, r2, r3};	\
+	} while (0)
+
+/*
+ * arm_smccc_1_1_smc() - make an SMCCC v1.1 compliant SMC call
+ *
+ * This is a variadic macro taking one to eight source arguments, and
+ * an optional return structure.
+ *
+ * @a0-a7: arguments passed in registers 0 to 7
+ * @res: result values from registers 0 to 3
+ *
+ * This macro is used to make SMC calls following SMC Calling Convention v1.1.
+ * The content of the supplied parameters is copied to registers 0 to 7 prior
+ * to the SMC instruction. The return values are updated with the contents of
+ * registers 0 to 3 on return from the SMC instruction if @res is not NULL.
+ */
+#define arm_smccc_1_1_smc(...)	__arm_smccc_1_1(SMCCC_SMC_INST, __VA_ARGS__)
+
+/*
+ * arm_smccc_1_1_hvc() - make an SMCCC v1.1 compliant HVC call
+ *
+ * This is a variadic macro taking one to eight source arguments, and
+ * an optional return structure.
+ *
+ * @a0-a7: arguments passed in registers 0 to 7
+ * @res: result values from registers 0 to 3
+ *
+ * This macro is used to make HVC calls following SMC Calling Convention v1.1.
+ * The content of the supplied parameters is copied to registers 0 to 7 prior
+ * to the HVC instruction. The return values are updated with the contents of
+ * registers 0 to 3 on return from the HVC instruction if @res is not NULL.
+ */
+#define arm_smccc_1_1_hvc(...)	__arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)
+
 #endif /*__ASSEMBLY__*/
 #endif /*__LINUX_ARM_SMCCC_H*/
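As a rough illustration of the new v1.1 interface (the caller below is hypothetical and simplified, not part of this change), a driver could probe for SMCCC v1.1 and then for the ARCH_WORKAROUND_1 firmware mitigation; a negative value in res.a0 means "not supported":

	struct arm_smccc_res res;

	arm_smccc_1_1_smc(ARM_SMCCC_VERSION_FUNC_ID, &res);
	if ((int)res.a0 < 0 || res.a0 < ARM_SMCCC_VERSION_1_1)
		return false;	/* pre-v1.1 firmware: feature queries unavailable */

	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
	return (int)res.a0 >= 0;	/* >= 0: the workaround call is implemented */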
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index e71835b..bc7dba0 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -627,6 +627,8 @@
 }
 #endif
 
+#define atomic_cond_read_acquire(v, c)	smp_cond_load_acquire(&(v)->counter, (c))
+
 #ifdef CONFIG_GENERIC_ATOMIC64
 #include <asm-generic/atomic64.h>
 #endif
@@ -1023,6 +1025,8 @@
 }
 #endif
 
+#define atomic64_cond_read_acquire(v, c)	smp_cond_load_acquire(&(v)->counter, (c))
+
 #include <asm-generic/atomic-long.h>
 
 #endif /* _LINUX_ATOMIC_H */
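Illustrative use of the new helper (the variable is hypothetical): spin until another CPU publishes a non-zero value, taking acquire ordering on the read that finally observes it, so the publisher's earlier stores are visible afterwards.

	static atomic_t ready = ATOMIC_INIT(0);

	/* Publisher, elsewhere: smp_store_release(&ready.counter, 1); */

	atomic_cond_read_acquire(&ready, VAL != 0);
	/* Everything the publisher wrote before the release is visible here. */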
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 0691068..002f87c 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -195,6 +195,11 @@
 	set_wb_congested(bdi->wb.congested, sync);
 }
 
+struct wb_lock_cookie {
+	bool locked;
+	unsigned long flags;
+};
+
 #ifdef CONFIG_CGROUP_WRITEBACK
 
 /**
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index c52a48c..8272fcf7 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -374,7 +374,7 @@
 /**
  * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
  * @inode: target inode
- * @lockedp: temp bool output param, to be passed to the end function
+ * @cookie: output param, to be passed to the end function
  *
  * The caller wants to access the wb associated with @inode but isn't
  * holding inode->i_lock, mapping->tree_lock or wb->list_lock.  This
@@ -382,12 +382,12 @@
  * association doesn't change until the transaction is finished with
  * unlocked_inode_to_wb_end().
  *
- * The caller must call unlocked_inode_to_wb_end() with *@lockdep
- * afterwards and can't sleep during transaction.  IRQ may or may not be
- * disabled on return.
+ * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
+ * can't sleep during the transaction.  IRQs may or may not be disabled on
+ * return.
  */
 static inline struct bdi_writeback *
-unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
 {
 	rcu_read_lock();
 
@@ -395,10 +395,10 @@
 	 * Paired with store_release in inode_switch_wb_work_fn() and
 	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
 	 */
-	*lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
+	cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
 
-	if (unlikely(*lockedp))
-		spin_lock_irq(&inode->i_mapping->tree_lock);
+	if (unlikely(cookie->locked))
+		spin_lock_irqsave(&inode->i_mapping->tree_lock, cookie->flags);
 
 	/*
 	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
@@ -410,12 +410,13 @@
 /**
  * unlocked_inode_to_wb_end - end inode wb access transaction
  * @inode: target inode
- * @locked: *@lockedp from unlocked_inode_to_wb_begin()
+ * @cookie: @cookie from unlocked_inode_to_wb_begin()
  */
-static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
+static inline void unlocked_inode_to_wb_end(struct inode *inode,
+					    struct wb_lock_cookie *cookie)
 {
-	if (unlikely(locked))
-		spin_unlock_irq(&inode->i_mapping->tree_lock);
+	if (unlikely(cookie->locked))
+		spin_unlock_irqrestore(&inode->i_mapping->tree_lock, cookie->flags);
 
 	rcu_read_unlock();
 }
@@ -462,12 +463,13 @@
 }
 
 static inline struct bdi_writeback *
-unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
 {
 	return inode_to_wb(inode);
 }
 
-static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
+static inline void unlocked_inode_to_wb_end(struct inode *inode,
+					    struct wb_lock_cookie *cookie)
 {
 }
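A minimal sketch of the reworked transaction (the caller is hypothetical): the cookie now carries both the "tree_lock taken" flag and the saved IRQ state, which is what lets the begin/end pair be used from contexts that already run with IRQs disabled.

	struct wb_lock_cookie cookie = {};
	struct bdi_writeback *wb;

	wb = unlocked_inode_to_wb_begin(inode, &cookie);
	/* ... use wb; must not sleep while the transaction is open ... */
	unlocked_inode_to_wb_end(inode, &cookie);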
 
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 97cb48f..0885c9f 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -61,6 +61,9 @@
 	((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
 #define bio_sectors(bio)	((bio)->bi_iter.bi_size >> 9)
 #define bio_end_sector(bio)	((bio)->bi_iter.bi_sector + bio_sectors((bio)))
+#define bio_dun(bio)		((bio)->bi_iter.bi_dun)
+#define bio_duns(bio)		(bio_sectors(bio) >> 3) /* 4KB unit */
+#define bio_end_dun(bio)	(bio_dun(bio) + bio_duns(bio))
 
 /*
  * Check whether this bio carries any data or not. A NULL bio is allowed.
@@ -169,6 +172,11 @@
 {
 	iter->bi_sector += bytes >> 9;
 
+#ifdef CONFIG_PFK
+	if (iter->bi_dun)
+		iter->bi_dun += bytes >> 12;
+#endif
+
 	if (bio_no_advance_iter(bio))
 		iter->bi_size -= bytes;
 	else
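Worked numbers for the DUN bookkeeping above (illustrative only): the data-unit number counts 4KB blocks while bi_sector counts 512-byte sectors, so advancing an inline-encrypted bio by 64KB updates the iterator as follows.

	/* bytes == 65536 */
	iter->bi_sector += 65536 >> 9;	/* += 128 sectors (512-byte units) */
	iter->bi_dun    += 65536 >> 12;	/* +=  16 DUNs    (4KB units) */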
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 2b8b6e0..5a3712c 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -25,6 +25,7 @@
 struct bio {
 	struct bio		*bi_next;	/* request queue link */
 	struct block_device	*bi_bdev;
+	unsigned short		bi_write_hint;
 	int			bi_error;
 	unsigned int		bi_opf;		/* bottom bits req flags,
 						 * top bits REQ_OP. Use
@@ -65,6 +66,10 @@
 		struct bio_integrity_payload *bi_integrity; /* data integrity */
 #endif
 	};
+#ifdef CONFIG_PFK
+	/* Encryption key to use (NULL if none) */
+	const struct blk_encryption_key	*bi_crypt_key;
+#endif
 
 	unsigned short		bi_vcnt;	/* how many bio_vec's */
 
@@ -81,6 +86,12 @@
 	struct bio_set		*bi_pool;
 
 	/*
+	 * When using direct-io (O_DIRECT), we can't get the inode from a bio
+	 * by walking bio->bi_io_vec->bv_page->mapping->host
+	 * since the page is anon.
+	 */
+	struct inode		*bi_dio_inode;
+	/*
 	 * We can inline a number of vecs at the end of the bio, to avoid
 	 * double allocations for a small number of bio_vecs. This member
 	 * MUST obviously be kept at the very end of the bio.
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b0f981a..661dcec 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -509,6 +509,7 @@
 #define QUEUE_FLAG_FLUSH_NQ    25	/* flush not queueuable */
 #define QUEUE_FLAG_DAX         26	/* device supports DAX */
 #define QUEUE_FLAG_FAST        27	/* fast block device (e.g. ram based) */
+#define QUEUE_FLAG_INLINECRYPT 28	/* inline encryption support */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -600,6 +601,8 @@
 	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
 #define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
 #define blk_queue_fast(q)	test_bit(QUEUE_FLAG_FAST, &(q)->queue_flags)
+#define blk_queue_inlinecrypt(q) \
+	test_bit(QUEUE_FLAG_INLINECRYPT, &(q)->queue_flags)
 
 #define blk_noretry_request(rq) \
 	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index 89b65b8..dbf1f2c 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -41,6 +41,7 @@
 
 	unsigned int            bi_bvec_done;	/* number of bytes completed in
 						   current bvec */
+	u64			bi_dun;		/* DUN setting for bio */
 };
 
 /*
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 3a4f264..a8b4284 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -117,7 +117,7 @@
 #define CLOCK_SOURCE_RESELECT			0x100
 
 /* simplify initialization of mask field */
-#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
+#define CLOCKSOURCE_MASK(bits) GENMASK_ULL((bits) - 1, 0)
 
 static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from)
 {
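The GENMASK_ULL() form produces the same values as the old conditional expression, just without the special case for 64 bits; for example:

	CLOCKSOURCE_MASK(32);	/* GENMASK_ULL(31, 0) == 0x00000000ffffffff */
	CLOCKSOURCE_MASK(64);	/* GENMASK_ULL(63, 0) == 0xffffffffffffffff */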
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 27101bb..eba9285 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -207,6 +207,17 @@
 #endif
 #endif
 
+#ifdef CONFIG_STACK_VALIDATION
+#define annotate_unreachable() ({					\
+	asm("1:\t\n"							\
+	    ".pushsection __unreachable, \"a\"\t\n"			\
+	    ".long 1b\t\n"						\
+	    ".popsection\t\n");						\
+})
+#else
+#define annotate_unreachable()
+#endif
+
 /*
  * Mark a position in code as unreachable.  This can be used to
  * suppress control flow warnings after asm blocks that transfer
@@ -216,7 +227,8 @@
  * this in the preprocessor, but we can live with this because they're
  * unreleased.  Really, we need to have autoconf for the kernel.
  */
-#define unreachable() __builtin_unreachable()
+#define unreachable() \
+	do { annotate_unreachable(); __builtin_unreachable(); } while (0)
 
 /* Mark a function definition as prohibited from being cloned. */
 #define __noclone	__attribute__((__noclone__, __optimize__("no-tracer")))
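For context, a hypothetical user of the annotated unreachable(): with CONFIG_STACK_VALIDATION the macro records the location in the __unreachable section so objtool knows control flow really ends there, while still expanding to __builtin_unreachable() for the compiler.

	static void __noreturn my_trap(void)	/* illustrative only */
	{
		asm volatile("ud2");	/* x86 trap instruction; never returns */
		unreachable();		/* suppresses the fall-through warning and,
					 * with CONFIG_STACK_VALIDATION, records
					 * this spot for objtool */
	}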
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 1f7e4ec..912d945 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -50,6 +50,8 @@
 				   struct device_attribute *attr, char *buf);
 extern ssize_t cpu_show_spectre_v2(struct device *dev,
 				   struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
+					  struct device_attribute *attr, char *buf);
 
 extern __printf(4, 5)
 struct device *cpu_device_create(struct device *parent, void *drvdata,
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 45d5522..0fbce32 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -424,6 +424,7 @@
 #define CPUFREQ_START			(2)
 #define CPUFREQ_CREATE_POLICY		(3)
 #define CPUFREQ_REMOVE_POLICY		(4)
+#define CPUFREQ_STOP			(5)
 
 /* Govinfo Notifiers */
 #define CPUFREQ_LOAD_CHANGE		(0)
diff --git a/include/linux/cpufreq_times.h b/include/linux/cpufreq_times.h
new file mode 100644
index 0000000..3fb3875
--- /dev/null
+++ b/include/linux/cpufreq_times.h
@@ -0,0 +1,40 @@
+/* drivers/cpufreq/cpufreq_times.c
+ *
+ * Copyright (C) 2018 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_CPUFREQ_TIMES_H
+#define _LINUX_CPUFREQ_TIMES_H
+
+#include <linux/cpufreq.h>
+#include <linux/cputime.h>
+#include <linux/pid.h>
+
+#ifdef CONFIG_CPU_FREQ_TIMES
+void cpufreq_task_times_init(struct task_struct *p);
+void cpufreq_task_times_exit(struct task_struct *p);
+int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns,
+			    struct pid *pid, struct task_struct *p);
+void cpufreq_acct_update_power(struct task_struct *p, cputime_t cputime);
+void cpufreq_times_create_policy(struct cpufreq_policy *policy);
+void cpufreq_times_record_transition(struct cpufreq_freqs *freq);
+void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end);
+int single_uid_time_in_state_open(struct inode *inode, struct file *file);
+#else
+static inline void cpufreq_times_create_policy(struct cpufreq_policy *policy) {}
+static inline void cpufreq_times_record_transition(
+	struct cpufreq_freqs *freq) {}
+static inline void cpufreq_task_times_remove_uids(uid_t uid_start,
+						  uid_t uid_end) {}
+#endif /* CONFIG_CPU_FREQ_TIMES */
+#endif /* _LINUX_CPUFREQ_TIMES_H */
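A hedged sketch of where these hooks are intended to be called from (the surrounding call sites are paraphrased and are not part of this header):

	/* task lifetime, e.g. from copy_process() / process exit */
	cpufreq_task_times_init(p);
	cpufreq_task_times_exit(p);

	/* cputime accounting path: charge the time at the current frequency */
	cpufreq_acct_update_power(p, cputime);

	/* cpufreq core: new policy created / frequency transition finished */
	cpufreq_times_create_policy(policy);
	cpufreq_times_record_transition(freqs);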
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index b49f866..df17901 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -179,6 +179,8 @@
 	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
 #define for_each_cpu_not(cpu, mask)		\
 	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
+#define for_each_cpu_wrap(cpu, mask, start)	\
+	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start))
 #define for_each_cpu_and(cpu, mask, and)	\
 	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
 #else
@@ -695,6 +697,11 @@
 void free_cpumask_var(cpumask_var_t mask);
 void free_bootmem_cpumask_var(cpumask_var_t mask);
 
+static inline bool cpumask_available(cpumask_var_t mask)
+{
+	return mask != NULL;
+}
+
 #else
 typedef struct cpumask cpumask_var_t[1];
 
@@ -735,6 +742,11 @@
 static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
 {
 }
+
+static inline bool cpumask_available(cpumask_var_t mask)
+{
+	return true;
+}
 #endif /* CONFIG_CPUMASK_OFFSTACK */
 
 /* It's common to want to use cpu_all_mask in struct member initializers,
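An illustrative fragment (the helper name is made up) that the new stubs keep building on UP and !CONFIG_CPUMASK_OFFSTACK kernels: cpumask_available() reports whether an off-stack mask has been allocated, and for_each_cpu_wrap() iterates a mask starting at an arbitrary CPU and wrapping around.

	static cpumask_var_t scratch_mask;	/* allocated elsewhere via alloc_cpumask_var() */
	int cpu;

	if (!cpumask_available(scratch_mask))
		return;		/* only possible before the off-stack allocation */

	/* Start just after the current CPU and wrap at the end of the mask. */
	for_each_cpu_wrap(cpu, cpu_online_mask, smp_processor_id() + 1)
		try_next_cpu(cpu);	/* hypothetical per-CPU action */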
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 3d4a198..014d7f9 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -220,6 +220,7 @@
  * These are the low-level FS interfaces to the dcache..
  */
 extern void d_instantiate(struct dentry *, struct inode *);
+extern void d_instantiate_new(struct dentry *, struct inode *);
 extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
 extern int d_instantiate_no_diralias(struct dentry *, struct inode *);
 extern void __d_drop(struct dentry *dentry);
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index b20a094..2e75ae2 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -23,7 +23,6 @@
 
 struct device;
 struct file_operations;
-struct srcu_struct;
 
 struct debugfs_blob_wrapper {
 	void *data;
@@ -43,25 +42,6 @@
 
 extern struct dentry *arch_debugfs_dir;
 
-extern struct srcu_struct debugfs_srcu;
-
-/**
- * debugfs_real_fops - getter for the real file operation
- * @filp: a pointer to a struct file
- *
- * Must only be called under the protection established by
- * debugfs_use_file_start().
- */
-static inline const struct file_operations *debugfs_real_fops(struct file *filp)
-	__must_hold(&debugfs_srcu)
-{
-	/*
-	 * Neither the pointer to the struct file_operations, nor its
-	 * contents ever change -- srcu_dereference() is not needed here.
-	 */
-	return filp->f_path.dentry->d_fsdata;
-}
-
 #define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt)		\
 static int __fops ## _open(struct inode *inode, struct file *file)	\
 {									\
@@ -105,10 +85,10 @@
 void debugfs_remove(struct dentry *dentry);
 void debugfs_remove_recursive(struct dentry *dentry);
 
-int debugfs_use_file_start(const struct dentry *dentry, int *srcu_idx)
-	__acquires(&debugfs_srcu);
+const struct file_operations *debugfs_real_fops(const struct file *filp);
 
-void debugfs_use_file_finish(int srcu_idx) __releases(&debugfs_srcu);
+int debugfs_file_get(struct dentry *dentry);
+void debugfs_file_put(struct dentry *dentry);
 
 ssize_t debugfs_attr_read(struct file *file, char __user *buf,
 			size_t len, loff_t *ppos);
@@ -223,15 +203,12 @@
 static inline void debugfs_remove_recursive(struct dentry *dentry)
 { }
 
-static inline int debugfs_use_file_start(const struct dentry *dentry,
-					int *srcu_idx)
-	__acquires(&debugfs_srcu)
+static inline int debugfs_file_get(struct dentry *dentry)
 {
 	return 0;
 }
 
-static inline void debugfs_use_file_finish(int srcu_idx)
-	__releases(&debugfs_srcu)
+static inline void debugfs_file_put(struct dentry *dentry)
 { }
 
 static inline ssize_t debugfs_attr_read(struct file *file, char __user *buf,
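A minimal sketch (hypothetical fops) of the reference-counting pair that replaces the old debugfs_use_file_start()/debugfs_use_file_finish() SRCU protocol; a non-zero return from debugfs_file_get() means the file is already being removed.

	static ssize_t my_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
	{
		struct dentry *dentry = file->f_path.dentry;
		ssize_t ret;

		ret = debugfs_file_get(dentry);
		if (unlikely(ret))
			return ret;

		ret = simple_read_from_buffer(buf, count, ppos, "ok\n", 3);
		debugfs_file_put(dentry);
		return ret;
	}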
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index a3d9563..62f4fa8 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -146,7 +146,7 @@
  * a new RANGE of SSIDs to the msg_mask_tbl.
  */
 #define MSG_MASK_TBL_CNT		26
-#define APPS_EVENT_LAST_ID		0x0C5B
+#define APPS_EVENT_LAST_ID		0xC7A
 
 #define MSG_SSID_0			0
 #define MSG_SSID_0_LAST			125
@@ -896,7 +896,7 @@
 /* LOG CODES */
 static const uint32_t log_code_last_tbl[] = {
 	0x0,	/* EQUIP ID 0 */
-	0x1C68,	/* EQUIP ID 1 */
+	0x1C6A,	/* EQUIP ID 1 */
 	0x0,	/* EQUIP ID 2 */
 	0x0,	/* EQUIP ID 3 */
 	0x4910,	/* EQUIP ID 4 */
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 5e204a5..2877ccb 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -380,8 +380,8 @@
 	u32 attributes;
 	u32 get_bar_attributes;
 	u32 set_bar_attributes;
-	uint64_t romsize;
-	void *romimage;
+	u64 romsize;
+	u32 romimage;
 } efi_pci_io_protocol_32;
 
 typedef struct {
@@ -400,8 +400,8 @@
 	u64 attributes;
 	u64 get_bar_attributes;
 	u64 set_bar_attributes;
-	uint64_t romsize;
-	void *romimage;
+	u64 romsize;
+	u64 romimage;
 } efi_pci_io_protocol_64;
 
 typedef struct {
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 393b880..aa5db8b 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -21,6 +21,7 @@
 #define F2FS_BLKSIZE			4096	/* support only 4KB block */
 #define F2FS_BLKSIZE_BITS		12	/* bits for F2FS_BLKSIZE */
 #define F2FS_MAX_EXTENSION		64	/* # of extension entries */
+#define F2FS_EXTENSION_LEN		8	/* max size of extension */
 #define F2FS_BLK_ALIGN(x)	(((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS)
 
 #define NULL_ADDR		((block_t)0)	/* used as block_t addresses */
@@ -38,10 +39,10 @@
 
 #define F2FS_MAX_QUOTAS		3
 
-#define F2FS_IO_SIZE(sbi)	(1 << (sbi)->write_io_size_bits) /* Blocks */
-#define F2FS_IO_SIZE_KB(sbi)	(1 << ((sbi)->write_io_size_bits + 2)) /* KB */
-#define F2FS_IO_SIZE_BYTES(sbi)	(1 << ((sbi)->write_io_size_bits + 12)) /* B */
-#define F2FS_IO_SIZE_BITS(sbi)	((sbi)->write_io_size_bits) /* power of 2 */
+#define F2FS_IO_SIZE(sbi)	(1 << F2FS_OPTION(sbi).write_io_size_bits) /* Blocks */
+#define F2FS_IO_SIZE_KB(sbi)	(1 << (F2FS_OPTION(sbi).write_io_size_bits + 2)) /* KB */
+#define F2FS_IO_SIZE_BYTES(sbi)	(1 << (F2FS_OPTION(sbi).write_io_size_bits + 12)) /* B */
+#define F2FS_IO_SIZE_BITS(sbi)	(F2FS_OPTION(sbi).write_io_size_bits) /* power of 2 */
 #define F2FS_IO_SIZE_MASK(sbi)	(F2FS_IO_SIZE(sbi) - 1)
 
 /* This flag is used by node and meta inodes, and by recovery */
@@ -101,7 +102,7 @@
 	__u8 uuid[16];			/* 128-bit uuid for volume */
 	__le16 volume_name[MAX_VOLUME_NAME];	/* volume name */
 	__le32 extension_count;		/* # of extensions below */
-	__u8 extension_list[F2FS_MAX_EXTENSION][8];	/* extension array */
+	__u8 extension_list[F2FS_MAX_EXTENSION][F2FS_EXTENSION_LEN];/* extension array */
 	__le32 cp_payload;
 	__u8 version[VERSION_LEN];	/* the kernel version */
 	__u8 init_version[VERSION_LEN];	/* the initial kernel version */
@@ -110,12 +111,14 @@
 	__u8 encrypt_pw_salt[16];	/* Salt used for string2key algorithm */
 	struct f2fs_device devs[MAX_DEVICES];	/* device list */
 	__le32 qf_ino[F2FS_MAX_QUOTAS];	/* quota inode numbers */
-	__u8 reserved[315];		/* valid reserved region */
+	__u8 hot_ext_count;		/* # of hot file extension */
+	__u8 reserved[314];		/* valid reserved region */
 } __packed;
 
 /*
  * For checkpoint
  */
+#define CP_LARGE_NAT_BITMAP_FLAG	0x00000400
 #define CP_NOCRC_RECOVERY_FLAG	0x00000200
 #define CP_TRIMMED_FLAG		0x00000100
 #define CP_NAT_BITS_FLAG	0x00000080
@@ -302,6 +305,10 @@
  */
 #define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
 #define NAT_ENTRY_BITMAP_SIZE	((NAT_ENTRY_PER_BLOCK + 7) / 8)
+#define NAT_ENTRY_BITMAP_SIZE_ALIGNED				\
+	((NAT_ENTRY_BITMAP_SIZE + BITS_PER_LONG - 1) /		\
+	BITS_PER_LONG * BITS_PER_LONG)
+
 
 struct f2fs_nat_entry {
 	__u8 version;		/* latest version of cached nat entry */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index db3bdb2..1c4da43 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -21,6 +21,7 @@
 #include <linux/mm_types.h>
 #include <linux/capability.h>
 #include <linux/semaphore.h>
+#include <linux/fcntl.h>
 #include <linux/fiemap.h>
 #include <linux/rculist_bl.h>
 #include <linux/atomic.h>
@@ -144,6 +145,9 @@
 /* File was opened by fanotify and shouldn't generate fanotify events */
 #define FMODE_NONOTIFY		((__force fmode_t)0x4000000)
 
+/* File is capable of returning -EAGAIN if I/O will block */
+#define FMODE_NOWAIT		((__force fmode_t)0x8000000)
+
 /*
  * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
  * that indicates that they should check the contents of the iovec are
@@ -316,6 +320,18 @@
 struct address_space;
 struct writeback_control;
 
+/*
+ * Write life time hint values.
+ */
+enum rw_hint {
+	WRITE_LIFE_NOT_SET	= 0,
+	WRITE_LIFE_NONE		= RWH_WRITE_LIFE_NONE,
+	WRITE_LIFE_SHORT	= RWH_WRITE_LIFE_SHORT,
+	WRITE_LIFE_MEDIUM	= RWH_WRITE_LIFE_MEDIUM,
+	WRITE_LIFE_LONG		= RWH_WRITE_LIFE_LONG,
+	WRITE_LIFE_EXTREME	= RWH_WRITE_LIFE_EXTREME,
+};
+
 #define IOCB_EVENTFD		(1 << 0)
 #define IOCB_APPEND		(1 << 1)
 #define IOCB_DIRECT		(1 << 2)
@@ -323,6 +339,7 @@
 #define IOCB_DSYNC		(1 << 4)
 #define IOCB_SYNC		(1 << 5)
 #define IOCB_WRITE		(1 << 6)
+#define IOCB_NOWAIT		(1 << 7)
 
 struct kiocb {
 	struct file		*ki_filp;
@@ -330,6 +347,7 @@
 	void (*ki_complete)(struct kiocb *iocb, long ret, long ret2);
 	void			*private;
 	int			ki_flags;
+	enum rw_hint		ki_hint;
 };
 
 static inline bool is_sync_kiocb(struct kiocb *kiocb)
@@ -644,6 +662,7 @@
 	spinlock_t		i_lock;	/* i_blocks, i_bytes, maybe i_size */
 	unsigned short          i_bytes;
 	unsigned int		i_blkbits;
+	enum rw_hint		i_write_hint;
 	blkcnt_t		i_blocks;
 
 #ifdef __NEED_I_SIZE_ORDERED
@@ -1071,8 +1090,6 @@
 #define OFFT_OFFSET_MAX	INT_LIMIT(off_t)
 #endif
 
-#include <linux/fcntl.h>
-
 extern void send_sigio(struct fown_struct *fown, int fd, int band);
 
 /*
@@ -2931,6 +2948,8 @@
 		wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
 }
 
+struct inode *dio_bio_get_inode(struct bio *bio);
+
 extern void inode_set_flags(struct inode *inode, unsigned int flags,
 			    unsigned int mask);
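A hedged sketch of how the new fields are meant to flow together at submission time (the surrounding objects are assumed, not defined here): the inode's write-life-time hint seeds the kiocb, O_NONBLOCK on a file that advertises FMODE_NOWAIT becomes IOCB_NOWAIT, and block filesystems can copy the hint into bio->bi_write_hint.

	kiocb->ki_filp = file;
	kiocb->ki_hint = file_inode(file)->i_write_hint;
	if ((file->f_flags & O_NONBLOCK) && (file->f_mode & FMODE_NOWAIT))
		kiocb->ki_flags |= IOCB_NOWAIT;

	/* later, when the filesystem builds the I/O */
	bio->bi_write_hint = kiocb->ki_hint;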
 
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index 8641e56..8f4e90c 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -13,41 +13,20 @@
 #ifndef _LINUX_FSCRYPT_H
 #define _LINUX_FSCRYPT_H
 
-#include <linux/key.h>
 #include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/bio.h>
-#include <linux/dcache.h>
-#include <crypto/skcipher.h>
-#include <uapi/linux/fs.h>
 
 #define FS_CRYPTO_BLOCK_SIZE		16
 
-struct fscrypt_info;
+struct fscrypt_ctx;
 
-struct fscrypt_ctx {
-	union {
-		struct {
-			struct page *bounce_page;	/* Ciphertext page */
-			struct page *control_page;	/* Original page  */
-		} w;
-		struct {
-			struct bio *bio;
-			struct work_struct work;
-		} r;
-		struct list_head free_list;	/* Free list */
-	};
-	u8 flags;				/* Flags */
-};
-
-/**
- * For encrypted symlinks, the ciphertext length is stored at the beginning
- * of the string in little-endian format.
+/* IV sector for security/pfe/pfk_fscrypt.c and f2fs. sizeof is required
+ * to accommodate 32-bit targets.
  */
-struct fscrypt_symlink_data {
-	__le16 len;
-	char encrypted_path[1];
-} __packed;
+#define PG_DUN(i, p)                                            \
+	((((i)->i_ino & 0xffffffff) << (sizeof((i)->i_ino)/2)) | \
+				((p)->index & 0xffffffff))
+
+struct fscrypt_info;
 
 struct fscrypt_str {
 	unsigned char *name;
@@ -67,86 +46,11 @@
 #define fname_name(p)		((p)->disk_name.name)
 #define fname_len(p)		((p)->disk_name.len)
 
-/*
- * fscrypt superblock flags
- */
-#define FS_CFLG_OWN_PAGES (1U << 1)
-
-/*
- * crypto opertions for filesystems
- */
-struct fscrypt_operations {
-	unsigned int flags;
-	const char *key_prefix;
-	int (*get_context)(struct inode *, void *, size_t);
-	int (*set_context)(struct inode *, const void *, size_t, void *);
-	bool (*dummy_context)(struct inode *);
-	bool (*empty_dir)(struct inode *);
-	unsigned (*max_namelen)(struct inode *);
-};
-
-static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
-{
-	if (inode->i_sb->s_cop->dummy_context &&
-				inode->i_sb->s_cop->dummy_context(inode))
-		return true;
-	return false;
-}
-
-static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
-					u32 filenames_mode)
-{
-	if (contents_mode == FS_ENCRYPTION_MODE_AES_128_CBC &&
-	    filenames_mode == FS_ENCRYPTION_MODE_AES_128_CTS)
-		return true;
-
-	if (contents_mode == FS_ENCRYPTION_MODE_AES_256_XTS &&
-	    filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
-		return true;
-
-	return false;
-}
-
-static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
-{
-	if (str->len == 1 && str->name[0] == '.')
-		return true;
-
-	if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
-		return true;
-
-	return false;
-}
-
 #if __FS_HAS_ENCRYPTION
-
-static inline struct page *fscrypt_control_page(struct page *page)
-{
-	return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
-}
-
-static inline bool fscrypt_has_encryption_key(const struct inode *inode)
-{
-	return (inode->i_crypt_info != NULL);
-}
-
 #include <linux/fscrypt_supp.h>
-
-#else /* !__FS_HAS_ENCRYPTION */
-
-static inline struct page *fscrypt_control_page(struct page *page)
-{
-	WARN_ON_ONCE(1);
-	return ERR_PTR(-EINVAL);
-}
-
-static inline bool fscrypt_has_encryption_key(const struct inode *inode)
-{
-	return 0;
-}
-
+#else
 #include <linux/fscrypt_notsupp.h>
-#endif /* __FS_HAS_ENCRYPTION */
+#endif
 
 /**
  * fscrypt_require_key - require an inode's encryption key
@@ -287,4 +191,68 @@
 	return 0;
 }
 
+/**
+ * fscrypt_prepare_symlink - prepare to create a possibly-encrypted symlink
+ * @dir: directory in which the symlink is being created
+ * @target: plaintext symlink target
+ * @len: length of @target excluding null terminator
+ * @max_len: space the filesystem has available to store the symlink target
+ * @disk_link: (out) the on-disk symlink target being prepared
+ *
+ * This function computes the size the symlink target will require on-disk,
+ * stores it in @disk_link->len, and validates it against @max_len.  An
+ * encrypted symlink may be longer than the original.
+ *
+ * Additionally, @disk_link->name is set to @target if the symlink will be
+ * unencrypted, but left NULL if the symlink will be encrypted.  For encrypted
+ * symlinks, the filesystem must call fscrypt_encrypt_symlink() to create the
+ * on-disk target later.  (The reason for the two-step process is that some
+ * filesystems need to know the size of the symlink target before creating the
+ * inode, e.g. to determine whether it will be a "fast" or "slow" symlink.)
+ *
+ * Return: 0 on success, -ENAMETOOLONG if the symlink target is too long,
+ * -ENOKEY if the encryption key is missing, or another -errno code if a problem
+ * occurred while setting up the encryption key.
+ */
+static inline int fscrypt_prepare_symlink(struct inode *dir,
+					  const char *target,
+					  unsigned int len,
+					  unsigned int max_len,
+					  struct fscrypt_str *disk_link)
+{
+	if (IS_ENCRYPTED(dir) || fscrypt_dummy_context_enabled(dir))
+		return __fscrypt_prepare_symlink(dir, len, max_len, disk_link);
+
+	disk_link->name = (unsigned char *)target;
+	disk_link->len = len + 1;
+	if (disk_link->len > max_len)
+		return -ENAMETOOLONG;
+	return 0;
+}
+
+/**
+ * fscrypt_encrypt_symlink - encrypt the symlink target if needed
+ * @inode: symlink inode
+ * @target: plaintext symlink target
+ * @len: length of @target excluding null terminator
+ * @disk_link: (in/out) the on-disk symlink target being prepared
+ *
+ * If the symlink target needs to be encrypted, then this function encrypts it
+ * into @disk_link->name.  fscrypt_prepare_symlink() must have been called
+ * previously to compute @disk_link->len.  If the filesystem did not allocate a
+ * buffer for @disk_link->name after calling fscrypt_prepare_symlink(), then one
+ * will be kmalloc()'ed and the filesystem will be responsible for freeing it.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static inline int fscrypt_encrypt_symlink(struct inode *inode,
+					  const char *target,
+					  unsigned int len,
+					  struct fscrypt_str *disk_link)
+{
+	if (IS_ENCRYPTED(inode))
+		return __fscrypt_encrypt_symlink(inode, target, len, disk_link);
+	return 0;
+}
+
 #endif	/* _LINUX_FSCRYPT_H */
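A condensed example of the two-step symlink protocol documented above, as a hypothetical filesystem might use it (all myfs_* names and MYFS_MAX_LINK_LEN are made up; error unwinding trimmed):

	static int myfs_symlink(struct inode *dir, struct dentry *dentry,
				const char *target)
	{
		unsigned int len = strlen(target);
		struct fscrypt_str disk_link;
		struct inode *inode;
		int err;

		/* Step 1: size/validate the on-disk target before inode creation. */
		err = fscrypt_prepare_symlink(dir, target, len, MYFS_MAX_LINK_LEN,
					      &disk_link);
		if (err)
			return err;

		inode = myfs_new_inode(dir, S_IFLNK | S_IRWXUGO);
		if (IS_ERR(inode))
			return PTR_ERR(inode);

		/* Step 2: encrypt into disk_link.name (a no-op when unencrypted). */
		err = fscrypt_encrypt_symlink(inode, target, len, &disk_link);
		if (err)
			return err;

		return myfs_write_link(inode, dentry, disk_link.name, disk_link.len);
	}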
diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h
index c4c6bf2..ce4df7a 100644
--- a/include/linux/fscrypt_notsupp.h
+++ b/include/linux/fscrypt_notsupp.h
@@ -13,7 +13,21 @@
 #ifndef _LINUX_FSCRYPT_NOTSUPP_H
 #define _LINUX_FSCRYPT_NOTSUPP_H
 
+static inline bool fscrypt_has_encryption_key(const struct inode *inode)
+{
+	return false;
+}
+
+static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
+{
+	return false;
+}
+
 /* crypto.c */
+static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work)
+{
+}
+
 static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode,
 						  gfp_t gfp_flags)
 {
@@ -42,6 +56,11 @@
 	return -EOPNOTSUPP;
 }
 
+static inline struct page *fscrypt_control_page(struct page *page)
+{
+	WARN_ON_ONCE(1);
+	return ERR_PTR(-EINVAL);
+}
 
 static inline void fscrypt_restore_control_page(struct page *page)
 {
@@ -89,8 +108,7 @@
 	return -EOPNOTSUPP;
 }
 
-static inline void fscrypt_put_encryption_info(struct inode *inode,
-					       struct fscrypt_info *ci)
+static inline void fscrypt_put_encryption_info(struct inode *inode)
 {
 	return;
 }
@@ -115,16 +133,8 @@
 	return;
 }
 
-static inline u32 fscrypt_fname_encrypted_size(const struct inode *inode,
-					       u32 ilen)
-{
-	/* never happens */
-	WARN_ON(1);
-	return 0;
-}
-
 static inline int fscrypt_fname_alloc_buffer(const struct inode *inode,
-					     u32 ilen,
+					     u32 max_encrypted_len,
 					     struct fscrypt_str *crypto_str)
 {
 	return -EOPNOTSUPP;
@@ -143,13 +153,6 @@
 	return -EOPNOTSUPP;
 }
 
-static inline int fscrypt_fname_usr_to_disk(struct inode *inode,
-					    const struct qstr *iname,
-					    struct fscrypt_str *oname)
-{
-	return -EOPNOTSUPP;
-}
-
 static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
 				      const u8 *de_name, u32 de_name_len)
 {
@@ -160,10 +163,13 @@
 }
 
 /* bio.c */
-static inline void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx,
-					     struct bio *bio)
+static inline void fscrypt_decrypt_bio(struct bio *bio)
 {
-	return;
+}
+
+static inline void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
+					       struct bio *bio)
+{
 }
 
 static inline void fscrypt_pullback_bio_page(struct page **page, bool restore)
@@ -177,6 +183,21 @@
 	return -EOPNOTSUPP;
 }
 
+/* fscrypt_ice.c */
+static inline int fscrypt_using_hardware_encryption(const struct inode *inode)
+{
+	return 0;
+}
+
+static inline void fscrypt_set_ice_dun(const struct inode *inode,
+		struct bio *bio, u64 dun){}
+
+static inline bool fscrypt_mergeable_bio(struct bio *bio,
+		sector_t iv_block, bool bio_encrypted)
+{
+	return true;
+}
+
 /* hooks.c */
 
 static inline int fscrypt_file_open(struct inode *inode, struct file *filp)
@@ -207,4 +228,28 @@
 	return -EOPNOTSUPP;
 }
 
+static inline int __fscrypt_prepare_symlink(struct inode *dir,
+					    unsigned int len,
+					    unsigned int max_len,
+					    struct fscrypt_str *disk_link)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int __fscrypt_encrypt_symlink(struct inode *inode,
+					    const char *target,
+					    unsigned int len,
+					    struct fscrypt_str *disk_link)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline const char *fscrypt_get_symlink(struct inode *inode,
+					      const void *caddr,
+					      unsigned int max_size,
+					      struct delayed_call *done)
+{
+	return ERR_PTR(-EOPNOTSUPP);
+}
+
 #endif	/* _LINUX_FSCRYPT_NOTSUPP_H */
diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h
index 2db5e970..32e2b6c 100644
--- a/include/linux/fscrypt_supp.h
+++ b/include/linux/fscrypt_supp.h
@@ -10,8 +10,56 @@
 #ifndef _LINUX_FSCRYPT_SUPP_H
 #define _LINUX_FSCRYPT_SUPP_H
 
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+/*
+ * fscrypt superblock flags
+ */
+#define FS_CFLG_OWN_PAGES (1U << 1)
+
+/*
+ * crypto operations for filesystems
+ */
+struct fscrypt_operations {
+	unsigned int flags;
+	const char *key_prefix;
+	int (*get_context)(struct inode *, void *, size_t);
+	int (*set_context)(struct inode *, const void *, size_t, void *);
+	bool (*dummy_context)(struct inode *);
+	bool (*empty_dir)(struct inode *);
+	unsigned (*max_namelen)(struct inode *);
+	bool (*is_encrypted)(struct inode *);
+};
+
+struct fscrypt_ctx {
+	union {
+		struct {
+			struct page *bounce_page;	/* Ciphertext page */
+			struct page *control_page;	/* Original page  */
+		} w;
+		struct {
+			struct bio *bio;
+			struct work_struct work;
+		} r;
+		struct list_head free_list;	/* Free list */
+	};
+	u8 flags;				/* Flags */
+};
+
+static inline bool fscrypt_has_encryption_key(const struct inode *inode)
+{
+	return (inode->i_crypt_info != NULL);
+}
+
+static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
+{
+	return inode->i_sb->s_cop->dummy_context &&
+		inode->i_sb->s_cop->dummy_context(inode);
+}
+
 /* crypto.c */
-extern struct kmem_cache *fscrypt_info_cachep;
+extern void fscrypt_enqueue_decrypt_work(struct work_struct *);
 extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
 extern void fscrypt_release_ctx(struct fscrypt_ctx *);
 extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
@@ -19,6 +67,12 @@
 						u64, gfp_t);
 extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int,
 				unsigned int, u64);
+
+static inline struct page *fscrypt_control_page(struct page *page)
+{
+	return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
+}
+
 extern void fscrypt_restore_control_page(struct page *);
 
 extern const struct dentry_operations fscrypt_d_ops;
@@ -43,7 +97,7 @@
 					void *, bool);
 /* keyinfo.c */
 extern int fscrypt_get_encryption_info(struct inode *);
-extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *);
+extern void fscrypt_put_encryption_info(struct inode *);
 
 /* fname.c */
 extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
@@ -54,14 +108,11 @@
 	kfree(fname->crypto_buf.name);
 }
 
-extern u32 fscrypt_fname_encrypted_size(const struct inode *, u32);
 extern int fscrypt_fname_alloc_buffer(const struct inode *, u32,
 				struct fscrypt_str *);
 extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
 extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
 			const struct fscrypt_str *, struct fscrypt_str *);
-extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *,
-			struct fscrypt_str *);
 
 #define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE	32
 
@@ -138,11 +189,20 @@
 }
 
 /* bio.c */
-extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
+extern void fscrypt_decrypt_bio(struct bio *);
+extern void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
+					struct bio *bio);
 extern void fscrypt_pullback_bio_page(struct page **, bool);
 extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
 				 unsigned int);
 
+/* fscrypt_ice.c */
+extern int fscrypt_using_hardware_encryption(const struct inode *inode);
+extern void fscrypt_set_ice_dun(const struct inode *inode,
+		struct bio *bio, u64 dun);
+extern bool fscrypt_mergeable_bio(struct bio *bio, u64 dun, bool bio_encrypted);
+
+
 /* hooks.c */
 extern int fscrypt_file_open(struct inode *inode, struct file *filp);
 extern int __fscrypt_prepare_link(struct inode *inode, struct inode *dir);
@@ -152,5 +212,14 @@
 				    struct dentry *new_dentry,
 				    unsigned int flags);
 extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry);
+extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len,
+				     unsigned int max_len,
+				     struct fscrypt_str *disk_link);
+extern int __fscrypt_encrypt_symlink(struct inode *inode, const char *target,
+				     unsigned int len,
+				     struct fscrypt_str *disk_link);
+extern const char *fscrypt_get_symlink(struct inode *inode, const void *caddr,
+				       unsigned int max_size,
+				       struct delayed_call *done);
 
 #endif	/* _LINUX_FSCRYPT_SUPP_H */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index f4c0d36..ab7938a 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -244,8 +244,16 @@
 	return *this_cpu_ptr(ops->disabled);
 }
 
+#ifdef CONFIG_CFI_CLANG
+/* Use a C stub with the correct type for CFI */
+static inline void ftrace_stub(unsigned long a0, unsigned long a1,
+			       struct ftrace_ops *op, struct pt_regs *regs)
+{
+}
+#else
 extern void ftrace_stub(unsigned long a0, unsigned long a1,
 			struct ftrace_ops *op, struct pt_regs *regs);
+#endif
 
 #else /* !CONFIG_FUNCTION_TRACER */
 /*
diff --git a/include/linux/hid.h b/include/linux/hid.h
index b2ec827..fab65b6 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -801,7 +801,7 @@
 extern void hidinput_disconnect(struct hid_device *);
 
 int hid_set_field(struct hid_field *, unsigned, __s32);
-int hid_input_report(struct hid_device *, int type, u8 *, int, int);
+int hid_input_report(struct hid_device *, int type, u8 *, u32, int);
 int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field);
 struct hid_field *hidinput_get_led_field(struct hid_device *hid);
 unsigned int hidinput_count_leds(struct hid_device *hid);
@@ -1106,13 +1106,13 @@
  *
  * @report: the report we want to know the length
  */
-static inline int hid_report_len(struct hid_report *report)
+static inline u32 hid_report_len(struct hid_report *report)
 {
 	/* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
 	return ((report->size - 1) >> 3) + 1 + (report->id > 0);
 }
 
-int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
+int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
 		int interrupt);
 
 /* HID quirks API */
diff --git a/include/linux/hugetlb_inline.h b/include/linux/hugetlb_inline.h
index a4e7ca0..6cfdfca 100644
--- a/include/linux/hugetlb_inline.h
+++ b/include/linux/hugetlb_inline.h
@@ -7,7 +7,7 @@
 
 static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
 {
-	return !!(vma->vm_flags & VM_HUGETLB);
+	return !!(READ_ONCE(vma->vm_flags) & VM_HUGETLB);
 }
 
 #else
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 8feecd5..7e39719 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -600,7 +600,7 @@
  * Returns true if the skb is tagged with multiple vlan headers, regardless
  * of whether it is hardware accelerated or not.
  */
-static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
+static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
 {
 	__be16 protocol = skb->protocol;
 
@@ -610,6 +610,9 @@
 		if (likely(!eth_type_vlan(protocol)))
 			return false;
 
+		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
+			return false;
+
 		veh = (struct vlan_ethhdr *)skb->data;
 		protocol = veh->h_vlan_encapsulated_proto;
 	}
@@ -627,7 +630,7 @@
  *
  * Returns features without unsafe ones if the skb has multiple tags.
  */
-static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
+static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
 						    netdev_features_t features)
 {
 	if (skb_vlan_tagged_multi(skb)) {
diff --git a/include/linux/init.h b/include/linux/init.h
index 906ec32..d4b13a4 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -133,6 +133,9 @@
 void __init load_default_modules(void);
 int __init init_rootfs(void);
 
+#if defined(CONFIG_DEBUG_RODATA) || defined(CONFIG_DEBUG_SET_MODULE_RONX)
+extern bool rodata_enabled;
+#endif
 #ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void);
 #endif
diff --git a/include/linux/input/synaptics_dsx_v2_6.h b/include/linux/input/synaptics_dsx_v2_6.h
index 5cd26bb..75d4b77 100644
--- a/include/linux/input/synaptics_dsx_v2_6.h
+++ b/include/linux/input/synaptics_dsx_v2_6.h
@@ -59,6 +59,7 @@
  * @y_flip: y flip flag
  * @swap_axes: swap axes flag
  * @resume_in_workqueue: defer resume function to workqueue
+ * @dont_disable_regs: do not disable/enable regulators in suspend/resume
  * @irq_gpio: attention interrupt GPIO
  * @irq_on_state: attention interrupt active state
  * @power_gpio: power switch GPIO
@@ -75,6 +76,7 @@
  * @power_delay_ms: delay time to wait after powering up device
  * @reset_delay_ms: delay time to wait after resetting device
  * @reset_active_ms: reset active time
+ * @bus_lpm_cur_uA: low power mode current setting for bus
  * @byte_delay_us: delay time between two bytes of SPI data
  * @block_delay_us: delay time between two SPI transfers
  * @pwr_reg_name: pointer to name of regulator for power control
@@ -88,12 +90,14 @@
 	bool swap_axes;
 	bool resume_in_workqueue;
 	bool wakeup_gesture_en;
+	bool dont_disable_regs;
 	int irq_gpio;
 	int irq_on_state;
 	int power_gpio;
 	int power_on_state;
 	int reset_gpio;
 	int reset_on_state;
+	int bus_lpm_cur_uA;
 	int max_y_for_2d;
 	unsigned long irq_flags;
 	unsigned short i2c_addr;
diff --git a/include/linux/ipa.h b/include/linux/ipa.h
index 1a1c68e..2c31666 100644
--- a/include/linux/ipa.h
+++ b/include/linux/ipa.h
@@ -1156,6 +1156,7 @@
  * @ipa_if_tlv: number of IPA_IF TLV
  * @ipa_if_aos: number of IPA_IF AOS
  * @ee: Execution environment
+ * @prefetch_mode: Prefetch mode to be used
  */
 struct ipa_gsi_ep_config {
 	int ipa_ep_num;
@@ -1163,6 +1164,7 @@
 	int ipa_if_tlv;
 	int ipa_if_aos;
 	int ee;
+	enum gsi_prefetch_mode prefetch_mode;
 };
 
 /**
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index c122409..914eb4a 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -451,6 +451,8 @@
 }
 
 void gic_show_pending_irqs(void);
+void gic_v3_dist_save(void);
+void gic_v3_dist_restore(void);
 unsigned int get_gic_highpri_irq(void);
 #endif
 
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 589d14e..c2a0f00 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -1,6 +1,7 @@
 #ifndef _LINUX_JIFFIES_H
 #define _LINUX_JIFFIES_H
 
+#include <linux/cache.h>
 #include <linux/math64.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
@@ -63,19 +64,17 @@
 /* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
 #define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
 
-/* some arch's have a small-data section that can be accessed register-relative
- * but that can only take up to, say, 4-byte variables. jiffies being part of
- * an 8-byte variable may not be correctly accessed unless we force the issue
- */
-#define __jiffy_data  __attribute__((section(".data")))
+#ifndef __jiffy_arch_data
+#define __jiffy_arch_data
+#endif
 
 /*
  * The 64-bit value is not atomic - you MUST NOT read it
  * without sampling the sequence number in jiffies_lock.
  * get_jiffies_64() will do this for you as appropriate.
  */
-extern u64 __jiffy_data jiffies_64;
-extern unsigned long volatile __jiffy_data jiffies;
+extern u64 __cacheline_aligned_in_smp jiffies_64;
+extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffies;
 
 #if (BITS_PER_LONG < 64)
 u64 get_jiffies_64(void);
diff --git a/include/linux/kcore.h b/include/linux/kcore.h
index d927622..3ffade4 100644
--- a/include/linux/kcore.h
+++ b/include/linux/kcore.h
@@ -9,6 +9,7 @@
 	KCORE_VMALLOC,
 	KCORE_RAM,
 	KCORE_VMEMMAP,
+	KCORE_USER,
 	KCORE_OTHER,
 };
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 8c58db2..eb55374 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1070,7 +1070,6 @@
 {
 }
 #endif
-void kvm_arch_irq_routing_update(struct kvm *kvm);
 
 static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 {
@@ -1079,6 +1078,8 @@
 
 #endif /* CONFIG_HAVE_KVM_EVENTFD */
 
+void kvm_arch_irq_routing_update(struct kvm *kvm);
+
 static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
 {
 	/*
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 66a1f4e..b27315c 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -35,6 +35,7 @@
 	const char		*name;
 	enum led_brightness	 brightness;
 	enum led_brightness	 max_brightness;
+	enum led_brightness	 usr_brightness_req;
 	int			 flags;
 
 	/* Lower 16 bits reflect status */
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index be65c03..f510c68 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1452,6 +1452,8 @@
 					size_t *len);
 	int (*inode_create)(struct inode *dir, struct dentry *dentry,
 				umode_t mode);
+	int (*inode_post_create)(struct inode *dir, struct dentry *dentry,
+				umode_t mode);
 	int (*inode_link)(struct dentry *old_dentry, struct inode *dir,
 				struct dentry *new_dentry);
 	int (*inode_unlink)(struct inode *dir, struct dentry *dentry);
@@ -1750,6 +1752,7 @@
 	struct list_head inode_free_security;
 	struct list_head inode_init_security;
 	struct list_head inode_create;
+	struct list_head inode_post_create;
 	struct list_head inode_link;
 	struct list_head inode_unlink;
 	struct list_head inode_symlink;
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index ae8d475..df8e0b0 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -89,14 +89,14 @@
 #ifdef CONFIG_NUMA_BALANCING
 extern bool pmd_trans_migrating(pmd_t pmd);
 extern int migrate_misplaced_page(struct page *page,
-				  struct vm_area_struct *vma, int node);
+				  struct fault_env *fe, int node);
 #else
 static inline bool pmd_trans_migrating(pmd_t pmd)
 {
 	return false;
 }
 static inline int migrate_misplaced_page(struct page *page,
-					 struct vm_area_struct *vma, int node)
+					 struct fault_env *fe, int node)
 {
 	return -EAGAIN; /* can't migrate now */
 }
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index b4ee8f6..8e2828d 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -470,6 +470,7 @@
 	u16	rate_val;
 };
 
+struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn);
 int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
 		   enum mlx4_update_qp_attr attr,
 		   struct mlx4_update_qp_params *params);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 5827614..5f3e622 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -750,8 +750,14 @@
 };
 
 enum {
-	CQE_RSS_HTYPE_IP	= 0x3 << 6,
-	CQE_RSS_HTYPE_L4	= 0x3 << 2,
+	CQE_RSS_HTYPE_IP	= 0x3 << 2,
+	/* cqe->rss_hash_type[3:2] - IP destination selected for hash
+	 * (00 = none,  01 = IPv4, 10 = IPv6, 11 = Reserved)
+	 */
+	CQE_RSS_HTYPE_L4	= 0x3 << 6,
+	/* cqe->rss_hash_type[7:6] - L4 destination selected for hash
+	 * (00 = none, 01 = TCP. 10 = UDP, 11 = IPSEC.SPI
+	 */
 };
 
 enum {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 270f032..9d5d808 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -284,6 +284,9 @@
 #define FAULT_FLAG_USER		0x40	/* The fault originated in userspace */
 #define FAULT_FLAG_REMOTE	0x80	/* faulting for non current tsk/mm */
 #define FAULT_FLAG_INSTRUCTION  0x100	/* The fault was during an instruction fetch */
+/* Speculative fault, not holding mmap_sem */
+#define FAULT_FLAG_SPECULATIVE	0x200
+#define FAULT_FLAG_PREFAULT_OLD 0x400   /* Make faultaround ptes old */
 
 /*
  * vm_fault is filled by the the pagefault handler and passed to the vma's
@@ -300,7 +303,6 @@
 	gfp_t gfp_mask;			/* gfp mask to be used for allocations */
 	pgoff_t pgoff;			/* Logical page offset based on vma */
 	void __user *virtual_address;	/* Faulting virtual address */
-
 	struct page *cow_page;		/* Handler may choose to COW */
 	struct page *page;		/* ->fault handlers should return a
 					 * page here, unless VM_FAULT_NOPAGE
@@ -322,6 +324,7 @@
 struct fault_env {
 	struct vm_area_struct *vma;	/* Target VMA */
 	unsigned long address;		/* Faulting virtual address */
+	unsigned long fault_address;    /* Saved faulting virtual address */
 	unsigned int flags;		/* FAULT_FLAG_xxx flags */
 	pmd_t *pmd;			/* Pointer to pmd entry matching
 					 * the 'address'
@@ -341,6 +344,17 @@
 					 * page table to avoid allocation from
 					 * atomic context.
 					 */
+	/*
+	 * These entries are required when handling a speculative page fault.
+	 * This way the page handling is done using consistent field values.
+	 */
+	unsigned long vma_flags;
+	pgprot_t vma_page_prot;
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	unsigned int sequence;
+	pmd_t orig_pmd;			/* value of PMD at the time of fault */
+	pte_t orig_pte;                 /* Value of PTE at the time of fault */
+#endif
 };
 
 /*
@@ -623,9 +637,9 @@
  * pte_mkwrite.  But get_user_pages can cause write faults for mappings
  * that do not have writing enabled, when used by access_process_vm.
  */
-static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
+static inline pte_t maybe_mkwrite(pte_t pte, unsigned long vma_flags)
 {
-	if (likely(vma->vm_flags & VM_WRITE))
+	if (likely(vma_flags & VM_WRITE))
 		pte = pte_mkwrite(pte);
 	return pte;
 }
@@ -1118,6 +1132,7 @@
 #define VM_FAULT_DAX_LOCKED 0x1000	/* ->fault has locked DAX entry */
 
 #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
+#define VM_FAULT_PTNOTSAME 0x4000	/* Page table entries have changed */
 
 #define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
 			 VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
@@ -1167,8 +1182,23 @@
 	pgoff_t last_index;			/* Highest page->index to unmap */
 };
 
-struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
-		pte_t pte);
+struct page *__vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+			      pte_t pte, unsigned long vma_flags);
+static inline struct page *vm_normal_page(struct vm_area_struct *vma,
+		unsigned long addr, pte_t pte)
+{
+	return __vm_normal_page(vma, addr, pte, vma->vm_flags);
+}
+
+static inline void INIT_VMA(struct vm_area_struct *vma)
+{
+	INIT_LIST_HEAD(&vma->anon_vma_chain);
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	seqcount_init(&vma->vm_sequence);
+	atomic_set(&vma->vm_ref_count, 1);
+#endif
+}
+
 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
 				pmd_t pmd);
 
@@ -1238,6 +1268,47 @@
 	unmap_mapping_range(mapping, holebegin, holelen, 0);
 }
 
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+static inline void vm_write_begin(struct vm_area_struct *vma)
+{
+	write_seqcount_begin(&vma->vm_sequence);
+}
+static inline void vm_write_begin_nested(struct vm_area_struct *vma,
+					 int subclass)
+{
+	write_seqcount_begin_nested(&vma->vm_sequence, subclass);
+}
+static inline void vm_write_end(struct vm_area_struct *vma)
+{
+	write_seqcount_end(&vma->vm_sequence);
+}
+static inline void vm_raw_write_begin(struct vm_area_struct *vma)
+{
+	raw_write_seqcount_begin(&vma->vm_sequence);
+}
+static inline void vm_raw_write_end(struct vm_area_struct *vma)
+{
+	raw_write_seqcount_end(&vma->vm_sequence);
+}
+#else
+static inline void vm_write_begin(struct vm_area_struct *vma)
+{
+}
+static inline void vm_write_begin_nested(struct vm_area_struct *vma,
+					 int subclass)
+{
+}
+static inline void vm_write_end(struct vm_area_struct *vma)
+{
+}
+static inline void vm_raw_write_begin(struct vm_area_struct *vma)
+{
+}
+static inline void vm_raw_write_end(struct vm_area_struct *vma)
+{
+}
+#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
+
 extern void truncate_pagecache(struct inode *inode, loff_t new);
 extern void truncate_setsize(struct inode *inode, loff_t newsize);
 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
@@ -1249,6 +1320,43 @@
 #ifdef CONFIG_MMU
 extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 		unsigned int flags);
+
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+extern int __handle_speculative_fault(struct mm_struct *mm,
+				      unsigned long address,
+				      unsigned int flags,
+				      struct vm_area_struct **vma);
+static inline int handle_speculative_fault(struct mm_struct *mm,
+					   unsigned long address,
+					   unsigned int flags,
+					   struct vm_area_struct **vma)
+{
+	/*
+	 * Try speculative page fault for multithreaded user space tasks only.
+	 */
+	if (!(flags & FAULT_FLAG_USER) || atomic_read(&mm->mm_users) == 1) {
+		*vma = NULL;
+		return VM_FAULT_RETRY;
+	}
+	return __handle_speculative_fault(mm, address, flags, vma);
+}
+extern bool can_reuse_spf_vma(struct vm_area_struct *vma,
+			      unsigned long address);
+#else
+static inline int handle_speculative_fault(struct mm_struct *mm,
+					   unsigned long address,
+					   unsigned int flags,
+					   struct vm_area_struct **vma)
+{
+	return VM_FAULT_RETRY;
+}
+static inline bool can_reuse_spf_vma(struct vm_area_struct *vma,
+				     unsigned long address)
+{
+	return false;
+}
+#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
+
 extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 			    unsigned long address, unsigned int fault_flags,
 			    bool *unlocked);
@@ -1957,16 +2065,29 @@
 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
 extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
-	struct vm_area_struct *expand);
+	struct vm_area_struct *expand, bool keep_locked);
 static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
 {
-	return __vma_adjust(vma, start, end, pgoff, insert, NULL);
+	return __vma_adjust(vma, start, end, pgoff, insert, NULL, false);
 }
-extern struct vm_area_struct *vma_merge(struct mm_struct *,
+
+extern struct vm_area_struct *__vma_merge(struct mm_struct *mm,
 	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
-	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
-	struct mempolicy *, struct vm_userfaultfd_ctx, const char __user *);
+	unsigned long vm_flags, struct anon_vma *anon, struct file *file,
+	pgoff_t pgoff, struct mempolicy *mpol, struct vm_userfaultfd_ctx uff,
+	const char __user *user, bool keep_locked);
+
+static inline struct vm_area_struct *vma_merge(struct mm_struct *mm,
+	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
+	unsigned long vm_flags, struct anon_vma *anon, struct file *file,
+	pgoff_t off, struct mempolicy *pol, struct vm_userfaultfd_ctx uff,
+	const char __user *user)
+{
+	return __vma_merge(mm, prev, addr, end, vm_flags, anon, file, off,
+			   pol, uff, user, false);
+}
+
 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
 extern int split_vma(struct mm_struct *,
 	struct vm_area_struct *, unsigned long addr, int new_below);
@@ -2246,6 +2367,7 @@
 #define FOLL_MLOCK	0x1000	/* lock present pages */
 #define FOLL_REMOTE	0x2000	/* we are working on non-current tsk/mm */
 #define FOLL_COW	0x4000	/* internal GUP flag */
+#define FOLL_ANON	0x8000	/* don't do file mappings */
 
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
 			void *data);
@@ -2464,6 +2586,8 @@
 static inline void setup_nr_node_ids(void) {}
 #endif
 
+extern int want_old_faultaround_pte;
+
 #ifdef CONFIG_PROCESS_RECLAIM
 struct reclaim_param {
 	struct vm_area_struct *vma;
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 41d376e..e030a68 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -50,6 +50,13 @@
 	list_add(&page->lru, &lruvec->lists[lru]);
 }
 
+static __always_inline void add_page_to_lru_list_tail(struct page *page,
+				struct lruvec *lruvec, enum lru_list lru)
+{
+	update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
+	list_add_tail(&page->lru, &lruvec->lists[lru]);
+}
+
 static __always_inline void del_page_from_lru_list(struct page *page,
 				struct lruvec *lruvec, enum lru_list lru)
 {
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 5942478..cb2cc30 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -365,6 +365,10 @@
 	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
 #endif
 	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	seqcount_t vm_sequence;
+	atomic_t vm_ref_count;		/* see vma_get(), vma_put() */
+#endif
 };
 
 struct core_thread {
@@ -403,6 +407,9 @@
 struct mm_struct {
 	struct vm_area_struct *mmap;		/* list of VMAs */
 	struct rb_root mm_rb;
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	rwlock_t mm_rb_lock;
+#endif
 	u32 vmacache_seqnum;                   /* per-thread vmacache */
 #ifdef CONFIG_MMU
 	unsigned long (*get_unmapped_area) (struct file *filp,
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 7228bcd..149b718 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -652,6 +652,8 @@
 	void *cmdq_private;
 	struct mmc_request	*err_mrq;
 
+	bool inlinecrypt_support;  /* Inline encryption support */
+
 	atomic_t rpmb_req_pending;
 	struct mutex		rpmb_req_mutex;
 	unsigned long		private[0] ____cacheline_aligned;
@@ -692,6 +694,7 @@
 #define mmc_bus_manual_resume(host) ((host)->bus_resume_flags & \
 				MMC_BUSRESUME_MANUAL_RESUME)
 
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
 static inline void mmc_set_bus_resume_policy(struct mmc_host *host, int manual)
 {
 	if (manual)
@@ -699,6 +702,11 @@
 	else
 		host->bus_resume_flags &= ~MMC_BUSRESUME_MANUAL_RESUME;
 }
+#else
+static inline void mmc_set_bus_resume_policy(struct mmc_host *host, int manual)
+{
+}
+#endif
 
 extern int mmc_resume_bus(struct mmc_host *host);
 
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 0c28c28..815d0f4 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -245,8 +245,6 @@
 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
 #define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)
 
-/* Isolate clean file */
-#define ISOLATE_CLEAN		((__force isolate_mode_t)0x1)
 /* Isolate unmapped file */
 #define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
 /* Isolate for asynchronous migration */
diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h
index 6e0b439..79e7daa 100644
--- a/include/linux/msm_gsi.h
+++ b/include/linux/msm_gsi.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -12,6 +12,7 @@
 #ifndef MSM_GSI_H
 #define MSM_GSI_H
 #include <linux/types.h>
+#include <linux/interrupt.h>
 
 enum gsi_ver {
 	GSI_VER_ERR = 0,
@@ -82,6 +83,9 @@
  * @irq:        IRQ number
  * @phys_addr:  physical address of GSI block
  * @size:       register size of GSI block
+ * @emulator_intcntrlr_addr: the location of emulator's interrupt control block
+ * @emulator_intcntrlr_size: the size of the region at emulator_intcntrlr_addr
+ * @emulator_intcntrlr_client_isr: client's ISR, called from the emulator's ISR
  * @mhi_er_id_limits_valid: valid flag for mhi_er_id_limits
  * @mhi_er_id_limits: MHI event ring start and end ids
  * @notify_cb:  general notification callback
@@ -107,6 +111,9 @@
 	unsigned int irq;
 	phys_addr_t phys_addr;
 	unsigned long size;
+	phys_addr_t emulator_intcntrlr_addr;
+	unsigned long emulator_intcntrlr_size;
+	irq_handler_t emulator_intcntrlr_client_isr;
 	bool mhi_er_id_limits_valid;
 	uint32_t mhi_er_id_limits[2];
 	void (*notify_cb)(struct gsi_per_notify *notify);
@@ -219,6 +226,11 @@
 	GSI_TWO_PREFETCH_SEG = 0x1
 };
 
+enum gsi_prefetch_mode {
+	GSI_USE_PREFETCH_BUFS = 0x0,
+	GSI_ESCAPE_BUF_ONLY = 0x1
+};
+
 enum gsi_chan_evt {
 	GSI_CHAN_EVT_INVALID = 0x0,
 	GSI_CHAN_EVT_SUCCESS = 0x1,
@@ -357,6 +369,7 @@
 	enum gsi_chan_use_db_eng use_db_eng;
 	enum gsi_max_prefetch max_prefetch;
 	uint8_t low_weight;
+	enum gsi_prefetch_mode prefetch_mode;
 	void (*xfer_cb)(struct gsi_chan_xfer_notify *notify);
 	void (*err_cb)(struct gsi_chan_err_notify *notify);
 	void *chan_user_data;
@@ -841,6 +854,18 @@
 		union __packed gsi_channel_scratch val);
 
 /**
+ * gsi_read_channel_scratch - Peripheral should call this function to
+ * read the scratch area of the channel context
+ *
+ * @chan_hdl:  Client handle previously obtained from
+ *             gsi_alloc_channel
+ *
+ * @Return gsi_status
+ */
+int gsi_read_channel_scratch(unsigned long chan_hdl,
+		union __packed gsi_channel_scratch *ch_scratch);
+
+/**
  * gsi_start_channel - Peripheral should call this function to
  * start a channel i.e put into running state
  *
@@ -1098,6 +1123,7 @@
  * gsi_alloc_channel (for as many channels as needed; channels can have
  * no event ring, an exclusive event ring or a shared event ring)
  * gsi_write_channel_scratch
+ * gsi_read_channel_scratch
  * gsi_start_channel
  * gsi_queue_xfer/gsi_start_xfer
  * gsi_config_channel_mode/gsi_poll_channel (if clients wants to poll on
@@ -1182,6 +1208,12 @@
 	return -GSI_STATUS_UNSUPPORTED_OP;
 }
 
+static inline int gsi_read_channel_scratch(unsigned long chan_hdl,
+		union __packed gsi_channel_scratch *ch_scratch)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
 static inline int gsi_start_channel(unsigned long chan_hdl)
 {
 	return -GSI_STATUS_UNSUPPORTED_OP;
@@ -1279,7 +1311,8 @@
 	return -GSI_STATUS_UNSUPPORTED_OP;
 }
 
-static inline int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size)
+static inline int gsi_enable_fw(
+	phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver)
 {
 	return -GSI_STATUS_UNSUPPORTED_OP;
 }
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
index b63fa45..3529683 100644
--- a/include/linux/mtd/flashchip.h
+++ b/include/linux/mtd/flashchip.h
@@ -85,6 +85,7 @@
 	unsigned int write_suspended:1;
 	unsigned int erase_suspended:1;
 	unsigned long in_progress_block_addr;
+	unsigned long in_progress_block_mask;
 
 	struct mutex mutex;
 	wait_queue_head_t wq; /* Wait on here when we're waiting for the chip
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 9bfeb88..69111fa 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -254,6 +254,8 @@
 bool xt_find_jump_offset(const unsigned int *offsets,
 			 unsigned int target, unsigned int size);
 
+int xt_check_proc_name(const char *name, unsigned int size);
+
 int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
 		   bool inv_proto);
 int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
diff --git a/include/linux/nospec.h b/include/linux/nospec.h
index e791ebc..0c5ef54 100644
--- a/include/linux/nospec.h
+++ b/include/linux/nospec.h
@@ -7,6 +7,8 @@
 #define _LINUX_NOSPEC_H
 #include <asm/barrier.h>
 
+struct task_struct;
+
 /**
  * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
  * @index: array element index
@@ -55,4 +57,12 @@
 									\
 	(typeof(_i)) (_i & _mask);					\
 })
+
+/* Speculation control prctl */
+int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which);
+int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+			     unsigned long ctrl);
+/* Speculation control for seccomp enforced mitigation */
+void arch_seccomp_spec_mitigate(struct task_struct *task);
+
 #endif /* _LINUX_NOSPEC_H */
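These prototypes are the arch hooks behind the PR_SET_SPECULATION_CTRL / PR_GET_SPECULATION_CTRL prctls. A rough userspace sketch of opting a task out of Speculative Store Bypass, assuming the uapi constants from <linux/prctl.h>:

        #include <stdio.h>
        #include <sys/prctl.h>
        #include <linux/prctl.h>

        /* Disable Speculative Store Bypass for the calling task (sketch). */
        static int disable_ssb(void)
        {
                if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                          PR_SPEC_DISABLE, 0, 0) != 0) {
                        perror("PR_SET_SPECULATION_CTRL");
                        return -1;
                }
                /* Reports the per-task state as a bitmask of PR_SPEC_* flags. */
                return (int)prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                                  0, 0, 0);
        }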
diff --git a/include/linux/of.h b/include/linux/of.h
index 299aeb1..e44e9a3 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -39,7 +39,9 @@
 	struct property *next;
 	unsigned long _flags;
 	unsigned int unique_id;
+#if defined(CONFIG_OF_KOBJ)
 	struct bin_attribute attr;
+#endif
 };
 
 #if defined(CONFIG_SPARC)
@@ -58,7 +60,9 @@
 	struct	device_node *parent;
 	struct	device_node *child;
 	struct	device_node *sibling;
+#if defined(CONFIG_OF_KOBJ)
 	struct	kobject kobj;
+#endif
 	unsigned long _flags;
 	void	*data;
 #if defined(CONFIG_SPARC)
@@ -102,21 +106,17 @@
 extern struct kobj_type of_node_ktype;
 static inline void of_node_init(struct device_node *node)
 {
+#if defined(CONFIG_OF_KOBJ)
 	kobject_init(&node->kobj, &of_node_ktype);
+#endif
 	node->fwnode.type = FWNODE_OF;
 }
 
-/* true when node is initialized */
-static inline int of_node_is_initialized(struct device_node *node)
-{
-	return node && node->kobj.state_initialized;
-}
-
-/* true when node is attached (i.e. present on sysfs) */
-static inline int of_node_is_attached(struct device_node *node)
-{
-	return node && node->kobj.state_in_sysfs;
-}
+#if defined(CONFIG_OF_KOBJ)
+#define of_node_kobj(n) (&(n)->kobj)
+#else
+#define of_node_kobj(n) NULL
+#endif
 
 #ifdef CONFIG_OF_DYNAMIC
 extern struct device_node *of_node_get(struct device_node *node);
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 791c547..9dbf9c3 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -427,8 +427,8 @@
 	pgoff_t pgoff;
 	if (unlikely(is_vm_hugetlb_page(vma)))
 		return linear_hugepage_index(vma, address);
-	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
-	pgoff += vma->vm_pgoff;
+	pgoff = (address - READ_ONCE(vma->vm_start)) >> PAGE_SHIFT;
+	pgoff += READ_ONCE(vma->vm_pgoff);
 	return pgoff;
 }
 
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 1b71179..3652290 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1348,9 +1348,9 @@
 		unsigned int min_vecs, unsigned int max_vecs,
 		unsigned int flags)
 {
-	if (min_vecs > 1)
-		return -EINVAL;
-	return 1;
+	if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq)
+		return 1;
+	return -ENOSPC;
 }
 static inline void pci_free_irq_vectors(struct pci_dev *dev)
 {
diff --git a/include/linux/pfk.h b/include/linux/pfk.h
new file mode 100644
index 0000000..d7405ea
--- /dev/null
+++ b/include/linux/pfk.h
@@ -0,0 +1,70 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef PFK_H_
+#define PFK_H_
+
+#include <linux/bio.h>
+
+struct ice_crypto_setting;
+
+#ifdef CONFIG_PFK
+
+/*
+ * Default key for inline encryption.
+ *
+ * For now only AES-256-XTS is supported, so this is a fixed length.  But if
+ * ever needed, this should be made variable-length with a 'mode' and 'size'.
+ * (Remember to update pfk_allow_merge_bio() when doing so!)
+ */
+#define BLK_ENCRYPTION_KEY_SIZE_AES_256_XTS 64
+
+struct blk_encryption_key {
+	u8 raw[BLK_ENCRYPTION_KEY_SIZE_AES_256_XTS];
+};
+
+int pfk_load_key_start(const struct bio *bio,
+		struct ice_crypto_setting *ice_setting, bool *is_pfe, bool);
+int pfk_load_key_end(const struct bio *bio, bool *is_pfe);
+int pfk_remove_key(const unsigned char *key, size_t key_size);
+bool pfk_allow_merge_bio(const struct bio *bio1, const struct bio *bio2);
+void pfk_clear_on_reset(void);
+
+#else
+static inline int pfk_load_key_start(const struct bio *bio,
+	struct ice_crypto_setting *ice_setting, bool *is_pfe, bool async)
+{
+	return -ENODEV;
+}
+
+static inline int pfk_load_key_end(const struct bio *bio, bool *is_pfe)
+{
+	return -ENODEV;
+}
+
+static inline int pfk_remove_key(const unsigned char *key, size_t key_size)
+{
+	return -ENODEV;
+}
+
+static inline bool pfk_allow_merge_bio(const struct bio *bio1,
+		const struct bio *bio2)
+{
+	return true;
+}
+
+static inline void pfk_clear_on_reset(void)
+{}
+
+#endif /* CONFIG_PFK */
+
+#endif /* PFK_H */
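A rough sketch of the intended call pattern around bio submission. The surrounding function, the error handling and the is_pfe/async semantics are inferred assumptions; only the pfk_* calls come from this header, and struct ice_crypto_setting's full definition would have to be in scope:

        /* Hypothetical storage-driver path; illustrative only. */
        static int dispatch_bio_with_ice(struct bio *bio)
        {
                struct ice_crypto_setting setting;
                bool is_pfe = false;
                int ret;

                /* Ask PFK to program the ICE key before the bio is issued. */
                ret = pfk_load_key_start(bio, &setting, &is_pfe, false);
                if (ret && is_pfe)
                        return ret;     /* per-file-encrypted bio, key load failed */

                /* ... hand the bio to the low-level driver here ... */

                /* Balance the start call once the request has been issued. */
                return pfk_load_key_end(bio, &is_pfe);
        }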
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 2fb9f03..9412480 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -125,6 +125,18 @@
 	POWER_SUPPLY_PL_NON_STACKED_BATFET,
 };
 
+enum {
+	POWER_SUPPLY_PD_INACTIVE = 0,
+	POWER_SUPPLY_PD_ACTIVE,
+	POWER_SUPPLY_PD_PPS_ACTIVE,
+};
+
+enum {
+	POWER_SUPPLY_QC_CTM_DISABLE = BIT(0),
+	POWER_SUPPLY_QC_THERMAL_BALANCE_DISABLE = BIT(1),
+	POWER_SUPPLY_QC_INOV_THERMAL_DISABLE = BIT(2),
+};
+
 enum power_supply_property {
 	/* Properties of type `int' */
 	POWER_SUPPLY_PROP_STATUS = 0,
@@ -282,6 +294,14 @@
 	POWER_SUPPLY_PROP_ALLOW_HVDCP3,
 	POWER_SUPPLY_PROP_HVDCP_OPTI_ALLOWED,
 	POWER_SUPPLY_PROP_MAX_PULSE_ALLOWED,
+	POWER_SUPPLY_PROP_IGNORE_FALSE_NEGATIVE_ISENSE,
+	POWER_SUPPLY_PROP_BATTERY_INFO,
+	POWER_SUPPLY_PROP_BATTERY_INFO_ID,
+	POWER_SUPPLY_PROP_ENABLE_JEITA_DETECTION,
+	POWER_SUPPLY_PROP_ESR_ACTUAL,
+	POWER_SUPPLY_PROP_ESR_NOMINAL,
+	POWER_SUPPLY_PROP_SOH,
+	POWER_SUPPLY_PROP_QC_OPTI_DISABLE,
 	/* Local extensions of type int64_t */
 	POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
 	/* Properties of type `const char *' */
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index b97bf2e..b326d0a 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -74,6 +74,12 @@
 
 #endif /* CONFIG_PROC_FS */
 
+#ifdef CONFIG_PROC_UID
+extern void proc_register_uid(kuid_t uid);
+#else
+static inline void proc_register_uid(kuid_t uid) {}
+#endif
+
 struct net;
 
 static inline struct proc_dir_entry *proc_net_mkdir(
diff --git a/include/linux/property.h b/include/linux/property.h
index 338f9b7..459337f 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -187,7 +187,7 @@
  */
 
 #define PROPERTY_ENTRY_INTEGER_ARRAY(_name_, _type_, _val_)	\
-{								\
+(struct property_entry) {					\
 	.name = _name_,						\
 	.length = ARRAY_SIZE(_val_) * sizeof(_type_),		\
 	.is_array = true,					\
@@ -205,7 +205,7 @@
 	PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u64, _val_)
 
 #define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_)		\
-{								\
+(struct property_entry) {					\
 	.name = _name_,						\
 	.length = ARRAY_SIZE(_val_) * sizeof(const char *),	\
 	.is_array = true,					\
@@ -214,7 +214,7 @@
 }
 
 #define PROPERTY_ENTRY_INTEGER(_name_, _type_, _val_)	\
-{							\
+(struct property_entry) {				\
 	.name = _name_,					\
 	.length = sizeof(_type_),			\
 	.is_string = false,				\
@@ -231,7 +231,7 @@
 	PROPERTY_ENTRY_INTEGER(_name_, u64, _val_)
 
 #define PROPERTY_ENTRY_STRING(_name_, _val_)		\
-{							\
+(struct property_entry) {				\
 	.name = _name_,					\
 	.length = sizeof(_val_),			\
 	.is_string = true,				\
@@ -239,7 +239,7 @@
 }
 
 #define PROPERTY_ENTRY_BOOL(_name_)		\
-{						\
+(struct property_entry) {			\
 	.name = _name_,				\
 }
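Switching these macros from bare brace lists to compound literals keeps existing array initializers working and additionally makes each macro a real struct property_entry expression, so it can appear outside an initializer. A sketch (the property names are made up):

        static const struct property_entry example_props[] = {
                PROPERTY_ENTRY_U32("example,debounce-ms", 10),
                PROPERTY_ENTRY_STRING("label", "example"),
                PROPERTY_ENTRY_BOOL("wakeup-source"),
                { },    /* sentinel */
        };

        void fill_entry(struct property_entry *p)
        {
                /* Valid only with the compound-literal form: plain assignment. */
                *p = PROPERTY_ENTRY_BOOL("wakeup-source");
        }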
 
diff --git a/include/linux/psci.h b/include/linux/psci.h
index 6306ab1..347077c 100644
--- a/include/linux/psci.h
+++ b/include/linux/psci.h
@@ -25,6 +25,17 @@
 int psci_cpu_init_idle(unsigned int cpu);
 int psci_cpu_suspend_enter(unsigned long index);
 
+enum psci_conduit {
+	PSCI_CONDUIT_NONE,
+	PSCI_CONDUIT_SMC,
+	PSCI_CONDUIT_HVC,
+};
+
+enum smccc_version {
+	SMCCC_VERSION_1_0,
+	SMCCC_VERSION_1_1,
+};
+
 struct psci_operations {
 	u32 (*get_version)(void);
 	int (*cpu_suspend)(u32 state, unsigned long entry_point);
@@ -34,6 +45,8 @@
 	int (*affinity_info)(unsigned long target_affinity,
 			unsigned long lowest_affinity_level);
 	int (*migrate_info_type)(void);
+	enum psci_conduit conduit;
+	enum smccc_version smccc_version;
 };
 
 extern struct psci_operations psci_ops;
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 05c6d20..ac377a2 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -351,7 +351,7 @@
 
 static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
 {
-	if (size * sizeof(void *) > KMALLOC_MAX_SIZE)
+	if (size > KMALLOC_MAX_SIZE / sizeof(void *))
 		return NULL;
 	return kcalloc(size, sizeof(void *), gfp);
 }
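The reworked bound check avoids an integer overflow in the multiplication: on a 32-bit size_t the product can wrap and slip past the limit, while dividing the constant keeps the comparison in range. Roughly:

        /* 32-bit example: sizeof(void *) == 4, size_t is 32 bits wide.      */
        unsigned int size = 0x40000000;         /* 2^30 requested entries    */
        size_t bytes = size * sizeof(void *);   /* 2^32, wraps around to 0   */
        /* old check: bytes > KMALLOC_MAX_SIZE                  -> accepted  */
        /* new check: size  > KMALLOC_MAX_SIZE / sizeof(void *) -> rejected  */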
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index b8dd63a..886b7d6 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -40,6 +40,7 @@
 
 /**
  * struct geni_se_rsc - GENI Serial Engine Resource
+ * @ctrl_dev:	Pointer to the controller device.
  * @wrapper_dev:	Pointer to the parent QUPv3 core.
  * @se_clk:		Handle to the core serial engine clock.
  * @m_ahb_clk:		Handle to the primary AHB clock.
@@ -53,6 +54,7 @@
  * @geni_gpi_sleep:	Handle to the sleep pinctrl state.
  */
 struct se_geni_rsc {
+	struct device *ctrl_dev;
 	struct device *wrapper_dev;
 	struct clk *se_clk;
 	struct clk *m_ahb_clk;
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
new file mode 100644
index 0000000..600aadf
--- /dev/null
+++ b/include/linux/refcount.h
@@ -0,0 +1,294 @@
+#ifndef _LINUX_REFCOUNT_H
+#define _LINUX_REFCOUNT_H
+
+/*
+ * Variant of atomic_t specialized for reference counts.
+ *
+ * The interface matches the atomic_t interface (to aid in porting) but only
+ * provides the few functions one should use for reference counting.
+ *
+ * It differs in that the counter saturates at UINT_MAX and will not move once
+ * there. This avoids wrapping the counter and causing 'spurious'
+ * use-after-free issues.
+ *
+ * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
+ * and provide only what is strictly required for refcounts.
+ *
+ * The increments are fully relaxed; these will not provide ordering. The
+ * rationale is that whatever is used to obtain the object we're increasing the
+ * reference count on will provide the ordering. For locked data structures,
+ * its the lock acquire, for RCU/lockless data structures its the dependent
+ * load.
+ *
+ * Do note that inc_not_zero() provides a control dependency which will order
+ * future stores against the inc, this ensures we'll never modify the object
+ * if we did not in fact acquire a reference.
+ *
+ * The decrements will provide release order, such that all the prior loads and
+ * stores will be issued before; they also provide a control dependency, which
+ * will order us against the subsequent free().
+ *
+ * The control dependency is against the load of the cmpxchg (ll/sc) that
+ * succeeded. This means the stores aren't fully ordered, but this is fine
+ * because the 1->0 transition indicates no concurrency.
+ *
+ * Note that the allocator is responsible for ordering things between free()
+ * and alloc().
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+
+#ifdef CONFIG_DEBUG_REFCOUNT
+#define REFCOUNT_WARN(cond, str) WARN_ON(cond)
+#define __refcount_check	__must_check
+#else
+#define REFCOUNT_WARN(cond, str) (void)(cond)
+#define __refcount_check
+#endif
+
+typedef struct refcount_struct {
+	atomic_t refs;
+} refcount_t;
+
+#define REFCOUNT_INIT(n)	{ .refs = ATOMIC_INIT(n), }
+
+static inline void refcount_set(refcount_t *r, unsigned int n)
+{
+	atomic_set(&r->refs, n);
+}
+
+static inline unsigned int refcount_read(const refcount_t *r)
+{
+	return atomic_read(&r->refs);
+}
+
+static inline __refcount_check
+bool refcount_add_not_zero(unsigned int i, refcount_t *r)
+{
+	unsigned int old, new, val = atomic_read(&r->refs);
+
+	for (;;) {
+		if (!val)
+			return false;
+
+		if (unlikely(val == UINT_MAX))
+			return true;
+
+		new = val + i;
+		if (new < val)
+			new = UINT_MAX;
+		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
+		if (old == val)
+			break;
+
+		val = old;
+	}
+
+	REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+
+	return true;
+}
+
+static inline void refcount_add(unsigned int i, refcount_t *r)
+{
+	REFCOUNT_WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
+}
+
+/*
+ * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_inc_not_zero(refcount_t *r)
+{
+	unsigned int old, new, val = atomic_read(&r->refs);
+
+	for (;;) {
+		new = val + 1;
+
+		if (!val)
+			return false;
+
+		if (unlikely(!new))
+			return true;
+
+		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
+		if (old == val)
+			break;
+
+		val = old;
+	}
+
+	REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+
+	return true;
+}
+
+/*
+ * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller already has a
+ * reference on the object, will WARN when this is not so.
+ */
+static inline void refcount_inc(refcount_t *r)
+{
+	REFCOUNT_WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
+}
+
+/*
+ * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
+ * decrement when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_sub_and_test(unsigned int i, refcount_t *r)
+{
+	unsigned int old, new, val = atomic_read(&r->refs);
+
+	for (;;) {
+		if (unlikely(val == UINT_MAX))
+			return false;
+
+		new = val - i;
+		if (new > val) {
+			REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+			return false;
+		}
+
+		old = atomic_cmpxchg_release(&r->refs, val, new);
+		if (old == val)
+			break;
+
+		val = old;
+	}
+
+	return !new;
+}
+
+static inline __refcount_check
+bool refcount_dec_and_test(refcount_t *r)
+{
+	return refcount_sub_and_test(1, r);
+}
+
+/*
+ * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
+ * when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before.
+ */
+static inline
+void refcount_dec(refcount_t *r)
+{
+	REFCOUNT_WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
+}
+
+/*
+ * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
+ * success thereof.
+ *
+ * Like all decrement operations, it provides release memory order and provides
+ * a control dependency.
+ *
+ * It can be used like a try-delete operator; this explicit case is provided
+ * rather than a generic cmpxchg, because that would allow implementing unsafe
+ * operations.
+ */
+static inline __refcount_check
+bool refcount_dec_if_one(refcount_t *r)
+{
+	return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
+}
+
+/*
+ * No atomic_t counterpart, it decrements unless the value is 1, in which case
+ * it will return false.
+ *
+ * Was often done like: atomic_add_unless(&var, -1, 1)
+ */
+static inline __refcount_check
+bool refcount_dec_not_one(refcount_t *r)
+{
+	unsigned int old, new, val = atomic_read(&r->refs);
+
+	for (;;) {
+		if (unlikely(val == UINT_MAX))
+			return true;
+
+		if (val == 1)
+			return false;
+
+		new = val - 1;
+		if (new > val) {
+			REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+			return true;
+		}
+
+		old = atomic_cmpxchg_release(&r->refs, val, new);
+		if (old == val)
+			break;
+
+		val = old;
+	}
+
+	return true;
+}
+
+/*
+ * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
+ * to decrement when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
+{
+	if (refcount_dec_not_one(r))
+		return false;
+
+	mutex_lock(lock);
+	if (!refcount_dec_and_test(r)) {
+		mutex_unlock(lock);
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
+ * decrement when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
+{
+	if (refcount_dec_not_one(r))
+		return false;
+
+	spin_lock(lock);
+	if (!refcount_dec_and_test(r)) {
+		spin_unlock(lock);
+		return false;
+	}
+
+	return true;
+}
+
+#endif /* _LINUX_REFCOUNT_H */
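A minimal usage sketch of the refcount_t API added above; the object type, the allocator and the kfree()-based release are illustrative only:

        struct foo {
                refcount_t ref;
                /* ... payload ... */
        };

        static struct foo *foo_alloc(gfp_t gfp)
        {
                struct foo *f = kzalloc(sizeof(*f), gfp);  /* <linux/slab.h> */

                if (f)
                        refcount_set(&f->ref, 1);  /* creator holds one ref */
                return f;
        }

        static struct foo *foo_get(struct foo *f)
        {
                refcount_inc(&f->ref);          /* caller must already hold a ref */
                return f;
        }

        static void foo_put(struct foo *f)
        {
                /* Release ordering plus the control dependency order the free. */
                if (refcount_dec_and_test(&f->ref))
                        kfree(f);
        }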
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 71fd2b3..92a297c 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -168,8 +168,16 @@
 		unsigned long, bool);
 void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
 			   unsigned long, int);
-void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
-		unsigned long, bool);
+void __page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma,
+			      unsigned long address, bool compound);
+static inline void page_add_new_anon_rmap(struct page *page,
+					  struct vm_area_struct *vma,
+					  unsigned long address, bool compound)
+{
+	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
+	__page_add_new_anon_rmap(page, vma, address, compound);
+}
+
 void page_add_file_rmap(struct page *, bool);
 void page_remove_rmap(struct page *, bool);
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4b2a1bc..6b03b08 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1664,6 +1664,7 @@
 	u64 dl_deadline;	/* relative deadline of each instance	*/
 	u64 dl_period;		/* separation of two instances (period) */
 	u64 dl_bw;		/* dl_runtime / dl_deadline		*/
+	u64 dl_density;		/* dl_runtime / dl_deadline		*/
 
 	/*
 	 * Actual scheduling parameters. Initialized with the values above,
@@ -1913,6 +1914,10 @@
 
 	cputime_t utime, stime, utimescaled, stimescaled;
 	cputime_t gtime;
+#ifdef CONFIG_CPU_FREQ_TIMES
+	u64 *time_in_state;
+	unsigned int max_state;
+#endif
 	struct prev_cputime prev_cputime;
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 	seqcount_t vtime_seqcount;
@@ -2624,6 +2629,8 @@
 #define PFA_SPREAD_PAGE  1      /* Spread page cache over cpuset */
 #define PFA_SPREAD_SLAB  2      /* Spread some slab caches over cpuset */
 #define PFA_LMK_WAITING  3      /* Lowmemorykiller is waiting */
+#define PFA_SPEC_SSB_DISABLE		4	/* Speculative Store Bypass disabled */
+#define PFA_SPEC_SSB_FORCE_DISABLE	5	/* Speculative Store Bypass force disabled*/
 
 
 #define TASK_PFA_TEST(name, func)					\
@@ -2650,6 +2657,13 @@
 TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
 TASK_PFA_SET(LMK_WAITING, lmk_waiting)
 
+TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
+TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
+TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
+
+TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
+TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
+
 /*
  * task->jobctl flags
  */
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index ecc296c..50c460a 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -3,7 +3,8 @@
 
 #include <uapi/linux/seccomp.h>
 
-#define SECCOMP_FILTER_FLAG_MASK	(SECCOMP_FILTER_FLAG_TSYNC)
+#define SECCOMP_FILTER_FLAG_MASK	(SECCOMP_FILTER_FLAG_TSYNC	| \
+					 SECCOMP_FILTER_FLAG_SPEC_ALLOW)
 
 #ifdef CONFIG_SECCOMP
 
diff --git a/include/linux/security.h b/include/linux/security.h
index 3632428..bfb1b74 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -30,6 +30,7 @@
 #include <linux/string.h>
 #include <linux/mm.h>
 #include <linux/fs.h>
+#include <linux/bio.h>
 
 struct linux_binprm;
 struct cred;
@@ -256,6 +257,8 @@
 				     const struct qstr *qstr, const char **name,
 				     void **value, size_t *len);
 int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode);
+int security_inode_post_create(struct inode *dir, struct dentry *dentry,
+					umode_t mode);
 int security_inode_link(struct dentry *old_dentry, struct inode *dir,
 			 struct dentry *new_dentry);
 int security_inode_unlink(struct inode *dir, struct dentry *dentry);
@@ -304,6 +307,7 @@
 				 struct fown_struct *fown, int sig);
 int security_file_receive(struct file *file);
 int security_file_open(struct file *file, const struct cred *cred);
+
 int security_task_create(unsigned long clone_flags);
 void security_task_free(struct task_struct *task);
 int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
@@ -637,6 +641,13 @@
 	return 0;
 }
 
+static inline int security_inode_post_create(struct inode *dir,
+					struct dentry *dentry,
+					umode_t mode)
+{
+	return 0;
+}
+
 static inline int security_inode_link(struct dentry *old_dentry,
 				       struct inode *dir,
 				       struct dentry *new_dentry)
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 1a94397..42e3f06 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -348,10 +348,10 @@
 	char	name[16];
 	char	compatible[128];
 	int	(*setup)(struct earlycon_device *, const char *options);
-} __aligned(32);
+};
 
-extern const struct earlycon_id __earlycon_table[];
-extern const struct earlycon_id __earlycon_table_end[];
+extern const struct earlycon_id *__earlycon_table[];
+extern const struct earlycon_id *__earlycon_table_end[];
 
 #if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE)
 #define EARLYCON_USED_OR_UNUSED	__used
@@ -359,12 +359,19 @@
 #define EARLYCON_USED_OR_UNUSED	__maybe_unused
 #endif
 
-#define OF_EARLYCON_DECLARE(_name, compat, fn)				\
-	static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name)	\
-	     EARLYCON_USED_OR_UNUSED __section(__earlycon_table)	\
+#define _OF_EARLYCON_DECLARE(_name, compat, fn, unique_id)		\
+	static const struct earlycon_id unique_id			\
+	     EARLYCON_USED_OR_UNUSED __initconst			\
 		= { .name = __stringify(_name),				\
 		    .compatible = compat,				\
-		    .setup = fn  }
+		    .setup = fn  };					\
+	static const struct earlycon_id EARLYCON_USED_OR_UNUSED		\
+		__section(__earlycon_table)				\
+		* const __PASTE(__p, unique_id) = &unique_id
+
+#define OF_EARLYCON_DECLARE(_name, compat, fn)				\
+	_OF_EARLYCON_DECLARE(_name, compat, fn,				\
+			     __UNIQUE_ID(__earlycon_##_name))
 
 #define EARLYCON_DECLARE(_name, fn)	OF_EARLYCON_DECLARE(_name, "", fn)
 
diff --git a/include/linux/signal.h b/include/linux/signal.h
index b63f63e..5308304 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -97,6 +97,23 @@
 	}
 }
 
+static inline int sigequalsets(const sigset_t *set1, const sigset_t *set2)
+{
+	switch (_NSIG_WORDS) {
+	case 4:
+		return	(set1->sig[3] == set2->sig[3]) &&
+			(set1->sig[2] == set2->sig[2]) &&
+			(set1->sig[1] == set2->sig[1]) &&
+			(set1->sig[0] == set2->sig[0]);
+	case 2:
+		return	(set1->sig[1] == set2->sig[1]) &&
+			(set1->sig[0] == set2->sig[0]);
+	case 1:
+		return	set1->sig[0] == set2->sig[0];
+	}
+	return 0;
+}
+
 #define sigmask(sig)	(1UL << ((sig) - 1))
 
 #ifndef __HAVE_ARCH_SIG_SETOPS
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index c73bef9..3fbfe96 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -988,10 +988,10 @@
 				     unsigned int headroom);
 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
 				int newtailroom, gfp_t priority);
-int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
-			int offset, int len);
-int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
-		 int len);
+int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
+				     int offset, int len);
+int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
+			      int offset, int len);
 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
 int skb_pad(struct sk_buff *skb, int pad);
 #define dev_kfree_skb(a)	consume_skb(a)
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 448321b..90d8569 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -378,6 +378,8 @@
 extern void swsusp_set_page_free(struct page *);
 extern void swsusp_unset_page_free(struct page *);
 extern unsigned long get_safe_page(gfp_t gfp_mask);
+extern asmlinkage int swsusp_arch_suspend(void);
+extern asmlinkage int swsusp_arch_resume(void);
 
 extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
 extern int hibernate(void);
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 92d1fde..7b488ec 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -313,8 +313,14 @@
 
 extern void add_page_to_unevictable_list(struct page *page);
 
-extern void lru_cache_add_active_or_unevictable(struct page *page,
-						struct vm_area_struct *vma);
+extern void __lru_cache_add_active_or_unevictable(struct page *page,
+						unsigned long vma_flags);
+
+static inline void lru_cache_add_active_or_unevictable(struct page *page,
+						struct vm_area_struct *vma)
+{
+	return __lru_cache_add_active_or_unevictable(page, vma->vm_flags);
+}
 
 /* linux/mm/vmscan.c */
 extern unsigned long zone_reclaimable_pages(struct zone *zone);
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 00a1f33..9c452f6d 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -518,7 +518,7 @@
 }
 
 static inline struct kernfs_node *sysfs_get_dirent(struct kernfs_node *parent,
-						   const unsigned char *name)
+						   const char *name)
 {
 	return kernfs_find_and_get(parent, name);
 }
diff --git a/include/linux/tty.h b/include/linux/tty.h
index a41244f..fe1b862 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -355,6 +355,7 @@
 #define TTY_PTY_LOCK 		16	/* pty private */
 #define TTY_NO_WRITE_SPLIT 	17	/* Preserve write boundaries to driver */
 #define TTY_HUPPED 		18	/* Post driver->hangup() */
+#define TTY_HUPPING		19	/* Hangup in progress */
 #define TTY_LDISC_HALTED	22	/* Line discipline is halted */
 
 /* Values for tty->flow_change */
@@ -656,7 +657,7 @@
 extern int tty_set_ldisc(struct tty_struct *tty, int disc);
 extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty);
 extern void tty_ldisc_release(struct tty_struct *tty);
-extern void tty_ldisc_init(struct tty_struct *tty);
+extern int __must_check tty_ldisc_init(struct tty_struct *tty);
 extern void tty_ldisc_deinit(struct tty_struct *tty);
 extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
 				 char *f, int count);
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 7f4367b..87e97bb 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -45,6 +45,9 @@
 #define FUNC_SUSPEND_OPT_SUSP_MASK BIT(0)
 #define FUNC_SUSPEND_OPT_RW_EN_MASK BIT(1)
 
+#define FUNC_WAKEUP_CAPABLE_SHIFT  0
+#define FUNC_WAKEUP_ENABLE_SHIFT   1
+
 /*
  * USB function drivers should return USB_GADGET_DELAYED_STATUS if they
  * wish to delay the data/status stages of the control transfer till they
@@ -57,6 +60,9 @@
 /* big enough to hold our biggest descriptor */
 #define USB_COMP_EP0_BUFSIZ	4096
 
+/* OS feature descriptor length <= 4kB */
+#define USB_COMP_EP0_OS_DESC_BUFSIZ	4096
+
 #define USB_MS_TO_HS_INTERVAL(x)	(ilog2((x * 1000 / 125)) + 1)
 struct usb_configuration;
 
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index c40355f..d4f1759 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -44,6 +44,33 @@
 };
 
 /**
+ * Different states involved in USB charger detection.
+ *
+ * USB_CHG_STATE_UNDEFINED	USB charger is not connected or detection
+ *                              process is not yet started.
+ * USB_CHG_STATE_IN_PROGRESS	Charger detection in progress
+ * USB_CHG_STATE_WAIT_FOR_DCD	Waiting for Data pins contact.
+ * USB_CHG_STATE_DCD_DONE	Data pin contact is detected.
+ * USB_CHG_STATE_PRIMARY_DONE	Primary detection is completed (Detects
+ *                              between SDP and DCP/CDP).
+ * USB_CHG_STATE_SECONDARY_DONE	Secondary detection is completed (Detects
+ *                              between DCP and CDP).
+ * USB_CHG_STATE_DETECTED	USB charger type is determined.
+ * USB_CHG_STATE_QUEUE_SM_WORK	SM work to start/stop gadget is queued.
+ *
+ */
+enum usb_chg_state {
+	USB_CHG_STATE_UNDEFINED = 0,
+	USB_CHG_STATE_IN_PROGRESS,
+	USB_CHG_STATE_WAIT_FOR_DCD,
+	USB_CHG_STATE_DCD_DONE,
+	USB_CHG_STATE_PRIMARY_DONE,
+	USB_CHG_STATE_SECONDARY_DONE,
+	USB_CHG_STATE_DETECTED,
+	USB_CHG_STATE_QUEUE_SM_WORK,
+};
+
+/**
  * USB charger types
  *
  * USB_INVALID_CHARGER	Invalid USB charger.
@@ -126,7 +153,10 @@
  * @async_int: IRQ line on which ASYNC interrupt arrived in LPM.
  * @cur_power: The amount of mA available from downstream port.
  * @otg_wq: Strict order otg workqueue for OTG works (SM/ID/SUSPEND).
+ * @chg_work: Charger detection work.
+ * @chg_state: The state of charger detection process.
  * @chg_type: The type of charger attached.
+ * @chg_detection: True if PHY is doing charger type detection.
  * @bus_perf_client: Bus performance client handle to request BUS bandwidth
  * @host_bus_suspend: indicates host bus suspend or not.
  * @device_bus_suspend: indicates device bus suspend or not.
@@ -149,6 +179,7 @@
  * @max_nominal_system_clk_rate: max freq at which system clock can run in
 		nominal mode.
  * @sdp_check: SDP detection work in case of USB_FLOAT power supply
+ * @notify_charger_work: Charger notification work.
  */
 struct msm_otg {
 	struct usb_phy phy;
@@ -191,8 +222,11 @@
 	int async_int;
 	unsigned int cur_power;
 	struct workqueue_struct *otg_wq;
+	struct delayed_work chg_work;
 	struct delayed_work id_status_work;
+	enum usb_chg_state chg_state;
 	enum usb_chg_type chg_type;
+	bool chg_detection;
 	unsigned int dcd_time;
 	unsigned long caps;
 	uint32_t bus_perf_client;
@@ -278,7 +312,7 @@
 	struct pm_qos_request pm_qos_req_dma;
 	struct delayed_work perf_vote_work;
 	struct delayed_work sdp_check;
-	struct work_struct notify_chg_current_work;
+	struct work_struct notify_charger_work;
 };
 
 struct ci13xxx_platform_data {
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index d5eb547..3f8f3505 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -143,6 +143,9 @@
 int virtio_device_restore(struct virtio_device *dev);
 #endif
 
+#define virtio_device_for_each_vq(vdev, vq) \
+	list_for_each_entry(vq, &vdev->vqs, list)
+
 /**
  * virtio_driver - operations for a virtio I/O driver
  * @driver: underlying device driver (populate name and owner).
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index a9c2e4c..4c679792 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -99,6 +99,9 @@
 		VMACACHE_FIND_HITS,
 		VMACACHE_FULL_FLUSHES,
 #endif
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+		SPECULATIVE_PGFAULT,
+#endif
 		NR_VM_EVENT_ITEMS
 };
 
diff --git a/include/linux/wcnss_wlan.h b/include/linux/wcnss_wlan.h
index b37f8df..05fb7b5 100644
--- a/include/linux/wcnss_wlan.h
+++ b/include/linux/wcnss_wlan.h
@@ -63,6 +63,13 @@
 	WCNSS_WLAN_MAX_GPIO,
 };
 
+enum wcnss_log_type {
+	ERR,
+	WARN,
+	INFO,
+	DBG,
+};
+
 #define WCNSS_VBATT_THRESHOLD           3500000
 #define WCNSS_VBATT_GUARD               20000
 #define WCNSS_VBATT_HIGH                3700000
@@ -145,6 +152,7 @@
 void wcnss_snoc_vote(bool clk_chk_en);
 int wcnss_parse_voltage_regulator(struct wcnss_wlan_config *wlan_config,
 				  struct device *dev);
+void wcnss_log(enum wcnss_log_type type, const char *_fmt, ...);
 
 #ifdef CONFIG_WCNSS_REGISTER_DUMP_ON_BITE
 void wcnss_log_debug_regs_on_bite(void);
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 9a8eb83..3eed4f1 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -43,7 +43,7 @@
  */
 enum wb_reason {
 	WB_REASON_BACKGROUND,
-	WB_REASON_TRY_TO_FREE_PAGES,
+	WB_REASON_VMSCAN,
 	WB_REASON_SYNC,
 	WB_REASON_PERIODIC,
 	WB_REASON_LAPTOP_TIMER,
diff --git a/include/media/msm_cam_sensor.h b/include/media/msm_cam_sensor.h
index 2c8b651..54cd27b 100644
--- a/include/media/msm_cam_sensor.h
+++ b/include/media/msm_cam_sensor.h
@@ -26,6 +26,21 @@
 	uint16_t size_down;
 };
 
+struct msm_camera_i2c_reg_setting32 {
+	compat_uptr_t reg_setting;
+	uint16_t size;
+	enum msm_camera_i2c_reg_addr_type addr_type;
+	enum msm_camera_i2c_data_type data_type;
+	uint16_t delay;
+};
+
+struct msm_sensor_id_info_t32 {
+	unsigned short sensor_id_reg_addr;
+	unsigned short sensor_id;
+	unsigned short sensor_id_mask;
+	struct msm_camera_i2c_reg_setting32 setting;
+};
+
 struct msm_camera_sensor_slave_info32 {
 	char sensor_name[32];
 	char eeprom_name[32];
@@ -36,7 +51,7 @@
 	uint16_t slave_addr;
 	enum i2c_freq_mode_t i2c_freq_mode;
 	enum msm_camera_i2c_reg_addr_type addr_type;
-	struct msm_sensor_id_info_t sensor_id_info;
+	struct msm_sensor_id_info_t32 sensor_id_info;
 	struct msm_sensor_power_setting_array32 power_setting_array;
 	uint8_t  is_init_params_valid;
 	struct msm_sensor_init_params sensor_init_params;
@@ -128,14 +143,6 @@
 	uint16_t delay;
 };
 
-struct msm_camera_i2c_reg_setting32 {
-	compat_uptr_t reg_setting;
-	uint16_t size;
-	enum msm_camera_i2c_reg_addr_type addr_type;
-	enum msm_camera_i2c_data_type data_type;
-	uint16_t delay;
-};
-
 struct msm_camera_i2c_array_write_config32 {
 	struct msm_camera_i2c_reg_setting32 conf_array;
 	uint16_t slave_addr;
diff --git a/include/media/msmb_isp.h b/include/media/msmb_isp.h
index 95679cb..176dd2e 100644
--- a/include/media/msmb_isp.h
+++ b/include/media/msmb_isp.h
@@ -28,6 +28,19 @@
 		struct msm_isp_sof_info sof_info;
 	} u;
 };
+
+struct msm_isp32_event_data32 {
+	struct compat_timeval timestamp;
+	struct compat_timeval mono_timestamp;
+	enum msm_vfe_input_src input_intf;
+	uint32_t frame_id;
+	union {
+		struct msm_isp_stats_event stats;
+		struct msm_isp_buf_event buf_done;
+		struct msm_isp32_error_info error_info;
+	} u;
+};
+
 #endif
 #ifdef CONFIG_MSM_AVTIMER
 struct avtimer_fptr_t {
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 554671c..4931787 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -893,7 +893,7 @@
 				     u16 conn_timeout);
 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
 				u8 dst_type, u8 sec_level, u16 conn_timeout,
-				u8 role);
+				u8 role, bdaddr_t *direct_rpa);
 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
 				 u8 sec_level, u8 auth_type);
 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
diff --git a/include/net/bonding.h b/include/net/bonding.h
index f32f7ef..7734cc9 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -197,6 +197,7 @@
 	struct   slave __rcu *primary_slave;
 	struct   bond_up_slave __rcu *slave_arr; /* Array of usable slaves */
 	bool     force_primary;
+	u32      nest_level;
 	s32      slave_cnt; /* never change this value outside the attach/detach wrappers */
 	int     (*recv_probe)(const struct sk_buff *, struct bonding *,
 			      struct slave *);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 520c4c3..f656728 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -64,6 +64,9 @@
 /* Indicate backport support for external authentication*/
 #define CFG80211_EXTERNAL_AUTH_SUPPORT 1
 
+/* Indicate backport support for processing user cell base hint */
+#define CFG80211_USER_HINT_CELL_BASE_SELF_MANAGED 1
+
 /**
  * DOC: Introduction
  *
@@ -1011,9 +1014,9 @@
  * @RATE_INFO_BW_160: 160 MHz bandwidth
  */
 enum rate_info_bw {
+	RATE_INFO_BW_20 = 0,
 	RATE_INFO_BW_5,
 	RATE_INFO_BW_10,
-	RATE_INFO_BW_20,
 	RATE_INFO_BW_40,
 	RATE_INFO_BW_80,
 	RATE_INFO_BW_160,
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index c9b3eb7..567017b 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -55,6 +55,7 @@
 #define tw_family		__tw_common.skc_family
 #define tw_state		__tw_common.skc_state
 #define tw_reuse		__tw_common.skc_reuse
+#define tw_reuseport		__tw_common.skc_reuseport
 #define tw_ipv6only		__tw_common.skc_ipv6only
 #define tw_bound_dev_if		__tw_common.skc_bound_dev_if
 #define tw_node			__tw_common.skc_nulls_node
diff --git a/include/net/ip.h b/include/net/ip.h
index ad065a4..b1b5ee0 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -307,6 +307,13 @@
 	return --iph->ttl;
 }
 
+static inline int ip_mtu_locked(const struct dst_entry *dst)
+{
+	const struct rtable *rt = (const struct rtable *)dst;
+
+	return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
+}
+
 static inline
 int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
 {
@@ -314,7 +321,7 @@
 
 	return  pmtudisc == IP_PMTUDISC_DO ||
 		(pmtudisc == IP_PMTUDISC_WANT &&
-		 !(dst_metric_locked(dst, RTAX_MTU)));
+		 !ip_mtu_locked(dst));
 }
 
 static inline bool ip_sk_accept_pmtu(const struct sock *sk)
@@ -340,7 +347,7 @@
 	struct net *net = dev_net(dst->dev);
 
 	if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
-	    dst_metric_locked(dst, RTAX_MTU) ||
+	    ip_mtu_locked(dst) ||
 	    !forwarding)
 		return dst_mtu(dst);
 
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index aa75828..978387d 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -57,6 +57,7 @@
 	int				fnhe_genid;
 	__be32				fnhe_daddr;
 	u32				fnhe_pmtu;
+	bool				fnhe_mtu_locked;
 	__be32				fnhe_gw;
 	unsigned long			fnhe_expires;
 	struct rtable __rcu		*fnhe_rth_input;
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index fe994d2..df528a6 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -97,13 +97,14 @@
 
 struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
 			  struct proto *prot, int kern);
+void llc_sk_stop_all_timers(struct sock *sk, bool sync);
 void llc_sk_free(struct sock *sk);
 
 void llc_sk_reset(struct sock *sk);
 
 /* Access to a connection */
 int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
-void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
+int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
 void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
 void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit);
 void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 8fd61bc..920a771 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -4091,7 +4091,7 @@
  * The TX headroom reserved by mac80211 for its own tx_status functions.
  * This is enough for the radiotap header.
  */
-#define IEEE80211_TX_STATUS_HEADROOM	14
+#define IEEE80211_TX_STATUS_HEADROOM	ALIGN(14, 4)
 
 /**
  * ieee80211_sta_set_buffered - inform mac80211 about driver-buffered frames
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index 3334dbf..7fc7866 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -6,7 +6,7 @@
 
 static inline int rtnh_ok(const struct rtnexthop *rtnh, int remaining)
 {
-	return remaining >= sizeof(*rtnh) &&
+	return remaining >= (int)sizeof(*rtnh) &&
 	       rtnh->rtnh_len >= sizeof(*rtnh) &&
 	       rtnh->rtnh_len <= remaining;
 }
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index ebc5a2e..f83cacc 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -78,7 +78,7 @@
 	int wiphy_idx;
 	enum nl80211_reg_initiator initiator;
 	enum nl80211_user_reg_hint_type user_reg_hint_type;
-	char alpha2[2];
+	char alpha2[3];
 	enum nl80211_dfs_regions dfs_region;
 	bool intersect;
 	bool processed;
diff --git a/include/net/route.h b/include/net/route.h
index c0874c8..2702b7a 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -63,7 +63,8 @@
 	__be32			rt_gateway;
 
 	/* Miscellaneous cached information */
-	u32			rt_pmtu;
+	u32			rt_mtu_locked:1,
+				rt_pmtu:31;
 
 	u32			rt_table_id;
 
diff --git a/include/net/slhc_vj.h b/include/net/slhc_vj.h
index 8716d59..8fcf890 100644
--- a/include/net/slhc_vj.h
+++ b/include/net/slhc_vj.h
@@ -127,6 +127,7 @@
  */
 struct cstate {
 	byte_t	cs_this;	/* connection id number (xmit) */
+	bool	initialized;	/* true if initialized */
 	struct cstate *next;	/* next in ring (xmit) */
 	struct iphdr cs_ip;	/* ip/tcp hdr from most recent packet */
 	struct tcphdr cs_tcp;
diff --git a/include/net/x25.h b/include/net/x25.h
index c383aa4..6d30a01 100644
--- a/include/net/x25.h
+++ b/include/net/x25.h
@@ -298,10 +298,10 @@
 
 /* sysctl_net_x25.c */
 #ifdef CONFIG_SYSCTL
-void x25_register_sysctl(void);
+int x25_register_sysctl(void);
 void x25_unregister_sysctl(void);
 #else
-static inline void x25_register_sysctl(void) {};
+static inline int x25_register_sysctl(void) { return 0; };
 static inline void x25_unregister_sysctl(void) {};
 #endif /* CONFIG_SYSCTL */
 
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index 818a38f..f888263 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -129,6 +129,8 @@
 	      const unsigned char *dst_dev_addr);
 
 int rdma_addr_size(struct sockaddr *addr);
+int rdma_addr_size_in6(struct sockaddr_in6 *addr);
+int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr);
 
 int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id);
 int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 7e5430d..08b1b7b 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -662,6 +662,9 @@
 	/* The controller does not support WRITE SAME */
 	unsigned no_write_same:1;
 
+	/* Inline encryption support? */
+	unsigned inlinecrypt_support:1;
+
 	unsigned use_blk_mq:1;
 	unsigned use_cmd_list:1;
 
diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h
index 0cd4c1147..226f915 100644
--- a/include/soc/fsl/qe/qe.h
+++ b/include/soc/fsl/qe/qe.h
@@ -668,6 +668,10 @@
 #define UCC_FAST_GUMR_CTSS	0x00800000
 #define UCC_FAST_GUMR_TXSY	0x00020000
 #define UCC_FAST_GUMR_RSYN	0x00010000
+#define UCC_FAST_GUMR_SYNL_MASK	0x0000C000
+#define UCC_FAST_GUMR_SYNL_16	0x0000C000
+#define UCC_FAST_GUMR_SYNL_8	0x00008000
+#define UCC_FAST_GUMR_SYNL_AUTO	0x00004000
 #define UCC_FAST_GUMR_RTSM	0x00002000
 #define UCC_FAST_GUMR_REVD	0x00000400
 #define UCC_FAST_GUMR_ENR	0x00000020
diff --git a/include/soc/qcom/camera2.h b/include/soc/qcom/camera2.h
index c529aff..5139d22 100644
--- a/include/soc/qcom/camera2.h
+++ b/include/soc/qcom/camera2.h
@@ -47,6 +47,7 @@
 	uint16_t sensor_id_reg_addr;
 	uint16_t sensor_id;
 	uint16_t sensor_id_mask;
+	struct msm_camera_i2c_reg_setting *setting;
 };
 
 struct msm_cam_clk_info {
diff --git a/include/soc/qcom/clock-pll.h b/include/soc/qcom/clock-pll.h
index 1865e3c..dd7e186 100644
--- a/include/soc/qcom/clock-pll.h
+++ b/include/soc/qcom/clock-pll.h
@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2015, 2017-2018, The Linux Foundation.
+ * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -174,6 +175,7 @@
 
 extern const struct clk_ops clk_ops_local_pll;
 extern const struct clk_ops clk_ops_sr2_pll;
+extern const struct clk_ops clk_ops_acpu_pll;
 extern const struct clk_ops clk_ops_variable_rate_pll;
 extern const struct clk_ops clk_ops_variable_rate_pll_hwfsm;
 
diff --git a/include/soc/qcom/pm-legacy.h b/include/soc/qcom/pm-legacy.h
new file mode 100644
index 0000000..0ae5d7b
--- /dev/null
+++ b/include/soc/qcom/pm-legacy.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2016, 2018, The Linux Foundation. All rights reserved.
+ * Author: San Mehat <san@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_PM_H
+#define __ARCH_ARM_MACH_MSM_PM_H
+
+#include <linux/types.h>
+#include <linux/cpuidle.h>
+#include <asm/smp_plat.h>
+#include <asm/barrier.h>
+#include <dt-bindings/msm/pm.h>
+
+#if !defined(CONFIG_SMP)
+#define msm_secondary_startup NULL
+#elif defined(CONFIG_CPU_V7)
+#define msm_secondary_startup secondary_startup
+#else
+#define msm_secondary_startup secondary_holding_pen
+#endif
+
+enum msm_pm_sleep_mode {
+	MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT,
+	MSM_PM_SLEEP_MODE_RETENTION,
+	MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE,
+	MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
+	MSM_PM_SLEEP_MODE_FASTPC,
+	MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND,
+	MSM_PM_SLEEP_MODE_NR,
+	MSM_PM_SLEEP_MODE_NOT_SELECTED,
+};
+
+enum msm_pm_l2_scm_flag {
+	MSM_SCM_L2_ON = 0,
+	MSM_SCM_L2_OFF = 1,
+	MSM_SCM_L2_GDHS = 3,
+	MSM_SCM_L3_PC_OFF = 4,
+};
+
+#define MSM_PM_MODE(cpu, mode_nr)  ((cpu) *MSM_PM_SLEEP_MODE_NR + (mode_nr))
+
+struct msm_pm_time_params {
+	uint32_t latency_us;
+	uint32_t sleep_us;
+	uint32_t next_event_us;
+	uint32_t modified_time_us;
+};
+
+struct msm_pm_sleep_status_data {
+	void __iomem *base_addr;
+	uint32_t mask;
+};
+
+struct latency_level {
+	int affinity_level;
+	int reset_level;
+	const char *level_name;
+};
+
+/**
+ * lpm_cpu_pre_pc_cb(): API to get the L2 flag to pass to TZ
+ *
+ * @cpu: cpuid of the CPU going down.
+ *
+ * Returns the l2 flush flag enum that is passed down to TZ during power
+ * collapse
+ */
+enum msm_pm_l2_scm_flag lpm_cpu_pre_pc_cb(unsigned int cpu);
+
+/**
+ * msm_pm_sleep_mode_allow() - API to determine if sleep mode is allowed.
+ * @cpu:	CPU on which to check for the sleep mode.
+ * @mode:	Sleep Mode to check for.
+ * @idle:	Idle or Suspend Sleep Mode.
+ *
+ * Helper function to determine if an Idle or Suspend
+ * Sleep mode is allowed for a specific CPU.
+ *
+ * Return: 1 for allowed; 0 if not allowed.
+ */
+int msm_pm_sleep_mode_allow(unsigned int cpu, unsigned int mode, bool idle);
+
+/**
+ * msm_pm_sleep_mode_supported() - API to determine if sleep mode is
+ * supported.
+ * @cpu:	CPU on which to check for the sleep mode.
+ * @mode:	Sleep Mode to check for.
+ * @idle:	Idle or Suspend Sleep Mode.
+ *
+ * Helper function to determine if an Idle or Suspend
+ * Sleep mode is allowed and enabled for a specific CPU.
+ *
+ * Return: 1 for supported; 0 if not supported.
+ */
+int msm_pm_sleep_mode_supported(unsigned int cpu, unsigned int mode, bool idle);
+
+struct msm_pm_cpr_ops {
+	void (*cpr_suspend)(void);
+	void (*cpr_resume)(void);
+};
+
+void __init msm_pm_set_tz_retention_flag(unsigned int flag);
+void msm_pm_enable_retention(bool enable);
+bool msm_pm_retention_enabled(void);
+bool msm_cpu_pm_enter_sleep(enum msm_pm_sleep_mode mode, bool from_idle);
+static inline void msm_arch_idle(void)
+{
+	/* memory barrier */
+	mb();
+	wfi();
+}
+
+#ifdef CONFIG_MSM_PM_LEGACY
+
+void msm_pm_set_rpm_wakeup_irq(unsigned int irq);
+int msm_pm_wait_cpu_shutdown(unsigned int cpu);
+int __init msm_pm_sleep_status_init(void);
+void lpm_cpu_hotplug_enter(unsigned int cpu);
+s32 msm_cpuidle_get_deep_idle_latency(void);
+int msm_pm_collapse(unsigned long unused);
+
+/**
+ * lpm_get_latency() - API to get latency for a low power mode
+ * @level:	pointer to a structure with the below elements
+ * affinity_level: The level (CPU/L2/CCI etc.) for which the
+ *	latency is required.
+ *	LPM_AFF_LVL_CPU : CPU level
+ *	LPM_AFF_LVL_L2  : L2 level
+ *	LPM_AFF_LVL_CCI : CCI level
+ * reset_level: Can be passed "LPM_RESET_LVL_GDHS" for
+ *	low power mode with control logic power collapse or
+ *	"LPM_RESET_LVL_PC" for low power mode with control and
+ *	memory logic power collapse or "LPM_RESET_LVL_RET" for
+ *	retention mode.
+ * level_name: Pointer to the cluster name for which the latency
+ *	is required or NULL if the minimum value out of all the
+ *	clusters is to be returned. For CPU level, the name of the
+ *	L2 cluster should be passed. For CCI it has no effect.
+ * @latency:	address to get the latency value.
+ *
+ * The latency value will be for the particular cluster or the minimum
+ * value out of all the clusters at the particular affinity_level
+ * and reset_level.
+ *
+ * Return: 0 for success; Error number for failure.
+ */
+int lpm_get_latency(struct latency_level *level, uint32_t *latency);
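+
+/*
+ * Illustrative (non-normative) usage; the LPM_AFF_LVL_ and LPM_RESET_LVL_
+ * constants are assumed to come from dt-bindings/msm/pm.h, and the "l2"
+ * cluster name is an example that depends on the target's device tree:
+ *
+ *	struct latency_level lvl = {
+ *		.affinity_level = LPM_AFF_LVL_L2,
+ *		.reset_level = LPM_RESET_LVL_PC,
+ *		.level_name = "l2",
+ *	};
+ *	uint32_t latency;
+ *
+ *	if (!lpm_get_latency(&lvl, &latency))
+ *		pr_debug("L2 power collapse latency: %u\n", latency);
+ */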
+
+#else
+static inline void msm_pm_set_rpm_wakeup_irq(unsigned int irq) {}
+static inline int msm_pm_wait_cpu_shutdown(unsigned int cpu) { return 0; }
+static inline int msm_pm_sleep_status_init(void) { return 0; }
+
+static inline void lpm_cpu_hotplug_enter(unsigned int cpu)
+{
+	msm_arch_idle();
+}
+
+static inline s32 msm_cpuidle_get_deep_idle_latency(void) { return 0; }
+#define msm_pm_collapse NULL
+
+static inline int lpm_get_latency(struct latency_level *level,
+						uint32_t *latency)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+void qcom_cpu_die_legacy(unsigned int cpu);
+int qcom_cpu_kill_legacy(unsigned int cpu);
+int msm_platform_secondary_init(unsigned int cpu);
+#else
+static inline int msm_platform_secondary_init(unsigned int cpu) { return 0; }
+static inline void qcom_cpu_die_legacy(unsigned int cpu) {}
+static inline int qcom_cpu_kill_legacy(unsigned int cpu) { return 0; }
+#endif
+
+enum msm_pm_time_stats_id {
+	MSM_PM_STAT_REQUESTED_IDLE = 0,
+	MSM_PM_STAT_IDLE_SPIN,
+	MSM_PM_STAT_IDLE_WFI,
+	MSM_PM_STAT_RETENTION,
+	MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
+	MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE,
+	MSM_PM_STAT_IDLE_POWER_COLLAPSE,
+	MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE,
+	MSM_PM_STAT_SUSPEND,
+	MSM_PM_STAT_FAILED_SUSPEND,
+	MSM_PM_STAT_NOT_IDLE,
+	MSM_PM_STAT_COUNT
+};
+
+#ifdef CONFIG_MSM_IDLE_STATS
+void msm_pm_add_stats(enum msm_pm_time_stats_id *enable_stats, int size);
+void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t);
+void msm_pm_l2_add_stat(uint32_t id, int64_t t);
+#else
+static inline void msm_pm_add_stats(enum msm_pm_time_stats_id *enable_stats,
+		int size) {}
+static inline void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t) {}
+static inline void msm_pm_l2_add_stat(uint32_t id, int64_t t) {}
+#endif
+
+void msm_pm_set_cpr_ops(struct msm_pm_cpr_ops *ops);
+extern dma_addr_t msm_pc_debug_counters_phys;
+#endif  /* __ARCH_ARM_MACH_MSM_PM_H */
diff --git a/include/soc/qcom/qseecomi.h b/include/soc/qcom/qseecomi.h
index a7d4190..d249168 100644
--- a/include/soc/qcom/qseecomi.h
+++ b/include/soc/qcom/qseecomi.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -19,6 +19,7 @@
 #define QSEECOM_KEY_ID_SIZE   32
 
 #define QSEOS_RESULT_FAIL_SEND_CMD_NO_THREAD  -19   /*0xFFFFFFED*/
+#define QSEOS_RESULT_FAIL_APP_ALREADY_LOADED  -38   /*0xFFFFFFDA*/
 #define QSEOS_RESULT_FAIL_UNSUPPORTED_CE_PIPE -63
 #define QSEOS_RESULT_FAIL_KS_OP               -64
 #define QSEOS_RESULT_FAIL_KEY_ID_EXISTS       -65
diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h
index 5f496a8..d4ebb7d 100644
--- a/include/soc/qcom/socinfo.h
+++ b/include/soc/qcom/socinfo.h
@@ -102,16 +102,22 @@
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm845")
 #define early_machine_is_sdm670()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm670")
+#define early_machine_is_sxr1130()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sxr1130")
 #define early_machine_is_qcs605()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,qcs605")
 #define early_machine_is_sda670()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sda670")
 #define early_machine_is_sdm710()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm670")
+#define early_machine_is_sxr1120()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sxr1120")
 #define early_machine_is_msm8953()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8953")
 #define early_machine_is_msm8937()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8937")
+#define early_machine_is_msm8917()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8917")
 #define early_machine_is_mdm9607()      \
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,mdm9607")
 #define early_machine_is_sdm450()	\
@@ -169,11 +175,14 @@
 #define early_machine_is_sdxpoorwills()	0
 #define early_machine_is_sdm845()	0
 #define early_machine_is_sdm670()	0
+#define early_machine_is_sxr1130()	0
 #define early_machine_is_qcs605()	0
 #define early_machine_is_sda670()	0
 #define early_machine_is_sdm710()	0
+#define early_machine_is_sxr1120()	0
 #define early_machine_is_msm8953()	0
 #define early_machine_is_msm8937()	0
+#define early_machine_is_msm8917()	0
 #define early_machine_is_sdm450()	0
 #define early_machine_is_sdm632()	0
 #define early_machine_is_sdm439()	0
@@ -243,14 +252,17 @@
 	SDX_CPU_SDXPOORWILLS,
 	MSM_CPU_SDM845,
 	MSM_CPU_SDM670,
+	MSM_CPU_SXR1130,
 	MSM_CPU_QCS605,
 	MSM_CPU_SDA670,
 	MSM_CPU_SDM710,
+	MSM_CPU_SXR1120,
 	MSM_CPU_8953,
 	MSM_CPU_SDM450,
 	MSM_CPU_SDM632,
 	MSM_CPU_SDA632,
 	MSM_CPU_8937,
+	MSM_CPU_8917,
 	MSM_CPU_9607,
 	MSM_CPU_SDM439,
 	MSM_CPU_SDM429,
diff --git a/include/soc/qcom/spm.h b/include/soc/qcom/spm.h
index d0f76d0..a6f38cd 100644
--- a/include/soc/qcom/spm.h
+++ b/include/soc/qcom/spm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,7 @@
 #if defined(CONFIG_MSM_SPM)
 
 int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm);
+void msm_spm_set_rpm_hs(bool allow_rpm_hs);
 int msm_spm_probe_done(void);
 int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel);
 int msm_spm_get_vdd(unsigned int cpu);
@@ -43,6 +44,8 @@
 struct msm_spm_device *msm_spm_get_device_by_name(const char *name);
 int msm_spm_config_low_power_mode(struct msm_spm_device *dev,
 		unsigned int mode, bool notify_rpm);
+int msm_spm_config_low_power_mode_addr(struct msm_spm_device *dev,
+		unsigned int mode, bool notify_rpm);
 int msm_spm_device_init(void);
 bool msm_spm_is_mode_avail(unsigned int mode);
 void msm_spm_dump_regs(unsigned int cpu);
@@ -80,6 +83,8 @@
 	return -ENODEV;
 }
 
+static inline void msm_spm_set_rpm_hs(bool allow_rpm_hs) {}
+
 static inline int msm_spm_probe_done(void)
 {
 	return -ENODEV;
@@ -114,6 +119,13 @@
 {
 	return -ENODEV;
 }
+
+static inline int msm_spm_config_low_power_mode_addr(
+	struct msm_spm_device *dev, unsigned int mode, bool notify_rpm)
+{
+	return -ENODEV;
+}
+
 static inline struct msm_spm_device *msm_spm_get_device_by_name(
 				const char *name)
 {
diff --git a/include/sound/control.h b/include/sound/control.h
index 21d047f..4142757 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -22,6 +22,7 @@
  *
  */
 
+#include <linux/nospec.h>
 #include <sound/asound.h>
 
 #define snd_kcontrol_chip(kcontrol) ((kcontrol)->private_data)
@@ -147,12 +148,14 @@
 
 static inline unsigned int snd_ctl_get_ioffnum(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
 {
-	return id->numid - kctl->id.numid;
+	unsigned int ioff = id->numid - kctl->id.numid;
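+	/* clamp to kctl->count to avoid speculative out-of-bounds access */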
+	return array_index_nospec(ioff, kctl->count);
 }
 
 static inline unsigned int snd_ctl_get_ioffidx(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
 {
-	return id->index - kctl->id.index;
+	unsigned int ioff = id->index - kctl->id.index;
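+	/* clamp to kctl->count to avoid speculative out-of-bounds access */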
+	return array_index_nospec(ioff, kctl->count);
 }
 
 static inline unsigned int snd_ctl_get_ioff(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
diff --git a/include/sound/pcm_oss.h b/include/sound/pcm_oss.h
index 760c969..12bbf8c 100644
--- a/include/sound/pcm_oss.h
+++ b/include/sound/pcm_oss.h
@@ -57,6 +57,7 @@
 	char *buffer;				/* vmallocated period */
 	size_t buffer_used;			/* used length from period buffer */
 	struct mutex params_lock;
+	atomic_t rw_ref;		/* concurrent read/write accesses */
 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
 	struct snd_pcm_plugin *plugin_first;
 	struct snd_pcm_plugin *plugin_last;
diff --git a/include/trace/events/pagefault.h b/include/trace/events/pagefault.h
new file mode 100644
index 0000000..a9643b3
--- /dev/null
+++ b/include/trace/events/pagefault.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM pagefault
+
+#if !defined(_TRACE_PAGEFAULT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PAGEFAULT_H
+
+#include <linux/tracepoint.h>
+#include <linux/mm.h>
+
+DECLARE_EVENT_CLASS(spf,
+
+	TP_PROTO(unsigned long caller,
+		 struct vm_area_struct *vma, unsigned long address),
+
+	TP_ARGS(caller, vma, address),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, caller)
+		__field(unsigned long, vm_start)
+		__field(unsigned long, vm_end)
+		__field(unsigned long, address)
+	),
+
+	TP_fast_assign(
+		__entry->caller		= caller;
+		__entry->vm_start	= vma->vm_start;
+		__entry->vm_end		= vma->vm_end;
+		__entry->address	= address;
+	),
+
+	TP_printk("ip:%lx vma:%lx-%lx address:%lx",
+		  __entry->caller, __entry->vm_start, __entry->vm_end,
+		  __entry->address)
+);
+
+DEFINE_EVENT(spf, spf_pte_lock,
+
+	TP_PROTO(unsigned long caller,
+		 struct vm_area_struct *vma, unsigned long address),
+
+	TP_ARGS(caller, vma, address)
+);
+
+DEFINE_EVENT(spf, spf_vma_changed,
+
+	TP_PROTO(unsigned long caller,
+		 struct vm_area_struct *vma, unsigned long address),
+
+	TP_ARGS(caller, vma, address)
+);
+
+DEFINE_EVENT(spf, spf_vma_noanon,
+
+	TP_PROTO(unsigned long caller,
+		 struct vm_area_struct *vma, unsigned long address),
+
+	TP_ARGS(caller, vma, address)
+);
+
+DEFINE_EVENT(spf, spf_vma_notsup,
+
+	TP_PROTO(unsigned long caller,
+		 struct vm_area_struct *vma, unsigned long address),
+
+	TP_ARGS(caller, vma, address)
+);
+
+DEFINE_EVENT(spf, spf_vma_access,
+
+	TP_PROTO(unsigned long caller,
+		 struct vm_area_struct *vma, unsigned long address),
+
+	TP_ARGS(caller, vma, address)
+);
+
+DEFINE_EVENT(spf, spf_pmd_changed,
+
+	TP_PROTO(unsigned long caller,
+		 struct vm_area_struct *vma, unsigned long address),
+
+	TP_ARGS(caller, vma, address)
+);
+
+#endif /* _TRACE_PAGEFAULT_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/pdc.h b/include/trace/events/pdc.h
index 400e959..fca0548 100644
--- a/include/trace/events/pdc.h
+++ b/include/trace/events/pdc.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -41,7 +41,7 @@
 	),
 
 	TP_printk("%s hwirq:%u pin:%u type:%u enable:%u",
-		__entry->func, __entry->pin, __entry->hwirq, __entry->type,
+		__entry->func, __entry->hwirq, __entry->pin, __entry->type,
 		__entry->enable)
 );
 
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index e06df4d..b355ebf 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -713,10 +713,10 @@
 TRACE_EVENT(sched_task_util,
 
 	TP_PROTO(struct task_struct *p, int next_cpu, int backup_cpu,
-		 int target_cpu, bool sync, bool need_idle,
+		 int target_cpu, bool sync, bool need_idle, int fastpath,
 		 bool placement_boost, int rtg_cpu, u64 start_t),
 
-	TP_ARGS(p, next_cpu, backup_cpu, target_cpu, sync, need_idle,
+	TP_ARGS(p, next_cpu, backup_cpu, target_cpu, sync, need_idle, fastpath,
 		placement_boost, rtg_cpu, start_t),
 
 	TP_STRUCT__entry(
@@ -729,6 +729,7 @@
 		__field(int, target_cpu			)
 		__field(bool, sync			)
 		__field(bool, need_idle			)
+		__field(int, fastpath			)
 		__field(bool, placement_boost		)
 		__field(int, rtg_cpu			)
 		__field(u64, latency			)
@@ -744,13 +745,14 @@
 		__entry->target_cpu		= target_cpu;
 		__entry->sync			= sync;
 		__entry->need_idle		= need_idle;
+		__entry->fastpath		= fastpath;
 		__entry->placement_boost	= placement_boost;
 		__entry->rtg_cpu		= rtg_cpu;
 		__entry->latency		= (sched_clock() - start_t);
 	),
 
-	TP_printk("pid=%d comm=%s util=%lu prev_cpu=%d next_cpu=%d backup_cpu=%d target_cpu=%d sync=%d need_idle=%d placement_boost=%d rtg_cpu=%d latency=%llu",
-		__entry->pid, __entry->comm, __entry->util, __entry->prev_cpu, __entry->next_cpu, __entry->backup_cpu, __entry->target_cpu, __entry->sync, __entry->need_idle, __entry->placement_boost, __entry->rtg_cpu, __entry->latency)
+	TP_printk("pid=%d comm=%s util=%lu prev_cpu=%d next_cpu=%d backup_cpu=%d target_cpu=%d sync=%d need_idle=%d fastpath=%d placement_boost=%d rtg_cpu=%d latency=%llu",
+		__entry->pid, __entry->comm, __entry->util, __entry->prev_cpu, __entry->next_cpu, __entry->backup_cpu, __entry->target_cpu, __entry->sync, __entry->need_idle,  __entry->fastpath, __entry->placement_boost, __entry->rtg_cpu, __entry->latency)
 );
 
 #endif
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 28c5da6..3411da7 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -125,6 +125,20 @@
 	TP_ARGS(timer)
 );
 
+#define decode_clockid(type)						\
+	__print_symbolic(type,						\
+		{ CLOCK_REALTIME,	"CLOCK_REALTIME"	},	\
+		{ CLOCK_MONOTONIC,	"CLOCK_MONOTONIC"	},	\
+		{ CLOCK_BOOTTIME,	"CLOCK_BOOTTIME"	},	\
+		{ CLOCK_TAI,		"CLOCK_TAI"		})
+
+#define decode_hrtimer_mode(mode)					\
+	__print_symbolic(mode,						\
+		{ HRTIMER_MODE_ABS,		"ABS"		},	\
+		{ HRTIMER_MODE_REL,		"REL"		},	\
+		{ HRTIMER_MODE_ABS_PINNED,	"ABS|PINNED"	},	\
+		{ HRTIMER_MODE_REL_PINNED,	"REL|PINNED"	})
+
 /**
  * hrtimer_init - called when the hrtimer is initialized
  * @hrtimer:	pointer to struct hrtimer
@@ -151,10 +165,8 @@
 	),
 
 	TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer,
-		  __entry->clockid == CLOCK_REALTIME ?
-			"CLOCK_REALTIME" : "CLOCK_MONOTONIC",
-		  __entry->mode == HRTIMER_MODE_ABS ?
-			"HRTIMER_MODE_ABS" : "HRTIMER_MODE_REL")
+		  decode_clockid(__entry->clockid),
+		  decode_hrtimer_mode(__entry->mode))
 );
 
 /**
diff --git a/include/trace/events/trace_msm_low_power.h b/include/trace/events/trace_msm_low_power.h
index c25da0e..54c1272 100644
--- a/include/trace/events/trace_msm_low_power.h
+++ b/include/trace/events/trace_msm_low_power.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -250,6 +250,25 @@
 		__entry->sample, __entry->tmr)
 );
 
+TRACE_EVENT(pre_pc_cb,
+
+	TP_PROTO(int tzflag),
+
+	TP_ARGS(tzflag),
+
+	TP_STRUCT__entry(
+		__field(int, tzflag)
+	),
+
+	TP_fast_assign(
+		__entry->tzflag = tzflag;
+	),
+
+	TP_printk("tzflag:%d",
+		__entry->tzflag
+	)
+);
+
 #endif
 #define TRACE_INCLUDE_FILE trace_msm_low_power
 #include <trace/define_trace.h>
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 2ccd9cc..7bd8783 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -31,7 +31,7 @@
 
 #define WB_WORK_REASON							\
 	EM( WB_REASON_BACKGROUND,		"background")		\
-	EM( WB_REASON_TRY_TO_FREE_PAGES,	"try_to_free_pages")	\
+	EM( WB_REASON_VMSCAN,			"vmscan")		\
 	EM( WB_REASON_SYNC,			"sync")			\
 	EM( WB_REASON_PERIODIC,			"periodic")		\
 	EM( WB_REASON_LAPTOP_TIMER,		"laptop_timer")		\
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index bce990f..d6be935 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -377,22 +377,6 @@
 DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin);
 DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin);
 
-TRACE_EVENT(xen_mmu_flush_tlb_all,
-	    TP_PROTO(int x),
-	    TP_ARGS(x),
-	    TP_STRUCT__entry(__array(char, x, 0)),
-	    TP_fast_assign((void)x),
-	    TP_printk("%s", "")
-	);
-
-TRACE_EVENT(xen_mmu_flush_tlb,
-	    TP_PROTO(int x),
-	    TP_ARGS(x),
-	    TP_STRUCT__entry(__array(char, x, 0)),
-	    TP_fast_assign((void)x),
-	    TP_printk("%s", "")
-	);
-
 TRACE_EVENT(xen_mmu_flush_tlb_single,
 	    TP_PROTO(unsigned long addr),
 	    TP_ARGS(addr),
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index d0341cd..4b26cba 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -180,8 +180,12 @@
  */
 struct drm_msm_gem_submit_reloc {
 	__u32 submit_offset;  /* in, offset from submit_bo */
-	__u32 or;             /* in, value OR'd with result */
-	__s32 shift;          /* in, amount of left shift (can be negative) */
+#ifdef __cplusplus
+	__u32 or_val;         /* in, value OR'd with result */
+#else
+	__u32 or;             /* in, value OR'd with result */
+#endif
+	__s32 shift;          /* in, amount of left shift (can be negative) */
 	__u32 reloc_idx;      /* in, index of reloc_bo buffer */
 	__u64 reloc_offset;   /* in, offset from start of reloc_bo */
 };
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index 91a31ff..9a781f0 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -63,6 +63,7 @@
 };
 
 #define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
+#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
 
 struct drm_virtgpu_getparam {
 	__u64 param;
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index c9248e5a..5da3634 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -472,6 +472,8 @@
 header-y += v4l2-dv-timings.h
 header-y += v4l2-mediabus.h
 header-y += v4l2-subdev.h
+header-y += msm_vidc_dec.h
+header-y += msm_vidc_enc.h
 header-y += veth.h
 header-y += vfio.h
 header-y += vhost.h
diff --git a/include/uapi/linux/bgcom_interface.h b/include/uapi/linux/bgcom_interface.h
index f18280a..1ee7b87 100644
--- a/include/uapi/linux/bgcom_interface.h
+++ b/include/uapi/linux/bgcom_interface.h
@@ -19,6 +19,7 @@
 #define BGCOM_REG_WRITE  5
 #define BGCOM_SOFT_RESET  6
 #define BGCOM_MODEM_DOWN2_BG  7
+#define BGCOM_TWM_EXIT  8
 #define EXCHANGE_CODE  'V'
 
 struct bg_ui_data {
@@ -57,6 +58,9 @@
 #define BG_SOFT_RESET \
 	_IOWR(EXCHANGE_CODE, BGCOM_SOFT_RESET, \
 	struct bg_ui_data)
+#define BG_TWM_EXIT \
+	_IOWR(EXCHANGE_CODE, BGCOM_TWM_EXIT, \
+	struct bg_ui_data)
 #define BG_MODEM_DOWN2_BG_DONE \
 	_IOWR(EXCHANGE_CODE, BGCOM_MODEM_DOWN2_BG, \
 	struct bg_ui_data)
diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h
index beed138..f85ed3a 100644
--- a/include/uapi/linux/fcntl.h
+++ b/include/uapi/linux/fcntl.h
@@ -43,6 +43,27 @@
 /* (1U << 31) is reserved for signed error codes */
 
 /*
+ * Set/Get write life time hints. {GET,SET}_RW_HINT operate on the
+ * underlying inode, while {GET,SET}_FILE_RW_HINT operate only on
+ * the specific file.
+ */
+#define F_GET_RW_HINT		(F_LINUX_SPECIFIC_BASE + 11)
+#define F_SET_RW_HINT		(F_LINUX_SPECIFIC_BASE + 12)
+#define F_GET_FILE_RW_HINT	(F_LINUX_SPECIFIC_BASE + 13)
+#define F_SET_FILE_RW_HINT	(F_LINUX_SPECIFIC_BASE + 14)
+
+/*
+ * Valid hint values for F_{GET,SET}_RW_HINT. 0 is "not set", or can be
+ * used to clear any hints previously set.
+ */
+#define RWF_WRITE_LIFE_NOT_SET	0
+#define RWH_WRITE_LIFE_NONE	1
+#define RWH_WRITE_LIFE_SHORT	2
+#define RWH_WRITE_LIFE_MEDIUM	3
+#define RWH_WRITE_LIFE_LONG	4
+#define RWH_WRITE_LIFE_EXTREME	5
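+
+/*
+ * Illustrative (non-normative) userspace usage; fd is assumed to be an
+ * already-open file descriptor:
+ *
+ *	uint64_t hint = RWH_WRITE_LIFE_SHORT;
+ *
+ *	if (fcntl(fd, F_SET_RW_HINT, &hint) == -1)
+ *		perror("F_SET_RW_HINT");
+ */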
+
+/*
  * Types of directory notifications that may be requested.
  */
 #define DN_ACCESS	0x00000001	/* File accessed */
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 12263e4..fcc0e76 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -226,7 +226,6 @@
 #define BLKSECDISCARD _IO(0x12,125)
 #define BLKROTATIONAL _IO(0x12,126)
 #define BLKZEROOUT _IO(0x12,127)
-#define BLKGETSTPART _IO(0x12, 128)
 
 #define BMAP_IOCTL 1		/* obsolete - kept for compatibility */
 #define FIBMAP	   _IO(0x00,1)	/* bmap access */
@@ -273,6 +272,9 @@
 #define FS_ENCRYPTION_MODE_AES_256_CTS		4
 #define FS_ENCRYPTION_MODE_AES_128_CBC		5
 #define FS_ENCRYPTION_MODE_AES_128_CTS		6
+#define FS_ENCRYPTION_MODE_SPECK128_256_XTS	7
+#define FS_ENCRYPTION_MODE_SPECK128_256_CTS	8
+#define FS_ENCRYPTION_MODE_PRIVATE		127
 
 struct fscrypt_policy {
 	__u8 version;
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index cae866f..6475a16 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -29,6 +29,7 @@
  */
 
 #define ETH_ALEN	6		/* Octets in one ethernet addr	 */
+#define ETH_TLEN	2		/* Octets in ethernet type field */
 #define ETH_HLEN	14		/* Total octets in header.	 */
 #define ETH_ZLEN	60		/* Min. octets in frame sans FCS */
 #define ETH_DATA_LEN	1500		/* Max. octets in payload	 */
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 4ee67cb..05b9bb6 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -870,6 +870,7 @@
 #define KVM_CAP_S390_USER_INSTR0 130
 #define KVM_CAP_MSI_DEVID 131
 #define KVM_CAP_PPC_HTM 132
+#define KVM_CAP_S390_BPB 152
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index 1c39daf..be4cb02 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -98,6 +98,8 @@
 #define IPA_IOCTL_CLEANUP                       56
 #define IPA_IOCTL_QUERY_WLAN_CLIENT             57
 #define IPA_IOCTL_GET_VLAN_MODE                 58
+#define IPA_IOCTL_ADD_BRIDGE_VLAN_MAPPING       59
+#define IPA_IOCTL_DEL_BRIDGE_VLAN_MAPPING       60
 
 /**
  * max size of the header to be inserted
@@ -512,7 +514,13 @@
 	IPA_PER_CLIENT_STATS_EVENT_MAX
 };
 
-#define IPA_EVENT_MAX_NUM (IPA_PER_CLIENT_STATS_EVENT_MAX)
+enum ipa_vlan_bridge_event {
+	ADD_BRIDGE_VLAN_MAPPING = IPA_PER_CLIENT_STATS_EVENT_MAX,
+	DEL_BRIDGE_VLAN_MAPPING,
+	BRIDGE_VLAN_MAPPING_MAX
+};
+
+#define IPA_EVENT_MAX_NUM (BRIDGE_VLAN_MAPPING_MAX)
 #define IPA_EVENT_MAX ((int)IPA_EVENT_MAX_NUM)
 
 /**
@@ -1872,6 +1880,20 @@
 };
 
 /**
+ * struct ipa_ioc_bridge_vlan_mapping_info - vlan to bridge mapping info
+ * @bridge_name: bridge interface name
+ * @vlan_id: vlan ID bridge is mapped to
+ * @bridge_ipv4: bridge interface ipv4 address
+ * @subnet_mask: bridge interface subnet mask
+ */
+struct ipa_ioc_bridge_vlan_mapping_info {
+	char bridge_name[IPA_RESOURCE_NAME_MAX];
+	uint16_t vlan_id;
+	uint32_t bridge_ipv4;
+	uint32_t subnet_mask;
+};
+
+/**
  *   actual IOCTLs supported by IPA driver
  */
 #define IPA_IOC_ADD_HDR _IOWR(IPA_IOC_MAGIC, \
@@ -2056,6 +2078,15 @@
 #define IPA_IOC_GET_VLAN_MODE _IOWR(IPA_IOC_MAGIC, \
 				IPA_IOCTL_GET_VLAN_MODE, \
 				struct ipa_ioc_get_vlan_mode *)
+
+#define IPA_IOC_ADD_BRIDGE_VLAN_MAPPING _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ADD_BRIDGE_VLAN_MAPPING, \
+				struct ipa_ioc_bridge_vlan_mapping_info)
+
+#define IPA_IOC_DEL_BRIDGE_VLAN_MAPPING _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_DEL_BRIDGE_VLAN_MAPPING, \
+				struct ipa_ioc_bridge_vlan_mapping_info)
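+
+/*
+ * Illustrative (non-normative) userspace usage; the bridge name, VLAN id and
+ * the open IPA device fd are assumptions for the example:
+ *
+ *	struct ipa_ioc_bridge_vlan_mapping_info map = {
+ *		.bridge_name = "bridge0",
+ *		.vlan_id = 100,
+ *	};
+ *
+ *	if (ioctl(fd, IPA_IOC_ADD_BRIDGE_VLAN_MAPPING, &map) < 0)
+ *		perror("IPA_IOC_ADD_BRIDGE_VLAN_MAPPING");
+ */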
+
 /*
  * unique magic number of the Tethering bridge ioctls
  */
diff --git a/include/uapi/linux/msm_mdp_ext.h b/include/uapi/linux/msm_mdp_ext.h
index 1a2a7e2c..6e4a6d7 100644
--- a/include/uapi/linux/msm_mdp_ext.h
+++ b/include/uapi/linux/msm_mdp_ext.h
@@ -34,9 +34,9 @@
  * To allow proper structure padding for 64bit/32bit target
  */
 #ifdef __LP64
-#define MDP_LAYER_COMMIT_V1_PAD 2
+#define MDP_LAYER_COMMIT_V1_PAD 1
 #else
-#define MDP_LAYER_COMMIT_V1_PAD 3
+#define MDP_LAYER_COMMIT_V1_PAD 2
 #endif
 
 /*
@@ -147,6 +147,9 @@
  */
 #define MDP_COMMIT_AVR_ONE_SHOT_MODE		0x10
 
+/* Flag to update the backlight brightness as part of the commit */
+#define MDP_COMMIT_UPDATE_BRIGHTNESS		0x40
+
 /* Flag to enable concurrent writeback for the frame */
 #define MDP_COMMIT_CWB_EN 0x800
 
@@ -485,6 +488,9 @@
 	/* FRC info per device which contains frame count and timestamp */
 	struct mdp_frc_info __user *frc_info;
 
+	/* Backlight level to apply when this display commit takes effect */
+	uint32_t		bl_level;
+
 	/* 32-bits reserved value for future usage. */
 	uint32_t		reserved[MDP_LAYER_COMMIT_V1_PAD];
 };
diff --git a/include/uapi/linux/msm_vidc_dec.h b/include/uapi/linux/msm_vidc_dec.h
new file mode 100644
index 0000000..46af82b
--- /dev/null
+++ b/include/uapi/linux/msm_vidc_dec.h
@@ -0,0 +1,629 @@
+#ifndef _UAPI_MSM_VIDC_DEC_H_
+#define _UAPI_MSM_VIDC_DEC_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/* STATUS CODES */
+/* Base value for status codes */
+#define VDEC_S_BASE	0x40000000
+/* Success */
+#define VDEC_S_SUCCESS	(VDEC_S_BASE)
+/* General failure */
+#define VDEC_S_EFAIL	(VDEC_S_BASE + 1)
+/* Fatal irrecoverable failure. Need to tear down session. */
+#define VDEC_S_EFATAL   (VDEC_S_BASE + 2)
+/* Error detected in the passed parameters */
+#define VDEC_S_EBADPARAM	(VDEC_S_BASE + 3)
+/* Command called in invalid state. */
+#define VDEC_S_EINVALSTATE	(VDEC_S_BASE + 4)
+/* Insufficient OS resources - thread, memory etc. */
+#define VDEC_S_ENOSWRES	(VDEC_S_BASE + 5)
+/* Insufficient HW resources - core capacity maxed out. */
+#define VDEC_S_ENOHWRES	(VDEC_S_BASE + 6)
+/* Invalid command called */
+#define VDEC_S_EINVALCMD	(VDEC_S_BASE + 7)
+/* Command timeout. */
+#define VDEC_S_ETIMEOUT	(VDEC_S_BASE + 8)
+/* Pre-requirement is not met for API. */
+#define VDEC_S_ENOPREREQ	(VDEC_S_BASE + 9)
+/* Command queue is full. */
+#define VDEC_S_ECMDQFULL	(VDEC_S_BASE + 10)
+/* Command is not supported by this driver */
+#define VDEC_S_ENOTSUPP	(VDEC_S_BASE + 11)
+/* Command is not implemented by the driver. */
+#define VDEC_S_ENOTIMPL	(VDEC_S_BASE + 12)
+/* Driver is busy. */
+#define VDEC_S_BUSY	(VDEC_S_BASE + 13)
+#define VDEC_S_INPUT_BITSTREAM_ERR (VDEC_S_BASE + 14)
+
+#define VDEC_INTF_VER	1
+#define VDEC_MSG_BASE	0x0000000
+/*
+ * Codes to identify asynchronous message responses and events that the
+ * driver wants to communicate to the app.
+ */
+#define VDEC_MSG_INVALID	(VDEC_MSG_BASE + 0)
+#define VDEC_MSG_RESP_INPUT_BUFFER_DONE	(VDEC_MSG_BASE + 1)
+#define VDEC_MSG_RESP_OUTPUT_BUFFER_DONE	(VDEC_MSG_BASE + 2)
+#define VDEC_MSG_RESP_INPUT_FLUSHED	(VDEC_MSG_BASE + 3)
+#define VDEC_MSG_RESP_OUTPUT_FLUSHED	(VDEC_MSG_BASE + 4)
+#define VDEC_MSG_RESP_FLUSH_INPUT_DONE	(VDEC_MSG_BASE + 5)
+#define VDEC_MSG_RESP_FLUSH_OUTPUT_DONE	(VDEC_MSG_BASE + 6)
+#define VDEC_MSG_RESP_START_DONE	(VDEC_MSG_BASE + 7)
+#define VDEC_MSG_RESP_STOP_DONE	(VDEC_MSG_BASE + 8)
+#define VDEC_MSG_RESP_PAUSE_DONE	(VDEC_MSG_BASE + 9)
+#define VDEC_MSG_RESP_RESUME_DONE	(VDEC_MSG_BASE + 10)
+#define VDEC_MSG_RESP_RESOURCE_LOADED	(VDEC_MSG_BASE + 11)
+#define VDEC_EVT_RESOURCES_LOST	(VDEC_MSG_BASE + 12)
+#define VDEC_MSG_EVT_CONFIG_CHANGED	(VDEC_MSG_BASE + 13)
+#define VDEC_MSG_EVT_HW_ERROR	(VDEC_MSG_BASE + 14)
+#define VDEC_MSG_EVT_INFO_CONFIG_CHANGED	(VDEC_MSG_BASE + 15)
+#define VDEC_MSG_EVT_INFO_FIELD_DROPPED	(VDEC_MSG_BASE + 16)
+#define VDEC_MSG_EVT_HW_OVERLOAD	(VDEC_MSG_BASE + 17)
+#define VDEC_MSG_EVT_MAX_CLIENTS	(VDEC_MSG_BASE + 18)
+#define VDEC_MSG_EVT_HW_UNSUPPORTED	(VDEC_MSG_BASE + 19)
+
+/*Buffer flags bits masks.*/
+#define VDEC_BUFFERFLAG_EOS	0x00000001
+#define VDEC_BUFFERFLAG_DECODEONLY	0x00000004
+#define VDEC_BUFFERFLAG_DATACORRUPT	0x00000008
+#define VDEC_BUFFERFLAG_ENDOFFRAME	0x00000010
+#define VDEC_BUFFERFLAG_SYNCFRAME	0x00000020
+#define VDEC_BUFFERFLAG_EXTRADATA	0x00000040
+#define VDEC_BUFFERFLAG_CODECCONFIG	0x00000080
+
+/*Post processing flags bit masks*/
+#define VDEC_EXTRADATA_NONE 0x001
+#define VDEC_EXTRADATA_QP 0x004
+#define VDEC_EXTRADATA_MB_ERROR_MAP 0x008
+#define VDEC_EXTRADATA_SEI 0x010
+#define VDEC_EXTRADATA_VUI 0x020
+#define VDEC_EXTRADATA_VC1 0x040
+
+#define VDEC_EXTRADATA_EXT_DATA          0x0800
+#define VDEC_EXTRADATA_USER_DATA         0x1000
+#define VDEC_EXTRADATA_EXT_BUFFER        0x2000
+
+#define VDEC_CMDBASE	0x800
+#define VDEC_CMD_SET_INTF_VERSION	(VDEC_CMDBASE)
+
+#define VDEC_IOCTL_MAGIC 'v'
+
+struct vdec_ioctl_msg {
+	void __user *in;
+	void __user *out;
+};
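+
+/*
+ * Illustrative (non-normative) usage: most ioctls below take a
+ * struct vdec_ioctl_msg whose in/out pointers reference the command
+ * specific payloads, e.g. (fd is assumed to be an open decoder node):
+ *
+ *	struct vdec_picsize pic;
+ *	struct vdec_ioctl_msg msg = { .in = NULL, .out = &pic };
+ *
+ *	if (!ioctl(fd, VDEC_IOCTL_GET_PICRES, &msg))
+ *		printf("%ux%u\n", pic.frame_width, pic.frame_height);
+ */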
+
+/*
+ * CMD params: InputParam:enum vdec_codec
+ * OutputParam: struct vdec_profile_level
+ */
+#define VDEC_IOCTL_GET_PROFILE_LEVEL_SUPPORTED \
+	_IOWR(VDEC_IOCTL_MAGIC, 0, struct vdec_ioctl_msg)
+
+/*
+ * CMD params:InputParam: NULL
+ * OutputParam: uint32_t(bitmask)
+ */
+#define VDEC_IOCTL_GET_INTERLACE_FORMAT \
+	_IOR(VDEC_IOCTL_MAGIC, 1, struct vdec_ioctl_msg)
+
+/*
+ * CMD params: InputParam:  enum vdec_codec
+ * OutputParam: struct vdec_profile_level
+ */
+#define VDEC_IOCTL_GET_CURRENT_PROFILE_LEVEL \
+	_IOWR(VDEC_IOCTL_MAGIC, 2, struct vdec_ioctl_msg)
+
+/*
+ * CMD params: SET: InputParam: enum vdec_output_fromat  OutputParam: NULL
+ * GET:  InputParam: NULL OutputParam: enum vdec_output_fromat
+ */
+#define VDEC_IOCTL_SET_OUTPUT_FORMAT \
+	_IOWR(VDEC_IOCTL_MAGIC, 3, struct vdec_ioctl_msg)
+#define VDEC_IOCTL_GET_OUTPUT_FORMAT \
+	_IOWR(VDEC_IOCTL_MAGIC, 4, struct vdec_ioctl_msg)
+
+/*
+ * CMD params: SET: InputParam: enum vdec_codec OutputParam: NULL
+ * GET: InputParam: NULL OutputParam: enum vdec_codec
+ */
+#define VDEC_IOCTL_SET_CODEC \
+	_IOW(VDEC_IOCTL_MAGIC, 5, struct vdec_ioctl_msg)
+#define VDEC_IOCTL_GET_CODEC \
+	_IOR(VDEC_IOCTL_MAGIC, 6, struct vdec_ioctl_msg)
+
+/*
+ * CMD params: SET: InputParam: struct vdec_picsize outputparam: NULL
+ * GET: InputParam: NULL outputparam: struct vdec_picsize
+ */
+#define VDEC_IOCTL_SET_PICRES \
+	_IOW(VDEC_IOCTL_MAGIC, 7, struct vdec_ioctl_msg)
+#define VDEC_IOCTL_GET_PICRES \
+	_IOR(VDEC_IOCTL_MAGIC, 8, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_EXTRADATA \
+	_IOW(VDEC_IOCTL_MAGIC, 9, struct vdec_ioctl_msg)
+#define VDEC_IOCTL_GET_EXTRADATA \
+	_IOR(VDEC_IOCTL_MAGIC, 10, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_SEQUENCE_HEADER \
+	_IOW(VDEC_IOCTL_MAGIC, 11, struct vdec_ioctl_msg)
+
+/*
+ * CMD params: SET: InputParam - vdec_allocatorproperty, OutputParam - NULL
+ * GET: InputParam - NULL, OutputParam - vdec_allocatorproperty
+ */
+#define VDEC_IOCTL_SET_BUFFER_REQ \
+	_IOW(VDEC_IOCTL_MAGIC, 12, struct vdec_ioctl_msg)
+#define VDEC_IOCTL_GET_BUFFER_REQ \
+	_IOR(VDEC_IOCTL_MAGIC, 13, struct vdec_ioctl_msg)
+/* CMD params: InputParam - vdec_buffer, OutputParam - uint8_t** */
+#define VDEC_IOCTL_ALLOCATE_BUFFER \
+	_IOWR(VDEC_IOCTL_MAGIC, 14, struct vdec_ioctl_msg)
+/* CMD params: InputParam - uint8_t *, OutputParam - NULL.*/
+#define VDEC_IOCTL_FREE_BUFFER \
+	_IOW(VDEC_IOCTL_MAGIC, 15, struct vdec_ioctl_msg)
+
+/*CMD params: CMD: InputParam - struct vdec_setbuffer_cmd, OutputParam - NULL*/
+#define VDEC_IOCTL_SET_BUFFER \
+	_IOW(VDEC_IOCTL_MAGIC, 16, struct vdec_ioctl_msg)
+
+/* CMD params: InputParam - struct vdec_fillbuffer_cmd, OutputParam - NULL*/
+#define VDEC_IOCTL_FILL_OUTPUT_BUFFER \
+	_IOW(VDEC_IOCTL_MAGIC, 17, struct vdec_ioctl_msg)
+
+/*CMD params: InputParam - struct vdec_frameinfo , OutputParam - NULL*/
+#define VDEC_IOCTL_DECODE_FRAME \
+	_IOW(VDEC_IOCTL_MAGIC, 18, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_LOAD_RESOURCES _IO(VDEC_IOCTL_MAGIC, 19)
+#define VDEC_IOCTL_CMD_START _IO(VDEC_IOCTL_MAGIC, 20)
+#define VDEC_IOCTL_CMD_STOP _IO(VDEC_IOCTL_MAGIC, 21)
+#define VDEC_IOCTL_CMD_PAUSE _IO(VDEC_IOCTL_MAGIC, 22)
+#define VDEC_IOCTL_CMD_RESUME _IO(VDEC_IOCTL_MAGIC, 23)
+
+/*CMD params: InputParam - enum vdec_bufferflush , OutputParam - NULL */
+#define VDEC_IOCTL_CMD_FLUSH _IOW(VDEC_IOCTL_MAGIC, 24, struct vdec_ioctl_msg)
+
+/* ========================================================
+ * IOCTL for getting asynchronous notification from driver
+ * ========================================================
+ */
+
+/*IOCTL params: InputParam - NULL, OutputParam - struct vdec_msginfo*/
+#define VDEC_IOCTL_GET_NEXT_MSG \
+	_IOR(VDEC_IOCTL_MAGIC, 25, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_STOP_NEXT_MSG _IO(VDEC_IOCTL_MAGIC, 26)
+
+#define VDEC_IOCTL_GET_NUMBER_INSTANCES \
+	_IOR(VDEC_IOCTL_MAGIC, 27, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_PICTURE_ORDER \
+	_IOW(VDEC_IOCTL_MAGIC, 28, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_FRAME_RATE \
+	_IOW(VDEC_IOCTL_MAGIC, 29, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_H264_MV_BUFFER \
+	_IOW(VDEC_IOCTL_MAGIC, 30, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_FREE_H264_MV_BUFFER \
+	_IOW(VDEC_IOCTL_MAGIC, 31, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_GET_MV_BUFFER_SIZE  \
+	_IOR(VDEC_IOCTL_MAGIC, 32, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_IDR_ONLY_DECODING \
+	_IO(VDEC_IOCTL_MAGIC, 33)
+
+#define VDEC_IOCTL_SET_CONT_ON_RECONFIG  \
+	_IO(VDEC_IOCTL_MAGIC, 34)
+
+#define VDEC_IOCTL_SET_DISABLE_DMX \
+	_IOW(VDEC_IOCTL_MAGIC, 35, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_GET_DISABLE_DMX \
+	_IOR(VDEC_IOCTL_MAGIC, 36, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_GET_DISABLE_DMX_SUPPORT \
+	_IOR(VDEC_IOCTL_MAGIC, 37, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_PERF_CLK \
+	_IOR(VDEC_IOCTL_MAGIC, 38, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_META_BUFFERS \
+	_IOW(VDEC_IOCTL_MAGIC, 39, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_FREE_META_BUFFERS \
+	_IO(VDEC_IOCTL_MAGIC, 40)
+
+enum vdec_picture {
+	PICTURE_TYPE_I,
+	PICTURE_TYPE_P,
+	PICTURE_TYPE_B,
+	PICTURE_TYPE_BI,
+	PICTURE_TYPE_SKIP,
+	PICTURE_TYPE_IDR,
+	PICTURE_TYPE_UNKNOWN
+};
+
+enum vdec_buffer {
+	VDEC_BUFFER_TYPE_INPUT,
+	VDEC_BUFFER_TYPE_OUTPUT
+};
+
+struct vdec_allocatorproperty {
+	enum vdec_buffer buffer_type;
+	uint32_t mincount;
+	uint32_t maxcount;
+	uint32_t actualcount;
+	size_t buffer_size;
+	uint32_t alignment;
+	uint32_t buf_poolid;
+	size_t meta_buffer_size;
+};
+
+struct vdec_bufferpayload {
+	void __user *bufferaddr;
+	size_t buffer_len;
+	int pmem_fd;
+	size_t offset;
+	size_t mmaped_size;
+};
+
+struct vdec_setbuffer_cmd {
+	enum vdec_buffer buffer_type;
+	struct vdec_bufferpayload buffer;
+};
+
+struct vdec_fillbuffer_cmd {
+	struct vdec_bufferpayload buffer;
+	void *client_data;
+};
+
+enum vdec_bufferflush {
+	VDEC_FLUSH_TYPE_INPUT,
+	VDEC_FLUSH_TYPE_OUTPUT,
+	VDEC_FLUSH_TYPE_ALL
+};
+
+enum vdec_codec {
+	VDEC_CODECTYPE_H264 = 0x1,
+	VDEC_CODECTYPE_H263 = 0x2,
+	VDEC_CODECTYPE_MPEG4 = 0x3,
+	VDEC_CODECTYPE_DIVX_3 = 0x4,
+	VDEC_CODECTYPE_DIVX_4 = 0x5,
+	VDEC_CODECTYPE_DIVX_5 = 0x6,
+	VDEC_CODECTYPE_DIVX_6 = 0x7,
+	VDEC_CODECTYPE_XVID = 0x8,
+	VDEC_CODECTYPE_MPEG1 = 0x9,
+	VDEC_CODECTYPE_MPEG2 = 0xa,
+	VDEC_CODECTYPE_VC1 = 0xb,
+	VDEC_CODECTYPE_VC1_RCV = 0xc,
+	VDEC_CODECTYPE_HEVC = 0xd,
+	VDEC_CODECTYPE_MVC = 0xe,
+	VDEC_CODECTYPE_VP8 = 0xf,
+	VDEC_CODECTYPE_VP9 = 0x10,
+};
+
+enum vdec_mpeg2_profile {
+	VDEC_MPEG2ProfileSimple = 0x1,
+	VDEC_MPEG2ProfileMain = 0x2,
+	VDEC_MPEG2Profile422 = 0x4,
+	VDEC_MPEG2ProfileSNR = 0x8,
+	VDEC_MPEG2ProfileSpatial = 0x10,
+	VDEC_MPEG2ProfileHigh = 0x20,
+	VDEC_MPEG2ProfileKhronosExtensions = 0x6F000000,
+	VDEC_MPEG2ProfileVendorStartUnused = 0x7F000000,
+	VDEC_MPEG2ProfileMax = 0x7FFFFFFF
+};
+
+enum vdec_mpeg2_level {
+
+	VDEC_MPEG2LevelLL = 0x1,
+	VDEC_MPEG2LevelML = 0x2,
+	VDEC_MPEG2LevelH14 = 0x4,
+	VDEC_MPEG2LevelHL = 0x8,
+	VDEC_MPEG2LevelKhronosExtensions = 0x6F000000,
+	VDEC_MPEG2LevelVendorStartUnused = 0x7F000000,
+	VDEC_MPEG2LevelMax = 0x7FFFFFFF
+};
+
+enum vdec_mpeg4_profile {
+	VDEC_MPEG4ProfileSimple = 0x01,
+	VDEC_MPEG4ProfileSimpleScalable = 0x02,
+	VDEC_MPEG4ProfileCore = 0x04,
+	VDEC_MPEG4ProfileMain = 0x08,
+	VDEC_MPEG4ProfileNbit = 0x10,
+	VDEC_MPEG4ProfileScalableTexture = 0x20,
+	VDEC_MPEG4ProfileSimpleFace = 0x40,
+	VDEC_MPEG4ProfileSimpleFBA = 0x80,
+	VDEC_MPEG4ProfileBasicAnimated = 0x100,
+	VDEC_MPEG4ProfileHybrid = 0x200,
+	VDEC_MPEG4ProfileAdvancedRealTime = 0x400,
+	VDEC_MPEG4ProfileCoreScalable = 0x800,
+	VDEC_MPEG4ProfileAdvancedCoding = 0x1000,
+	VDEC_MPEG4ProfileAdvancedCore = 0x2000,
+	VDEC_MPEG4ProfileAdvancedScalable = 0x4000,
+	VDEC_MPEG4ProfileAdvancedSimple = 0x8000,
+	VDEC_MPEG4ProfileKhronosExtensions = 0x6F000000,
+	VDEC_MPEG4ProfileVendorStartUnused = 0x7F000000,
+	VDEC_MPEG4ProfileMax = 0x7FFFFFFF
+};
+
+enum vdec_mpeg4_level {
+	VDEC_MPEG4Level0 = 0x01,
+	VDEC_MPEG4Level0b = 0x02,
+	VDEC_MPEG4Level1 = 0x04,
+	VDEC_MPEG4Level2 = 0x08,
+	VDEC_MPEG4Level3 = 0x10,
+	VDEC_MPEG4Level4 = 0x20,
+	VDEC_MPEG4Level4a = 0x40,
+	VDEC_MPEG4Level5 = 0x80,
+	VDEC_MPEG4LevelKhronosExtensions = 0x6F000000,
+	VDEC_MPEG4LevelVendorStartUnused = 0x7F000000,
+	VDEC_MPEG4LevelMax = 0x7FFFFFFF
+};
+
+enum vdec_avc_profile {
+	VDEC_AVCProfileBaseline = 0x01,
+	VDEC_AVCProfileMain = 0x02,
+	VDEC_AVCProfileExtended = 0x04,
+	VDEC_AVCProfileHigh = 0x08,
+	VDEC_AVCProfileHigh10 = 0x10,
+	VDEC_AVCProfileHigh422 = 0x20,
+	VDEC_AVCProfileHigh444 = 0x40,
+	VDEC_AVCProfileKhronosExtensions = 0x6F000000,
+	VDEC_AVCProfileVendorStartUnused = 0x7F000000,
+	VDEC_AVCProfileMax = 0x7FFFFFFF
+};
+
+enum vdec_avc_level {
+	VDEC_AVCLevel1 = 0x01,
+	VDEC_AVCLevel1b = 0x02,
+	VDEC_AVCLevel11 = 0x04,
+	VDEC_AVCLevel12 = 0x08,
+	VDEC_AVCLevel13 = 0x10,
+	VDEC_AVCLevel2 = 0x20,
+	VDEC_AVCLevel21 = 0x40,
+	VDEC_AVCLevel22 = 0x80,
+	VDEC_AVCLevel3 = 0x100,
+	VDEC_AVCLevel31 = 0x200,
+	VDEC_AVCLevel32 = 0x400,
+	VDEC_AVCLevel4 = 0x800,
+	VDEC_AVCLevel41 = 0x1000,
+	VDEC_AVCLevel42 = 0x2000,
+	VDEC_AVCLevel5 = 0x4000,
+	VDEC_AVCLevel51 = 0x8000,
+	VDEC_AVCLevelKhronosExtensions = 0x6F000000,
+	VDEC_AVCLevelVendorStartUnused = 0x7F000000,
+	VDEC_AVCLevelMax = 0x7FFFFFFF
+};
+
+enum vdec_divx_profile {
+	VDEC_DIVXProfile_qMobile = 0x01,
+	VDEC_DIVXProfile_Mobile = 0x02,
+	VDEC_DIVXProfile_HD = 0x04,
+	VDEC_DIVXProfile_Handheld = 0x08,
+	VDEC_DIVXProfile_Portable = 0x10,
+	VDEC_DIVXProfile_HomeTheater = 0x20
+};
+
+enum vdec_xvid_profile {
+	VDEC_XVIDProfile_Simple = 0x1,
+	VDEC_XVIDProfile_Advanced_Realtime_Simple = 0x2,
+	VDEC_XVIDProfile_Advanced_Simple = 0x4
+};
+
+enum vdec_xvid_level {
+	VDEC_XVID_LEVEL_S_L0 = 0x1,
+	VDEC_XVID_LEVEL_S_L1 = 0x2,
+	VDEC_XVID_LEVEL_S_L2 = 0x4,
+	VDEC_XVID_LEVEL_S_L3 = 0x8,
+	VDEC_XVID_LEVEL_ARTS_L1 = 0x10,
+	VDEC_XVID_LEVEL_ARTS_L2 = 0x20,
+	VDEC_XVID_LEVEL_ARTS_L3 = 0x40,
+	VDEC_XVID_LEVEL_ARTS_L4 = 0x80,
+	VDEC_XVID_LEVEL_AS_L0 = 0x100,
+	VDEC_XVID_LEVEL_AS_L1 = 0x200,
+	VDEC_XVID_LEVEL_AS_L2 = 0x400,
+	VDEC_XVID_LEVEL_AS_L3 = 0x800,
+	VDEC_XVID_LEVEL_AS_L4 = 0x1000
+};
+
+enum vdec_h263profile {
+	VDEC_H263ProfileBaseline = 0x01,
+	VDEC_H263ProfileH320Coding = 0x02,
+	VDEC_H263ProfileBackwardCompatible = 0x04,
+	VDEC_H263ProfileISWV2 = 0x08,
+	VDEC_H263ProfileISWV3 = 0x10,
+	VDEC_H263ProfileHighCompression = 0x20,
+	VDEC_H263ProfileInternet = 0x40,
+	VDEC_H263ProfileInterlace = 0x80,
+	VDEC_H263ProfileHighLatency = 0x100,
+	VDEC_H263ProfileKhronosExtensions = 0x6F000000,
+	VDEC_H263ProfileVendorStartUnused = 0x7F000000,
+	VDEC_H263ProfileMax = 0x7FFFFFFF
+};
+
+enum vdec_h263level {
+	VDEC_H263Level10 = 0x01,
+	VDEC_H263Level20 = 0x02,
+	VDEC_H263Level30 = 0x04,
+	VDEC_H263Level40 = 0x08,
+	VDEC_H263Level45 = 0x10,
+	VDEC_H263Level50 = 0x20,
+	VDEC_H263Level60 = 0x40,
+	VDEC_H263Level70 = 0x80,
+	VDEC_H263LevelKhronosExtensions = 0x6F000000,
+	VDEC_H263LevelVendorStartUnused = 0x7F000000,
+	VDEC_H263LevelMax = 0x7FFFFFFF
+};
+
+enum vdec_wmv_format {
+	VDEC_WMVFormatUnused = 0x01,
+	VDEC_WMVFormat7 = 0x02,
+	VDEC_WMVFormat8 = 0x04,
+	VDEC_WMVFormat9 = 0x08,
+	VDEC_WMFFormatKhronosExtensions = 0x6F000000,
+	VDEC_WMFFormatVendorStartUnused = 0x7F000000,
+	VDEC_WMVFormatMax = 0x7FFFFFFF
+};
+
+enum vdec_vc1_profile {
+	VDEC_VC1ProfileSimple = 0x1,
+	VDEC_VC1ProfileMain = 0x2,
+	VDEC_VC1ProfileAdvanced = 0x4
+};
+
+enum vdec_vc1_level {
+	VDEC_VC1_LEVEL_S_Low = 0x1,
+	VDEC_VC1_LEVEL_S_Medium = 0x2,
+	VDEC_VC1_LEVEL_M_Low = 0x4,
+	VDEC_VC1_LEVEL_M_Medium = 0x8,
+	VDEC_VC1_LEVEL_M_High = 0x10,
+	VDEC_VC1_LEVEL_A_L0 = 0x20,
+	VDEC_VC1_LEVEL_A_L1 = 0x40,
+	VDEC_VC1_LEVEL_A_L2 = 0x80,
+	VDEC_VC1_LEVEL_A_L3 = 0x100,
+	VDEC_VC1_LEVEL_A_L4 = 0x200
+};
+
+struct vdec_profile_level {
+	uint32_t profiles;
+	uint32_t levels;
+};
+
+enum vdec_interlaced_format {
+	VDEC_InterlaceFrameProgressive = 0x1,
+	VDEC_InterlaceInterleaveFrameTopFieldFirst = 0x2,
+	VDEC_InterlaceInterleaveFrameBottomFieldFirst = 0x4
+};
+
+#define VDEC_YUV_FORMAT_NV12_TP10_UBWC \
+	VDEC_YUV_FORMAT_NV12_TP10_UBWC
+
+enum vdec_output_fromat {
+	VDEC_YUV_FORMAT_NV12 = 0x1,
+	VDEC_YUV_FORMAT_TILE_4x2 = 0x2,
+	VDEC_YUV_FORMAT_NV12_UBWC = 0x3,
+	VDEC_YUV_FORMAT_NV12_TP10_UBWC = 0x4
+};
+
+enum vdec_output_order {
+	VDEC_ORDER_DISPLAY = 0x1,
+	VDEC_ORDER_DECODE = 0x2
+};
+
+struct vdec_picsize {
+	uint32_t frame_width;
+	uint32_t frame_height;
+	uint32_t stride;
+	uint32_t scan_lines;
+};
+
+struct vdec_seqheader {
+	void __user *ptr_seqheader;
+	size_t seq_header_len;
+	int pmem_fd;
+	size_t pmem_offset;
+};
+
+struct vdec_mberror {
+	void __user *ptr_errormap;
+	size_t err_mapsize;
+};
+
+struct vdec_input_frameinfo {
+	void __user *bufferaddr;
+	size_t offset;
+	size_t datalen;
+	uint32_t flags;
+	int64_t timestamp;
+	void *client_data;
+	int pmem_fd;
+	size_t pmem_offset;
+	void __user *desc_addr;
+	uint32_t desc_size;
+};
+
+struct vdec_framesize {
+	uint32_t   left;
+	uint32_t   top;
+	uint32_t   right;
+	uint32_t   bottom;
+};
+
+struct vdec_aspectratioinfo {
+	uint32_t aspect_ratio;
+	uint32_t par_width;
+	uint32_t par_height;
+};
+
+struct vdec_sep_metadatainfo {
+	void __user *metabufaddr;
+	uint32_t size;
+	int fd;
+	int offset;
+	uint32_t buffer_size;
+};
+
+struct vdec_output_frameinfo {
+	void __user *bufferaddr;
+	size_t offset;
+	size_t len;
+	uint32_t flags;
+	int64_t time_stamp;
+	enum vdec_picture pic_type;
+	void *client_data;
+	void *input_frame_clientdata;
+	struct vdec_picsize picsize;
+	struct vdec_framesize framesize;
+	enum vdec_interlaced_format interlaced_format;
+	struct vdec_aspectratioinfo aspect_ratio_info;
+	struct vdec_sep_metadatainfo metadata_info;
+};
+
+union vdec_msgdata {
+	struct vdec_output_frameinfo output_frame;
+	void *input_frame_clientdata;
+};
+
+struct vdec_msginfo {
+	uint32_t status_code;
+	uint32_t msgcode;
+	union vdec_msgdata msgdata;
+	size_t msgdatasize;
+};
+
+struct vdec_framerate {
+	unsigned long fps_denominator;
+	unsigned long fps_numerator;
+};
+
+struct vdec_h264_mv {
+	size_t size;
+	int count;
+	int pmem_fd;
+	int offset;
+};
+
+struct vdec_mv_buff_size {
+	int width;
+	int height;
+	int size;
+	int alignment;
+};
+
+struct vdec_meta_buffers {
+	size_t size;
+	int count;
+	int pmem_fd;
+	int pmem_fd_iommu;
+	int offset;
+};
+
+#endif /* _UAPI_MSM_VIDC_DEC_H_ */
diff --git a/include/uapi/linux/msm_vidc_enc.h b/include/uapi/linux/msm_vidc_enc.h
new file mode 100644
index 0000000..f4f1630
--- /dev/null
+++ b/include/uapi/linux/msm_vidc_enc.h
@@ -0,0 +1,752 @@
+#ifndef _UAPI_MSM_VIDC_ENC_H_
+#define _UAPI_MSM_VIDC_ENC_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/* STATUS CODES */
+/* Base value for status codes */
+#define VEN_S_BASE	0x00000000
+#define VEN_S_SUCCESS	(VEN_S_BASE)/* Success */
+#define VEN_S_EFAIL	(VEN_S_BASE+1)/* General failure */
+#define VEN_S_EFATAL	(VEN_S_BASE+2)/* Fatal irrecoverable failure*/
+#define VEN_S_EBADPARAM	(VEN_S_BASE+3)/* Error in passed parameters*/
+/*Command called in invalid state*/
+#define VEN_S_EINVALSTATE	(VEN_S_BASE+4)
+#define VEN_S_ENOSWRES	(VEN_S_BASE+5)/* Insufficient OS resources*/
+#define VEN_S_ENOHWRES	(VEN_S_BASE+6)/*Insufficient HW resources */
+#define VEN_S_EBUFFREQ	(VEN_S_BASE+7)/* Buffer requirements were not met*/
+#define VEN_S_EINVALCMD	(VEN_S_BASE+8)/* Invalid command called */
+#define VEN_S_ETIMEOUT	(VEN_S_BASE+9)/* Command timeout. */
+/*Re-attempt was made when multiple invocations are not supported for API.*/
+#define VEN_S_ENOREATMPT	(VEN_S_BASE+10)
+#define VEN_S_ENOPREREQ	(VEN_S_BASE+11)/*Pre-requirement is not met for API*/
+#define VEN_S_ECMDQFULL	(VEN_S_BASE+12)/*Command queue is full*/
+#define VEN_S_ENOTSUPP	(VEN_S_BASE+13)/*Command not supported*/
+#define VEN_S_ENOTIMPL	(VEN_S_BASE+14)/*Command not implemented.*/
+#define VEN_S_ENOTPMEM	(VEN_S_BASE+15)/*Buffer is not from PMEM*/
+#define VEN_S_EFLUSHED	(VEN_S_BASE+16)/*returned buffer was flushed*/
+#define VEN_S_EINSUFBUF	(VEN_S_BASE+17)/*provided buffer size insufficient*/
+#define VEN_S_ESAMESTATE	(VEN_S_BASE+18)
+#define VEN_S_EINVALTRANS	(VEN_S_BASE+19)
+
+#define VEN_INTF_VER			 1
+
+/*Asynchronous messages from driver*/
+#define VEN_MSG_INDICATION	0
+#define VEN_MSG_INPUT_BUFFER_DONE	1
+#define VEN_MSG_OUTPUT_BUFFER_DONE	2
+#define VEN_MSG_NEED_OUTPUT_BUFFER	3
+#define VEN_MSG_FLUSH_INPUT_DONE	4
+#define VEN_MSG_FLUSH_OUTPUT_DONE	5
+#define VEN_MSG_START	6
+#define VEN_MSG_STOP	7
+#define VEN_MSG_PAUSE	8
+#define VEN_MSG_RESUME	9
+#define VEN_MSG_STOP_READING_MSG	10
+#define VEN_MSG_LTRUSE_FAILED	    11
+#define VEN_MSG_HW_OVERLOAD	12
+#define VEN_MSG_MAX_CLIENTS	13
+
+
+/*Buffer flags bits masks*/
+#define VEN_BUFFLAG_EOS	0x00000001
+#define VEN_BUFFLAG_ENDOFFRAME	0x00000010
+#define VEN_BUFFLAG_SYNCFRAME	0x00000020
+#define VEN_BUFFLAG_EXTRADATA	0x00000040
+#define VEN_BUFFLAG_CODECCONFIG	0x00000080
+
+/*Post processing flags bit masks*/
+#define VEN_EXTRADATA_NONE          0x001
+#define VEN_EXTRADATA_QCOMFILLER    0x002
+#define VEN_EXTRADATA_SLICEINFO     0x100
+#define VEN_EXTRADATA_LTRINFO       0x200
+#define VEN_EXTRADATA_MBINFO        0x400
+
+/*ENCODER CONFIGURATION CONSTANTS*/
+
+/*Encoded video frame types*/
+#define VEN_FRAME_TYPE_I	1/* I frame type */
+#define VEN_FRAME_TYPE_P	2/* P frame type */
+#define VEN_FRAME_TYPE_B	3/* B frame type */
+
+/*Video codec types*/
+#define VEN_CODEC_MPEG4	1/* MPEG4 Codec */
+#define VEN_CODEC_H264	2/* H.264 Codec */
+#define VEN_CODEC_H263	3/* H.263 Codec */
+
+/*Video codec profile types.*/
+#define VEN_PROFILE_MPEG4_SP      1/* 1 - MPEG4 SP profile      */
+#define VEN_PROFILE_MPEG4_ASP     2/* 2 - MPEG4 ASP profile     */
+#define VEN_PROFILE_H264_BASELINE 3/* 3 - H264 Baseline profile	*/
+#define VEN_PROFILE_H264_MAIN     4/* 4 - H264 Main profile     */
+#define VEN_PROFILE_H264_HIGH     5/* 5 - H264 High profile     */
+#define VEN_PROFILE_H263_BASELINE 6/* 6 - H263 Baseline profile */
+
+/*Video codec profile level types.*/
+#define VEN_LEVEL_MPEG4_0	 0x1/* MPEG4 Level 0  */
+#define VEN_LEVEL_MPEG4_1	 0x2/* MPEG4 Level 1  */
+#define VEN_LEVEL_MPEG4_2	 0x3/* MPEG4 Level 2  */
+#define VEN_LEVEL_MPEG4_3	 0x4/* MPEG4 Level 3  */
+#define VEN_LEVEL_MPEG4_4	 0x5/* MPEG4 Level 4  */
+#define VEN_LEVEL_MPEG4_5	 0x6/* MPEG4 Level 5  */
+#define VEN_LEVEL_MPEG4_3b	 0x7/* MPEG4 Level 3b */
+#define VEN_LEVEL_MPEG4_6	 0x8/* MPEG4 Level 6  */
+
+#define VEN_LEVEL_H264_1	 0x9/* H.264 Level 1   */
+#define VEN_LEVEL_H264_1b        0xA/* H.264 Level 1b  */
+#define VEN_LEVEL_H264_1p1	 0xB/* H.264 Level 1.1 */
+#define VEN_LEVEL_H264_1p2	 0xC/* H.264 Level 1.2 */
+#define VEN_LEVEL_H264_1p3	 0xD/* H.264 Level 1.3 */
+#define VEN_LEVEL_H264_2	 0xE/* H.264 Level 2   */
+#define VEN_LEVEL_H264_2p1	 0xF/* H.264 Level 2.1 */
+#define VEN_LEVEL_H264_2p2	0x10/* H.264 Level 2.2 */
+#define VEN_LEVEL_H264_3	0x11/* H.264 Level 3   */
+#define VEN_LEVEL_H264_3p1	0x12/* H.264 Level 3.1 */
+#define VEN_LEVEL_H264_3p2	0x13/* H.264 Level 3.2 */
+#define VEN_LEVEL_H264_4	0x14/* H.264 Level 4   */
+
+#define VEN_LEVEL_H263_10	0x15/* H.263 Level 10  */
+#define VEN_LEVEL_H263_20	0x16/* H.263 Level 20  */
+#define VEN_LEVEL_H263_30	0x17/* H.263 Level 30  */
+#define VEN_LEVEL_H263_40	0x18/* H.263 Level 40  */
+#define VEN_LEVEL_H263_45	0x19/* H.263 Level 45  */
+#define VEN_LEVEL_H263_50	0x1A/* H.263 Level 50  */
+#define VEN_LEVEL_H263_60	0x1B/* H.263 Level 60  */
+#define VEN_LEVEL_H263_70	0x1C/* H.263 Level 70  */
+
+/*Entropy coding model selection for H.264 encoder.*/
+#define VEN_ENTROPY_MODEL_CAVLC	1
+#define VEN_ENTROPY_MODEL_CABAC	2
+/*Cabac model number (0,1,2) for encoder.*/
+#define VEN_CABAC_MODEL_0	1/* CABAC Model 0. */
+#define VEN_CABAC_MODEL_1	2/* CABAC Model 1. */
+#define VEN_CABAC_MODEL_2	3/* CABAC Model 2. */
+
+/*Deblocking filter control type for encoder.*/
+#define VEN_DB_DISABLE	1/* 1 - Disable deblocking filter*/
+#define VEN_DB_ALL_BLKG_BNDRY	2/* 2 - All blocking boundary filtering*/
+#define VEN_DB_SKIP_SLICE_BNDRY	3/* 3 - Filtering except slice boundary*/
+
+/*Different methods of Multi slice selection.*/
+#define VEN_MSLICE_OFF	1
+#define VEN_MSLICE_CNT_MB	2 /*number of MBs per slice*/
+#define VEN_MSLICE_CNT_BYTE	3 /*number of bytes per slice.*/
+#define VEN_MSLICE_GOB	4 /*Multi slice by GOB for H.263 only.*/
+
+/*Different modes for Rate Control.*/
+#define VEN_RC_OFF	1
+#define VEN_RC_VBR_VFR	2
+#define VEN_RC_VBR_CFR	3
+#define VEN_RC_CBR_VFR	4
+#define VEN_RC_CBR_CFR	5
+
+/*Different modes for flushing buffers*/
+#define VEN_FLUSH_INPUT	1
+#define VEN_FLUSH_OUTPUT	2
+#define VEN_FLUSH_ALL	3
+
+/*Different input formats for YUV data.*/
+#define VEN_INPUTFMT_NV12	1/* NV12 Linear */
+#define VEN_INPUTFMT_NV21	2/* NV21 Linear */
+#define VEN_INPUTFMT_NV12_16M2KA	3/* NV12 Linear */
+
+/*Different allowed rotation modes.*/
+#define VEN_ROTATION_0	1/* 0 degrees */
+#define VEN_ROTATION_90	2/* 90 degrees */
+#define VEN_ROTATION_180	3/* 180 degrees */
+#define VEN_ROTATION_270	4/* 270 degrees */
+
+/*IOCTL timeout values*/
+#define VEN_TIMEOUT_INFINITE	0xffffffff
+
+/*Different allowed intra refresh modes.*/
+#define VEN_IR_OFF	1
+#define VEN_IR_CYCLIC	2
+#define VEN_IR_RANDOM	3
+
+/*IOCTL BASE CODES Not to be used directly by the client.*/
+/* Base value for ioctls that are not related to encoder configuration.*/
+#define VEN_IOCTLBASE_NENC	0x800
+/* Base value for encoder configuration ioctls*/
+#define VEN_IOCTLBASE_ENC	0x850
+
+struct venc_ioctl_msg {
+	void __user *in;
+	void __user *out;
+};
+
+/*NON ENCODER CONFIGURATION IOCTLs*/
+
+/*IOCTL params:SET: InputData - unsigned long, OutputData - NULL*/
+#define VEN_IOCTL_SET_INTF_VERSION \
+	_IOW(VEN_IOCTLBASE_NENC, 0, struct venc_ioctl_msg)
+
+/*IOCTL params:CMD: InputData - venc_timeout, OutputData - venc_msg*/
+#define VEN_IOCTL_CMD_READ_NEXT_MSG \
+	_IOWR(VEN_IOCTLBASE_NENC, 1, struct venc_ioctl_msg)
+
+/*IOCTL params:CMD: InputData - NULL, OutputData - NULL*/
+#define VEN_IOCTL_CMD_STOP_READ_MSG	_IO(VEN_IOCTLBASE_NENC, 2)
+
+/*
+ * IOCTL params:SET: InputData - venc_allocatorproperty, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_allocatorproperty
+ */
+#define VEN_IOCTL_SET_INPUT_BUFFER_REQ \
+	_IOW(VEN_IOCTLBASE_NENC, 3, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_INPUT_BUFFER_REQ \
+	_IOR(VEN_IOCTLBASE_NENC, 4, struct venc_ioctl_msg)
+
+/*IOCTL params:CMD: InputData - venc_bufferpayload, OutputData - NULL*/
+#define VEN_IOCTL_CMD_ALLOC_INPUT_BUFFER \
+	_IOW(VEN_IOCTLBASE_NENC, 5, struct venc_ioctl_msg)
+
+/*IOCTL params:CMD: InputData - venc_bufferpayload, OutputData - NULL*/
+#define VEN_IOCTL_SET_INPUT_BUFFER \
+	_IOW(VEN_IOCTLBASE_NENC, 6, struct venc_ioctl_msg)
+
+/*IOCTL params: CMD: InputData - venc_bufferpayload, OutputData - NULL*/
+#define VEN_IOCTL_CMD_FREE_INPUT_BUFFER \
+	_IOW(VEN_IOCTLBASE_NENC, 7, struct venc_ioctl_msg)
+
+/*
+ * IOCTL params:SET: InputData - venc_allocatorproperty, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_allocatorproperty
+ */
+#define VEN_IOCTL_SET_OUTPUT_BUFFER_REQ \
+	_IOW(VEN_IOCTLBASE_NENC, 8, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_OUTPUT_BUFFER_REQ \
+	_IOR(VEN_IOCTLBASE_NENC, 9, struct venc_ioctl_msg)
+
+/*IOCTL params:CMD: InputData - venc_bufferpayload, OutputData - NULL*/
+#define VEN_IOCTL_CMD_ALLOC_OUTPUT_BUFFER \
+	_IOW(VEN_IOCTLBASE_NENC, 10, struct venc_ioctl_msg)
+
+
+/*IOCTL params:CMD: InputData - venc_bufferpayload, OutputData - NULL*/
+#define VEN_IOCTL_SET_OUTPUT_BUFFER \
+	_IOW(VEN_IOCTLBASE_NENC, 11, struct venc_ioctl_msg)
+
+/*IOCTL params:CMD: InputData - venc_bufferpayload, OutputData - NULL.*/
+#define VEN_IOCTL_CMD_FREE_OUTPUT_BUFFER \
+	_IOW(VEN_IOCTLBASE_NENC, 12, struct venc_ioctl_msg)
+
+
+/* Asynchronous response message code: VEN_MSG_START */
+#define VEN_IOCTL_CMD_START	_IO(VEN_IOCTLBASE_NENC, 13)
+
+
+/*
+ * IOCTL params:CMD: InputData - venc_buffer, OutputData - NULL
+ * Asynchronous response message code: VEN_MSG_INPUT_BUFFER_DONE
+ */
+#define VEN_IOCTL_CMD_ENCODE_FRAME \
+	_IOW(VEN_IOCTLBASE_NENC, 14, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params: CMD: InputData - venc_buffer, OutputData - NULL
+ * Asynchronous response message code: VEN_MSG_OUTPUT_BUFFER_DONE
+ */
+#define VEN_IOCTL_CMD_FILL_OUTPUT_BUFFER \
+	_IOW(VEN_IOCTLBASE_NENC, 15, struct venc_ioctl_msg)
+
+/*
+ * IOCTL params:CMD: InputData - venc_bufferflush, OutputData - NULL
+ * Asynchronous response message code:VEN_MSG_INPUT_BUFFER_DONE
+ */
+#define VEN_IOCTL_CMD_FLUSH \
+	_IOW(VEN_IOCTLBASE_NENC, 16, struct venc_ioctl_msg)
+
+
+/* Asynchronous response message code: VEN_MSG_PAUSE */
+#define VEN_IOCTL_CMD_PAUSE	_IO(VEN_IOCTLBASE_NENC, 17)
+
+/* Asynchronous response message code: VEN_MSG_RESUME */
+#define VEN_IOCTL_CMD_RESUME _IO(VEN_IOCTLBASE_NENC, 18)
+
+/* Asynchronous response message code: VEN_MSG_STOP */
+#define VEN_IOCTL_CMD_STOP _IO(VEN_IOCTLBASE_NENC, 19)
+
+#define VEN_IOCTL_SET_RECON_BUFFER \
+	_IOW(VEN_IOCTLBASE_NENC, 20, struct venc_ioctl_msg)
+
+#define VEN_IOCTL_FREE_RECON_BUFFER \
+	_IOW(VEN_IOCTLBASE_NENC, 21, struct venc_ioctl_msg)
+
+#define VEN_IOCTL_GET_RECON_BUFFER_SIZE \
+	_IOW(VEN_IOCTLBASE_NENC, 22, struct venc_ioctl_msg)
+
+
+
+/*ENCODER PROPERTY CONFIGURATION & CAPABILITY IOCTLs*/
+
+/*
+ * IOCTL params:SET: InputData - venc_basecfg, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_basecfg
+ */
+#define VEN_IOCTL_SET_BASE_CFG \
+	_IOW(VEN_IOCTLBASE_ENC, 1, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_BASE_CFG \
+	_IOR(VEN_IOCTLBASE_ENC, 2, struct venc_ioctl_msg)
+
+/*
+ * IOCTL params:SET: InputData - venc_switch, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_switch
+ */
+#define VEN_IOCTL_SET_LIVE_MODE \
+	_IOW(VEN_IOCTLBASE_ENC, 3, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_LIVE_MODE \
+	_IOR(VEN_IOCTLBASE_ENC, 4, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_profile, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_profile
+ */
+#define VEN_IOCTL_SET_CODEC_PROFILE \
+	_IOW(VEN_IOCTLBASE_ENC, 5, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_CODEC_PROFILE \
+	_IOR(VEN_IOCTLBASE_ENC, 6, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - ven_profilelevel, OutputData - NULL
+ * GET: InputData - NULL, OutputData - ven_profilelevel
+ */
+#define VEN_IOCTL_SET_PROFILE_LEVEL \
+	_IOW(VEN_IOCTLBASE_ENC, 7, struct venc_ioctl_msg)
+
+#define VEN_IOCTL_GET_PROFILE_LEVEL \
+	_IOR(VEN_IOCTLBASE_ENC, 8, struct venc_ioctl_msg)
+
+/*
+ * IOCTL params:SET: InputData - venc_switch, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_switch
+ */
+#define VEN_IOCTL_SET_SHORT_HDR \
+	_IOW(VEN_IOCTLBASE_ENC, 9, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_SHORT_HDR \
+	_IOR(VEN_IOCTLBASE_ENC, 10, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params: SET: InputData - venc_sessionqp, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_sessionqp
+ */
+#define VEN_IOCTL_SET_SESSION_QP \
+	_IOW(VEN_IOCTLBASE_ENC, 11, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_SESSION_QP \
+	_IOR(VEN_IOCTLBASE_ENC, 12, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_intraperiod, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_intraperiod
+ */
+#define VEN_IOCTL_SET_INTRA_PERIOD \
+	_IOW(VEN_IOCTLBASE_ENC, 13, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_INTRA_PERIOD \
+	_IOR(VEN_IOCTLBASE_ENC, 14, struct venc_ioctl_msg)
+
+
+/* Request an Iframe*/
+#define VEN_IOCTL_CMD_REQUEST_IFRAME _IO(VEN_IOCTLBASE_ENC, 15)
+
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_capability*/
+#define VEN_IOCTL_GET_CAPABILITY \
+	_IOR(VEN_IOCTLBASE_ENC, 16, struct venc_ioctl_msg)
+
+
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_seqheader*/
+#define VEN_IOCTL_GET_SEQUENCE_HDR \
+	_IOR(VEN_IOCTLBASE_ENC, 17, struct venc_ioctl_msg)
+
+/*
+ * IOCTL params:SET: InputData - venc_entropycfg, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_entropycfg
+ */
+#define VEN_IOCTL_SET_ENTROPY_CFG \
+	_IOW(VEN_IOCTLBASE_ENC, 18, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_ENTROPY_CFG \
+	_IOR(VEN_IOCTLBASE_ENC, 19, struct venc_ioctl_msg)
+
+/*
+ * IOCTL params:SET: InputData - venc_dbcfg, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_dbcfg
+ */
+#define VEN_IOCTL_SET_DEBLOCKING_CFG \
+	_IOW(VEN_IOCTLBASE_ENC, 20, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_DEBLOCKING_CFG \
+	_IOR(VEN_IOCTLBASE_ENC, 21, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_intrarefresh, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_intrarefresh
+ */
+#define VEN_IOCTL_SET_INTRA_REFRESH \
+	_IOW(VEN_IOCTLBASE_ENC, 22, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_INTRA_REFRESH \
+	_IOR(VEN_IOCTLBASE_ENC, 23, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_multiclicecfg, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_multiclicecfg
+ */
+#define VEN_IOCTL_SET_MULTI_SLICE_CFG \
+	_IOW(VEN_IOCTLBASE_ENC, 24, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_MULTI_SLICE_CFG \
+	_IOR(VEN_IOCTLBASE_ENC, 25, struct venc_ioctl_msg)
+
+/*
+ * IOCTL params:SET: InputData - venc_ratectrlcfg, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_ratectrlcfg
+ */
+#define VEN_IOCTL_SET_RATE_CTRL_CFG \
+	_IOW(VEN_IOCTLBASE_ENC, 26, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_RATE_CTRL_CFG \
+	_IOR(VEN_IOCTLBASE_ENC, 27, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_voptimingcfg, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_voptimingcfg
+ */
+#define VEN_IOCTL_SET_VOP_TIMING_CFG \
+	_IOW(VEN_IOCTLBASE_ENC, 28, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_VOP_TIMING_CFG \
+	_IOR(VEN_IOCTLBASE_ENC, 29, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_framerate, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_framerate
+ */
+#define VEN_IOCTL_SET_FRAME_RATE \
+	_IOW(VEN_IOCTLBASE_ENC, 30, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_FRAME_RATE \
+	_IOR(VEN_IOCTLBASE_ENC, 31, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_targetbitrate, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_targetbitrate
+ */
+#define VEN_IOCTL_SET_TARGET_BITRATE \
+	_IOW(VEN_IOCTLBASE_ENC, 32, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_TARGET_BITRATE \
+	_IOR(VEN_IOCTLBASE_ENC, 33, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_rotation, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_rotation
+ */
+#define VEN_IOCTL_SET_ROTATION \
+	_IOW(VEN_IOCTLBASE_ENC, 34, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_ROTATION \
+	_IOR(VEN_IOCTLBASE_ENC, 35, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_headerextension, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_headerextension
+ */
+#define VEN_IOCTL_SET_HEC \
+	_IOW(VEN_IOCTLBASE_ENC, 36, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_HEC \
+	_IOR(VEN_IOCTLBASE_ENC, 37, struct venc_ioctl_msg)
+
+/*
+ * IOCTL params:SET: InputData - venc_switch, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_switch
+ */
+#define VEN_IOCTL_SET_DATA_PARTITION \
+	_IOW(VEN_IOCTLBASE_ENC, 38, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_DATA_PARTITION \
+	_IOR(VEN_IOCTLBASE_ENC, 39, struct venc_ioctl_msg)
+
+/*
+ * IOCTL params:SET: InputData - venc_switch, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_switch
+ */
+#define VEN_IOCTL_SET_RVLC \
+	_IOW(VEN_IOCTLBASE_ENC, 40, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_RVLC \
+	_IOR(VEN_IOCTLBASE_ENC, 41, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_switch, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_switch
+ */
+#define VEN_IOCTL_SET_AC_PREDICTION \
+	_IOW(VEN_IOCTLBASE_ENC, 42, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_AC_PREDICTION \
+	_IOR(VEN_IOCTLBASE_ENC, 43, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_qprange, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_qprange
+ */
+#define VEN_IOCTL_SET_QP_RANGE \
+	_IOW(VEN_IOCTLBASE_ENC, 44, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_QP_RANGE \
+	_IOR(VEN_IOCTLBASE_ENC, 45, struct venc_ioctl_msg)
+
+#define VEN_IOCTL_GET_NUMBER_INSTANCES \
+	_IOR(VEN_IOCTLBASE_ENC, 46, struct venc_ioctl_msg)
+
+#define VEN_IOCTL_SET_METABUFFER_MODE \
+	_IOW(VEN_IOCTLBASE_ENC, 47, struct venc_ioctl_msg)
+
+
+/*IOCTL params:SET: InputData - unsigned int, OutputData - NULL.*/
+#define VEN_IOCTL_SET_EXTRADATA \
+	_IOW(VEN_IOCTLBASE_ENC, 48, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - unsigned int.*/
+#define VEN_IOCTL_GET_EXTRADATA \
+	_IOR(VEN_IOCTLBASE_ENC, 49, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - NULL, OutputData - NULL.*/
+#define VEN_IOCTL_SET_SLICE_DELIVERY_MODE \
+	_IO(VEN_IOCTLBASE_ENC, 50)
+
+#define VEN_IOCTL_SET_H263_PLUSPTYPE \
+	_IOW(VEN_IOCTLBASE_ENC, 51, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_range, OutputData - NULL.*/
+#define VEN_IOCTL_SET_CAPABILITY_LTRCOUNT \
+	_IOW(VEN_IOCTLBASE_ENC, 52, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_range.*/
+#define VEN_IOCTL_GET_CAPABILITY_LTRCOUNT \
+	_IOR(VEN_IOCTLBASE_ENC, 53, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_ltrmode, OutputData - NULL.*/
+#define VEN_IOCTL_SET_LTRMODE \
+	_IOW(VEN_IOCTLBASE_ENC, 54, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_ltrmode.*/
+#define VEN_IOCTL_GET_LTRMODE \
+	_IOR(VEN_IOCTLBASE_ENC, 55, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_ltrcount, OutputData - NULL.*/
+#define VEN_IOCTL_SET_LTRCOUNT \
+	_IOW(VEN_IOCTLBASE_ENC, 56, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_ltrcount.*/
+#define VEN_IOCTL_GET_LTRCOUNT \
+	_IOR(VEN_IOCTLBASE_ENC, 57, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_ltrperiod, OutputData - NULL.*/
+#define VEN_IOCTL_SET_LTRPERIOD \
+	_IOW(VEN_IOCTLBASE_ENC, 58, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_ltrperiod.*/
+#define VEN_IOCTL_GET_LTRPERIOD \
+	_IOR(VEN_IOCTLBASE_ENC, 59, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_ltruse, OutputData - NULL.*/
+#define VEN_IOCTL_SET_LTRUSE \
+	_IOW(VEN_IOCTLBASE_ENC, 60, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_ltruse.*/
+#define VEN_IOCTL_GET_LTRUSE \
+	_IOR(VEN_IOCTLBASE_ENC, 61, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_ltrmark, OutputData - NULL.*/
+#define VEN_IOCTL_SET_LTRMARK \
+	_IOW(VEN_IOCTLBASE_ENC, 62, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_ltrmark.*/
+#define VEN_IOCTL_GET_LTRMARK \
+	_IOR(VEN_IOCTLBASE_ENC, 63, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - unsigned int, OutputData - NULL*/
+#define VEN_IOCTL_SET_SPS_PPS_FOR_IDR \
+	_IOW(VEN_IOCTLBASE_ENC, 64, struct venc_ioctl_msg)
+
+struct venc_range {
+	unsigned long	max;
+	unsigned long	min;
+	unsigned long	step_size;
+};
+
+struct venc_switch {
+	unsigned char	status;
+};
+
+struct venc_allocatorproperty {
+	unsigned long	 mincount;
+	unsigned long	 maxcount;
+	unsigned long	 actualcount;
+	unsigned long	 datasize;
+	unsigned long	 suffixsize;
+	unsigned long	 alignment;
+	unsigned long	 bufpoolid;
+};
+
+struct venc_bufferpayload {
+	unsigned char *pbuffer;
+	size_t	sz;
+	int	fd;
+	unsigned int	offset;
+	unsigned int	maped_size;
+	unsigned long	filled_len;
+};
+
+struct venc_buffer {
+	unsigned char *ptrbuffer;
+	unsigned long	sz;
+	unsigned long	len;
+	unsigned long	offset;
+	long long	timestamp;
+	unsigned long	flags;
+	void	*clientdata;
+};
+
+struct venc_basecfg {
+	unsigned long	input_width;
+	unsigned long	input_height;
+	unsigned long	dvs_width;
+	unsigned long	dvs_height;
+	unsigned long	codectype;
+	unsigned long	fps_num;
+	unsigned long	fps_den;
+	unsigned long	targetbitrate;
+	unsigned long	inputformat;
+};
+
+struct venc_profile {
+	unsigned long	profile;
+};
+struct ven_profilelevel {
+	unsigned long	level;
+};
+
+struct venc_sessionqp {
+	unsigned long	iframeqp;
+	unsigned long	pframqp;
+};
+
+struct venc_qprange {
+	unsigned long	maxqp;
+	unsigned long	minqp;
+};
+
+struct venc_plusptype {
+	unsigned long	plusptype_enable;
+};
+
+struct venc_intraperiod {
+	unsigned long	num_pframes;
+	unsigned long	num_bframes;
+};
+struct venc_seqheader {
+	unsigned char *hdrbufptr;
+	unsigned long	bufsize;
+	unsigned long	hdrlen;
+};
+
+struct venc_capability {
+	unsigned long	codec_types;
+	unsigned long	maxframe_width;
+	unsigned long	maxframe_height;
+	unsigned long	maxtarget_bitrate;
+	unsigned long	maxframe_rate;
+	unsigned long	input_formats;
+	unsigned char	dvs;
+};
+
+struct venc_entropycfg {
+	unsigned int longentropysel;
+	unsigned long	cabacmodel;
+};
+
+struct venc_dbcfg {
+	unsigned long	db_mode;
+	unsigned long	slicealpha_offset;
+	unsigned long	slicebeta_offset;
+};
+
+struct venc_intrarefresh {
+	unsigned long	irmode;
+	unsigned long	mbcount;
+};
+
+struct venc_multiclicecfg {
+	unsigned long	mslice_mode;
+	unsigned long	mslice_size;
+};
+
+struct venc_bufferflush {
+	unsigned long	flush_mode;
+};
+
+struct venc_ratectrlcfg {
+	unsigned long	rcmode;
+};
+
+struct	venc_voptimingcfg {
+	unsigned long	voptime_resolution;
+};
+struct venc_framerate {
+	unsigned long	fps_denominator;
+	unsigned long	fps_numerator;
+};
+
+struct venc_targetbitrate {
+	unsigned long	target_bitrate;
+};
+
+
+struct venc_rotation {
+	unsigned long	rotation;
+};
+
+struct venc_timeout {
+	 unsigned long	millisec;
+};
+
+struct venc_headerextension {
+	 unsigned long	header_extension;
+};
+
+struct venc_msg {
+	unsigned long	statuscode;
+	unsigned long	msgcode;
+	struct venc_buffer	buf;
+	unsigned long	msgdata_size;
+};
+
+struct venc_recon_addr {
+	unsigned char *pbuffer;
+	unsigned long buffer_size;
+	unsigned long pmem_fd;
+	unsigned long offset;
+};
+
+struct venc_recon_buff_size {
+	int width;
+	int height;
+	int size;
+	int alignment;
+};
+
+struct venc_ltrmode {
+	unsigned long   ltr_mode;
+};
+
+struct venc_ltrcount {
+	unsigned long   ltr_count;
+};
+
+struct venc_ltrperiod {
+	unsigned long   ltr_period;
+};
+
+struct venc_ltruse {
+	unsigned long   ltr_id;
+	unsigned long   ltr_frames;
+};
+
+#endif /* _UAPI_MSM_VIDC_ENC_H_ */
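
For reference, a minimal userspace sketch of driving one of the encoder property IOCTLs above. It assumes the venc_ioctl_msg wrapper declared earlier in this header exposes in/out user pointers (its layout is not shown in this hunk), that the header installs as <linux/msm_vidc_enc.h>, and that the encoder device node is target-specific; the configuration values are placeholders.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/msm_vidc_enc.h>

static int venc_set_base_cfg(int enc_fd)
{
	struct venc_basecfg cfg;
	struct venc_ioctl_msg msg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.input_width = 1280;		/* placeholder configuration */
	cfg.input_height = 720;
	cfg.fps_num = 30;
	cfg.fps_den = 1;
	cfg.targetbitrate = 4000000;

	msg.in = &cfg;			/* SET: InputData - venc_basecfg */
	msg.out = NULL;			/* SET: OutputData - NULL */
	return ioctl(enc_fd, VEN_IOCTL_SET_BASE_CFG, &msg);
}
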
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 9399a35..20a01ca 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -2603,6 +2603,8 @@
 #define NL80211_ATTR_KEYS NL80211_ATTR_KEYS
 #define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
 
+#define NL80211_WIPHY_NAME_MAXLEN		128
+
 #define NL80211_MAX_SUPP_RATES			32
 #define NL80211_MAX_SUPP_HT_RATES		77
 #define NL80211_MAX_SUPP_REG_RULES		64
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 2c5810a..b67cfd8 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -106,7 +106,7 @@
 #define PCI_SUBSYSTEM_ID	0x2e
 #define PCI_ROM_ADDRESS		0x30	/* Bits 31..11 are address, 10..1 reserved */
 #define  PCI_ROM_ADDRESS_ENABLE	0x01
-#define PCI_ROM_ADDRESS_MASK	(~0x7ffUL)
+#define PCI_ROM_ADDRESS_MASK	(~0x7ffU)
 
 #define PCI_CAPABILITY_LIST	0x34	/* Offset of first capability list entry */
 
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index c1af9b3..f0320a0 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -203,6 +203,18 @@
  */
 #define PR_SET_TIMERSLACK_PID	127
 
+/* Per task speculation control */
+#define PR_GET_SPECULATION_CTRL		52
+#define PR_SET_SPECULATION_CTRL		53
+/* Speculation control variants */
+# define PR_SPEC_STORE_BYPASS		0
+/* Return and control values for PR_SET/GET_SPECULATION_CTRL */
+# define PR_SPEC_NOT_AFFECTED		0
+# define PR_SPEC_PRCTL			(1UL << 0)
+# define PR_SPEC_ENABLE			(1UL << 1)
+# define PR_SPEC_DISABLE		(1UL << 2)
+# define PR_SPEC_FORCE_DISABLE		(1UL << 3)
+
 #define PR_SET_VMA		0x53564d41
 # define PR_SET_VMA_ANON_NAME		0
 
diff --git a/include/uapi/linux/psci.h b/include/uapi/linux/psci.h
index 3d7a0fc..39930ca 100644
--- a/include/uapi/linux/psci.h
+++ b/include/uapi/linux/psci.h
@@ -87,6 +87,9 @@
 		(((ver) & PSCI_VERSION_MAJOR_MASK) >> PSCI_VERSION_MAJOR_SHIFT)
 #define PSCI_VERSION_MINOR(ver)			\
 		((ver) & PSCI_VERSION_MINOR_MASK)
+#define PSCI_VERSION(maj, min)						\
+	((((maj) << PSCI_VERSION_MAJOR_SHIFT) & PSCI_VERSION_MAJOR_MASK) | \
+	 ((min) & PSCI_VERSION_MINOR_MASK))
 
 /* PSCI features decoding (>=1.0) */
 #define PSCI_1_0_FEATURES_CPU_SUSPEND_PF_SHIFT	1
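
A short sketch showing that the new PSCI_VERSION() helper is the inverse of the existing decode macros; the include path assumes the exported uapi header.

#include <linux/psci.h>

static int psci_version_roundtrip(void)
{
	unsigned int ver = PSCI_VERSION(1, 0);	/* compose a v1.0 version word */

	return PSCI_VERSION_MAJOR(ver) == 1 && PSCI_VERSION_MINOR(ver) == 0;
}
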
diff --git a/include/uapi/linux/qg-profile.h b/include/uapi/linux/qg-profile.h
index bffddbb..0230b32 100644
--- a/include/uapi/linux/qg-profile.h
+++ b/include/uapi/linux/qg-profile.h
@@ -55,6 +55,8 @@
 #define QG_MAX_FCC_MAH				16000
 #define QG_MIN_SLOPE				1
 #define QG_MAX_SLOPE				50000
+#define QG_ESR_SF_MIN				5000
+#define QG_ESR_SF_MAX				20000
 
 /*  IOCTLs to query battery profile data */
 #define BPIOCXSOC	_IOWR('B', 0x01, struct battery_params) /* SOC */
diff --git a/include/uapi/linux/qg.h b/include/uapi/linux/qg.h
index 2c7b49a..40882a7 100644
--- a/include/uapi/linux/qg.h
+++ b/include/uapi/linux/qg.h
@@ -14,12 +14,12 @@
 	QG_FIFO_TIME_DELTA,
 	QG_BATT_SOC,
 	QG_CC_SOC,
-	QG_RESERVED_3,
-	QG_RESERVED_4,
-	QG_RESERVED_5,
-	QG_RESERVED_6,
-	QG_RESERVED_7,
-	QG_RESERVED_8,
+	QG_ESR_CHARGE_DELTA,
+	QG_ESR_DISCHARGE_DELTA,
+	QG_ESR_CHARGE_SF,
+	QG_ESR_DISCHARGE_SF,
+	QG_FULL_SOC,
+	QG_CLEAR_LEARNT_DATA,
 	QG_RESERVED_9,
 	QG_RESERVED_10,
 	QG_MAX,
@@ -27,6 +27,12 @@
 
 #define QG_BATT_SOC QG_BATT_SOC
 #define QG_CC_SOC QG_CC_SOC
+#define QG_ESR_CHARGE_DELTA QG_ESR_CHARGE_DELTA
+#define QG_ESR_DISCHARGE_DELTA QG_ESR_DISCHARGE_DELTA
+#define QG_ESR_CHARGE_SF QG_ESR_CHARGE_SF
+#define QG_ESR_DISCHARGE_SF QG_ESR_DISCHARGE_SF
+#define QG_FULL_SOC QG_FULL_SOC
+#define QG_CLEAR_LEARNT_DATA QG_CLEAR_LEARNT_DATA
 
 struct fifo_data {
 	unsigned int			v;
diff --git a/include/uapi/linux/qseecom.h b/include/uapi/linux/qseecom.h
index f0a26b2..6de4c76 100644
--- a/include/uapi/linux/qseecom.h
+++ b/include/uapi/linux/qseecom.h
@@ -281,13 +281,6 @@
 	int flag;
 };
 
-struct qseecom_encdec_conf_t {
-	__le64 start_sector;
-	size_t fs_size;
-	int index;
-	int mode;
-};
-
 #define SG_ENTRY_SZ		sizeof(struct qseecom_sg_entry)
 #define SG_ENTRY_SZ_64BIT	sizeof(struct qseecom_sg_entry_64bit)
 
@@ -399,7 +392,4 @@
 #define QSEECOM_IOCTL_SET_ICE_INFO \
 	_IOWR(QSEECOM_IOC_MAGIC, 43, struct qseecom_ice_data_t)
 
-#define QSEECOM_IOCTL_SET_ENCDEC_INFO \
-	_IOWR(QSEECOM_IOC_MAGIC, 44, struct qseecom_encdec_conf_t)
-
 #endif /* _UAPI_QSEECOM_H_ */
diff --git a/include/uapi/linux/random.h b/include/uapi/linux/random.h
index 3f93d16..b455b0d 100644
--- a/include/uapi/linux/random.h
+++ b/include/uapi/linux/random.h
@@ -34,6 +34,9 @@
 /* Clear the entropy pool and associated counters.  (Superuser only.) */
 #define RNDCLEARPOOL	_IO( 'R', 0x06 )
 
+/* Reseed CRNG.  (Superuser only.) */
+#define RNDRESEEDCRNG	_IO( 'R', 0x07 )
+
 struct rand_pool_info {
 	int	entropy_count;
 	int	buf_size;
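
A minimal userspace sketch of the new reseed ioctl. It assumes /dev/urandom accepts the same ioctls as /dev/random; the call needs CAP_SYS_ADMIN, and older kernels reject it with EINVAL.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/random.h>

static int reseed_crng(void)
{
	int fd = open("/dev/urandom", O_RDONLY);
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, RNDRESEEDCRNG);		/* EPERM without CAP_SYS_ADMIN */
	close(fd);
	return ret;
}
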
diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h
index 0f238a4..e4acb61 100644
--- a/include/uapi/linux/seccomp.h
+++ b/include/uapi/linux/seccomp.h
@@ -15,7 +15,9 @@
 #define SECCOMP_SET_MODE_FILTER	1
 
 /* Valid flags for SECCOMP_SET_MODE_FILTER */
-#define SECCOMP_FILTER_FLAG_TSYNC	1
+#define SECCOMP_FILTER_FLAG_TSYNC	(1UL << 0)
+/* In v4.14+ SECCOMP_FILTER_FLAG_LOG is (1UL << 1) */
+#define SECCOMP_FILTER_FLAG_SPEC_ALLOW	(1UL << 2)
 
 /*
  * All BPF programs must return a 32-bit value.
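
A hedged userspace sketch of installing a filter with the new SECCOMP_FILTER_FLAG_SPEC_ALLOW flag, which opts the filtered tasks out of the implicit speculation mitigation. The raw syscall is used on the assumption that libc has no seccomp() wrapper; the allow-all filter is only a placeholder.

#include <stddef.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

static int install_filter_spec_allow(void)
{
	struct sock_filter insns[] = {
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};

	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return -1;
	/* do not force-disable speculative store bypass for these tasks */
	return syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
		       SECCOMP_FILTER_FLAG_SPEC_ALLOW, &prog);
}
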
diff --git a/include/uapi/media/msm_camsensor_sdk.h b/include/uapi/media/msm_camsensor_sdk.h
index 6d25967..63abd156 100644
--- a/include/uapi/media/msm_camsensor_sdk.h
+++ b/include/uapi/media/msm_camsensor_sdk.h
@@ -50,6 +50,8 @@
 
 #define MSM_SENSOR_BYPASS_VIDEO_NODE    1
 
+#define SENSOR_PROBE_WRITE
+
 enum msm_sensor_camera_id_t {
 	CAMERA_0,
 	CAMERA_1,
@@ -292,10 +294,19 @@
 	unsigned int            sensor_mount_angle;
 };
 
+struct msm_camera_i2c_reg_setting {
+	struct msm_camera_i2c_reg_array *reg_setting;
+	unsigned short size;
+	enum msm_camera_i2c_reg_addr_type addr_type;
+	enum msm_camera_i2c_data_type data_type;
+	unsigned short delay;
+};
+
 struct msm_sensor_id_info_t {
 	unsigned short sensor_id_reg_addr;
 	unsigned short sensor_id;
 	unsigned short sensor_id_mask;
+	struct msm_camera_i2c_reg_setting setting;
 };
 
 struct msm_camera_sensor_slave_info {
@@ -322,14 +333,6 @@
 	unsigned int delay;
 };
 
-struct msm_camera_i2c_reg_setting {
-	struct msm_camera_i2c_reg_array *reg_setting;
-	unsigned short size;
-	enum msm_camera_i2c_reg_addr_type addr_type;
-	enum msm_camera_i2c_data_type data_type;
-	unsigned short delay;
-};
-
 struct msm_camera_csid_vc_cfg {
 	unsigned char cid;
 	unsigned char dt;
diff --git a/include/uapi/media/msm_media_info.h b/include/uapi/media/msm_media_info.h
index 3fd0c88..796d6d2 100644
--- a/include/uapi/media/msm_media_info.h
+++ b/include/uapi/media/msm_media_info.h
@@ -1261,8 +1261,11 @@
 		size = MSM_MEDIA_ALIGN(size, 4096);
 
 		/* Additional size to cover last row of non-aligned frame */
-		size += MSM_MEDIA_ALIGN(width, w_alignment) * w_alignment;
-		size = MSM_MEDIA_ALIGN(size, 4096);
+		if (width >= 2400 && height >= 2400) {
+			size += MSM_MEDIA_ALIGN(width, w_alignment) *
+					w_alignment;
+			size = MSM_MEDIA_ALIGN(size, 4096);
+		}
 		break;
 	case COLOR_FMT_P010:
 		uv_alignment = 4096;
@@ -1302,8 +1305,11 @@
 		size = MSM_MEDIA_ALIGN(size, 4096);
 
 		/* Additional size to cover last row of non-aligned frame */
-		size += MSM_MEDIA_ALIGN(width, w_alignment) * w_alignment;
-		size = MSM_MEDIA_ALIGN(size, 4096);
+		if (width >= 2400 && height >= 2400) {
+			size += MSM_MEDIA_ALIGN(width, w_alignment) *
+					w_alignment;
+			size = MSM_MEDIA_ALIGN(size, 4096);
+		}
 		break;
 	case COLOR_FMT_NV12_BPP10_UBWC:
 		y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
diff --git a/include/uapi/media/msmb_isp.h b/include/uapi/media/msmb_isp.h
index 053fa76..74a8d93 100644
--- a/include/uapi/media/msmb_isp.h
+++ b/include/uapi/media/msmb_isp.h
@@ -18,6 +18,7 @@
 #define ISP1_BIT              (0x10000 << 2)
 #define ISP_META_CHANNEL_BIT  (0x10000 << 3)
 #define ISP_SCRATCH_BUF_BIT   (0x10000 << 4)
+#define ISP_PDAF_CHANNEL_BIT  (0x10000 << 5)
 #define ISP_OFFLINE_STATS_BIT (0x10000 << 5)
 #define ISP_SVHDR_IN_BIT      (0x10000 << 6) /* RDI hw stream for SVHDR */
 #define ISP_SVHDR_OUT_BIT     (0x10000 << 7) /* SVHDR output bufq stream*/
@@ -295,6 +296,11 @@
 	uint8_t rdi_cid;/*CID 1-16*/
 };
 
+enum msm_stream_memory_input_t {
+	MEMORY_INPUT_DISABLED,
+	MEMORY_INPUT_ENABLED
+};
+
 enum msm_stream_rdi_input_type {
 	MSM_CAMERA_RDI_MIN,
 	MSM_CAMERA_RDI_PDAF,
@@ -324,6 +330,29 @@
 	enum msm_stream_rdi_input_type rdi_input_type;
 };
 
+struct msm_vfe32_axi_stream_request_cmd {
+	uint32_t session_id;
+	uint32_t stream_id;
+	uint32_t vt_enable;
+	uint32_t output_format;/*Planar/RAW/Misc*/
+	enum msm_vfe_axi_stream_src stream_src; /*CAMIF/IDEAL/RDIs*/
+	struct msm_vfe_axi_plane_cfg plane_cfg[MAX_PLANES_PER_STREAM];
+
+	uint32_t burst_count;
+	uint32_t hfr_mode;
+	uint8_t frame_base;
+
+	uint32_t init_frame_drop; /*MAX 31 Frames*/
+	enum msm_vfe_frame_skip_pattern frame_skip_pattern;
+	uint8_t buf_divert; /* if TRUE no vb2 buf done. */
+	/*Return values*/
+	uint32_t axi_stream_handle;
+	uint32_t controllable_output;
+	uint32_t burst_len;
+	/* Flag indicating memory input stream */
+	enum msm_stream_memory_input_t memory_input;
+};
+
 struct msm_vfe_axi_stream_release_cmd {
 	uint32_t stream_handle;
 };
@@ -680,7 +709,9 @@
 	ISP_PING_PONG_MISMATCH = 12,
 	ISP_REG_UPDATE_MISSING = 13,
 	ISP_BUF_FATAL_ERROR = 14,
-	ISP_EVENT_MAX         = 15
+	ISP_EVENT_MAX         = 15,
+	ISP_WM_BUS_OVERFLOW = 16,
+	ISP_CAMIF_ERROR     = 17,
 };
 
 #define ISP_EVENT_OFFSET          8
@@ -710,6 +741,7 @@
 #define ISP_EVENT_REG_UPDATE_MISSING (ISP_EVENT_BASE + ISP_REG_UPDATE_MISSING)
 #define ISP_EVENT_BUF_FATAL_ERROR (ISP_EVENT_BASE + ISP_BUF_FATAL_ERROR)
 #define ISP_EVENT_STREAM_UPDATE_DONE   (ISP_STREAM_EVENT_BASE)
+#define ISP_EVENT_WM_BUS_OVERFLOW (ISP_EVENT_BASE + ISP_WM_BUS_OVERFLOW)
 
 /* The msm_v4l2_event_data structure should match the
  * v4l2_event.u.data field.
@@ -759,6 +791,11 @@
 	uint32_t stream_id_mask;
 };
 
+struct msm_isp32_error_info {
+	/* 1 << msm_isp_event_idx */
+	uint32_t error_mask;
+};
+
 /* This structure reports delta between master and slave */
 struct msm_isp_ms_delta_info {
 	uint8_t num_delta_info;
@@ -827,6 +864,25 @@
 	} u; /* union can have max 52 bytes */
 };
 
+struct msm_isp32_event_data {
+	/*Wall clock except for buffer divert events
+	 *which use monotonic clock
+	 */
+	struct timeval timestamp;
+	/* Monotonic timestamp since bootup */
+	struct timeval mono_timestamp;
+	enum msm_vfe_input_src input_intf;
+	uint32_t frame_id;
+	union {
+		/* Sent for Stats_Done event */
+		struct msm_isp_stats_event stats;
+		/* Sent for Buf_Divert event */
+		struct msm_isp_buf_event buf_done;
+		struct msm_isp32_error_info error_info;
+	} u; /* union can have max 52 bytes */
+	uint32_t is_skip_pproc;
+};
+
 enum msm_vfe_ahb_clk_vote {
 	MSM_ISP_CAMERA_AHB_SVS_VOTE = 1,
 	MSM_ISP_CAMERA_AHB_TURBO_VOTE = 2,
@@ -919,6 +975,7 @@
 	MSM_ISP_MAP_BUF_START_MULTI_PASS_FE,
 	MSM_ISP_REQUEST_BUF_VER2,
 	MSM_ISP_DUAL_HW_LPM_MODE,
+	MSM_ISP32_REQUEST_STREAM,
 };
 
 #define VIDIOC_MSM_VFE_REG_CFG \
@@ -941,6 +998,10 @@
 	_IOWR('V', MSM_ISP_REQUEST_STREAM, \
 		struct msm_vfe_axi_stream_request_cmd)
 
+#define VIDIOC_MSM_ISP32_REQUEST_STREAM \
+	_IOWR('V', MSM_ISP32_REQUEST_STREAM, \
+		struct msm_vfe32_axi_stream_request_cmd)
+
 #define VIDIOC_MSM_ISP_CFG_STREAM \
 	_IOWR('V', MSM_ISP_CFG_STREAM, \
 		struct msm_vfe_axi_stream_cfg_cmd)
@@ -1038,6 +1099,8 @@
 
 #define VIDIOC_MSM_ISP_REQUEST_BUF_VER2 \
 	_IOWR('V', MSM_ISP_REQUEST_BUF_VER2, struct msm_isp_buf_request_ver2)
+#define VIDIOC_MSM_ISP_BUF_DONE \
+	_IOWR('V', BASE_VIDIOC_PRIVATE+21, struct msm_isp32_event_data)
 
 #define VIDIOC_MSM_ISP_DUAL_HW_LPM_MODE \
 	_IOWR('V', MSM_ISP_DUAL_HW_LPM_MODE, \
diff --git a/init/Kconfig b/init/Kconfig
index 854bd5d..e3929edd 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -840,6 +840,16 @@
 		     13 =>  8 KB
 		     12 =>  4 KB
 
+config CONSOLE_FLUSH_ON_HOTPLUG
+	bool "Enable console flush configurable in hot plug code path"
+	depends on HOTPLUG_CPU
+	def_bool n
+	help
+	In the CPU hotplug path, acquiring and releasing the console lock
+	forces the console to flush. If the console lock is not free, hotplug
+	latency increases, so this option makes the console flush in that path
+	configurable; it defaults to disabled to reduce CPU hotplug latencies.
+
 config LOG_CPU_MAX_BUF_SHIFT
 	int "CPU kernel log buffer size contribution (13 => 8 KB, 17 => 128KB)"
 	depends on SMP
diff --git a/init/main.c b/init/main.c
index 17e439f..f9cd3f0 100644
--- a/init/main.c
+++ b/init/main.c
@@ -81,6 +81,7 @@
 #include <linux/proc_ns.h>
 #include <linux/io.h>
 #include <linux/kaiser.h>
+#include <linux/cache.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -913,14 +914,16 @@
 
 static noinline void __init kernel_init_freeable(void);
 
-#ifdef CONFIG_DEBUG_RODATA
-static bool rodata_enabled = true;
+#if defined(CONFIG_DEBUG_RODATA) || defined(CONFIG_SET_MODULE_RONX)
+bool rodata_enabled __ro_after_init = true;
 static int __init set_debug_rodata(char *str)
 {
 	return strtobool(str, &rodata_enabled);
 }
 __setup("rodata=", set_debug_rodata);
+#endif
 
+#ifdef CONFIG_DEBUG_RODATA
 static void mark_readonly(void)
 {
 	if (rodata_enabled)
diff --git a/ipc/shm.c b/ipc/shm.c
index e2072ae..9c687cd 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -198,6 +198,12 @@
 	if (IS_ERR(shp))
 		return PTR_ERR(shp);
 
+	if (shp->shm_file != sfd->file) {
+		/* ID was reused */
+		shm_unlock(shp);
+		return -EINVAL;
+	}
+
 	shp->shm_atim = get_seconds();
 	shp->shm_lprid = task_tgid_vnr(current);
 	shp->shm_nattch++;
@@ -381,6 +387,17 @@
 	return sfd->vm_ops->fault(vma, vmf);
 }
 
+static int shm_split(struct vm_area_struct *vma, unsigned long addr)
+{
+	struct file *file = vma->vm_file;
+	struct shm_file_data *sfd = shm_file_data(file);
+
+	if (sfd->vm_ops && sfd->vm_ops->split)
+		return sfd->vm_ops->split(vma, addr);
+
+	return 0;
+}
+
 #ifdef CONFIG_NUMA
 static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
 {
@@ -414,8 +431,9 @@
 	int ret;
 
 	/*
-	 * In case of remap_file_pages() emulation, the file can represent
-	 * removed IPC ID: propogate shm_lock() error to caller.
+	 * In case of remap_file_pages() emulation, the file can represent an
+	 * IPC ID that was removed, and possibly even reused by another shm
+	 * segment already.  Propagate this case as an error to caller.
 	 */
 	ret =__shm_open(vma);
 	if (ret)
@@ -439,6 +457,7 @@
 	struct shm_file_data *sfd = shm_file_data(file);
 
 	put_ipc_ns(sfd->ns);
+	fput(sfd->file);
 	shm_file_data(file) = NULL;
 	kfree(sfd);
 	return 0;
@@ -503,6 +522,7 @@
 	.open	= shm_open,	/* callback for a new vm-area open */
 	.close	= shm_close,	/* callback for when the vm-area is released */
 	.fault	= shm_fault,
+	.split	= shm_split,
 #if defined(CONFIG_NUMA)
 	.set_policy = shm_set_policy,
 	.get_policy = shm_get_policy,
@@ -1107,14 +1127,17 @@
 		goto out;
 	else if ((addr = (ulong)shmaddr)) {
 		if (addr & (shmlba - 1)) {
-			/*
-			 * Round down to the nearest multiple of shmlba.
-			 * For sane do_mmap_pgoff() parameters, avoid
-			 * round downs that trigger nil-page and MAP_FIXED.
-			 */
-			if ((shmflg & SHM_RND) && addr >= shmlba)
-				addr &= ~(shmlba - 1);
-			else
+			if (shmflg & SHM_RND) {
+				addr &= ~(shmlba - 1);  /* round down */
+
+				/*
+				 * Ensure that the round-down is non-nil
+				 * when remapping. This can happen for
+				 * cases when addr < shmlba.
+				 */
+				if (!addr && (shmflg & SHM_REMAP))
+					goto out;
+			} else
 #ifndef __ARCH_FORCE_SHMLBA
 				if (addr & ~PAGE_MASK)
 #endif
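
Sketch of the userspace-visible effect of the change above: SHM_RND now always rounds the attach address down to an SHMLBA boundary, but a round-down to address zero combined with SHM_REMAP is rejected rather than attaching at the nil page (the exact errno comes from code outside this hunk).

#include <sys/shm.h>

static void *attach_rounded(int shmid, void *low_addr)
{
	/* low_addr < SHMLBA: SHM_RND rounds it to 0, which SHM_REMAP now rejects */
	return shmat(shmid, low_addr, SHM_RND | SHM_REMAP);
}
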
@@ -1200,7 +1223,16 @@
 	file->f_mapping = shp->shm_file->f_mapping;
 	sfd->id = shp->shm_perm.id;
 	sfd->ns = get_ipc_ns(ns);
-	sfd->file = shp->shm_file;
+	/*
+	 * We need to take a reference to the real shm file to prevent the
+	 * pointer from becoming stale in cases where the lifetime of the outer
+	 * file extends beyond that of the shm segment.  It's not usually
+	 * possible, but it can happen during remap_file_pages() emulation as
+	 * that unmaps the memory, then does ->mmap() via file reference only.
+	 * We'll deny the ->mmap() if the shm segment was since removed, but to
+	 * detect shm ID reuse we need to compare the file pointers.
+	 */
+	sfd->file = get_file(shp->shm_file);
 	sfd->vm_ops = NULL;
 
 	err = security_mmap_file(file, prot, flags);
diff --git a/kernel/audit.c b/kernel/audit.c
index da4e7c0..3461a3d 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -742,6 +742,8 @@
 		return;
 
 	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_FEATURE_CHANGE);
+	if (!ab)
+		return;
 	audit_log_task_info(ab, current);
 	audit_log_format(ab, " feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d",
 			 audit_feature_names[which], !!old_feature, !!new_feature,
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 2cd5256..93648f6 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -2252,10 +2252,11 @@
 				audit_sig_uid = uid;
 			security_task_getsecid(tsk, &audit_sig_sid);
 		}
-		if (!audit_signals || audit_dummy_context())
-			return 0;
 	}
 
+	if (!audit_signals || audit_dummy_context())
+		return 0;
+
 	/* optimize the common case by putting first signal recipient directly
 	 * in audit_context */
 	if (!ctx->target_pid) {
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 4db6a67..b30ca0f 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -194,7 +194,7 @@
 static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 {
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
-	u32 index = *(u32 *)key;
+	u32 index = key ? *(u32 *)key : U32_MAX;
 	u32 *next = (u32 *)next_key;
 
 	if (index >= array->map.max_entries) {
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 27f4f2c..9c86d5d 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -328,12 +328,15 @@
 	struct hlist_head *head;
 	struct htab_elem *l, *next_l;
 	u32 hash, key_size;
-	int i;
+	int i = 0;
 
 	WARN_ON_ONCE(!rcu_read_lock_held());
 
 	key_size = map->key_size;
 
+	if (!key)
+		goto find_first_elem;
+
 	hash = htab_map_hash(key, key_size);
 
 	head = select_bucket(htab, hash);
@@ -341,10 +344,8 @@
 	/* lookup the key */
 	l = lookup_elem_raw(head, hash, key, key_size);
 
-	if (!l) {
-		i = 0;
+	if (!l)
 		goto find_first_elem;
-	}
 
 	/* key was found, get next key in the same bucket */
 	next_l = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&l->hash_node)),
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 41aa664..85ea598 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -579,14 +579,18 @@
 		goto err_put;
 	}
 
-	err = -ENOMEM;
-	key = kmalloc(map->key_size, GFP_USER);
-	if (!key)
-		goto err_put;
+	if (ukey) {
+		err = -ENOMEM;
+		key = kmalloc(map->key_size, GFP_USER);
+		if (!key)
+			goto err_put;
 
-	err = -EFAULT;
-	if (copy_from_user(key, ukey, map->key_size) != 0)
-		goto free_key;
+		err = -EFAULT;
+		if (copy_from_user(key, ukey, map->key_size) != 0)
+			goto free_key;
+	} else {
+		key = NULL;
+	}
 
 	err = -ENOMEM;
 	next_key = kmalloc(map->key_size, GFP_USER);
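
A hedged userspace sketch of what the NULL-key handling above enables: a BPF_MAP_GET_NEXT_KEY call can start a full map walk without guessing an initial key. The raw bpf(2) syscall is shown on the assumption that no libbpf wrapper is available.

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static long map_get_next_key(int map_fd, const void *key, void *next_key)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key = (__u64)(unsigned long)key;	/* NULL: return the first key */
	attr.next_key = (__u64)(unsigned long)next_key;
	return syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
}
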
diff --git a/kernel/cfi.c b/kernel/cfi.c
index 87053e2..6951c25 100644
--- a/kernel/cfi.c
+++ b/kernel/cfi.c
@@ -23,12 +23,12 @@
 #define cfi_slowpath_handler	__cfi_slowpath
 #endif /* CONFIG_CFI_PERMISSIVE */
 
-static inline void handle_cfi_failure()
+static inline void handle_cfi_failure(void *ptr)
 {
 #ifdef CONFIG_CFI_PERMISSIVE
-	WARN_RATELIMIT(1, "CFI failure:\n");
+	WARN_RATELIMIT(1, "CFI failure (target: [<%px>] %pF):\n", ptr, ptr);
 #else
-	pr_err("CFI failure:\n");
+	pr_err("CFI failure (target: [<%px>] %pF):\n", ptr, ptr);
 	BUG();
 #endif
 }
@@ -282,18 +282,18 @@
 	if (likely(check))
 		check(id, ptr, diag);
 	else /* Don't allow unchecked modules */
-		handle_cfi_failure();
+		handle_cfi_failure(ptr);
 }
 EXPORT_SYMBOL(cfi_slowpath_handler);
 #endif /* CONFIG_MODULES */
 
-void cfi_failure_handler(void *data, void *value, void *vtable)
+void cfi_failure_handler(void *data, void *ptr, void *vtable)
 {
-	handle_cfi_failure();
+	handle_cfi_failure(ptr);
 }
 EXPORT_SYMBOL(cfi_failure_handler);
 
-void __cfi_check_fail(void *data, void *value)
+void __cfi_check_fail(void *data, void *ptr)
 {
-	handle_cfi_failure();
+	handle_cfi_failure(ptr);
 }
diff --git a/kernel/cpu.c b/kernel/cpu.c
index c68d150..3f3b7f8 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -64,6 +64,12 @@
 
 static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
 
+#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
+static struct lock_class_key cpuhp_state_key;
+static struct lockdep_map cpuhp_state_lock_map =
+	STATIC_LOCKDEP_MAP_INIT("cpuhp_state", &cpuhp_state_key);
+#endif
+
 /**
  * cpuhp_step - Hotplug state machine step
  * @name:	Name of the step
@@ -572,6 +578,7 @@
 
 	st->should_run = false;
 
+	lock_map_acquire(&cpuhp_state_lock_map);
 	/* Single callback invocation for [un]install ? */
 	if (st->single) {
 		if (st->cb_state < CPUHP_AP_ONLINE) {
@@ -603,6 +610,7 @@
 		else if (st->state > st->target)
 			ret = cpuhp_ap_offline(cpu, st);
 	}
+	lock_map_release(&cpuhp_state_lock_map);
 	st->result = ret;
 	complete(&st->done);
 }
@@ -617,6 +625,9 @@
 	if (!cpu_online(cpu))
 		return 0;
 
+	lock_map_acquire(&cpuhp_state_lock_map);
+	lock_map_release(&cpuhp_state_lock_map);
+
 	/*
 	 * If we are up and running, use the hotplug thread. For early calls
 	 * we invoke the thread function directly.
@@ -660,6 +671,8 @@
 	enum cpuhp_state state = st->state;
 
 	trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
+	lock_map_acquire(&cpuhp_state_lock_map);
+	lock_map_release(&cpuhp_state_lock_map);
 	__cpuhp_kick_ap_work(st);
 	wait_for_completion(&st->done);
 	trace_cpuhp_exit(cpu, st->state, state, st->result);
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 2a20c0d..5a58421 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -1564,6 +1564,7 @@
 	int symbolic = 0;
 	int valid = 0;
 	int phys = 0;
+	int raw = 0;
 
 	kdbgetintenv("MDCOUNT", &mdcount);
 	kdbgetintenv("RADIX", &radix);
@@ -1573,9 +1574,10 @@
 	repeat = mdcount * 16 / bytesperword;
 
 	if (strcmp(argv[0], "mdr") == 0) {
-		if (argc != 2)
+		if (argc == 2 || (argc == 0 && last_addr != 0))
+			valid = raw = 1;
+		else
 			return KDB_ARGCOUNT;
-		valid = 1;
 	} else if (isdigit(argv[0][2])) {
 		bytesperword = (int)(argv[0][2] - '0');
 		if (bytesperword == 0) {
@@ -1611,7 +1613,10 @@
 		radix = last_radix;
 		bytesperword = last_bytesperword;
 		repeat = last_repeat;
-		mdcount = ((repeat * bytesperword) + 15) / 16;
+		if (raw)
+			mdcount = repeat;
+		else
+			mdcount = ((repeat * bytesperword) + 15) / 16;
 	}
 
 	if (argc) {
@@ -1628,7 +1633,10 @@
 			diag = kdbgetularg(argv[nextarg], &val);
 			if (!diag) {
 				mdcount = (int) val;
-				repeat = mdcount * 16 / bytesperword;
+				if (raw)
+					repeat = mdcount;
+				else
+					repeat = mdcount * 16 / bytesperword;
 			}
 		}
 		if (argc >= nextarg+1) {
@@ -1638,8 +1646,15 @@
 		}
 	}
 
-	if (strcmp(argv[0], "mdr") == 0)
-		return kdb_mdr(addr, mdcount);
+	if (strcmp(argv[0], "mdr") == 0) {
+		int ret;
+		last_addr = addr;
+		ret = kdb_mdr(addr, mdcount);
+		last_addr += mdcount;
+		last_repeat = mdcount;
+		last_bytesperword = bytesperword; // to make REPEAT happy
+		return ret;
+	}
 
 	switch (radix) {
 	case 10:
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index e9fdb52..c265f1c 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -117,23 +117,20 @@
 		goto exit;
 	}
 
-	if (count > 1) {
-		/* If the allocation failed, give up */
-		if (!callchain_cpus_entries)
-			err = -ENOMEM;
-		/*
-		 * If requesting per event more than the global cap,
-		 * return a different error to help userspace figure
-		 * this out.
-		 *
-		 * And also do it here so that we have &callchain_mutex held.
-		 */
-		if (event_max_stack > sysctl_perf_event_max_stack)
-			err = -EOVERFLOW;
+	/*
+	 * If requesting per event more than the global cap,
+	 * return a different error to help userspace figure
+	 * this out.
+	 *
+	 * And also do it here so that we have &callchain_mutex held.
+	 */
+	if (event_max_stack > sysctl_perf_event_max_stack) {
+		err = -EOVERFLOW;
 		goto exit;
 	}
 
-	err = alloc_callchain_buffers();
+	if (count == 1)
+		err = alloc_callchain_buffers();
 exit:
 	if (err)
 		atomic_dec(&nr_callchain_events);
@@ -227,12 +224,18 @@
 		}
 
 		if (regs) {
+			mm_segment_t fs;
+
 			if (crosstask)
 				goto exit_put;
 
 			if (add_mark)
 				perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);
+
+			fs = get_fs();
+			set_fs(USER_DS);
 			perf_callchain_user(&ctx, regs);
+			set_fs(fs);
 		}
 	}
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
index cd941f8..ee74fff 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -486,7 +486,7 @@
 				void __user *buffer, size_t *lenp,
 				loff_t *ppos)
 {
-	int ret = proc_dointvec(table, write, buffer, lenp, ppos);
+	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 
 	if (ret || !write)
 		return ret;
@@ -667,9 +667,15 @@
 
 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
 {
-	struct perf_cgroup *cgrp_out = cpuctx->cgrp;
-	if (cgrp_out)
-		__update_cgrp_time(cgrp_out);
+	struct perf_cgroup *cgrp = cpuctx->cgrp;
+	struct cgroup_subsys_state *css;
+
+	if (cgrp) {
+		for (css = &cgrp->css; css; css = css->parent) {
+			cgrp = container_of(css, struct perf_cgroup, css);
+			__update_cgrp_time(cgrp);
+		}
+	}
 }
 
 static inline void update_cgrp_time_from_event(struct perf_event *event)
@@ -697,6 +703,7 @@
 {
 	struct perf_cgroup *cgrp;
 	struct perf_cgroup_info *info;
+	struct cgroup_subsys_state *css;
 
 	/*
 	 * ctx->lock held by caller
@@ -707,8 +714,12 @@
 		return;
 
 	cgrp = perf_cgroup_from_task(task, ctx);
-	info = this_cpu_ptr(cgrp->info);
-	info->timestamp = ctx->timestamp;
+
+	for (css = &cgrp->css; css; css = css->parent) {
+		cgrp = container_of(css, struct perf_cgroup, css);
+		info = this_cpu_ptr(cgrp->info);
+		info->timestamp = ctx->timestamp;
+	}
 }
 
 #define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
@@ -4181,6 +4192,9 @@
 	if (event->ctx)
 		put_ctx(event->ctx);
 
+	if (event->hw.target)
+		put_task_struct(event->hw.target);
+
 	exclusive_event_destroy(event);
 	module_put(event->pmu->module);
 
@@ -5799,9 +5813,6 @@
 	__output_copy(handle, values, n * sizeof(u64));
 }
 
-/*
- * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
- */
 static void perf_output_read_group(struct perf_output_handle *handle,
 			    struct perf_event *event,
 			    u64 enabled, u64 running)
@@ -5819,7 +5830,8 @@
 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
 		values[n++] = running;
 
-	if (leader != event)
+	if ((leader != event) &&
+	    (leader->state == PERF_EVENT_STATE_ACTIVE))
 		leader->pmu->read(leader);
 
 	values[n++] = perf_event_count(leader);
@@ -5846,6 +5858,13 @@
 #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
 				 PERF_FORMAT_TOTAL_TIME_RUNNING)
 
+/*
+ * XXX PERF_SAMPLE_READ vs inherited events seems difficult.
+ *
+ * The problem is that its both hard and excessively expensive to iterate the
+ * child list, not to mention that its impossible to IPI the children running
+ * on another CPU, from interrupt/NMI context.
+ */
 static void perf_output_read(struct perf_output_handle *handle,
 			     struct perf_event *event)
 {
@@ -9462,6 +9481,7 @@
 		 * and we cannot use the ctx information because we need the
 		 * pmu before we get a ctx.
 		 */
+		get_task_struct(task);
 		event->hw.target = task;
 	}
 
@@ -9511,9 +9531,10 @@
 	local64_set(&hwc->period_left, hwc->sample_period);
 
 	/*
-	 * we currently do not support PERF_FORMAT_GROUP on inherited events
+	 * We currently do not support PERF_SAMPLE_READ on inherited events.
+	 * See perf_output_read().
 	 */
-	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
+	if (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ))
 		goto err_ns;
 
 	if (!has_branch_stack(event))
@@ -9541,8 +9562,10 @@
 		event->addr_filters_offs = kcalloc(pmu->nr_addr_filters,
 						   sizeof(unsigned long),
 						   GFP_KERNEL);
-		if (!event->addr_filters_offs)
+		if (!event->addr_filters_offs) {
+			err = -ENOMEM;
 			goto err_per_task;
+		}
 
 		/* force hw sync on the address filters */
 		event->addr_filters_gen = 1;
@@ -9576,6 +9599,8 @@
 		perf_detach_cgroup(event);
 	if (event->ns)
 		put_pid_ns(event->ns);
+	if (event->hw.target)
+		put_task_struct(event->hw.target);
 	kfree(event);
 
 	return ERR_PTR(err);
@@ -9695,9 +9720,9 @@
 		 * __u16 sample size limit.
 		 */
 		if (attr->sample_stack_user >= USHRT_MAX)
-			ret = -EINVAL;
+			return -EINVAL;
 		else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
-			ret = -EINVAL;
+			return -EINVAL;
 	}
 
 	if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index 3f8cb1e..253ae2d 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -427,16 +427,9 @@
  * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
  * @bp: the breakpoint structure to modify
  * @attr: new breakpoint attributes
- * @triggered: callback to trigger when we hit the breakpoint
- * @tsk: pointer to 'task_struct' of the process to which the address belongs
  */
 int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
 {
-	u64 old_addr = bp->attr.bp_addr;
-	u64 old_len = bp->attr.bp_len;
-	int old_type = bp->attr.bp_type;
-	int err = 0;
-
 	/*
 	 * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
 	 * will not be possible to raise IPIs that invoke __perf_event_disable.
@@ -451,27 +444,18 @@
 	bp->attr.bp_addr = attr->bp_addr;
 	bp->attr.bp_type = attr->bp_type;
 	bp->attr.bp_len = attr->bp_len;
+	bp->attr.disabled = 1;
 
-	if (attr->disabled)
-		goto end;
+	if (!attr->disabled) {
+		int err = validate_hw_breakpoint(bp);
 
-	err = validate_hw_breakpoint(bp);
-	if (!err)
+		if (err)
+			return err;
+
 		perf_event_enable(bp);
-
-	if (err) {
-		bp->attr.bp_addr = old_addr;
-		bp->attr.bp_type = old_type;
-		bp->attr.bp_len = old_len;
-		if (!bp->attr.disabled)
-			perf_event_enable(bp);
-
-		return err;
+		bp->attr.disabled = 0;
 	}
 
-end:
-	bp->attr.disabled = attr->disabled;
-
 	return 0;
 }
 EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 257fa46..017f793 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -14,6 +14,7 @@
 #include <linux/slab.h>
 #include <linux/circ_buf.h>
 #include <linux/poll.h>
+#include <linux/nospec.h>
 
 #include "internal.h"
 
@@ -844,8 +845,10 @@
 			return NULL;
 
 		/* AUX space */
-		if (pgoff >= rb->aux_pgoff)
-			return virt_to_page(rb->aux_pages[pgoff - rb->aux_pgoff]);
+		if (pgoff >= rb->aux_pgoff) {
+			int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages);
+			return virt_to_page(rb->aux_pages[aux_pgoff]);
+		}
 	}
 
 	return __perf_mmap_to_page(rb, pgoff);
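
The pattern applied above, sketched in isolation: array_index_nospec() clamps an attacker-influenced index to the array bounds even under speculative execution, so the dependent load cannot leak out-of-bounds data.

#include <linux/nospec.h>

static unsigned long pick_entry(const unsigned long *tbl, unsigned long idx,
				unsigned long nr)
{
	if (idx >= nr)
		return 0;
	idx = array_index_nospec(idx, nr);	/* bound the index under speculation too */
	return tbl[idx];
}
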
diff --git a/kernel/exit.c b/kernel/exit.c
index 2c2cc1a..ee8c601 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -54,6 +54,7 @@
 #include <linux/writeback.h>
 #include <linux/shm.h>
 #include <linux/kcov.h>
+#include <linux/cpufreq_times.h>
 
 #include "sched/tune.h"
 
@@ -171,6 +172,9 @@
 {
 	struct task_struct *leader;
 	int zap_leader;
+#ifdef CONFIG_CPU_FREQ_TIMES
+	cpufreq_task_times_exit(p);
+#endif
 repeat:
 	/* don't need to get the RCU readlock here - the process is dead and
 	 * can't be modifying its own credentials. But shut RCU-lockdep up */
@@ -713,6 +717,7 @@
 	static DEFINE_SPINLOCK(low_water_lock);
 	static int lowest_to_date = THREAD_SIZE;
 	unsigned long free;
+	int islower = false;
 
 	free = stack_not_used(current);
 
@@ -721,11 +726,15 @@
 
 	spin_lock(&low_water_lock);
 	if (free < lowest_to_date) {
-		pr_info("%s (%d) used greatest stack depth: %lu bytes left\n",
-			current->comm, task_pid_nr(current), free);
 		lowest_to_date = free;
+		islower = true;
 	}
 	spin_unlock(&low_water_lock);
+
+	if (islower) {
+		pr_info("%s (%d) used greatest stack depth: %lu bytes left\n",
+				current->comm, task_pid_nr(current), free);
+	}
 }
 #else
 static inline void check_stack_usage(void) {}
@@ -1676,6 +1685,10 @@
 			__WNOTHREAD|__WCLONE|__WALL))
 		return -EINVAL;
 
+	/* -INT_MIN is not defined */
+	if (upid == INT_MIN)
+		return -ESRCH;
+
 	if (upid == -1)
 		type = PIDTYPE_MAX;
 	else if (upid < 0) {
diff --git a/kernel/fork.c b/kernel/fork.c
index 79fdfd8..965c091 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -614,7 +614,7 @@
 		if (!tmp)
 			goto fail_nomem;
 		*tmp = *mpnt;
-		INIT_LIST_HEAD(&tmp->anon_vma_chain);
+		INIT_VMA(tmp);
 		retval = vma_dup_policy(mpnt, tmp);
 		if (retval)
 			goto fail_nomem_policy;
@@ -764,6 +764,9 @@
 	mm->mmap = NULL;
 	mm->mm_rb = RB_ROOT;
 	mm->vmacache_seqnum = 0;
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	rwlock_init(&mm->mm_rb_lock);
+#endif
 	atomic_set(&mm->mm_users, 1);
 	atomic_set(&mm->mm_count, 1);
 	init_rwsem(&mm->mmap_sem);
diff --git a/kernel/futex.c b/kernel/futex.c
index bb2265a..c3ea6f2 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1458,6 +1458,45 @@
 	return ret;
 }
 
+static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
+{
+	unsigned int op =	  (encoded_op & 0x70000000) >> 28;
+	unsigned int cmp =	  (encoded_op & 0x0f000000) >> 24;
+	int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
+	int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
+	int oldval, ret;
+
+	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
+		if (oparg < 0 || oparg > 31)
+			return -EINVAL;
+		oparg = 1 << oparg;
+	}
+
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+		return -EFAULT;
+
+	ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
+	if (ret)
+		return ret;
+
+	switch (cmp) {
+	case FUTEX_OP_CMP_EQ:
+		return oldval == cmparg;
+	case FUTEX_OP_CMP_NE:
+		return oldval != cmparg;
+	case FUTEX_OP_CMP_LT:
+		return oldval < cmparg;
+	case FUTEX_OP_CMP_GE:
+		return oldval >= cmparg;
+	case FUTEX_OP_CMP_LE:
+		return oldval <= cmparg;
+	case FUTEX_OP_CMP_GT:
+		return oldval > cmparg;
+	default:
+		return -ENOSYS;
+	}
+}
+
 /*
  * Wake up all waiters hashed on the physical page that is mapped
  * to this virtual address:
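
For context, a hedged userspace sketch of how the encoded_op decoded above is usually produced: the FUTEX_OP() helper in <linux/futex.h> packs the op/cmp/oparg/cmparg fields, here for a FUTEX_WAKE_OP request.

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

static long wake_op_set(int *uaddr, int *uaddr2)
{
	int op = FUTEX_OP(FUTEX_OP_SET, 1, FUTEX_OP_CMP_EQ, 0);

	/*
	 * Always wake one waiter on uaddr; also set *uaddr2 = 1 and wake one
	 * waiter there if its old value was 0.
	 */
	return syscall(__NR_futex, uaddr, FUTEX_WAKE_OP,
		       1 /* wake <= 1 on uaddr */,
		       (void *)1UL /* val2: wake <= 1 on uaddr2 */,
		       uaddr2, op);
}
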
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index c1195eb..c93d4df 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -853,7 +853,7 @@
 	 * This code is triggered unconditionally. Check the affinity
 	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
 	 */
-	if (desc->irq_common_data.affinity)
+	if (cpumask_available(desc->irq_common_data.affinity))
 		cpumask_copy(mask, desc->irq_common_data.affinity);
 	else
 		valid = false;
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 5616755..f5ab72e 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -38,6 +38,7 @@
 #include <linux/syscore_ops.h>
 #include <linux/compiler.h>
 #include <linux/hugetlb.h>
+#include <linux/frame.h>
 
 #include <asm/page.h>
 #include <asm/sections.h>
@@ -878,7 +879,7 @@
  * only when panic_cpu holds the current CPU number; this is the only CPU
  * which processes crash_kexec routines.
  */
-void __crash_kexec(struct pt_regs *regs)
+void __noclone __crash_kexec(struct pt_regs *regs)
 {
 	/* Take the kexec_mutex here to prevent sys_kexec_load
 	 * running on one cpu from replacing the crash kernel
@@ -900,6 +901,7 @@
 		mutex_unlock(&kexec_mutex);
 	}
 }
+STACK_FRAME_NON_STANDARD(__crash_kexec);
 
 void crash_kexec(struct pt_regs *regs)
 {
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index a1a07cf..6948518 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -125,7 +125,7 @@
 	return module_alloc(PAGE_SIZE);
 }
 
-static void free_insn_page(void *page)
+void __weak free_insn_page(void *page)
 {
 	module_memfree(page);
 }
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
index 19248dd..c7471c3 100644
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -20,51 +20,14 @@
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
+#include <linux/spinlock.h>
 #include <asm/qrwlock.h>
 
-/*
- * This internal data structure is used for optimizing access to some of
- * the subfields within the atomic_t cnts.
- */
-struct __qrwlock {
-	union {
-		atomic_t cnts;
-		struct {
-#ifdef __LITTLE_ENDIAN
-			u8 wmode;	/* Writer mode   */
-			u8 rcnts[3];	/* Reader counts */
-#else
-			u8 rcnts[3];	/* Reader counts */
-			u8 wmode;	/* Writer mode   */
-#endif
-		};
-	};
-	arch_spinlock_t	lock;
-};
-
-/**
- * rspin_until_writer_unlock - inc reader count & spin until writer is gone
- * @lock  : Pointer to queue rwlock structure
- * @writer: Current queue rwlock writer status byte
- *
- * In interrupt context or at the head of the queue, the reader will just
- * increment the reader count & wait until the writer releases the lock.
- */
-static __always_inline void
-rspin_until_writer_unlock(struct qrwlock *lock, u32 cnts)
-{
-	while ((cnts & _QW_WMASK) == _QW_LOCKED) {
-		cpu_relax_lowlatency();
-		cnts = atomic_read_acquire(&lock->cnts);
-	}
-}
-
 /**
  * queued_read_lock_slowpath - acquire read lock of a queue rwlock
  * @lock: Pointer to queue rwlock structure
- * @cnts: Current qrwlock lock value
  */
-void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
+void queued_read_lock_slowpath(struct qrwlock *lock)
 {
 	/*
 	 * Readers come here when they cannot get the lock without waiting
@@ -72,13 +35,11 @@
 	if (unlikely(in_interrupt())) {
 		/*
 		 * Readers in interrupt context will get the lock immediately
-		 * if the writer is just waiting (not holding the lock yet).
-		 * The rspin_until_writer_unlock() function returns immediately
-		 * in this case. Otherwise, they will spin (with ACQUIRE
-		 * semantics) until the lock is available without waiting in
-		 * the queue.
+		 * if the writer is just waiting (not holding the lock yet),
+		 * so spin with ACQUIRE semantics until the lock is available
+		 * without waiting in the queue.
 		 */
-		rspin_until_writer_unlock(lock, cnts);
+		atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
 		return;
 	}
 	atomic_sub(_QR_BIAS, &lock->cnts);
@@ -87,14 +48,14 @@
 	 * Put the reader into the wait queue
 	 */
 	arch_spin_lock(&lock->wait_lock);
+	atomic_add(_QR_BIAS, &lock->cnts);
 
 	/*
 	 * The ACQUIRE semantics of the following spinning code ensure
 	 * that accesses can't leak upwards out of our subsequent critical
 	 * section in the case that the lock is currently held for write.
 	 */
-	cnts = atomic_fetch_add_acquire(_QR_BIAS, &lock->cnts);
-	rspin_until_writer_unlock(lock, cnts);
+	atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
 
 	/*
 	 * Signal the next one in queue to become queue head
@@ -109,8 +70,6 @@
  */
 void queued_write_lock_slowpath(struct qrwlock *lock)
 {
-	u32 cnts;
-
 	/* Put the writer into the wait queue */
 	arch_spin_lock(&lock->wait_lock);
 
@@ -119,30 +78,14 @@
 	    (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
 		goto unlock;
 
-	/*
-	 * Set the waiting flag to notify readers that a writer is pending,
-	 * or wait for a previous writer to go away.
-	 */
-	for (;;) {
-		struct __qrwlock *l = (struct __qrwlock *)lock;
+	/* Set the waiting flag to notify readers that a writer is pending */
+	atomic_add(_QW_WAITING, &lock->cnts);
 
-		if (!READ_ONCE(l->wmode) &&
-		   (cmpxchg_relaxed(&l->wmode, 0, _QW_WAITING) == 0))
-			break;
-
-		cpu_relax_lowlatency();
-	}
-
-	/* When no more readers, set the locked flag */
-	for (;;) {
-		cnts = atomic_read(&lock->cnts);
-		if ((cnts == _QW_WAITING) &&
-		    (atomic_cmpxchg_acquire(&lock->cnts, _QW_WAITING,
-					    _QW_LOCKED) == _QW_WAITING))
-			break;
-
-		cpu_relax_lowlatency();
-	}
+	/* When no more readers or writers, set the locked flag */
+	do {
+		atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
+	} while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
+					_QW_LOCKED) != _QW_WAITING);
 unlock:
 	arch_spin_unlock(&lock->wait_lock);
 }
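
A rough, non-authoritative sketch of the primitive the rewrite above relies on: atomic_cond_read_acquire(v, cond) repeatedly reads v, exposing the loaded value to cond as VAL, until cond holds, and gives the final read ACQUIRE ordering. The generic form is approximately:

#define atomic_cond_read_acquire_sketch(v, cond)		\
({								\
	int VAL;						\
	for (;;) {						\
		VAL = atomic_read(v);				\
		if (cond)					\
			break;					\
		cpu_relax();					\
	}							\
	smp_acquire__after_ctrl_dep();	/* upgrade to ACQUIRE */\
	VAL;							\
})

On some architectures this maps onto a wait-for-event style instruction rather than a busy loop, which is one reason the slowpaths above no longer hand-roll their spin loops.
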
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index b2caec7..a72f5df 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -495,6 +495,14 @@
 	tail = encode_tail(smp_processor_id(), idx);
 
 	node += idx;
+
+	/*
+	 * Ensure that we increment the head node->count before initialising
+	 * the actual node. If the compiler is kind enough to reorder these
+	 * stores, then an IRQ could overwrite our assignments.
+	 */
+	barrier();
+
 	node->locked = 0;
 	node->next = NULL;
 	pv_init_node(node);
diff --git a/kernel/module.c b/kernel/module.c
index dec20c7..13ad2f8 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1911,6 +1911,9 @@
 /* livepatching wants to disable read-only so it can frob module. */
 void module_disable_ro(const struct module *mod)
 {
+	if (!rodata_enabled)
+		return;
+
 	frob_text(&mod->core_layout, set_memory_rw);
 	frob_rodata(&mod->core_layout, set_memory_rw);
 	frob_ro_after_init(&mod->core_layout, set_memory_rw);
@@ -1920,6 +1923,9 @@
 
 void module_enable_ro(const struct module *mod, bool after_init)
 {
+	if (!rodata_enabled)
+		return;
+
 	frob_text(&mod->core_layout, set_memory_ro);
 	frob_rodata(&mod->core_layout, set_memory_ro);
 	frob_text(&mod->init_layout, set_memory_ro);
@@ -1952,6 +1958,9 @@
 {
 	struct module *mod;
 
+	if (!rodata_enabled)
+		return;
+
 	mutex_lock(&module_mutex);
 	list_for_each_entry_rcu(mod, &modules, list) {
 		if (mod->state == MODULE_STATE_UNFORMED)
@@ -1968,6 +1977,9 @@
 {
 	struct module *mod;
 
+	if (!rodata_enabled)
+		return;
+
 	mutex_lock(&module_mutex);
 	list_for_each_entry_rcu(mod, &modules, list) {
 		if (mod->state == MODULE_STATE_UNFORMED)
@@ -1981,10 +1993,12 @@
 
 static void disable_ro_nx(const struct module_layout *layout)
 {
-	frob_text(layout, set_memory_rw);
-	frob_rodata(layout, set_memory_rw);
+	if (rodata_enabled) {
+		frob_text(layout, set_memory_rw);
+		frob_rodata(layout, set_memory_rw);
+		frob_ro_after_init(layout, set_memory_rw);
+	}
 	frob_rodata(layout, set_memory_x);
-	frob_ro_after_init(layout, set_memory_rw);
 	frob_ro_after_init(layout, set_memory_x);
 	frob_writable_data(layout, set_memory_x);
 }
diff --git a/kernel/pid.c b/kernel/pid.c
index 693a643..fa704f8 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -322,8 +322,10 @@
 	}
 
 	if (unlikely(is_child_reaper(pid))) {
-		if (pid_ns_prepare_proc(ns))
+		if (pid_ns_prepare_proc(ns)) {
+			disable_pid_allocation(ns);
 			goto out_free;
+		}
 	}
 
 	get_pid_ns(ns);
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index a29eaee..bf60b37 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -94,6 +94,16 @@
 	  For more details, refer to the description of CONFIG_HIBERNATION
 	  for booting without resuming.
 
+config HIBERNATION_SKIP_CRC
+	bool "Skip LZO image CRC check"
+	default n
+	depends on HIBERNATION
+	---help---
+	  Some filesystem devices may have hardware-based integrity checks. In this
+	  scenario, repeating the integrity check in software is unnecessary
+	  and wasteful. This config option has no effect if uncompressed
+	  hibernation images are used.
+
 config ARCH_SAVE_PAGE_KEYS
 	bool
 
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 56d1d0d..ccba4d8 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -103,9 +103,6 @@
 extern dev_t swsusp_resume_device;
 extern sector_t swsusp_resume_block;
 
-extern asmlinkage int swsusp_arch_suspend(void);
-extern asmlinkage int swsusp_arch_resume(void);
-
 extern int create_basic_memory_bitmaps(void);
 extern void free_basic_memory_bitmaps(void);
 extern int hibernate_preallocate_memory(void);
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 0469d80..d6a0b0f 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -478,8 +478,6 @@
 	val = c->default_value;
 
 	for_each_cpu(cpu, mask) {
-		if (cpu_isolated(cpu))
-			continue;
 
 		switch (c->type) {
 		case PM_QOS_MIN:
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 95db6b79..1f6aef2 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -606,9 +606,10 @@
 		}
 		atomic_set(&d->ready, 0);
 
-		for (i = 0; i < d->run_threads; i++)
-			*d->crc32 = crc32_le(*d->crc32,
-			                     d->unc[i], *d->unc_len[i]);
+		if (!IS_ENABLED(CONFIG_HIBERNATION_SKIP_CRC))
+			for (i = 0; i < d->run_threads; i++)
+				*d->crc32 = crc32_le(*d->crc32,
+						d->unc[i], *d->unc_len[i]);
 		atomic_set(&d->stop, 1);
 		wake_up(&d->done);
 	}
@@ -1455,7 +1456,8 @@
 		if (!snapshot_image_loaded(snapshot))
 			ret = -ENODATA;
 		if (!ret) {
-			if (swsusp_header->flags & SF_CRC32_MODE) {
+			if ((swsusp_header->flags & SF_CRC32_MODE) &&
+			    (!IS_ENABLED(CONFIG_HIBERNATION_SKIP_CRC))) {
 				if(handle->crc32 != swsusp_header->crc32) {
 					printk(KERN_ERR
 					       "PM: Invalid image CRC32!\n");
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 3012f95..396b020 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2172,6 +2172,8 @@
 	console_unlock();
 }
 
+#ifdef CONFIG_CONSOLE_FLUSH_ON_HOTPLUG
+
 /**
  * console_cpu_notify - print deferred console messages after CPU hotplug
  * @self: notifier struct
@@ -2197,6 +2199,8 @@
 	return NOTIFY_OK;
 }
 
+#endif
+
 /**
  * console_lock - lock the console system for exclusive use.
  *
@@ -2850,7 +2854,9 @@
 				unregister_console(con);
 		}
 	}
+#ifdef CONFIG_CONSOLE_FLUSH_ON_HOTPLUG
 	hotcpu_notifier(console_cpu_notify, 0);
+#endif
 	return 0;
 }
 late_initcall(printk_late_init);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index e3944c4..554ea54 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -521,8 +521,14 @@
 	}
 	t = list_entry(rnp->gp_tasks->prev,
 		       struct task_struct, rcu_node_entry);
-	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
+	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
+		/*
+		 * We could be printing a lot while holding a spinlock.
+		 * Avoid triggering hard lockup.
+		 */
+		touch_nmi_watchdog();
 		sched_show_task(t);
+	}
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
@@ -1629,6 +1635,12 @@
 	char *ticks_title;
 	unsigned long ticks_value;
 
+	/*
+	 * We could be printing a lot while holding a spinlock.  Avoid
+	 * triggering hard lockup.
+	 */
+	touch_nmi_watchdog();
+
 	if (rsp->gpnum == rdp->gpnum) {
 		ticks_title = "ticks this GP";
 		ticks_value = rdp->ticks_this_gp;
diff --git a/kernel/relay.c b/kernel/relay.c
index 2603e04..91e8fbf 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -163,7 +163,7 @@
 {
 	struct rchan_buf *buf;
 
-	if (chan->n_subbufs > UINT_MAX / sizeof(size_t *))
+	if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t *))
 		return NULL;
 
 	buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
diff --git a/kernel/resource.c b/kernel/resource.c
index 89778a3..fbea5af 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -633,7 +633,8 @@
 			alloc.start = constraint->alignf(constraint->alignf_data, &avail,
 					size, constraint->align);
 			alloc.end = alloc.start + size - 1;
-			if (resource_contains(&avail, &alloc)) {
+			if (alloc.start <= alloc.end &&
+			    resource_contains(&avail, &alloc)) {
 				new->start = alloc.start;
 				new->end = alloc.end;
 				return 0;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 93c00e9..b755eb8 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -76,6 +76,7 @@
 #include <linux/frame.h>
 #include <linux/prefetch.h>
 #include <linux/irq.h>
+#include <linux/cpufreq_times.h>
 
 #include <asm/switch_to.h>
 #include <asm/tlb.h>
@@ -2171,7 +2172,7 @@
 	rq = cpu_rq(task_cpu(p));
 	raw_spin_lock(&rq->lock);
 	old_load = task_load(p);
-	wallclock = ktime_get_ns();
+	wallclock = sched_ktime_clock();
 	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
 	update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
 	raw_spin_unlock(&rq->lock);
@@ -2258,7 +2259,7 @@
 	trace_sched_waking(p);
 
 	if (!task_on_rq_queued(p)) {
-		u64 wallclock = ktime_get_ns();
+		u64 wallclock = sched_ktime_clock();
 
 		update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
 		update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
@@ -2307,6 +2308,7 @@
 	dl_se->dl_period = 0;
 	dl_se->flags = 0;
 	dl_se->dl_bw = 0;
+	dl_se->dl_density = 0;
 
 	dl_se->dl_throttled = 0;
 	dl_se->dl_yielded = 0;
@@ -2342,6 +2344,10 @@
 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
 #endif
 
+#ifdef CONFIG_CPU_FREQ_TIMES
+	cpufreq_task_times_init(p);
+#endif
+
 	RB_CLEAR_NODE(&p->dl.rb_node);
 	init_dl_task_timer(&p->dl);
 	__dl_clear_params(p);
@@ -3255,7 +3261,7 @@
 	old_load = task_load(curr);
 	set_window_start(rq);
 
-	wallclock = ktime_get_ns();
+	wallclock = sched_ktime_clock();
 	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
 
 	update_rq_clock(rq);
@@ -3627,7 +3633,7 @@
 	clear_preempt_need_resched();
 	rq->clock_skip_update = 0;
 
-	wallclock = ktime_get_ns();
+	wallclock = sched_ktime_clock();
 	if (likely(prev != next)) {
 		if (!prev->on_rq)
 			prev->last_sleep_ts = wallclock;
@@ -4152,6 +4158,7 @@
 	dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
 	dl_se->flags = attr->sched_flags;
 	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
+	dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
 
 	/*
 	 * Changing the parameters of a task is 'tricky' and we're not doing
@@ -9604,7 +9611,7 @@
 	rq = task_rq_lock(p, &rf);
 
 	/* rq->curr == p */
-	wallclock = ktime_get_ns();
+	wallclock = sched_ktime_clock();
 	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
 	dequeue_task(rq, p, 0);
 	/*
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index 26c9cf4..bd64b1a 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -789,6 +789,7 @@
 	unsigned long flags;
 	unsigned int num_cpus = cluster->num_cpus;
 	unsigned int nr_isolated = 0;
+	bool first_pass = cluster->nr_not_preferred_cpus;
 
 	/*
 	 * Protect against entry being removed (and added at tail) by other
@@ -834,6 +835,7 @@
 	cluster->nr_isolated_cpus += nr_isolated;
 	spin_unlock_irqrestore(&state_lock, flags);
 
+again:
 	/*
 	 * If the number of active CPUs is within the limits, then
 	 * don't force isolation of any busy CPUs.
@@ -853,6 +855,9 @@
 		if (cluster->active_cpus <= cluster->max_cpus)
 			break;
 
+		if (first_pass && !c->not_preferred)
+			continue;
+
 		spin_unlock_irqrestore(&state_lock, flags);
 
 		pr_debug("Trying to isolate CPU%u\n", c->cpu);
@@ -869,6 +874,10 @@
 	cluster->nr_isolated_cpus += nr_isolated;
 	spin_unlock_irqrestore(&state_lock, flags);
 
+	if (first_pass && cluster->active_cpus > cluster->max_cpus) {
+		first_pass = false;
+		goto again;
+	}
 }
 
 static void __try_to_unisolate(struct cluster_data *cluster,
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 895c63f..b71f116 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -466,7 +466,7 @@
 	mutex_lock(&sg_policy->work_lock);
 	raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
 	sugov_track_cycles(sg_policy, sg_policy->policy->cur,
-			   ktime_get_ns());
+			   sched_ktime_clock());
 	raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
 	__cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
 				CPUFREQ_RELATION_L);
@@ -919,7 +919,7 @@
 		mutex_lock(&sg_policy->work_lock);
 		raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
 		sugov_track_cycles(sg_policy, sg_policy->policy->cur,
-				   ktime_get_ns());
+				   sched_ktime_clock());
 		raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
 		cpufreq_policy_apply_limits(policy);
 		mutex_unlock(&sg_policy->work_lock);
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 87bea1e..aec86a26 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -133,6 +133,8 @@
 
 		if (lowest_mask) {
 			cpumask_and(lowest_mask, tsk_cpus_allowed(p), vec->mask);
+			cpumask_andnot(lowest_mask, lowest_mask,
+				       cpu_isolated_mask);
 			if (drop_nopreempts)
 				drop_nopreempt_cpus(lowest_mask);
 			/*
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index e4f48f1..50f2ea8 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -4,6 +4,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/static_key.h>
 #include <linux/context_tracking.h>
+#include <linux/cpufreq_times.h>
 #include "sched.h"
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
@@ -157,6 +158,11 @@
 
 	/* Account for user time used */
 	acct_account_cputime(p);
+
+#ifdef CONFIG_CPU_FREQ_TIMES
+	/* Account power usage for user time */
+	cpufreq_acct_update_power(p, cputime);
+#endif
 }
 
 /*
@@ -207,6 +213,10 @@
 
 	/* Account for system time used */
 	acct_account_cputime(p);
+#ifdef CONFIG_CPU_FREQ_TIMES
+	/* Account power usage for system time */
+	cpufreq_acct_update_power(p, cputime);
+#endif
 }
 
 /*
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 0a295ac..d7c7dd6 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -485,13 +485,84 @@
 }
 
 /*
- * When a -deadline entity is queued back on the runqueue, its runtime and
- * deadline might need updating.
+ * Revised wakeup rule [1]: For self-suspending tasks, rather than
+ * re-initializing the task's runtime and deadline, the revised wakeup
+ * rule adjusts the task's runtime so that the task does not overrun its
+ * density.
  *
- * The policy here is that we update the deadline of the entity only if:
- *  - the current deadline is in the past,
- *  - using the remaining runtime with the current deadline would make
- *    the entity exceed its bandwidth.
+ * Reasoning: a task may overrun the density if:
+ *    runtime / (deadline - t) > dl_runtime / dl_deadline
+ *
+ * Therefore, runtime can be adjusted to:
+ *     runtime = (dl_runtime / dl_deadline) * (deadline - t)
+ *
+ * This way, the runtime will be equal to the maximum density
+ * the task can use without breaking any rule.
+ *
+ * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
+ * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
+ */
+static void
+update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
+{
+	u64 laxity = dl_se->deadline - rq_clock(rq);
+
+	/*
+	 * If the task has deadline < period, and the deadline is in the past,
+	 * it should already be throttled before this check.
+	 *
+	 * See update_dl_entity() comments for further details.
+	 */
+	WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
+
+	dl_se->runtime = (dl_se->dl_density * laxity) >> 20;
+}
+
+/*
+ * Regarding the deadline, a task with implicit deadline has a relative
+ * deadline == relative period. A task with constrained deadline has a
+ * relative deadline <= relative period.
+ *
+ * We support constrained deadline tasks. However, there are some restrictions
+ * applied only for tasks which do not have an implicit deadline. See
+ * update_dl_entity() to know more about such restrictions.
+ *
+ * dl_is_implicit() returns true if the task has an implicit deadline.
+ */
+static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
+{
+	return dl_se->dl_deadline == dl_se->dl_period;
+}
+
+/*
+ * When a deadline entity is placed in the runqueue, its runtime and deadline
+ * might need to be updated. This is done by a CBS wake up rule. There are two
+ * different rules: 1) the original CBS; and 2) the Revisited CBS.
+ *
+ * When the task is starting a new period, the Original CBS is used. In this
+ * case, the runtime is replenished and a new absolute deadline is set.
+ *
+ * When a task is queued before the beginning of the next period, using the
+ * remaining runtime and deadline could make the entity overflow; see
+ * dl_entity_overflow() for more about runtime overflow. When such a case
+ * is detected, the runtime and deadline need to be updated.
+ *
+ * If the task has an implicit deadline, i.e., deadline == period, the Original
+ * CBS is applied: the runtime is replenished and a new absolute deadline is
+ * set, as in the previous cases.
+ *
+ * However, the Original CBS does not work properly for tasks with
+ * deadline < period, which are said to have a constrained deadline. By
+ * applying the Original CBS, a constrained deadline task would be able to run
+ * runtime/deadline in a period. With deadline < period, the task would
+ * overrun the runtime/period allowed bandwidth, breaking the admission test.
+ *
+ * In order to prevent this misbehavior, the Revisited CBS is used for
+ * constrained deadline tasks when a runtime overflow is detected. In the
+ * Revisited CBS, rather than replenishing & setting a new absolute deadline,
+ * the remaining runtime of the task is reduced to avoid runtime overflow.
+ * Please refer to the comments above the update_dl_revised_wakeup() function
+ * for more about the Revised CBS rule.
  */
 static void update_dl_entity(struct sched_dl_entity *dl_se,
 			     struct sched_dl_entity *pi_se)
@@ -501,6 +572,14 @@
 
 	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
 	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
+
+		if (unlikely(!dl_is_implicit(dl_se) &&
+			     !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
+			     !dl_se->dl_boosted)){
+			update_dl_revised_wakeup(dl_se, rq);
+			return;
+		}
+
 		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 		dl_se->runtime = pi_se->dl_runtime;
 	}
@@ -964,11 +1043,6 @@
 	__dequeue_dl_entity(dl_se);
 }
 
-static inline bool dl_is_constrained(struct sched_dl_entity *dl_se)
-{
-	return dl_se->dl_deadline < dl_se->dl_period;
-}
-
 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct task_struct *pi_task = rt_mutex_get_top_task(p);
@@ -1000,7 +1074,7 @@
 	 * If that is the case, the task will be throttled and
 	 * the replenishment timer will be set to the next period.
 	 */
-	if (!p->dl.dl_throttled && dl_is_constrained(&p->dl))
+	if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
 		dl_check_constrained_dl(&p->dl);
 
 	/*
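To make the fixed-point arithmetic behind dl_density and update_dl_revised_wakeup() concrete, here is a small userspace sketch. The task parameters (2 ms runtime, 5 ms relative deadline, a wakeup 3 ms before the absolute deadline) are made-up illustration values; the sketch mirrors the kernel's to_ratio()/BW_SHIFT convention but omits the RUNTIME_INF special case and the 64-bit division helpers:

```c
/* Build: gcc dl_density_demo.c */
#include <stdio.h>
#include <stdint.h>

#define BW_SHIFT 20	/* fixed-point shift used by the scheduler's to_ratio() */

/* density = runtime / deadline, scaled by 2^BW_SHIFT (mirrors to_ratio()) */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	return (runtime << BW_SHIFT) / period;
}

int main(void)
{
	/* Hypothetical constrained task, values in nanoseconds. */
	uint64_t dl_runtime  = 2000000;		/* 2 ms */
	uint64_t dl_deadline = 5000000;		/* 5 ms */
	uint64_t dl_density  = to_ratio(dl_deadline, dl_runtime);

	/* Task wakes up 3 ms before its absolute deadline. */
	uint64_t laxity = 3000000;

	/* Revised wakeup rule: runtime = (dl_runtime / dl_deadline) * laxity,
	 * computed in fixed point as in update_dl_revised_wakeup(). */
	uint64_t runtime = (dl_density * laxity) >> BW_SHIFT;

	printf("density (<<20): %llu\n", (unsigned long long)dl_density);
	printf("revised runtime: %llu ns (expected ~%llu ns)\n",
	       (unsigned long long)runtime,
	       (unsigned long long)(dl_runtime * laxity / dl_deadline));
	return 0;
}
```

With these inputs the revised runtime comes out to roughly 1.2 ms, i.e. the 3 ms of laxity scaled by the 2/5 density, so the task cannot exceed its dl_runtime/dl_deadline bandwidth before the deadline.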
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 04ba6d0..38fed13 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2511,7 +2511,8 @@
 		return;
 
 
-	down_read(&mm->mmap_sem);
+	if (!down_read_trylock(&mm->mmap_sem))
+		return;
 	vma = find_vma(mm, start);
 	if (!vma) {
 		reset_ptenuma_scan(p);
@@ -5709,6 +5710,9 @@
 	for_each_cpu(i, sched_group_cpus(sg))
 		state = min(state, idle_get_state_idx(cpu_rq(i)));
 
+	if (unlikely(state == INT_MAX))
+		return -EINVAL;
+
 	/* Take non-cpuidle idling into account (active idle/arch_cpu_idle()) */
 	state++;
 
@@ -5775,7 +5779,7 @@
  * The required scaling will be performed just one time, by the calling
  * functions, once we accumulated the contributons for all the SGs.
  */
-static void calc_sg_energy(struct energy_env *eenv)
+static int calc_sg_energy(struct energy_env *eenv)
 {
 	struct sched_group *sg = eenv->sg;
 	int busy_energy, idle_energy;
@@ -5804,6 +5808,11 @@
 
 		/* Compute IDLE energy */
 		idle_idx = group_idle_state(eenv, cpu_idx);
+		if (unlikely(idle_idx < 0))
+			return idle_idx;
+		if (idle_idx > sg->sge->nr_idle_states - 1)
+			idle_idx = sg->sge->nr_idle_states - 1;
+
 		idle_power = sg->sge->idle_states[idle_idx].power;
 
 		idle_energy   = SCHED_CAPACITY_SCALE - sg_util;
@@ -5812,6 +5821,7 @@
 		total_energy = busy_energy + idle_energy;
 		eenv->cpu[cpu_idx].energy += total_energy;
 	}
+	return 0;
 }
 
 /*
@@ -5873,7 +5883,8 @@
 				 * CPUs in the current visited SG.
 				 */
 				eenv->sg = sg;
-				calc_sg_energy(eenv);
+				if (calc_sg_energy(eenv))
+					return -EINVAL;
 
 				/* remove CPUs we have just visited */
 				if (!sd->child) {
@@ -6154,7 +6165,8 @@
 
 bool __cpu_overutilized(int cpu, int delta)
 {
-	return (capacity_of(cpu) * 1024) < ((cpu_util(cpu) + delta) * capacity_margin);
+	return (capacity_orig_of(cpu) * 1024) <
+			((cpu_util(cpu) + delta) * capacity_margin);
 }
 
 bool cpu_overutilized(int cpu)
@@ -6259,7 +6271,7 @@
 
 static unsigned long capacity_spare_wake(int cpu, struct task_struct *p)
 {
-	return capacity_orig_of(cpu) - cpu_util_wake(cpu, p);
+	return max_t(long, capacity_of(cpu) - cpu_util_wake(cpu, p), 0);
 }
 
 /*
@@ -6871,6 +6883,13 @@
 	if (!sg->group_weight)
 		return true;
 
+	/*
+	 * Don't skip a group if the task's affinity allows it
+	 * to run only on that group.
+	 */
+	if (cpumask_subset(tsk_cpus_allowed(p), sched_group_cpus(sg)))
+		return false;
+
 	if (!task_fits_max(p, fcpu))
 		return true;
 
@@ -6910,6 +6929,7 @@
 	int target_cpu = -1;
 	int cpu, i;
 	unsigned int active_cpus_count = 0;
+	int isolated_candidate = -1;
 
 	*backup_cpu = -1;
 
@@ -6955,13 +6975,16 @@
 			unsigned long wake_util, new_util, min_capped_util;
 
 			cpumask_clear_cpu(i, &search_cpus);
+
+			if (!cpu_online(i) || cpu_isolated(i))
+				continue;
+
+			isolated_candidate = i;
+
 			if (avoid_prev_cpu && i == task_cpu(p))
 				continue;
 
-			if (!cpu_online(i) || cpu_isolated(i) || is_reserved(i))
-				continue;
-
-			if (walt_cpu_high_irqload(i))
+			if (walt_cpu_high_irqload(i) || is_reserved(i))
 				continue;
 
 			trace_sched_cpu_util(i);
@@ -7247,6 +7270,12 @@
 		? best_active_cpu
 		: best_idle_cpu;
 
+	if (target_cpu == -1 && cpu_isolated(task_cpu(p)) &&
+			isolated_candidate != -1) {
+		target_cpu = isolated_candidate;
+		fbt_env->avoid_prev_cpu = true;
+	}
+
 	trace_sched_find_best_target(p, prefer_idle, min_util, cpu,
 				     best_idle_cpu, best_active_cpu,
 				     target_cpu);
@@ -7357,6 +7386,12 @@
 }
 #endif
 
+enum fastpaths {
+	NONE = 0,
+	SYNC_WAKEUP,
+	PREV_CPU_BIAS,
+};
+
 static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync)
 {
 	bool boosted, prefer_idle;
@@ -7367,6 +7402,7 @@
 	struct cpumask *rtg_target = find_rtg_target(p);
 	struct find_best_target_env fbt_env;
 	u64 start_t = 0;
+	int fastpath = 0;
 
 	if (trace_sched_task_util_enabled())
 		start_t = sched_clock();
@@ -7403,12 +7439,17 @@
 		if (bias_to_waker_cpu(p, cpu, rtg_target)) {
 			schedstat_inc(p->se.statistics.nr_wakeups_secb_sync);
 			schedstat_inc(this_rq()->eas_stats.secb_sync);
-			return cpu;
+			target_cpu = cpu;
+			fastpath = SYNC_WAKEUP;
+			goto out;
 		}
 	}
 
-	if (bias_to_prev_cpu(p, rtg_target))
-		return prev_cpu;
+	if (bias_to_prev_cpu(p, rtg_target)) {
+		target_cpu = prev_cpu;
+		fastpath = PREV_CPU_BIAS;
+		goto out;
+	}
 
 	rcu_read_lock();
 
@@ -7495,11 +7536,12 @@
 	schedstat_inc(this_rq()->eas_stats.secb_count);
 
 unlock:
-	trace_sched_task_util(p, next_cpu, backup_cpu, target_cpu, sync,
-			      fbt_env.need_idle, fbt_env.placement_boost,
-			      rtg_target ? cpumask_first(rtg_target) : -1,
-			      start_t);
 	rcu_read_unlock();
+out:
+	trace_sched_task_util(p, next_cpu, backup_cpu, target_cpu, sync,
+			      fbt_env.need_idle, fastpath,
+			      fbt_env.placement_boost, rtg_target ?
+			      cpumask_first(rtg_target) : -1, start_t);
 	return target_cpu;
 }
 
@@ -8790,13 +8832,12 @@
 	int max_cap_cpu;
 	unsigned long flags;
 
-	capacity = min(capacity, thermal_cap(cpu));
-
-	cpu_rq(cpu)->cpu_capacity_orig = capacity;
-
 	capacity *= arch_scale_max_freq_capacity(sd, cpu);
 	capacity >>= SCHED_CAPACITY_SHIFT;
 
+	capacity = min(capacity, thermal_cap(cpu));
+	cpu_rq(cpu)->cpu_capacity_orig = capacity;
+
 	mcc = &cpu_rq(cpu)->rd->max_cpu_capacity;
 
 	raw_spin_lock_irqsave(&mcc->lock, flags);
@@ -9668,8 +9709,11 @@
 	if (busiest->group_type == group_imbalanced)
 		goto force_balance;
 
-	/* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
-	if (env->idle == CPU_NEWLY_IDLE && group_has_capacity(env, local) &&
+	/*
+	 * When dst_cpu is idle, prevent SMP nice and/or asymmetric group
+	 * capacities from resulting in underutilization due to avg_load.
+	 */
+	if (env->idle != CPU_NOT_IDLE && group_has_capacity(env, local) &&
 	    busiest->group_no_capacity)
 		goto force_balance;
 
@@ -11779,7 +11823,7 @@
 	if (is_max_capacity_cpu(src_cpu))
 		return;
 
-	wc = ktime_get_ns();
+	wc = sched_ktime_clock();
 	for_each_possible_cpu(i) {
 		struct rq *rq = cpu_rq(i);
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 7a32e5a..44c767a 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -839,6 +839,8 @@
 		struct rq *rq = rq_of_rt_rq(rt_rq);
 
 		raw_spin_lock(&rq->lock);
+		update_rq_clock(rq);
+
 		if (rt_rq->rt_time) {
 			u64 runtime;
 
@@ -1834,7 +1836,7 @@
 			if (avoid_prev_cpu && cpu == prev_cpu)
 				continue;
 
-			if (__cpu_overutilized(cpu, util + tutil))
+			if (__cpu_overutilized(cpu, tutil))
 				continue;
 
 			if (cpu_isolated(cpu))
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 61aa3c7..e24df36 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1861,7 +1861,7 @@
 }
 
 #ifdef CONFIG_SCHED_WALT
-extern atomic64_t walt_irq_work_lastq_ws;
+extern u64 walt_load_reported_window;
 
 static inline unsigned long
 cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load)
@@ -1899,7 +1899,7 @@
 		walt_load->prev_window_util = util;
 		walt_load->nl = nl;
 		walt_load->pl = pl;
-		walt_load->ws = atomic64_read(&walt_irq_work_lastq_ws);
+		walt_load->ws = walt_load_reported_window;
 	}
 
 	return (util >= capacity) ? capacity : util;
@@ -2234,8 +2234,13 @@
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
 
 #ifdef CONFIG_SCHED_WALT
+u64 sched_ktime_clock(void);
 void note_task_waking(struct task_struct *p, u64 wallclock);
 #else /* CONFIG_SCHED_WALT */
+static inline u64 sched_ktime_clock(void)
+{
+	return 0;
+}
 static inline void note_task_waking(struct task_struct *p, u64 wallclock) { }
 #endif /* CONFIG_SCHED_WALT */
 
@@ -2276,7 +2281,7 @@
 	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
 					cpu_of(rq)));
 	if (data)
-		data->func(data, ktime_get_ns(), flags);
+		data->func(data, sched_ktime_clock(), flags);
 }
 
 static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags)
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index c8f6e00..4fe1181 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -19,6 +19,7 @@
  *             and Todd Kjos
  */
 
+#include <linux/syscore_ops.h>
 #include <linux/cpufreq.h>
 #include <linux/list_sort.h>
 #include <linux/jiffies.h>
@@ -41,14 +42,48 @@
 
 #define EARLY_DETECTION_DURATION 9500000
 
+static ktime_t ktime_last;
+static bool sched_ktime_suspended;
 static struct cpu_cycle_counter_cb cpu_cycle_counter_cb;
 static bool use_cycle_counter;
 DEFINE_MUTEX(cluster_lock);
-atomic64_t walt_irq_work_lastq_ws;
+static atomic64_t walt_irq_work_lastq_ws;
+u64 walt_load_reported_window;
 
 static struct irq_work walt_cpufreq_irq_work;
 static struct irq_work walt_migration_irq_work;
 
+u64 sched_ktime_clock(void)
+{
+	if (unlikely(sched_ktime_suspended))
+		return ktime_to_ns(ktime_last);
+	return ktime_get_ns();
+}
+
+static void sched_resume(void)
+{
+	sched_ktime_suspended = false;
+}
+
+static int sched_suspend(void)
+{
+	ktime_last = ktime_get();
+	sched_ktime_suspended = true;
+	return 0;
+}
+
+static struct syscore_ops sched_syscore_ops = {
+	.resume	= sched_resume,
+	.suspend = sched_suspend
+};
+
+static int __init sched_init_ops(void)
+{
+	register_syscore_ops(&sched_syscore_ops);
+	return 0;
+}
+late_initcall(sched_init_ops);
+
 static void acquire_rq_locks_irqsave(const cpumask_t *cpus,
 				     unsigned long *flags)
 {
@@ -269,12 +304,7 @@
 	u64 old_window_start = rq->window_start;
 
 	delta = wallclock - rq->window_start;
-	/* If the MPM global timer is cleared, set delta as 0 to avoid kernel BUG happening */
-	if (delta < 0) {
-		delta = 0;
-		WARN_ONCE(1, "WALT wallclock appears to have gone backwards or reset\n");
-	}
-
+	BUG_ON(delta < 0);
 	if (delta < sched_ravg_window)
 		return old_window_start;
 
@@ -362,10 +392,17 @@
 	if (!rq->window_start || sched_disable_window_stats)
 		return;
 
+	/*
+	 * We don't have to note down an irqstart event when the cycle
+	 * counter is not used.
+	 */
+	if (!use_cycle_counter)
+		return;
+
 	if (is_idle_task(curr)) {
 		/* We're here without rq->lock held, IRQ disabled */
 		raw_spin_lock(&rq->lock);
-		update_task_cpu_cycles(curr, cpu, ktime_get_ns());
+		update_task_cpu_cycles(curr, cpu, sched_ktime_clock());
 		raw_spin_unlock(&rq->lock);
 	}
 }
@@ -426,7 +463,7 @@
 	cur_jiffies_ts = get_jiffies_64();
 
 	if (is_idle_task(curr))
-		update_task_ravg(curr, rq, IRQ_UPDATE, ktime_get_ns(),
+		update_task_ravg(curr, rq, IRQ_UPDATE, sched_ktime_clock(),
 				 delta);
 
 	nr_windows = cur_jiffies_ts - rq->irqload_ts;
@@ -763,7 +800,7 @@
 	if (sched_disable_window_stats)
 		goto done;
 
-	wallclock = ktime_get_ns();
+	wallclock = sched_ktime_clock();
 
 	update_task_ravg(task_rq(p)->curr, task_rq(p),
 			 TASK_UPDATE,
@@ -847,8 +884,7 @@
 
 	if (p == src_rq->ed_task) {
 		src_rq->ed_task = NULL;
-		if (!dest_rq->ed_task)
-			dest_rq->ed_task = p;
+		dest_rq->ed_task = p;
 	}
 
 done:
@@ -867,6 +903,9 @@
 		rq->window_start = 1;
 		sync_cpu_available = 1;
 		atomic64_set(&walt_irq_work_lastq_ws, rq->window_start);
+		walt_load_reported_window =
+					atomic64_read(&walt_irq_work_lastq_ws);
+
 	} else {
 		struct rq *sync_rq = cpu_rq(cpumask_any(cpu_online_mask));
 
@@ -2048,7 +2087,7 @@
 		return;
 	}
 
-	wallclock = ktime_get_ns();
+	wallclock = sched_ktime_clock();
 	p->ravg.mark_start = p->last_wake_ts = wallclock;
 	p->last_enqueued_ts = wallclock;
 	p->last_switch_out_ts = 0;
@@ -2450,7 +2489,7 @@
 
 				raw_spin_lock_irqsave(&rq->lock, flags);
 				update_task_ravg(rq->curr, rq, TASK_UPDATE,
-						 ktime_get_ns(), 0);
+						 sched_ktime_clock(), 0);
 				raw_spin_unlock_irqrestore(&rq->lock, flags);
 			}
 		}
@@ -2600,7 +2639,7 @@
 		return;
 	}
 
-	wallclock = ktime_get_ns();
+	wallclock = sched_ktime_clock();
 
 	/*
 	 * wakeup of two or more related tasks could race with each other and
@@ -2627,7 +2666,7 @@
 
 	grp->preferred_cluster = best_cluster(grp,
 			combined_demand, group_boost);
-	grp->last_update = ktime_get_ns();
+	grp->last_update = sched_ktime_clock();
 	trace_sched_set_preferred_cluster(grp, combined_demand);
 }
 
@@ -2651,7 +2690,7 @@
 	 * has passed since we last updated preference
 	 */
 	if (abs(new_load - old_load) > sched_ravg_window / 4 ||
-		ktime_get_ns() - grp->last_update > sched_ravg_window)
+		sched_ktime_clock() - grp->last_update > sched_ravg_window)
 		return 1;
 
 	return 0;
@@ -3034,7 +3073,7 @@
 	bool new_task;
 	int i;
 
-	wallclock = ktime_get_ns();
+	wallclock = sched_ktime_clock();
 
 	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
 	update_task_ravg(p, rq, TASK_UPDATE, wallclock, 0);
@@ -3142,7 +3181,8 @@
 	for_each_cpu(cpu, cpu_possible_mask)
 		raw_spin_lock(&cpu_rq(cpu)->lock);
 
-	wc = ktime_get_ns();
+	wc = sched_ktime_clock();
+	walt_load_reported_window = atomic64_read(&walt_irq_work_lastq_ws);
 
 	for_each_sched_cluster(cluster) {
 		u64 aggr_grp_load = 0;
diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
index be06e7d4..cac925c 100644
--- a/kernel/sched/walt.h
+++ b/kernel/sched/walt.h
@@ -292,7 +292,7 @@
 
 static inline void walt_update_last_enqueue(struct task_struct *p)
 {
-	p->last_enqueued_ts = ktime_get_ns();
+	p->last_enqueued_ts = sched_ktime_clock();
 }
 extern void walt_rotate_work_init(void);
 extern void walt_rotation_checkpoint(int nr_big);
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index af182a6..3975856 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -16,6 +16,8 @@
 #include <linux/atomic.h>
 #include <linux/audit.h>
 #include <linux/compat.h>
+#include <linux/nospec.h>
+#include <linux/prctl.h>
 #include <linux/sched.h>
 #include <linux/seccomp.h>
 #include <linux/slab.h>
@@ -214,8 +216,11 @@
 	return true;
 }
 
+void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { }
+
 static inline void seccomp_assign_mode(struct task_struct *task,
-				       unsigned long seccomp_mode)
+				       unsigned long seccomp_mode,
+				       unsigned long flags)
 {
 	assert_spin_locked(&task->sighand->siglock);
 
@@ -225,6 +230,9 @@
 	 * filter) is set.
 	 */
 	smp_mb__before_atomic();
+	/* Assume default seccomp processes want spec flaw mitigation. */
+	if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
+		arch_seccomp_spec_mitigate(task);
 	set_tsk_thread_flag(task, TIF_SECCOMP);
 }
 
@@ -292,7 +300,7 @@
  * without dropping the locks.
  *
  */
-static inline void seccomp_sync_threads(void)
+static inline void seccomp_sync_threads(unsigned long flags)
 {
 	struct task_struct *thread, *caller;
 
@@ -333,7 +341,8 @@
 		 * allow one thread to transition the other.
 		 */
 		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
-			seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
+			seccomp_assign_mode(thread, SECCOMP_MODE_FILTER,
+					    flags);
 	}
 }
 
@@ -452,7 +461,7 @@
 
 	/* Now that the new filter is in place, synchronize to all threads. */
 	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
-		seccomp_sync_threads();
+		seccomp_sync_threads(flags);
 
 	return 0;
 }
@@ -712,7 +721,7 @@
 #ifdef TIF_NOTSC
 	disable_TSC();
 #endif
-	seccomp_assign_mode(current, seccomp_mode);
+	seccomp_assign_mode(current, seccomp_mode, 0);
 	ret = 0;
 
 out:
@@ -770,7 +779,7 @@
 	/* Do not free the successfully attached filter. */
 	prepared = NULL;
 
-	seccomp_assign_mode(current, seccomp_mode);
+	seccomp_assign_mode(current, seccomp_mode, flags);
 out:
 	spin_unlock_irq(&current->sighand->siglock);
 	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
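From userspace, SECCOMP_FILTER_FLAG_SPEC_ALLOW is passed to the seccomp() syscall when installing a filter, opting the task out of the implicit mitigation applied in seccomp_assign_mode(). A minimal sketch, assuming a kernel with this patch and a libc whose headers define SYS_seccomp (the flag itself is defined locally in case the headers predate it); the allow-all filter is purely illustrative:

```c
/* Build: gcc spec_allow_demo.c */
#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <linux/seccomp.h>
#include <linux/filter.h>

#ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW
#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
#endif

int main(void)
{
	/* Allow-all classic BPF filter: return SECCOMP_RET_ALLOW for every call. */
	struct sock_filter insns[] = {
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};

	/* Required before installing a filter without CAP_SYS_ADMIN. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
		perror("PR_SET_NO_NEW_PRIVS");
		return 1;
	}

	/* Opt out of the speculation mitigation that would otherwise be
	 * applied via arch_seccomp_spec_mitigate(). */
	if (syscall(SYS_seccomp, SECCOMP_SET_MODE_FILTER,
		    SECCOMP_FILTER_FLAG_SPEC_ALLOW, &prog)) {
		perror("seccomp");
		return 1;
	}

	printf("filter installed with SPEC_ALLOW\n");
	return 0;
}
```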
diff --git a/kernel/signal.c b/kernel/signal.c
index 7ebe236..4364e57 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1392,6 +1392,10 @@
 		return ret;
 	}
 
+	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
+	if (pid == INT_MIN)
+		return -ESRCH;
+
 	read_lock(&tasklist_lock);
 	if (pid != -1) {
 		ret = __kill_pgrp_info(sig, info,
@@ -2495,6 +2499,13 @@
 {
 	struct task_struct *tsk = current;
 
+	/*
+	 * In case the signal mask hasn't changed, there is nothing we need
+	 * to do. The current->blocked shouldn't be modified by other task.
+	 */
+	if (sigequalsets(&tsk->blocked, newset))
+		return;
+
 	spin_lock_irq(&tsk->sighand->siglock);
 	__set_task_blocked(tsk, newset);
 	spin_unlock_irq(&tsk->sighand->siglock);
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index 1650578..10c5a3a 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -121,7 +121,45 @@
 		}
 
 		if (kthread_should_park()) {
+			/*
+			 * Serialize against wakeup. If we take the lock first,
+			 * the wakeup is skipped. If we run later, we observe the
+			 * TASK_RUNNING update from the wakeup path before moving
+			 * forward. This helps avoid the race where the wakeup
+			 * observes TASK_INTERRUPTIBLE, and also observes
+			 * TASK_PARKED in kthread_parkme(), before updating the
+			 * task state to TASK_RUNNING. In that case, the kthread
+			 * gets parked in the TASK_RUNNING state. This results
+			 * in a panic later on in kthread_unpark(), as it sees the
+			 * KTHREAD_IS_PARKED flag set but fails to rebind the
+			 * kthread, because it is not in TASK_PARKED state.
+			 *
+			 * Control thread                      Hotplug Thread
+			 *
+			 * kthread_park()
+			 *   set KTHREAD_SHOULD_PARK
+			 *                                smpboot_thread_fn()
+			 *                                  set_current_state(
+			 *                                  TASK_INTERRUPTIBLE);
+			 *                                  kthread_parkme()
+			 *
+			 *   wake_up_process()
+			 *
+			 * raw_spin_lock_irqsave(&p->pi_lock, flags);
+			 * if (!(p->state & state))
+			 *            goto out;
+			 *
+			 *                                  __set_current_state(
+			 *                                  TASK_PARKED);
+			 *
+			 * if (p->on_rq && ttwu_remote(p, wake_flags))
+			 *   ttwu_remote()
+			 *     p->state = TASK_RUNNING;
+			 *                                   schedule();
+			 */
+			raw_spin_lock(&current->pi_lock);
 			__set_current_state(TASK_RUNNING);
+			raw_spin_unlock(&current->pi_lock);
 			preempt_enable();
 			if (ht->park && td->status == HP_THREAD_ACTIVE) {
 				BUG_ON(td->cpu != smp_processor_id());
diff --git a/kernel/sys.c b/kernel/sys.c
index 4ccf5f0..af33cbf 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -55,6 +55,8 @@
 #include <linux/uidgid.h>
 #include <linux/cred.h>
 
+#include <linux/nospec.h>
+
 #include <linux/kmsg_dump.h>
 /* Move somewhere else to avoid recompiling? */
 #include <generated/utsrelease.h>
@@ -1313,6 +1315,7 @@
 	if (resource >= RLIM_NLIMITS)
 		return -EINVAL;
 
+	resource = array_index_nospec(resource, RLIM_NLIMITS);
 	task_lock(current->group_leader);
 	x = current->signal->rlim[resource];
 	task_unlock(current->group_leader);
@@ -2221,6 +2224,17 @@
 }
 #endif
 
+int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
+{
+	return -EINVAL;
+}
+
+int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
+				    unsigned long ctrl)
+{
+	return -EINVAL;
+}
+
 SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
 		unsigned long, arg4, unsigned long, arg5)
 {
@@ -2440,6 +2454,16 @@
 	case PR_GET_FP_MODE:
 		error = GET_FP_MODE(me);
 		break;
+	case PR_GET_SPECULATION_CTRL:
+		if (arg3 || arg4 || arg5)
+			return -EINVAL;
+		error = arch_prctl_spec_ctrl_get(me, arg2);
+		break;
+	case PR_SET_SPECULATION_CTRL:
+		if (arg4 || arg5)
+			return -EINVAL;
+		error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
+		break;
 	case PR_SET_VMA:
 		error = prctl_set_vma(arg2, arg3, arg4, arg5);
 		break;
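The userspace side of the new prctl options looks roughly as follows. This is a sketch that assumes the upstream constant values (defined locally in case the installed headers predate them); on architectures that do not override the weak arch_prctl_spec_ctrl_*() stubs above, both calls return -EINVAL:

```c
/* Build: gcc ssb_prctl_demo.c */
#include <stdio.h>
#include <sys/prctl.h>

/* Values from include/uapi/linux/prctl.h; defined here in case the
 * installed headers predate the speculation-control interface. */
#ifndef PR_GET_SPECULATION_CTRL
#define PR_GET_SPECULATION_CTRL	52
#define PR_SET_SPECULATION_CTRL	53
#define PR_SPEC_STORE_BYPASS	0
#define PR_SPEC_PRCTL		(1UL << 0)
#define PR_SPEC_ENABLE		(1UL << 1)
#define PR_SPEC_DISABLE		(1UL << 2)
#endif

int main(void)
{
	int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	if (state < 0) {
		/* -EINVAL here means the architecture does not implement
		 * the control (the weak stub was used). */
		perror("PR_GET_SPECULATION_CTRL");
		return 1;
	}
	printf("SSB state: 0x%x%s\n", state,
	       (state & PR_SPEC_PRCTL) ? " (per-task control available)" : "");

	if (state & PR_SPEC_PRCTL) {
		/* Disable speculative store bypass for this task. */
		if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
			  PR_SPEC_DISABLE, 0, 0))
			perror("PR_SET_SPECULATION_CTRL");
	}
	return 0;
}
```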
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index f41c0e9..75ae656 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -398,22 +398,6 @@
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
-#ifdef CONFIG_SCHED_WALT
-	{
-		.procname	= "sched_use_walt_cpu_util",
-		.data		= &sysctl_sched_use_walt_cpu_util,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
-	{
-		.procname	= "sched_use_walt_task_util",
-		.data		= &sysctl_sched_use_walt_task_util,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
-#endif
 	{
 		.procname	= "sched_cstate_aware",
 		.data		= &sysctl_sched_cstate_aware,
@@ -1502,6 +1486,15 @@
 		.extra1		= &zero,
 		.extra2		= &one_hundred,
 	},
+	{
+		.procname       = "want_old_faultaround_pte",
+		.data           = &want_old_faultaround_pte,
+		.maxlen         = sizeof(want_old_faultaround_pte),
+		.mode           = 0644,
+		.proc_handler   = proc_dointvec_minmax,
+		.extra1         = &zero,
+		.extra2         = &one,
+	},
 #ifdef CONFIG_HUGETLB_PAGE
 	{
 		.procname	= "nr_hugepages",
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index d2a20e8..22d7454 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -610,6 +610,14 @@
 	now = ktime_get();
 	/* Find all expired events */
 	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
+		/*
+		 * Required for !SMP because for_each_cpu() unconditionally
+		 * reports CPU0 as set on UP kernels.
+		 */
+		if (!IS_ENABLED(CONFIG_SMP) &&
+		    cpumask_empty(tick_broadcast_oneshot_mask))
+			break;
+
 		td = &per_cpu(tick_cpu_device, cpu);
 		if (td->evtdev->next_event.tv64 <= now.tv64) {
 			cpumask_set_cpu(cpu, tmpmask);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 063dd22..5534be1 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -120,8 +120,9 @@
 				 struct ftrace_ops *op, struct pt_regs *regs);
 #else
 /* See comment below, where ftrace_ops_list_func is defined */
-static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
-#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
+static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip,
+			      struct ftrace_ops *op, struct pt_regs *regs);
+#define ftrace_ops_list_func ftrace_ops_no_ops
 #endif
 
 /*
@@ -5309,7 +5310,8 @@
 	__ftrace_ops_list_func(ip, parent_ip, NULL, regs);
 }
 #else
-static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
+static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip,
+			      struct ftrace_ops *op, struct pt_regs *regs)
 {
 	__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
 }
@@ -5735,14 +5737,17 @@
 	fgraph_graph_time = enable;
 }
 
+void ftrace_graph_return_stub(struct ftrace_graph_ret *trace)
+{
+}
+
 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
 {
 	return 0;
 }
 
 /* The callbacks that hook a function */
-trace_func_graph_ret_t ftrace_graph_return =
-			(trace_func_graph_ret_t)ftrace_stub;
+trace_func_graph_ret_t ftrace_graph_return = ftrace_graph_return_stub;
 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
 static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
 
@@ -5970,7 +5975,7 @@
 		goto out;
 
 	ftrace_graph_active--;
-	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
+	ftrace_graph_return = ftrace_graph_return_stub;
 	ftrace_graph_entry = ftrace_graph_entry_stub;
 	__ftrace_graph_entry = ftrace_graph_entry_stub;
 	ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
diff --git a/kernel/trace/ipc_logging_debug.c b/kernel/trace/ipc_logging_debug.c
index 7a4c630..3a13ef3 100644
--- a/kernel/trace/ipc_logging_debug.c
+++ b/kernel/trace/ipc_logging_debug.c
@@ -78,17 +78,16 @@
 	struct dentry *d = file->f_path.dentry;
 	char *buffer;
 	int bsize;
-	int srcu_idx;
 	int r;
 
-	r = debugfs_use_file_start(d, &srcu_idx);
+	r = debugfs_file_get(d);
 	if (!r) {
 		ilctxt = file->private_data;
 		r = kref_get_unless_zero(&ilctxt->refcount) ? 0 : -EIO;
-	}
-	debugfs_use_file_finish(srcu_idx);
-	if (r)
+	} else {
 		return r;
+	}
+	debugfs_file_put(d);
 
 	buffer = kmalloc(count, GFP_KERNEL);
 	if (!buffer) {
@@ -119,18 +118,17 @@
 	struct ipc_log_context *ilctxt;
 	struct dentry *d = file->f_path.dentry;
 	int bsize = 1;
-	int srcu_idx;
 	int r;
 	char local_buf[3];
 
-	r = debugfs_use_file_start(d, &srcu_idx);
+	r = debugfs_file_get(d);
 	if (!r) {
 		ilctxt = file->private_data;
 		r = kref_get_unless_zero(&ilctxt->refcount) ? 0 : -EIO;
-	}
-	debugfs_use_file_finish(srcu_idx);
-	if (r)
+	} else {
 		return r;
+	}
+	debugfs_file_put(d);
 
 	if (copy_from_user(local_buf, buff, bsize)) {
 		count = -EFAULT;
@@ -172,17 +170,16 @@
 	struct ipc_log_context *ilctxt;
 	struct dentry *d = file->f_path.dentry;
 	int bsize = 2;
-	int srcu_idx;
 	int r;
 
-	r = debugfs_use_file_start(d, &srcu_idx);
+	r = debugfs_file_get(d);
 	if (!r) {
 		ilctxt = file->private_data;
 		r = kref_get_unless_zero(&ilctxt->refcount) ? 0 : -EIO;
-	}
-	debugfs_use_file_finish(srcu_idx);
-	if (r)
+	} else {
 		return r;
+	}
+	debugfs_file_put(d);
 
 	bsize = simple_read_from_buffer(buff, count, ppos,
 				ilctxt->disabled?"1\n":"0\n", bsize);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 046abb0..de11b81 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3078,13 +3078,14 @@
 	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
 		return;
 
-	if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
+	if (cpumask_available(iter->started) &&
+	    cpumask_test_cpu(iter->cpu, iter->started))
 		return;
 
 	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
 		return;
 
-	if (iter->started)
+	if (cpumask_available(iter->started))
 		cpumask_set_cpu(iter->cpu, iter->started);
 
 	/* Don't print started cpu buffer for the first entry of the trace */
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 0193f58..e35a411 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -322,6 +322,9 @@
 
 static int regex_match_front(char *str, struct regex *r, int len)
 {
+	if (len < r->len)
+		return 0;
+
 	if (strncmp(str, r->pattern, r->len) == 0)
 		return 1;
 	return 0;
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 79bbaf0..59b482f 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -478,14 +478,11 @@
 #ifdef CONFIG_PREEMPTIRQ_EVENTS
 	struct irqsoff_store *is = &per_cpu(the_irqsoff,
 						raw_smp_processor_id());
+	u64 delta = sched_clock() - is->ts;
 
-	if (!is->ts) {
-		is->ts = sched_clock();
-		is->caddr[0] = CALLER_ADDR0;
-		is->caddr[1] = CALLER_ADDR1;
-		is->caddr[2] = CALLER_ADDR2;
-		is->caddr[3] = CALLER_ADDR3;
-	}
+	if (delta > sysctl_irqsoff_tracing_threshold_ns)
+		trace_irqs_disable(delta, is->caddr[0], is->caddr[1],
+						is->caddr[2], is->caddr[3]);
 #endif /* CONFIG_PREEMPTIRQ_EVENTS */
 
 	if (!preempt_trace() && irq_trace())
@@ -497,15 +494,12 @@
 #ifdef CONFIG_PREEMPTIRQ_EVENTS
 	struct irqsoff_store *is = &per_cpu(the_irqsoff,
 						raw_smp_processor_id());
-	u64 delta = 0;
 
-	if (is->ts) {
-		delta = sched_clock() - is->ts;
-		is->ts = 0;
-	}
-	if (delta > sysctl_irqsoff_tracing_threshold_ns)
-		trace_irqs_disable(delta, is->caddr[0], is->caddr[1],
-						is->caddr[2], is->caddr[3]);
+	is->ts = sched_clock();
+	is->caddr[0] = CALLER_ADDR0;
+	is->caddr[1] = CALLER_ADDR1;
+	is->caddr[2] = CALLER_ADDR2;
+	is->caddr[3] = CALLER_ADDR3;
 #endif /* CONFIG_PREEMPTIRQ_EVENTS */
 
 	if (!preempt_trace() && irq_trace())
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index bc6c6ec..83afbf2 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -149,6 +149,8 @@
 		return;
 
 	ret = strncpy_from_user(dst, src, maxlen);
+	if (ret == maxlen)
+		dst[--ret] = '\0';
 
 	if (ret < 0) {	/* Failed to fetch string */
 		((u8 *)get_rloc_data(dest))[0] = '\0';
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index d0639d9..c8e7cc0 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -202,7 +202,7 @@
 			lockdep_is_held(&tracepoints_mutex));
 	old = func_add(&tp_funcs, func, prio);
 	if (IS_ERR(old)) {
-		WARN_ON_ONCE(1);
+		WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
 		return PTR_ERR(old);
 	}
 
@@ -235,7 +235,7 @@
 			lockdep_is_held(&tracepoints_mutex));
 	old = func_remove(&tp_funcs, func);
 	if (IS_ERR(old)) {
-		WARN_ON_ONCE(1);
+		WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
 		return PTR_ERR(old);
 	}
 
diff --git a/kernel/user.c b/kernel/user.c
index b069ccb..41e94e4 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -16,6 +16,7 @@
 #include <linux/interrupt.h>
 #include <linux/export.h>
 #include <linux/user_namespace.h>
+#include <linux/proc_fs.h>
 #include <linux/proc_ns.h>
 
 /*
@@ -201,6 +202,7 @@
 		}
 		spin_unlock_irq(&uidhash_lock);
 	}
+	proc_register_uid(uid);
 
 	return up;
 
@@ -222,6 +224,7 @@
 	spin_lock_irq(&uidhash_lock);
 	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
 	spin_unlock_irq(&uidhash_lock);
+	proc_register_uid(GLOBAL_ROOT_UID);
 
 	return 0;
 }
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 04c2289..3806a97 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -5276,7 +5276,7 @@
 
 	ret = device_register(&wq_dev->dev);
 	if (ret) {
-		kfree(wq_dev);
+		put_device(&wq_dev->dev);
 		wq->wq_dev = NULL;
 		return ret;
 	}
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 079d91a..640818c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -269,7 +269,6 @@
 
 config DEBUG_FS
 	bool "Debug Filesystem"
-	select SRCU
 	help
 	  debugfs is a virtual file system that kernel developers use to put
 	  debugging files into.  Enable this option to be able to read and
@@ -707,6 +706,19 @@
 
 source "lib/Kconfig.kasan"
 
+config DEBUG_REFCOUNT
+	bool "Verbose refcount checks"
+	help
+	  Say Y here if you want reference counters (refcount_t and kref) to
+	  generate WARNs on dubious usage. Without this, refcount_t will still
+	  be a saturating counter and avoid use-after-free by turning it into
+	  a resource-leak denial-of-service.
+
+	  Use of this option will increase kernel text size but will alert the
+	  admin of potential abuse.
+
+	  If in doubt, say "N".
+
 endmenu # "Memory Debugging"
 
 config ARCH_HAS_KCOV
diff --git a/lib/kobject.c b/lib/kobject.c
index 763d70a..34f8472 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -234,14 +234,12 @@
 
 		/* be noisy on error issues */
 		if (error == -EEXIST)
-			WARN(1, "%s failed for %s with "
-			     "-EEXIST, don't try to register things with "
-			     "the same name in the same directory.\n",
-			     __func__, kobject_name(kobj));
+			pr_err("%s failed for %s with -EEXIST, don't try to register things with the same name in the same directory.\n",
+			       __func__, kobject_name(kobj));
 		else
-			WARN(1, "%s failed for %s (error: %d parent: %s)\n",
-			     __func__, kobject_name(kobj), error,
-			     parent ? kobject_name(parent) : "'none'");
+			pr_err("%s failed for %s (error: %d parent: %s)\n",
+			       __func__, kobject_name(kobj), error,
+			       parent ? kobject_name(parent) : "'none'");
 	} else
 		kobj->state_in_sysfs = 1;
 
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 98da752..1586dfd 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -83,6 +83,7 @@
 		__u32 result;
 	} test[MAX_SUBTESTS];
 	int (*fill_helper)(struct bpf_test *self);
+	int expected_errcode; /* used when FLAG_EXPECTED_FAIL is set in the aux */
 	__u8 frag_data[MAX_DATA];
 };
 
@@ -1900,7 +1901,9 @@
 		},
 		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
 		{ },
-		{ }
+		{ },
+		.fill_helper = NULL,
+		.expected_errcode = -EINVAL,
 	},
 	{
 		"check: div_k_0",
@@ -1910,7 +1913,9 @@
 		},
 		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
 		{ },
-		{ }
+		{ },
+		.fill_helper = NULL,
+		.expected_errcode = -EINVAL,
 	},
 	{
 		"check: unknown insn",
@@ -1921,7 +1926,9 @@
 		},
 		CLASSIC | FLAG_EXPECTED_FAIL,
 		{ },
-		{ }
+		{ },
+		.fill_helper = NULL,
+		.expected_errcode = -EINVAL,
 	},
 	{
 		"check: out of range spill/fill",
@@ -1931,7 +1938,9 @@
 		},
 		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
 		{ },
-		{ }
+		{ },
+		.fill_helper = NULL,
+		.expected_errcode = -EINVAL,
 	},
 	{
 		"JUMPS + HOLES",
@@ -2023,6 +2032,8 @@
 		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
 		{ },
 		{ },
+		.fill_helper = NULL,
+		.expected_errcode = -EINVAL,
 	},
 	{
 		"check: LDX + RET X",
@@ -2033,6 +2044,8 @@
 		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
 		{ },
 		{ },
+		.fill_helper = NULL,
+		.expected_errcode = -EINVAL,
 	},
 	{	/* Mainly checking JIT here. */
 		"M[]: alt STX + LDX",
@@ -2207,6 +2220,8 @@
 		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
 		{ },
 		{ },
+		.fill_helper = NULL,
+		.expected_errcode = -EINVAL,
 	},
 	{	/* Passes checker but fails during runtime. */
 		"LD [SKF_AD_OFF-1]",
@@ -4803,6 +4818,7 @@
 		{ },
 		{ },
 		.fill_helper = bpf_fill_maxinsns4,
+		.expected_errcode = -EINVAL,
 	},
 	{	/* Mainly checking JIT here. */
 		"BPF_MAXINSNS: Very long jump",
@@ -4858,10 +4874,15 @@
 	{
 		"BPF_MAXINSNS: Jump, gap, jump, ...",
 		{ },
+#ifdef CONFIG_BPF_JIT_ALWAYS_ON
+		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+#else
 		CLASSIC | FLAG_NO_DATA,
+#endif
 		{ },
 		{ { 0, 0xababcbac } },
 		.fill_helper = bpf_fill_maxinsns11,
+		.expected_errcode = -ENOTSUPP,
 	},
 	{
 		"BPF_MAXINSNS: ld_abs+get_processor_id",
@@ -5632,7 +5653,7 @@
 
 		*err = bpf_prog_create(&fp, &fprog);
 		if (tests[which].aux & FLAG_EXPECTED_FAIL) {
-			if (*err == -EINVAL) {
+			if (*err == tests[which].expected_errcode) {
 				pr_cont("PASS\n");
 				/* Verifier rejected filter as expected. */
 				*err = 0;
diff --git a/mm/Kconfig b/mm/Kconfig
index 3363a70..b452197 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -638,6 +638,16 @@
 	  information to userspace via debugfs.
 	  If unsure, say N.
 
+config VMAP_LAZY_PURGING_FACTOR
+	int "multiplier to the size of purged vmap areas"
+	default "8" if ARM
+	default "32"
+	help
+	  It is used as a multiplier for the maximum number of VA pages
+	  purged in a single attempt. On 32-bit, the default value is
+	  lowered to "8" in order to reduce fragmentation of vmalloc space.
+
+
 config GENERIC_EARLY_IOREMAP
 	bool
 
@@ -675,6 +685,7 @@
 	depends on ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
 	depends on NO_BOOTMEM && MEMORY_HOTPLUG
 	depends on !FLATMEM
+	depends on !NEED_PER_CPU_KM
 	help
 	  Ordinarily all struct pages are initialised during early boot in a
 	  single thread. On very large machines this can take a considerable
@@ -746,3 +757,25 @@
 	 (addr, addr + size-bytes) of the process.
 
 	 Any other vaule is ignored.
+
+config ARCH_SUPPORTS_SPECULATIVE_PAGE_FAULT
+       def_bool n
+
+config SPECULATIVE_PAGE_FAULT
+       bool "Speculative page faults"
+       default y
+       depends on ARCH_SUPPORTS_SPECULATIVE_PAGE_FAULT
+       depends on MMU && SMP
+       help
+         Try to handle user space page faults without holding the mmap_sem.
+
+	 This should allow better concurrency for massively threaded processes
+	 since the page fault handler will not wait for other threads' memory
+	 layout changes to be done, assuming that those changes are done in
+	 another part of the process's memory space. This type of page fault
+	 is named a speculative page fault.
+
+	 If the speculative page fault fails because concurrency is
+	 detected or because the underlying PMD or PTE tables are not yet
+	 allocated, the speculative handling is aborted and a classic page
+	 fault is then tried.
diff --git a/mm/cma.c b/mm/cma.c
index 5fc1809..e97ad01 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -438,6 +438,8 @@
 	struct page *page = NULL;
 	int retry_after_sleep = 0;
 	int ret = -ENOMEM;
+	int max_retries = 2;
+	int available_regions = 0;
 
 	if (!cma || !cma->count)
 		return NULL;
@@ -464,9 +466,16 @@
 				bitmap_maxno, start, bitmap_count, mask,
 				offset);
 		if (bitmap_no >= bitmap_maxno) {
-			if (retry_after_sleep < 2) {
+			if (retry_after_sleep < max_retries) {
 				start = 0;
 				/*
+				 * Bump max retries when fewer free regions
+				 * are available.
+				 */
+				if (available_regions < 3)
+					max_retries = 5;
+				available_regions = 0;
+				/*
 				 * Page may be momentarily pinned by some other
 				 * process which has been scheduled out, eg.
 				 * in exit path, during unmap call, or process
@@ -483,6 +492,8 @@
 				break;
 			}
 		}
+
+		available_regions++;
 		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
 		/*
 		 * It's safe to drop the lock here. We've marked this region for
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 6c707bf..27fc9ad 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -126,7 +126,15 @@
 		 */
 		start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT;
 		end_index = (endbyte >> PAGE_SHIFT);
-		if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK) {
+		/*
+		 * The page at end_index will be inclusively discarded
+		 * by invalidate_mapping_pages(), so subtracting 1 from
+		 * end_index means we will skip the last page.  But if endbyte
+		 * is page aligned or is at the end of file, we should not skip
+		 * that page - discarding the last page is safe enough.
+		 */
+		if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK &&
+				endbyte != inode->i_size - 1) {
 			/* First page is tricky as 0 - 1 = -1, but pgoff_t
 			 * is unsigned, so the end_index >= start_index
 			 * check below would be true and we'll discard the whole
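The index arithmetic described in the comment is easier to see with concrete numbers. A small sketch assuming 4 KiB pages; it reproduces only the end_index adjustment, not the rest of the fadvise path:

```c
/* Build: gcc fadvise_range_demo.c */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Show which page range would be discarded for POSIX_FADV_DONTNEED:
 * the last page is only skipped when endbyte stops part-way through it
 * and is not the last byte of the file. */
static void show(unsigned long long offset, unsigned long long endbyte,
		 unsigned long long i_size)
{
	unsigned long long start_index = (offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long long end_index = endbyte >> PAGE_SHIFT;

	if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK && endbyte != i_size - 1)
		end_index--;	/* partial last page: keep it */

	printf("offset=%llu endbyte=%llu -> pages [%llu, %llu]\n",
	       offset, endbyte, start_index, end_index);
}

int main(void)
{
	show(0, 8191, 1 << 20);	/* ends on a page boundary: page 1 discarded  */
	show(0, 6000, 1 << 20);	/* stops mid-page: page 1 kept                */
	show(0, 6000, 6001);	/* mid-page but end of file: page 1 discarded */
	return 0;
}
```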
diff --git a/mm/filemap.c b/mm/filemap.c
index 03c79ef..b27d2ca 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -47,6 +47,8 @@
 
 #include <asm/mman.h>
 
+int want_old_faultaround_pte = 1;
+
 /*
  * Shared mappings implemented 30.11.1994. It's not fully working yet,
  * though.
@@ -618,7 +620,7 @@
 	VM_BUG_ON_PAGE(!PageLocked(new), new);
 	VM_BUG_ON_PAGE(new->mapping, new);
 
-	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
+	error = radix_tree_preload(gfp_mask & GFP_RECLAIM_MASK);
 	if (!error) {
 		struct address_space *mapping = old->mapping;
 		void (*freepage)(struct page *);
@@ -674,7 +676,7 @@
 			return error;
 	}
 
-	error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
+	error = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK);
 	if (error) {
 		if (!huge)
 			mem_cgroup_cancel_charge(page, memcg, false);
@@ -1249,8 +1251,7 @@
 		if (fgp_flags & FGP_ACCESSED)
 			__SetPageReferenced(page);
 
-		err = add_to_page_cache_lru(page, mapping, offset,
-				gfp_mask & GFP_RECLAIM_MASK);
+		err = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
 		if (unlikely(err)) {
 			put_page(page);
 			page = NULL;
@@ -1998,7 +1999,7 @@
 		if (!page)
 			return -ENOMEM;
 
-		ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask & GFP_KERNEL);
+		ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
 		if (ret == 0)
 			ret = mapping->a_ops->readpage(file, page);
 		else if (ret == -EEXIST)
@@ -2288,6 +2289,14 @@
 		if (fe->pte)
 			fe->pte += iter.index - last_pgoff;
 		last_pgoff = iter.index;
+
+		if (want_old_faultaround_pte) {
+			if (fe->address == fe->fault_address)
+				fe->flags &= ~FAULT_FLAG_PREFAULT_OLD;
+			else
+				fe->flags |= FAULT_FLAG_PREFAULT_OLD;
+		}
+
 		if (alloc_set_pte(fe, NULL, page))
 			goto unlock;
 		unlock_page(page);
diff --git a/mm/gup.c b/mm/gup.c
index 6c3b4e8..be4ccdd 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -430,6 +430,9 @@
 	if (vm_flags & (VM_IO | VM_PFNMAP))
 		return -EFAULT;
 
+	if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
+		return -EFAULT;
+
 	if (write) {
 		if (!(vm_flags & VM_WRITE)) {
 			if (!(gup_flags & FOLL_FORCE))
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e2982ea..a557862 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -542,7 +542,8 @@
 
 	VM_BUG_ON_PAGE(!PageCompound(page), page);
 
-	if (mem_cgroup_try_charge(page, vma->vm_mm, gfp, &memcg, true)) {
+	if (mem_cgroup_try_charge(page, vma->vm_mm, gfp | __GFP_NORETRY, &memcg,
+				  true)) {
 		put_page(page);
 		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
@@ -957,8 +958,8 @@
 
 	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
 		pte_t entry;
-		entry = mk_pte(pages[i], vma->vm_page_prot);
-		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+		entry = mk_pte(pages[i], fe->vma_page_prot);
+		entry = maybe_mkwrite(pte_mkdirty(entry), fe->vma_flags);
 		memcg = (void *)page_private(pages[i]);
 		set_page_private(pages[i], 0);
 		page_add_new_anon_rmap(pages[i], fe->vma, haddr, false);
@@ -1060,7 +1061,7 @@
 	}
 
 	if (unlikely(mem_cgroup_try_charge(new_page, vma->vm_mm,
-					huge_gfp, &memcg, true))) {
+				huge_gfp | __GFP_NORETRY, &memcg, true))) {
 		put_page(new_page);
 		split_huge_pmd(vma, fe->pmd, fe->address);
 		if (page)
@@ -1678,7 +1679,7 @@
 				entry = pte_swp_mksoft_dirty(entry);
 		} else {
 			entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
-			entry = maybe_mkwrite(entry, vma);
+			entry = maybe_mkwrite(entry, vma->vm_flags);
 			if (!write)
 				entry = pte_wrprotect(entry);
 			if (!young)
diff --git a/mm/init-mm.c b/mm/init-mm.c
index 975e49f..4d21629 100644
--- a/mm/init-mm.c
+++ b/mm/init-mm.c
@@ -16,6 +16,9 @@
 
 struct mm_struct init_mm = {
 	.mm_rb		= RB_ROOT,
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	.mm_rb_lock	= __RW_LOCK_UNLOCKED(init_mm.mm_rb_lock),
+#endif
 	.pgd		= swapper_pg_dir,
 	.mm_users	= ATOMIC_INIT(2),
 	.mm_count	= ATOMIC_INIT(1),
diff --git a/mm/internal.h b/mm/internal.h
index 6aa1c51..d9ac6de 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -38,6 +38,26 @@
 
 int do_swap_page(struct fault_env *fe, pte_t orig_pte);
 
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+extern struct vm_area_struct *get_vma(struct mm_struct *mm,
+				      unsigned long addr);
+extern void put_vma(struct vm_area_struct *vma);
+
+static inline bool vma_has_changed(struct fault_env *fe)
+{
+	int ret = RB_EMPTY_NODE(&fe->vma->vm_rb);
+	unsigned int seq = READ_ONCE(fe->vma->vm_sequence.sequence);
+
+	/*
+	 * Matches both the wmb in write_seqlock_{begin,end}() and
+	 * the wmb in vma_rb_erase().
+	 */
+	smp_rmb();
+
+	return ret || seq != fe->sequence;
+}
+#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
+
 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
 		unsigned long floor, unsigned long ceiling);
 
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index ab47d93..7d78b5f 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -860,5 +860,5 @@
 	return 0;
 }
 
-module_init(kasan_memhotplug_init);
+core_initcall(kasan_memhotplug_init);
 #endif
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 898eb26..6b58aaf 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -887,6 +887,8 @@
 		.address = address,
 		.flags = FAULT_FLAG_ALLOW_RETRY,
 		.pmd = pmd,
+		.vma_flags = vma->vm_flags,
+		.vma_page_prot = vma->vm_page_prot,
 	};
 
 	/* we only decide to swapin, if there is enough young ptes */
@@ -963,7 +965,9 @@
 		goto out_nolock;
 	}
 
-	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
+	/* Do not oom kill for khugepaged charges */
+	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY,
+					   &memcg, true))) {
 		result = SCAN_CGROUP_CHARGE_FAIL;
 		goto out_nolock;
 	}
@@ -1009,6 +1013,7 @@
 	if (mm_find_pmd(mm, address) != pmd)
 		goto out;
 
+	vm_write_begin(vma);
 	anon_vma_lock_write(vma->anon_vma);
 
 	pte = pte_offset_map(pmd, address);
@@ -1044,6 +1049,7 @@
 		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
 		spin_unlock(pmd_ptl);
 		anon_vma_unlock_write(vma->anon_vma);
+		vm_write_end(vma);
 		result = SCAN_FAIL;
 		goto out;
 	}
@@ -1078,6 +1084,7 @@
 	set_pmd_at(mm, address, pmd, _pmd);
 	update_mmu_cache_pmd(vma, address, pmd);
 	spin_unlock(pmd_ptl);
+	vm_write_end(vma);
 
 	*hpage = NULL;
 
@@ -1323,7 +1330,9 @@
 		goto out;
 	}
 
-	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
+	/* Do not oom kill for khugepaged charges */
+	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY,
+					   &memcg, true))) {
 		result = SCAN_CGROUP_CHARGE_FAIL;
 		goto out;
 	}
@@ -1678,10 +1687,14 @@
 	spin_unlock(&khugepaged_mm_lock);
 
 	mm = mm_slot->mm;
-	down_read(&mm->mmap_sem);
-	if (unlikely(khugepaged_test_exit(mm)))
-		vma = NULL;
-	else
+	/*
+	 * Don't wait for semaphore (to avoid long wait times).  Just move to
+	 * the next mm on the list.
+	 */
+	vma = NULL;
+	if (unlikely(!down_read_trylock(&mm->mmap_sem)))
+		goto breakouterloop_mmap_sem;
+	if (likely(!khugepaged_test_exit(mm)))
 		vma = find_vma(mm, khugepaged_scan.address);
 
 	progress++;
diff --git a/mm/ksm.c b/mm/ksm.c
index 927aa34..8450f9b 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1487,8 +1487,22 @@
 	tree_rmap_item =
 		unstable_tree_search_insert(rmap_item, page, &tree_page);
 	if (tree_rmap_item) {
+		bool split;
+
 		kpage = try_to_merge_two_pages(rmap_item, page,
 						tree_rmap_item, tree_page);
+		/*
+		 * If both pages we tried to merge belong to the same compound
+		 * page, then we actually ended up increasing the reference
+		 * count of the same compound page twice, and split_huge_page
+		 * failed.
+		 * Here we set a flag if that happened, and we use it later to
+		 * try split_huge_page again. Since we call put_page right
+		 * afterwards, the reference count will be correct and
+		 * split_huge_page should succeed.
+		 */
+		split = PageTransCompound(page)
+			&& compound_head(page) == compound_head(tree_page);
 		put_page(tree_page);
 		if (kpage) {
 			/*
@@ -1513,6 +1527,20 @@
 				break_cow(tree_rmap_item);
 				break_cow(rmap_item);
 			}
+		} else if (split) {
+			/*
+			 * We are here if we tried to merge two pages and
+			 * failed because they both belonged to the same
+			 * compound page. We will split the page now, but no
+			 * merging will take place.
+			 * We do not want to add the cost of a full lock; if
+			 * the page is locked, it is better to skip it and
+			 * perhaps try again later.
+			 */
+			if (!trylock_page(page))
+				return;
+			split_huge_page(page);
+			unlock_page(page);
 		}
 	}
 }
diff --git a/mm/madvise.c b/mm/madvise.c
index 59d1aae..ee7ad9b 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -135,8 +135,10 @@
 	/*
 	 * vm_flags is protected by the mmap_sem held in write mode.
 	 */
-	vma->vm_flags = new_flags;
 
+	vm_write_begin(vma);
+	WRITE_ONCE(vma->vm_flags, new_flags);
+	vm_write_end(vma);
 out:
 	if (error == -ENOMEM)
 		error = -EAGAIN;
@@ -404,9 +406,11 @@
 		.private = tlb,
 	};
 
+	vm_write_begin(vma);
 	tlb_start_vma(tlb, vma);
 	walk_page_range(addr, end, &free_walk);
 	tlb_end_vma(tlb, vma);
+	vm_write_end(vma);
 }
 
 static int madvise_free_single_vma(struct vm_area_struct *vma,
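The madvise hunks above (and the later mlock and mprotect ones) all apply the same discipline: a VMA field that the speculative handler may read without the mmap_sem is only updated with WRITE_ONCE() inside a vm_write_begin()/vm_write_end() pair, so a concurrent lockless reader either sees a consistent value or notices the sequence bump and falls back to the locked path. A condensed sketch of that pattern, assuming the vm_write_begin()/vm_write_end() helpers introduced elsewhere in this series:

#include <linux/mm.h>

/*
 * Sketch only: publish a new vm_flags value in a way the speculative
 * page fault handler can safely observe.  The mmap_sem must be held for
 * writing, exactly as for any other vm_flags update.
 */
static void vma_set_flags_protected(struct vm_area_struct *vma,
				    unsigned long new_flags)
{
	vm_write_begin(vma);			/* bump vm_sequence (odd) */
	WRITE_ONCE(vma->vm_flags, new_flags);
	vm_write_end(vma);			/* bump vm_sequence (even) */
}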
diff --git a/mm/memory.c b/mm/memory.c
index cc6ab38..7a88700 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -75,6 +75,9 @@
 
 #include "internal.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/pagefault.h>
+
 #if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
 #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
 #endif
@@ -549,7 +552,9 @@
 		 * Hide vma from rmap and truncate_pagecache before freeing
 		 * pgtables
 		 */
+		vm_write_begin(vma);
 		unlink_anon_vmas(vma);
+		vm_write_end(vma);
 		unlink_file_vma(vma);
 
 		if (is_vm_hugetlb_page(vma)) {
@@ -563,7 +568,9 @@
 			       && !is_vm_hugetlb_page(next)) {
 				vma = next;
 				next = vma->vm_next;
+				vm_write_begin(vma);
 				unlink_anon_vmas(vma);
+				vm_write_end(vma);
 				unlink_file_vma(vma);
 			}
 			free_pgd_range(tlb, addr, vma->vm_end,
@@ -689,7 +696,8 @@
 	if (page)
 		dump_page(page, "bad pte");
 	pr_alert("addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
-		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
+		 (void *)addr, READ_ONCE(vma->vm_flags), vma->anon_vma,
+		 mapping, index);
 	/*
 	 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
 	 */
@@ -703,7 +711,8 @@
 }
 
 /*
- * vm_normal_page -- This function gets the "struct page" associated with a pte.
+ * __vm_normal_page -- This function gets the "struct page" associated with
+ * a pte.
  *
  * "Special" mappings do not wish to be associated with a "struct page" (either
  * it doesn't exist, or it exists but they don't want to touch it). In this
@@ -749,8 +758,8 @@
 #else
 # define HAVE_PTE_SPECIAL 0
 #endif
-struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
-				pte_t pte)
+struct page *__vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+			      pte_t pte, unsigned long vma_flags)
 {
 	unsigned long pfn = pte_pfn(pte);
 
@@ -759,7 +768,7 @@
 			goto check_pfn;
 		if (vma->vm_ops && vma->vm_ops->find_special_page)
 			return vma->vm_ops->find_special_page(vma, addr);
-		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
+		if (vma_flags & (VM_PFNMAP | VM_MIXEDMAP))
 			return NULL;
 		if (!is_zero_pfn(pfn))
 			print_bad_pte(vma, addr, pte, NULL);
@@ -767,9 +776,13 @@
 	}
 
 	/* !HAVE_PTE_SPECIAL case follows: */
+	/*
+	 * This part should never get called when CONFIG_SPECULATIVE_PAGE_FAULT
+	 * is set. This is mainly because we can't rely on vm_start.
+	 */
 
-	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
-		if (vma->vm_flags & VM_MIXEDMAP) {
+	if (unlikely(vma_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
+		if (vma_flags & VM_MIXEDMAP) {
 			if (!pfn_valid(pfn))
 				return NULL;
 			goto out;
@@ -778,7 +791,7 @@
 			off = (addr - vma->vm_start) >> PAGE_SHIFT;
 			if (pfn == vma->vm_pgoff + off)
 				return NULL;
-			if (!is_cow_mapping(vma->vm_flags))
+			if (!is_cow_mapping(vma_flags))
 				return NULL;
 		}
 	}
@@ -1285,6 +1298,7 @@
 	unsigned long next;
 
 	BUG_ON(addr >= end);
+	vm_write_begin(vma);
 	tlb_start_vma(tlb, vma);
 	pgd = pgd_offset(vma->vm_mm, addr);
 	do {
@@ -1294,6 +1308,7 @@
 		next = zap_pud_range(tlb, vma, pgd, addr, next, details);
 	} while (pgd++, addr = next, addr != end);
 	tlb_end_vma(tlb, vma);
+	vm_write_end(vma);
 }
 
 
@@ -1961,6 +1976,145 @@
 }
 EXPORT_SYMBOL_GPL(apply_to_page_range);
 
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+static bool pte_spinlock(struct mm_struct *mm,
+			struct fault_env *fe)
+{
+	bool ret = false;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	pmd_t pmdval;
+#endif
+
+	/* Check if vma is still valid */
+	if (!(fe->flags & FAULT_FLAG_SPECULATIVE)) {
+		fe->ptl = pte_lockptr(mm, fe->pmd);
+		spin_lock(fe->ptl);
+		return true;
+	}
+
+	local_irq_disable();
+	if (vma_has_changed(fe)) {
+		trace_spf_vma_changed(_RET_IP_, fe->vma, fe->address);
+		goto out;
+	}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	/*
+	 * We check that the pmd value is still the same to ensure that there
+	 * is no huge page collapse operation in progress behind our back.
+	 */
+	pmdval = READ_ONCE(*fe->pmd);
+	if (!pmd_same(pmdval, fe->orig_pmd)) {
+		trace_spf_pmd_changed(_RET_IP_, fe->vma, fe->address);
+		goto out;
+	}
+#endif
+
+	fe->ptl = pte_lockptr(mm, fe->pmd);
+	if (unlikely(!spin_trylock(fe->ptl))) {
+		trace_spf_pte_lock(_RET_IP_, fe->vma, fe->address);
+		goto out;
+	}
+
+	if (vma_has_changed(fe)) {
+		spin_unlock(fe->ptl);
+		trace_spf_vma_changed(_RET_IP_, fe->vma, fe->address);
+		goto out;
+	}
+
+	ret = true;
+out:
+	local_irq_enable();
+	return ret;
+}
+
+static bool pte_map_lock(struct mm_struct *mm,
+				struct fault_env *fe)
+{
+	bool ret = false;
+	pte_t *pte;
+	spinlock_t *ptl;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	pmd_t pmdval;
+#endif
+
+	if (!(fe->flags & FAULT_FLAG_SPECULATIVE)) {
+		fe->pte = pte_offset_map_lock(mm, fe->pmd,
+					       fe->address, &fe->ptl);
+		return true;
+	}
+
+	/*
+	 * The first vma_has_changed() guarantees the page-tables are still
+	 * valid, having IRQs disabled ensures they stay around, hence the
+	 * second vma_has_changed() to make sure they are still valid once
+	 * we've got the lock. After that a concurrent zap_pte_range() will
+	 * block on the PTL and thus we're safe.
+	 */
+	local_irq_disable();
+	if (vma_has_changed(fe)) {
+		trace_spf_vma_changed(_RET_IP_, fe->vma, fe->address);
+		goto out;
+	}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	/*
+	 * We check that the pmd value is still the same to ensure that there
+	 * is no huge page collapse operation in progress behind our back.
+	 */
+	pmdval = READ_ONCE(*fe->pmd);
+	if (!pmd_same(pmdval, fe->orig_pmd)) {
+		trace_spf_pmd_changed(_RET_IP_, fe->vma, fe->address);
+		goto out;
+	}
+#endif
+
+	/*
+	 * Same as pte_offset_map_lock() except that we call
+	 * spin_trylock() in place of spin_lock() to avoid a race with the
+	 * unmap path, which may hold the lock and wait for this CPU
+	 * to invalidate its TLB while this CPU has IRQs disabled.
+	 * Since we are on the speculative path, accept that it can fail.
+	 */
+	ptl = pte_lockptr(mm, fe->pmd);
+	pte = pte_offset_map(fe->pmd, fe->address);
+	if (unlikely(!spin_trylock(ptl))) {
+		pte_unmap(pte);
+		trace_spf_pte_lock(_RET_IP_, fe->vma, fe->address);
+		goto out;
+	}
+
+	if (vma_has_changed(fe)) {
+		pte_unmap_unlock(pte, ptl);
+		trace_spf_vma_changed(_RET_IP_, fe->vma, fe->address);
+		goto out;
+	}
+
+	fe->pte = pte;
+	fe->ptl = ptl;
+	ret = true;
+out:
+	local_irq_enable();
+	return ret;
+}
+#else
+static inline bool pte_spinlock(struct mm_struct *mm,
+			struct fault_env *fe)
+{
+	fe->ptl = pte_lockptr(mm, fe->pmd);
+	spin_lock(fe->ptl);
+	return true;
+}
+
+static inline bool pte_map_lock(struct mm_struct *mm,
+			struct fault_env *fe)
+{
+	fe->pte = pte_offset_map_lock(mm, fe->pmd,
+				       fe->address, &fe->ptl);
+	return true;
+}
+#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
+
 /*
  * handle_pte_fault chooses page fault handler according to an entry which was
  * read non-atomically.  Before making any commitment, on those architectures
@@ -1968,21 +2122,30 @@
  * parts, do_swap_page must check under lock before unmapping the pte and
  * proceeding (but do_wp_page is only called after already making such a check;
  * and do_anonymous_page can safely check later on).
+ *
+ * pte_unmap_same() returns:
+ *	0			if the PTEs are the same
+ *	VM_FAULT_PTNOTSAME	if the PTEs are different
+ *	VM_FAULT_RETRY		if the VMA has changed behind our back during
+ *				speculative page fault handling.
  */
-static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
-				pte_t *page_table, pte_t orig_pte)
+static inline int pte_unmap_same(struct mm_struct *mm, struct fault_env *fe,
+					pte_t orig_pte)
 {
-	int same = 1;
+	int ret = 0;
+
 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
 	if (sizeof(pte_t) > sizeof(unsigned long)) {
-		spinlock_t *ptl = pte_lockptr(mm, pmd);
-		spin_lock(ptl);
-		same = pte_same(*page_table, orig_pte);
-		spin_unlock(ptl);
+		if (pte_spinlock(mm, fe)) {
+			if (!pte_same(*fe->pte, orig_pte))
+				ret = VM_FAULT_PTNOTSAME;
+			spin_unlock(fe->ptl);
+		} else
+			ret = VM_FAULT_RETRY;
 	}
 #endif
-	pte_unmap(page_table);
-	return same;
+	pte_unmap(fe->pte);
+	return ret;
 }
 
 static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
@@ -2085,7 +2248,7 @@
 
 	flush_cache_page(vma, fe->address, pte_pfn(orig_pte));
 	entry = pte_mkyoung(orig_pte);
-	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+	entry = maybe_mkwrite(pte_mkdirty(entry), fe->vma_flags);
 	if (ptep_set_access_flags(vma, fe->address, fe->pte, entry, 1))
 		update_mmu_cache(vma, fe->address, fe->pte);
 	pte_unmap_unlock(fe->pte, fe->ptl);
@@ -2145,24 +2308,25 @@
 	const unsigned long mmun_start = fe->address & PAGE_MASK;
 	const unsigned long mmun_end = mmun_start + PAGE_SIZE;
 	struct mem_cgroup *memcg;
+	int ret = VM_FAULT_OOM;
 
 	if (unlikely(anon_vma_prepare(vma)))
-		goto oom;
+		goto out;
 
 	if (is_zero_pfn(pte_pfn(orig_pte))) {
 		new_page = alloc_zeroed_user_highpage_movable(vma, fe->address);
 		if (!new_page)
-			goto oom;
+			goto out;
 	} else {
 		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
 				fe->address);
 		if (!new_page)
-			goto oom;
+			goto out;
 		cow_user_page(new_page, old_page, fe->address, vma);
 	}
 
 	if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false))
-		goto oom_free_new;
+		goto out_free_new;
 
 	__SetPageUptodate(new_page);
 
@@ -2171,7 +2335,10 @@
 	/*
 	 * Re-check the pte - we dropped the lock
 	 */
-	fe->pte = pte_offset_map_lock(mm, fe->pmd, fe->address, &fe->ptl);
+	if (!pte_map_lock(mm, fe)) {
+		ret = VM_FAULT_RETRY;
+		goto out_uncharge;
+	}
 	if (likely(pte_same(*fe->pte, orig_pte))) {
 		if (old_page) {
 			if (!PageAnon(old_page)) {
@@ -2183,8 +2350,8 @@
 			inc_mm_counter_fast(mm, MM_ANONPAGES);
 		}
 		flush_cache_page(vma, fe->address, pte_pfn(orig_pte));
-		entry = mk_pte(new_page, vma->vm_page_prot);
-		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+		entry = mk_pte(new_page, fe->vma_page_prot);
+		entry = maybe_mkwrite(pte_mkdirty(entry), fe->vma_flags);
 		/*
 		 * Clear the pte entry and flush it first, before updating the
 		 * pte with the new entry. This will avoid a race condition
@@ -2192,9 +2359,9 @@
 		 * thread doing COW.
 		 */
 		ptep_clear_flush_notify(vma, fe->address, fe->pte);
-		page_add_new_anon_rmap(new_page, vma, fe->address, false);
+		__page_add_new_anon_rmap(new_page, vma, fe->address, false);
 		mem_cgroup_commit_charge(new_page, memcg, false, false);
-		lru_cache_add_active_or_unevictable(new_page, vma);
+		__lru_cache_add_active_or_unevictable(new_page, fe->vma_flags);
 		/*
 		 * We call the notify macro here because, when using secondary
 		 * mmu page tables (such as kvm shadow page tables), we want the
@@ -2245,7 +2412,7 @@
 		 * Don't let another task, with possibly unlocked vma,
 		 * keep the mlocked page.
 		 */
-		if (page_copied && (vma->vm_flags & VM_LOCKED)) {
+		if (page_copied && (fe->vma_flags & VM_LOCKED)) {
 			lock_page(old_page);	/* LRU manipulation */
 			if (PageMlocked(old_page))
 				munlock_vma_page(old_page);
@@ -2254,12 +2421,14 @@
 		put_page(old_page);
 	}
 	return page_copied ? VM_FAULT_WRITE : 0;
-oom_free_new:
+out_uncharge:
+	mem_cgroup_cancel_charge(new_page, memcg, false);
+out_free_new:
 	put_page(new_page);
-oom:
+out:
 	if (old_page)
 		put_page(old_page);
-	return VM_FAULT_OOM;
+	return ret;
 }
 
 /*
@@ -2284,8 +2453,8 @@
 		ret = vma->vm_ops->pfn_mkwrite(vma, &vmf);
 		if (ret & VM_FAULT_ERROR)
 			return ret;
-		fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address,
-				&fe->ptl);
+		if (!pte_map_lock(vma->vm_mm, fe))
+			return VM_FAULT_RETRY;
 		/*
 		 * We might have raced with another page fault while we
 		 * released the pte_offset_map_lock.
@@ -2361,7 +2530,8 @@
 	struct vm_area_struct *vma = fe->vma;
 	struct page *old_page;
 
-	old_page = vm_normal_page(vma, fe->address, orig_pte);
+	old_page = __vm_normal_page(vma, fe->address, orig_pte,
+				     fe->vma_flags);
 	if (!old_page) {
 		/*
 		 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
@@ -2370,7 +2540,7 @@
 		 * We should not cow pages in a shared writeable mapping.
 		 * Just mark the pages writable and/or call ops->pfn_mkwrite.
 		 */
-		if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
+		if ((fe->vma_flags & (VM_WRITE|VM_SHARED)) ==
 				     (VM_WRITE|VM_SHARED))
 			return wp_pfn_shared(fe, orig_pte);
 
@@ -2388,8 +2558,11 @@
 			get_page(old_page);
 			pte_unmap_unlock(fe->pte, fe->ptl);
 			lock_page(old_page);
-			fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd,
-					fe->address, &fe->ptl);
+			if (!pte_map_lock(vma->vm_mm, fe)) {
+				unlock_page(old_page);
+				put_page(old_page);
+				return VM_FAULT_RETRY;
+			}
 			if (!pte_same(*fe->pte, orig_pte)) {
 				unlock_page(old_page);
 				pte_unmap_unlock(fe->pte, fe->ptl);
@@ -2413,7 +2586,7 @@
 			return wp_page_reuse(fe, orig_pte, old_page, 0, 0);
 		}
 		unlock_page(old_page);
-	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
+	} else if (unlikely((fe->vma_flags & (VM_WRITE|VM_SHARED)) ==
 					(VM_WRITE|VM_SHARED))) {
 		return wp_page_shared(fe, orig_pte, old_page);
 	}
@@ -2523,9 +2696,17 @@
 	int exclusive = 0;
 	int ret = 0;
 
-	if (!pte_unmap_same(vma->vm_mm, fe->pmd, fe->pte, orig_pte))
+	ret = pte_unmap_same(vma->vm_mm, fe, orig_pte);
+	if (ret) {
+		/*
+		 * If pte != orig_pte, this means another thread did the
+		 * swap operation behind our back.
+		 * So there is nothing else to do.
+		 */
+		if (ret == VM_FAULT_PTNOTSAME)
+			ret = 0;
 		goto out;
-
+	}
 	entry = pte_to_swp_entry(orig_pte);
 	if (unlikely(non_swap_entry(entry))) {
 		if (is_migration_entry(entry)) {
@@ -2545,11 +2726,16 @@
 					GFP_HIGHUSER_MOVABLE, vma, fe->address);
 		if (!page) {
 			/*
-			 * Back out if somebody else faulted in this pte
-			 * while we released the pte lock.
+			 * Back out if the VMA has changed behind our back
+			 * during a speculative page fault or if somebody else
+			 * faulted in this pte while we released the pte lock.
 			 */
-			fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd,
-					fe->address, &fe->ptl);
+			if (!pte_map_lock(vma->vm_mm, fe)) {
+				delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
+				ret = VM_FAULT_RETRY;
+				goto out;
+			}
+
 			if (likely(pte_same(*fe->pte, orig_pte)))
 				ret = VM_FAULT_OOM;
 			delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
@@ -2603,10 +2789,13 @@
 	}
 
 	/*
-	 * Back out if somebody else already faulted in this pte.
+	 * Back out if the VMA has changed behind our back during a speculative
+	 * page fault or if somebody else already faulted in this pte.
 	 */
-	fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address,
-			&fe->ptl);
+	if (!pte_map_lock(vma->vm_mm, fe)) {
+		ret = VM_FAULT_RETRY;
+		goto out_cancel_cgroup;
+	}
 	if (unlikely(!pte_same(*fe->pte, orig_pte)))
 		goto out_nomap;
 
@@ -2627,9 +2816,9 @@
 
 	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
 	dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
-	pte = mk_pte(page, vma->vm_page_prot);
+	pte = mk_pte(page, fe->vma_page_prot);
 	if ((fe->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
-		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
+		pte = maybe_mkwrite(pte_mkdirty(pte), fe->vma_flags);
 		fe->flags &= ~FAULT_FLAG_WRITE;
 		ret |= VM_FAULT_WRITE;
 		exclusive = RMAP_EXCLUSIVE;
@@ -2643,14 +2832,14 @@
 		mem_cgroup_commit_charge(page, memcg, true, false);
 		activate_page(page);
 	} else { /* ksm created a completely new copy */
-		page_add_new_anon_rmap(page, vma, fe->address, false);
+		__page_add_new_anon_rmap(page, vma, fe->address, false);
 		mem_cgroup_commit_charge(page, memcg, false, false);
-		lru_cache_add_active_or_unevictable(page, vma);
+		__lru_cache_add_active_or_unevictable(page, fe->vma_flags);
 	}
 
 	swap_free(entry);
 	if (mem_cgroup_swap_full(page) ||
-	    (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
+	    (fe->vma_flags & VM_LOCKED) || PageMlocked(page))
 		try_to_free_swap(page);
 	unlock_page(page);
 	if (page != swapcache) {
@@ -2680,8 +2869,9 @@
 out:
 	return ret;
 out_nomap:
-	mem_cgroup_cancel_charge(page, memcg, false);
 	pte_unmap_unlock(fe->pte, fe->ptl);
+out_cancel_cgroup:
+	mem_cgroup_cancel_charge(page, memcg, false);
 out_page:
 	unlock_page(page);
 out_release:
@@ -2703,10 +2893,11 @@
 	struct vm_area_struct *vma = fe->vma;
 	struct mem_cgroup *memcg;
 	struct page *page;
+	int ret = 0;
 	pte_t entry;
 
 	/* File mapping without ->vm_ops ? */
-	if (vma->vm_flags & VM_SHARED)
+	if (fe->vma_flags & VM_SHARED)
 		return VM_FAULT_SIGBUS;
 
 	/*
@@ -2730,11 +2921,19 @@
 	if (!(fe->flags & FAULT_FLAG_WRITE) &&
 			!mm_forbids_zeropage(vma->vm_mm)) {
 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(fe->address),
-						vma->vm_page_prot));
-		fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address,
-				&fe->ptl);
+						fe->vma_page_prot));
+		if (!pte_map_lock(vma->vm_mm, fe))
+			return VM_FAULT_RETRY;
 		if (!pte_none(*fe->pte))
 			goto unlock;
+		/*
+		 * Don't call the userfaultfd handler on the speculative path.
+		 * We already checked that the VMA is not managed through
+		 * userfaultfd, but it may be set behind our back once we have
+		 * locked the pte. In such a case we can ignore it this time.
+		 */
+		if (fe->flags & FAULT_FLAG_SPECULATIVE)
+			goto setpte;
 		/* Deliver the page fault to userland, check inside PT lock */
 		if (userfaultfd_missing(vma)) {
 			pte_unmap_unlock(fe->pte, fe->ptl);
@@ -2760,17 +2959,19 @@
 	 */
 	__SetPageUptodate(page);
 
-	entry = mk_pte(page, vma->vm_page_prot);
-	if (vma->vm_flags & VM_WRITE)
+	entry = mk_pte(page, fe->vma_page_prot);
+	if (fe->vma_flags & VM_WRITE)
 		entry = pte_mkwrite(pte_mkdirty(entry));
 
-	fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address,
-			&fe->ptl);
-	if (!pte_none(*fe->pte))
+	if (!pte_map_lock(vma->vm_mm, fe)) {
+		ret = VM_FAULT_RETRY;
 		goto release;
+	}
+	if (!pte_none(*fe->pte))
+		goto unlock_and_release;
 
 	/* Deliver the page fault to userland, check inside PT lock */
-	if (userfaultfd_missing(vma)) {
+	if (!(fe->flags & FAULT_FLAG_SPECULATIVE) && userfaultfd_missing(vma)) {
 		pte_unmap_unlock(fe->pte, fe->ptl);
 		mem_cgroup_cancel_charge(page, memcg, false);
 		put_page(page);
@@ -2778,9 +2979,9 @@
 	}
 
 	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
-	page_add_new_anon_rmap(page, vma, fe->address, false);
+	__page_add_new_anon_rmap(page, vma, fe->address, false);
 	mem_cgroup_commit_charge(page, memcg, false, false);
-	lru_cache_add_active_or_unevictable(page, vma);
+	__lru_cache_add_active_or_unevictable(page, fe->vma_flags);
 setpte:
 	set_pte_at(vma->vm_mm, fe->address, fe->pte, entry);
 
@@ -2788,11 +2989,13 @@
 	update_mmu_cache(vma, fe->address, fe->pte);
 unlock:
 	pte_unmap_unlock(fe->pte, fe->ptl);
-	return 0;
+	return ret;
+unlock_and_release:
+	pte_unmap_unlock(fe->pte, fe->ptl);
 release:
 	mem_cgroup_cancel_charge(page, memcg, false);
 	put_page(page);
-	goto unlock;
+	return ret;
 oom_free_page:
 	put_page(page);
 oom:
@@ -2897,8 +3100,9 @@
 	 * pte_none() under vmf->ptl protection when we return to
 	 * alloc_set_pte().
 	 */
-	fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address,
-			&fe->ptl);
+	if (!pte_map_lock(vma->vm_mm, fe))
+		return VM_FAULT_RETRY;
+
 	return 0;
 }
 
@@ -2937,7 +3141,7 @@
 	for (i = 0; i < HPAGE_PMD_NR; i++)
 		flush_icache_page(vma, page + i);
 
-	entry = mk_huge_pmd(page, vma->vm_page_prot);
+	entry = mk_huge_pmd(page, fe->vma_page_prot);
 	if (write)
 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 
@@ -3005,15 +3209,19 @@
 		return VM_FAULT_NOPAGE;
 
 	flush_icache_page(vma, page);
-	entry = mk_pte(page, vma->vm_page_prot);
+	entry = mk_pte(page, fe->vma_page_prot);
 	if (write)
-		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+		entry = maybe_mkwrite(pte_mkdirty(entry), fe->vma_flags);
+
+	if (fe->flags & FAULT_FLAG_PREFAULT_OLD)
+		entry = pte_mkold(entry);
+
 	/* copy-on-write page */
-	if (write && !(vma->vm_flags & VM_SHARED)) {
+	if (write && !(fe->vma_flags & VM_SHARED)) {
 		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
-		page_add_new_anon_rmap(page, vma, fe->address, false);
+		__page_add_new_anon_rmap(page, vma, fe->address, false);
 		mem_cgroup_commit_charge(page, memcg, false, false);
-		lru_cache_add_active_or_unevictable(page, vma);
+		__lru_cache_add_active_or_unevictable(page, fe->vma_flags);
 	} else {
 		inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
 		page_add_file_rmap(page, false);
@@ -3026,8 +3234,16 @@
 	return 0;
 }
 
+/*
+ * If the architecture emulates the "accessed" or "young" bit without HW
+ * support, there is not much gain from fault_around.
+ */
 static unsigned long fault_around_bytes __read_mostly =
-	rounddown_pow_of_two(4096);
+#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+	PAGE_SIZE;
+#else
+	rounddown_pow_of_two(65536);
+#endif
 
 #ifdef CONFIG_DEBUG_FS
 static int fault_around_bytes_get(void *data, u64 *val)
@@ -3096,6 +3312,7 @@
 	pgoff_t end_pgoff;
 	int off, ret = 0;
 
+	fe->fault_address = address;
 	nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
 	mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
 
@@ -3301,7 +3518,7 @@
 		return VM_FAULT_SIGBUS;
 	if (!(fe->flags & FAULT_FLAG_WRITE))
 		return do_read_fault(fe, pgoff);
-	if (!(vma->vm_flags & VM_SHARED))
+	if (!(fe->vma_flags & VM_SHARED))
 		return do_cow_fault(fe, pgoff);
 	return do_shared_fault(fe, pgoff);
 }
@@ -3341,22 +3558,22 @@
 	* page table entry is not accessible, so there would be no
 	* concurrent hardware modifications to the PTE.
 	*/
-	fe->ptl = pte_lockptr(vma->vm_mm, fe->pmd);
-	spin_lock(fe->ptl);
+	if (!pte_spinlock(vma->vm_mm, fe))
+		return VM_FAULT_RETRY;
 	if (unlikely(!pte_same(*fe->pte, pte))) {
 		pte_unmap_unlock(fe->pte, fe->ptl);
 		goto out;
 	}
 
 	/* Make it present again */
-	pte = pte_modify(pte, vma->vm_page_prot);
+	pte = pte_modify(pte, fe->vma_page_prot);
 	pte = pte_mkyoung(pte);
 	if (was_writable)
 		pte = pte_mkwrite(pte);
 	set_pte_at(vma->vm_mm, fe->address, fe->pte, pte);
 	update_mmu_cache(vma, fe->address, fe->pte);
 
-	page = vm_normal_page(vma, fe->address, pte);
+	page = __vm_normal_page(vma, fe->address, pte, fe->vma_flags);
 	if (!page) {
 		pte_unmap_unlock(fe->pte, fe->ptl);
 		return 0;
@@ -3383,7 +3600,7 @@
 	 * Flag if the page is shared between multiple address spaces. This
 	 * is later used when determining whether to group tasks together
 	 */
-	if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
+	if (page_mapcount(page) > 1 && (fe->vma_flags & VM_SHARED))
 		flags |= TNF_SHARED;
 
 	last_cpupid = page_cpupid_last(page);
@@ -3397,7 +3614,7 @@
 	}
 
 	/* Migrate to the requested node */
-	migrated = migrate_misplaced_page(page, vma, target_nid);
+	migrated = migrate_misplaced_page(page, fe, target_nid);
 	if (migrated) {
 		page_nid = target_nid;
 		flags |= TNF_MIGRATED;
@@ -3430,7 +3647,7 @@
 				fe->flags);
 
 	/* COW handled on pte level: split pmd */
-	VM_BUG_ON_VMA(fe->vma->vm_flags & VM_SHARED, fe->vma);
+	VM_BUG_ON_VMA(fe->vma_flags & VM_SHARED, fe->vma);
 	split_huge_pmd(fe->vma, fe->pmd, fe->address);
 
 	return VM_FAULT_FALLBACK;
@@ -3458,17 +3675,26 @@
  */
 static int handle_pte_fault(struct fault_env *fe)
 {
-	pte_t entry;
+	pte_t uninitialized_var(entry);
 
 	if (unlikely(pmd_none(*fe->pmd))) {
 		/*
+		 * In the case of the speculative page fault handler we abort
+		 * the speculative path immediately as the pmd is probably
+		 * in the process of being converted into a huge one. We will
+		 * try again while holding the mmap_sem (which implies that the
+		 * collapse operation is done).
+		 */
+		if (fe->flags & FAULT_FLAG_SPECULATIVE)
+			return VM_FAULT_RETRY;
+		/*
 		 * Leave __pte_alloc() until later: because vm_ops->fault may
 		 * want to allocate huge page, and if we expose page table
 		 * for an instant, it will be difficult to retract from
 		 * concurrent faults and from rmap lookups.
 		 */
 		fe->pte = NULL;
-	} else {
+	} else if (!(fe->flags & FAULT_FLAG_SPECULATIVE)) {
 		/* See comment in pte_alloc_one_map() */
 		if (pmd_devmap_trans_unstable(fe->pmd))
 			return 0;
@@ -3477,6 +3703,9 @@
 		 * pmd from under us anymore at this point because we hold the
 		 * mmap_sem read mode and khugepaged takes it in write mode.
 		 * So now it's safe to run pte_offset_map().
+		 * This is not applicable to the speculative page fault handler
+		 * but in that case, the pte is fetched earlier in
+		 * handle_speculative_fault().
 		 */
 		fe->pte = pte_offset_map(fe->pmd, fe->address);
 
@@ -3500,18 +3729,24 @@
 	if (!fe->pte) {
 		if (vma_is_anonymous(fe->vma))
 			return do_anonymous_page(fe);
+		else if (fe->flags & FAULT_FLAG_SPECULATIVE)
+			return VM_FAULT_RETRY;
 		else
 			return do_fault(fe);
 	}
 
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	if (fe->flags & FAULT_FLAG_SPECULATIVE)
+		entry = fe->orig_pte;
+#endif
 	if (!pte_present(entry))
 		return do_swap_page(fe, entry);
 
 	if (pte_protnone(entry) && vma_is_accessible(fe->vma))
 		return do_numa_page(fe, entry);
 
-	fe->ptl = pte_lockptr(fe->vma->vm_mm, fe->pmd);
-	spin_lock(fe->ptl);
+	if (!pte_spinlock(fe->vma->vm_mm, fe))
+		return VM_FAULT_RETRY;
 	if (unlikely(!pte_same(*fe->pte, entry)))
 		goto unlock;
 	if (fe->flags & FAULT_FLAG_WRITE) {
@@ -3551,6 +3786,8 @@
 		.vma = vma,
 		.address = address,
 		.flags = flags,
+		.vma_flags = vma->vm_flags,
+		.vma_page_prot = vma->vm_page_prot,
 	};
 	struct mm_struct *mm = vma->vm_mm;
 	pgd_t *pgd;
@@ -3570,7 +3807,9 @@
 	} else {
 		pmd_t orig_pmd = *fe.pmd;
 		int ret;
-
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+		fe.sequence = raw_read_seqcount(&vma->vm_sequence);
+#endif
 		barrier();
 		if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
 			if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
@@ -3591,6 +3830,247 @@
 	return handle_pte_fault(&fe);
 }
 
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+
+#ifndef __HAVE_ARCH_PTE_SPECIAL
+/* This is required by vm_normal_page() */
+#error "Speculative page fault handler requires __HAVE_ARCH_PTE_SPECIAL"
+#endif
+/*
+ * vm_normal_page() adds some processing which should be done while
+ * holding the mmap_sem.
+ */
+
+/*
+ * Tries to handle the page fault in a speculative way, without grabbing the
+ * mmap_sem.
+ * When VM_FAULT_RETRY is returned, the vma pointer is valid and this vma must
+ * be checked later, once the mmap_sem has been grabbed, by calling
+ * can_reuse_spf_vma().
+ * This is needed as the returned vma is kept in memory until the call to
+ * can_reuse_spf_vma() is made.
+ */
+int __handle_speculative_fault(struct mm_struct *mm, unsigned long address,
+			       unsigned int flags, struct vm_area_struct **vma)
+{
+	struct fault_env fe = {
+		.address = address,
+	};
+	pgd_t *pgd, pgdval;
+	pud_t *pud, pudval;
+	int seq, ret;
+
+	/* Clear flags that may lead to releasing the mmap_sem to retry */
+	flags &= ~(FAULT_FLAG_ALLOW_RETRY|FAULT_FLAG_KILLABLE);
+	flags |= FAULT_FLAG_SPECULATIVE;
+
+	*vma = get_vma(mm, address);
+	if (!*vma)
+		return VM_FAULT_RETRY;
+	fe.vma = *vma;
+
+	/* rmb <-> seqlock,vma_rb_erase() */
+	seq = raw_read_seqcount(&fe.vma->vm_sequence);
+	if (seq & 1) {
+		trace_spf_vma_changed(_RET_IP_, fe.vma, address);
+		return VM_FAULT_RETRY;
+	}
+
+	/*
+	 * Can't call vm_ops services as we don't know what they would do
+	 * with the VMA.
+	 * This includes huge pages from hugetlbfs.
+	 */
+	if (fe.vma->vm_ops) {
+		trace_spf_vma_notsup(_RET_IP_, fe.vma, address);
+		return VM_FAULT_RETRY;
+	}
+
+	/*
+	 * __anon_vma_prepare() requires the mmap_sem to be held
+	 * because vm_next and vm_prev must be safe. This can't be guaranteed
+	 * in the speculative path.
+	 */
+	if (unlikely(!fe.vma->anon_vma)) {
+		trace_spf_vma_notsup(_RET_IP_, fe.vma, address);
+		return VM_FAULT_RETRY;
+	}
+
+	fe.vma_flags = READ_ONCE(fe.vma->vm_flags);
+	fe.vma_page_prot = READ_ONCE(fe.vma->vm_page_prot);
+
+	/* Can't call userland page fault handler in the speculative path */
+	if (unlikely(fe.vma_flags & VM_UFFD_MISSING)) {
+		trace_spf_vma_notsup(_RET_IP_, fe.vma, address);
+		return VM_FAULT_RETRY;
+	}
+
+	if (fe.vma_flags & VM_GROWSDOWN || fe.vma_flags & VM_GROWSUP) {
+		/*
+		 * This could be detected by checking the address against the
+		 * VMA's boundaries, but we want to trace it as not supported
+		 * instead of changed.
+		 */
+		trace_spf_vma_notsup(_RET_IP_, fe.vma, address);
+		return VM_FAULT_RETRY;
+	}
+
+	if (address < READ_ONCE(fe.vma->vm_start)
+	    || READ_ONCE(fe.vma->vm_end) <= address) {
+		trace_spf_vma_changed(_RET_IP_, fe.vma, address);
+		return VM_FAULT_RETRY;
+	}
+
+	if (!arch_vma_access_permitted(fe.vma, flags & FAULT_FLAG_WRITE,
+				       flags & FAULT_FLAG_INSTRUCTION,
+				       flags & FAULT_FLAG_REMOTE))
+		goto out_segv;
+
+	/* This one is required to check that the VMA has write access set */
+	if (flags & FAULT_FLAG_WRITE) {
+		if (unlikely(!(fe.vma_flags & VM_WRITE)))
+			goto out_segv;
+	} else if (unlikely(!(fe.vma_flags & (VM_READ|VM_EXEC|VM_WRITE))))
+		goto out_segv;
+
+#ifdef CONFIG_NUMA
+	struct mempolicy *pol;
+
+	/*
+	 * MPOL_INTERLEAVE implies additional checks in
+	 * mpol_misplaced() which are not compatible with the
+	 * speculative page fault processing.
+	 */
+	pol = __get_vma_policy(fe.vma, address);
+	if (!pol)
+		pol = get_task_policy(current);
+
+	if (pol && pol->mode == MPOL_INTERLEAVE) {
+		trace_spf_vma_notsup(_RET_IP_, fe.vma, address);
+		return VM_FAULT_RETRY;
+	}
+#endif
+
+	/*
+	 * Do a speculative lookup of the PTE entry.
+	 */
+	local_irq_disable();
+	pgd = pgd_offset(mm, address);
+	pgdval = READ_ONCE(*pgd);
+	if (pgd_none(pgdval) || unlikely(pgd_bad(pgdval)))
+		goto out_walk;
+
+	pud = pud_offset(pgd, address);
+	pudval = READ_ONCE(*pud);
+	if (pud_none(pudval) || unlikely(pud_bad(pudval)))
+		goto out_walk;
+
+	fe.pmd = pmd_offset(pud, address);
+	fe.orig_pmd = READ_ONCE(*fe.pmd);
+	/*
+	 * pmd_none could mean that a hugepage collapse is in progress
+	 * behind our back, as collapse_huge_page() marks the pmd before
+	 * invalidating the pte (which is done once the IPI has been caught
+	 * by all CPUs and we have interrupts disabled).
+	 * For this reason we cannot handle THP in a speculative way since we
+	 * can't safely identify an in-progress collapse operation done behind
+	 * our back on that PMD.
+	 * Regarding the order of the following checks, see comment in
+	 * pmd_devmap_trans_unstable()
+	 */
+	if (unlikely(pmd_devmap(fe.orig_pmd) ||
+		     pmd_none(fe.orig_pmd) || pmd_trans_huge(fe.orig_pmd)))
+		goto out_walk;
+
+	/*
+	 * The above does not allocate/instantiate page-tables because doing so
+	 * would lead to the possibility of instantiating page-tables after
+	 * free_pgtables() -- and consequently leaking them.
+	 *
+	 * The result is that we take at least one !speculative fault per PMD
+	 * in order to instantiate it.
+	 */
+
+	fe.pte = pte_offset_map(fe.pmd, address);
+	fe.orig_pte = READ_ONCE(*fe.pte);
+	barrier(); /* See comment in handle_pte_fault() */
+	if (pte_none(fe.orig_pte)) {
+		pte_unmap(fe.pte);
+		fe.pte = NULL;
+	}
+
+	fe.sequence = seq;
+	fe.flags = flags;
+
+	local_irq_enable();
+
+	/*
+	 * We need to re-validate the VMA after checking the bounds, otherwise
+	 * we might have a false positive on the bounds.
+	 */
+	if (read_seqcount_retry(&fe.vma->vm_sequence, seq)) {
+		trace_spf_vma_changed(_RET_IP_, fe.vma, address);
+		return VM_FAULT_RETRY;
+	}
+
+	mem_cgroup_oom_enable();
+	ret = handle_pte_fault(&fe);
+	mem_cgroup_oom_disable();
+
+	/*
+	 * If there is no need to retry, don't return the vma to the caller.
+	 */
+	if (ret != VM_FAULT_RETRY) {
+		count_vm_event(SPECULATIVE_PGFAULT);
+		put_vma(fe.vma);
+		*vma = NULL;
+	}
+
+	/*
+	 * The task may have entered a memcg OOM situation but
+	 * if the allocation error was handled gracefully (no
+	 * VM_FAULT_OOM), there is no need to kill anything.
+	 * Just clean up the OOM state peacefully.
+	 */
+	if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
+		mem_cgroup_oom_synchronize(false);
+	return ret;
+
+out_walk:
+	trace_spf_vma_notsup(_RET_IP_, fe.vma, address);
+	local_irq_enable();
+	return VM_FAULT_RETRY;
+
+out_segv:
+	trace_spf_vma_access(_RET_IP_, fe.vma, address);
+	/*
+	 * We don't return VM_FAULT_RETRY so the caller is not expected to
+	 * retrieve the fetched VMA.
+	 */
+	put_vma(fe.vma);
+	*vma = NULL;
+	return VM_FAULT_SIGSEGV;
+}
+
+/*
+ * This is used to know if the vma fetched in the speculative page fault
+ * handler is still valid when trying the regular fault path while holding the
+ * mmap_sem.
+ * The call to put_vma(vma) must be made after checking the vma's fields, as
+ * the vma may be freed by put_vma(). In such a case it is expected that false
+ * is returned.
+ */
+bool can_reuse_spf_vma(struct vm_area_struct *vma, unsigned long address)
+{
+	bool ret;
+
+	ret = !RB_EMPTY_NODE(&vma->vm_rb) &&
+		vma->vm_start <= address && address < vma->vm_end;
+	put_vma(vma);
+	return ret;
+}
+#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
+
 /*
  * By the time we get here, we already hold the mm semaphore
  *
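The mm/memory.c hunk above only adds the core __handle_speculative_fault()/can_reuse_spf_vma() pair; the per-architecture fault-path glue is not part of this section. The sketch below shows, under that assumption, how such a caller would chain the two helpers following the rules spelled out in the comments above (the VMA returned on VM_FAULT_RETRY is reference-counted and is released by can_reuse_spf_vma() itself):

#include <linux/mm.h>

/*
 * Hypothetical arch glue (illustrative, not from this diff): try the
 * speculative path first, fall back to the classic mmap_sem path on retry.
 */
static int arch_fault_example(struct mm_struct *mm, unsigned long address,
			      unsigned int flags)
{
	struct vm_area_struct *spf_vma = NULL;
	struct vm_area_struct *vma;
	int fault;

	fault = __handle_speculative_fault(mm, address, flags, &spf_vma);
	if (fault != VM_FAULT_RETRY)
		return fault;	/* handled (or SIGSEGV) without the mmap_sem */

	down_read(&mm->mmap_sem);
	if (spf_vma && can_reuse_spf_vma(spf_vma, address))
		vma = spf_vma;			/* same VMA, reuse it */
	else
		vma = find_vma(mm, address);
	/*
	 * ... the usual access checks and
	 *	fault = handle_mm_fault(vma, address, flags);
	 * follow here, exactly as on the non-speculative path ...
	 */
	up_read(&mm->mmap_sem);
	return fault;
}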
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 9547583..ecbe6ec 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -442,8 +442,11 @@
 	struct vm_area_struct *vma;
 
 	down_write(&mm->mmap_sem);
-	for (vma = mm->mmap; vma; vma = vma->vm_next)
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		vm_write_begin(vma);
 		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
+		vm_write_end(vma);
+	}
 	up_write(&mm->mmap_sem);
 }
 
@@ -601,9 +604,11 @@
 {
 	int nr_updated;
 
+	vm_write_begin(vma);
 	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
 	if (nr_updated)
 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
+	vm_write_end(vma);
 
 	return nr_updated;
 }
@@ -704,6 +709,7 @@
 	if (IS_ERR(new))
 		return PTR_ERR(new);
 
+	vm_write_begin(vma);
 	if (vma->vm_ops && vma->vm_ops->set_policy) {
 		err = vma->vm_ops->set_policy(vma, new);
 		if (err)
@@ -711,11 +717,17 @@
 	}
 
 	old = vma->vm_policy;
-	vma->vm_policy = new; /* protected by mmap_sem */
+	/*
+	 * The speculative page fault handler accesses this field without
+	 * holding the mmap_sem.
+	 */
+	WRITE_ONCE(vma->vm_policy, new);
+	vm_write_end(vma);
 	mpol_put(old);
 
 	return 0;
  err_out:
+	vm_write_end(vma);
 	mpol_put(new);
 	return err;
 }
@@ -1265,6 +1277,7 @@
 		     unsigned long maxnode)
 {
 	unsigned long k;
+	unsigned long t;
 	unsigned long nlongs;
 	unsigned long endmask;
 
@@ -1281,13 +1294,19 @@
 	else
 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
 
-	/* When the user specified more nodes than supported just check
-	   if the non supported part is all zero. */
+	/*
+	 * When the user specified more nodes than supported, just check
+	 * that the unsupported part is all zero.
+	 *
+	 * If maxnode has more longs than MAX_NUMNODES, check
+	 * the bits in that area first, and then go on to check
+	 * the remaining bits, which are equal to or bigger than MAX_NUMNODES.
+	 * Otherwise, just check bits [MAX_NUMNODES, maxnode).
+	 */
 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
 		if (nlongs > PAGE_SIZE/sizeof(long))
 			return -EINVAL;
 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
-			unsigned long t;
 			if (get_user(t, nmask + k))
 				return -EFAULT;
 			if (k == nlongs - 1) {
@@ -1300,6 +1319,16 @@
 		endmask = ~0UL;
 	}
 
+	if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
+		unsigned long valid_mask = endmask;
+
+		valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
+		if (get_user(t, nmask + nlongs - 1))
+			return -EFAULT;
+		if (t & valid_mask)
+			return -EINVAL;
+	}
+
 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
 		return -EFAULT;
 	nodes_addr(*nodes)[nlongs-1] &= endmask;
@@ -1426,10 +1455,14 @@
 		goto out_put;
 	}
 
-	if (!nodes_subset(*new, node_states[N_MEMORY])) {
-		err = -EINVAL;
+	task_nodes = cpuset_mems_allowed(current);
+	nodes_and(*new, *new, task_nodes);
+	if (nodes_empty(*new))
 		goto out_put;
-	}
+
+	nodes_and(*new, *new, node_states[N_MEMORY]);
+	if (nodes_empty(*new))
+		goto out_put;
 
 	err = security_task_movememory(task);
 	if (err)
@@ -1565,23 +1598,28 @@
 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
 						unsigned long addr)
 {
-	struct mempolicy *pol = NULL;
+	struct mempolicy *pol;
 
-	if (vma) {
-		if (vma->vm_ops && vma->vm_ops->get_policy) {
-			pol = vma->vm_ops->get_policy(vma, addr);
-		} else if (vma->vm_policy) {
-			pol = vma->vm_policy;
+	if (!vma)
+		return NULL;
 
-			/*
-			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
-			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
-			 * count on these policies which will be dropped by
-			 * mpol_cond_put() later
-			 */
-			if (mpol_needs_cond_ref(pol))
-				mpol_get(pol);
-		}
+	if (vma->vm_ops && vma->vm_ops->get_policy)
+		return vma->vm_ops->get_policy(vma, addr);
+
+	/*
+	 * This could be called without holding the mmap_sem in the
+	 * speculative page fault handler's path.
+	 */
+	pol = READ_ONCE(vma->vm_policy);
+	if (pol) {
+		/*
+		 * shmem_alloc_page() passes MPOL_F_SHARED policy with
+		 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
+		 * count on these policies which will be dropped by
+		 * mpol_cond_put() later
+		 */
+		if (mpol_needs_cond_ref(pol))
+			mpol_get(pol);
 	}
 
 	return pol;
@@ -2139,6 +2177,9 @@
 	case MPOL_INTERLEAVE:
 		return !!nodes_equal(a->v.nodes, b->v.nodes);
 	case MPOL_PREFERRED:
+		/* a's ->flags is the same as b's */
+		if (a->flags & MPOL_F_LOCAL)
+			return true;
 		return a->v.preferred_node == b->v.preferred_node;
 	default:
 		BUG();
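The get_nodes() change above rejects node masks whose bits at or above MAX_NUMNODES are set when MAX_NUMNODES is not a multiple of BITS_PER_LONG. A small worked example of that tail check, using illustrative numbers (BITS_PER_LONG == 64, MAX_NUMNODES == 48, and assuming maxnode covers the whole last long so that endmask is all ones):

#include <linux/errno.h>

/* Illustrative only: bits 48..63 of the user's last long must be clear. */
static int tail_bits_example(unsigned long last_user_long)
{
	const unsigned int max_numnodes = 48;	/* illustrative value */
	unsigned long valid_mask = ~0UL;	/* endmask, assumed all ones */

	valid_mask &= ~((1UL << (max_numnodes % 64)) - 1);
	/* valid_mask == 0xffff000000000000UL */
	return (last_user_long & valid_mask) ? -EINVAL : 0;
}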
diff --git a/mm/migrate.c b/mm/migrate.c
index eb1f043..595b456 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -241,7 +241,7 @@
 
 	/* Recheck VMA as permissions can change since migration started  */
 	if (is_write_migration_entry(entry))
-		pte = maybe_mkwrite(pte, vma);
+		pte = maybe_mkwrite(pte, vma->vm_flags);
 
 #ifdef CONFIG_HUGETLB_PAGE
 	if (PageHuge(new)) {
@@ -1855,7 +1855,7 @@
  * node. Caller is expected to have an elevated reference count on
  * the page that will be dropped by this function before returning.
  */
-int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
+int migrate_misplaced_page(struct page *page, struct fault_env *fe,
 			   int node)
 {
 	pg_data_t *pgdat = NODE_DATA(node);
@@ -1868,7 +1868,7 @@
 	 * with execute permissions as they are probably shared libraries.
 	 */
 	if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
-	    (vma->vm_flags & VM_EXEC))
+	    (fe->vma_flags & VM_EXEC))
 		goto out;
 
 	/*
diff --git a/mm/mlock.c b/mm/mlock.c
index 9cdd063..f648acb 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -438,7 +438,9 @@
 void munlock_vma_pages_range(struct vm_area_struct *vma,
 			     unsigned long start, unsigned long end)
 {
-	vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
+	vm_write_begin(vma);
+	WRITE_ONCE(vma->vm_flags, vma->vm_flags & VM_LOCKED_CLEAR_MASK);
+	vm_write_end(vma);
 
 	while (start < end) {
 		struct page *page;
@@ -563,10 +565,11 @@
 	 * It's okay if try_to_unmap_one unmaps a page just after we
 	 * set VM_LOCKED, populate_vma_page_range will bring it back.
 	 */
-
-	if (lock)
-		vma->vm_flags = newflags;
-	else
+	if (lock) {
+		vm_write_begin(vma);
+		WRITE_ONCE(vma->vm_flags, newflags);
+		vm_write_end(vma);
+	} else
 		munlock_vma_pages_range(vma, start, end);
 
 out:
diff --git a/mm/mmap.c b/mm/mmap.c
index 5df92da..f549597 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -160,6 +160,27 @@
 	}
 }
 
+static void __free_vma(struct vm_area_struct *vma)
+{
+	if (vma->vm_file)
+		fput(vma->vm_file);
+	mpol_put(vma_policy(vma));
+	kmem_cache_free(vm_area_cachep, vma);
+}
+
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+void put_vma(struct vm_area_struct *vma)
+{
+	if (atomic_dec_and_test(&vma->vm_ref_count))
+		__free_vma(vma);
+}
+#else
+static inline void put_vma(struct vm_area_struct *vma)
+{
+	__free_vma(vma);
+}
+#endif
+
 /*
  * Close a vm structure and free it, returning the next.
  */
@@ -170,10 +191,7 @@
 	might_sleep();
 	if (vma->vm_ops && vma->vm_ops->close)
 		vma->vm_ops->close(vma);
-	if (vma->vm_file)
-		fput(vma->vm_file);
-	mpol_put(vma_policy(vma));
-	kmem_cache_free(vm_area_cachep, vma);
+	put_vma(vma);
 	return next;
 }
 
@@ -391,6 +409,14 @@
 #define validate_mm(mm) do { } while (0)
 #endif
 
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+#define mm_rb_write_lock(mm)	write_lock(&(mm)->mm_rb_lock)
+#define mm_rb_write_unlock(mm)	write_unlock(&(mm)->mm_rb_lock)
+#else
+#define mm_rb_write_lock(mm)	do { } while (0)
+#define mm_rb_write_unlock(mm)	do { } while (0)
+#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
+
 RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct, vm_rb,
 		     unsigned long, rb_subtree_gap, vma_compute_subtree_gap)
 
@@ -409,26 +435,37 @@
 }
 
 static inline void vma_rb_insert(struct vm_area_struct *vma,
-				 struct rb_root *root)
+				 struct mm_struct *mm)
 {
+	struct rb_root *root = &mm->mm_rb;
+
 	/* All rb_subtree_gap values must be consistent prior to insertion */
 	validate_mm_rb(root, NULL);
 
 	rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
 }
 
-static void __vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
+static void __vma_rb_erase(struct vm_area_struct *vma, struct mm_struct *mm)
 {
+	struct rb_root *root = &mm->mm_rb;
 	/*
 	 * Note rb_erase_augmented is a fairly large inline function,
 	 * so make sure we instantiate it only once with our desired
 	 * augmented rbtree callbacks.
 	 */
+	mm_rb_write_lock(mm);
 	rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
+	mm_rb_write_unlock(mm); /* wmb */
+
+	/*
+	 * Ensure the removal is complete before clearing the node.
+	 * Matched by vma_has_changed()/handle_speculative_fault().
+	 */
+	RB_CLEAR_NODE(&vma->vm_rb);
 }
 
 static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma,
-						struct rb_root *root,
+						struct mm_struct *mm,
 						struct vm_area_struct *ignore)
 {
 	/*
@@ -436,21 +473,21 @@
 	 * with the possible exception of the "next" vma being erased if
 	 * next->vm_start was reduced.
 	 */
-	validate_mm_rb(root, ignore);
+	validate_mm_rb(&mm->mm_rb, ignore);
 
-	__vma_rb_erase(vma, root);
+	__vma_rb_erase(vma, mm);
 }
 
 static __always_inline void vma_rb_erase(struct vm_area_struct *vma,
-					 struct rb_root *root)
+					 struct mm_struct *mm)
 {
 	/*
 	 * All rb_subtree_gap values must be consistent prior to erase,
 	 * with the possible exception of the vma being erased.
 	 */
-	validate_mm_rb(root, vma);
+	validate_mm_rb(&mm->mm_rb, vma);
 
-	__vma_rb_erase(vma, root);
+	__vma_rb_erase(vma, mm);
 }
 
 /*
@@ -565,10 +602,12 @@
 	 * immediately update the gap to the correct value. Finally we
 	 * rebalance the rbtree after all augmented values have been set.
 	 */
+	mm_rb_write_lock(mm);
 	rb_link_node(&vma->vm_rb, rb_parent, rb_link);
 	vma->rb_subtree_gap = 0;
 	vma_gap_update(vma);
-	vma_rb_insert(vma, &mm->mm_rb);
+	vma_rb_insert(vma, mm);
+	mm_rb_write_unlock(mm);
 }
 
 static void __vma_link_file(struct vm_area_struct *vma)
@@ -644,7 +683,7 @@
 {
 	struct vm_area_struct *next;
 
-	vma_rb_erase_ignore(vma, &mm->mm_rb, ignore);
+	vma_rb_erase_ignore(vma, mm, ignore);
 	next = vma->vm_next;
 	if (has_prev)
 		prev->vm_next = next;
@@ -678,7 +717,7 @@
  */
 int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
-	struct vm_area_struct *expand)
+	struct vm_area_struct *expand, bool keep_locked)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct vm_area_struct *next = vma->vm_next, *orig_vma = vma;
@@ -690,6 +729,30 @@
 	long adjust_next = 0;
 	int remove_next = 0;
 
+	/*
+	 * Why use the vm_raw_write*() functions here to avoid lockdep's warning?
+	 *
+	 * Lockdep complains about a theoretical lock dependency, involving
+	 * 3 locks:
+	 *   mapping->i_mmap_rwsem --> vma->vm_sequence --> fs_reclaim
+	 *
+	 * Here are the major paths leading to this dependency:
+	 *  1. __vma_adjust() mmap_sem  -> vm_sequence -> i_mmap_rwsem
+	 *  2. move_vmap() mmap_sem -> vm_sequence -> fs_reclaim
+	 *  3. __alloc_pages_nodemask() fs_reclaim -> i_mmap_rwsem
+	 *  4. unmap_mapping_range() i_mmap_rwsem -> vm_sequence
+	 *
+	 * So there is no way to solve this easily, especially because in
+	 * unmap_mapping_range() the i_mmap_rwsem is grabbed while the impacted
+	 * VMAs are not yet known.
+	 * However, the way the vm_seq is used guarantees that we will
+	 * never block on it since we just check its value and never wait
+	 * for it to move, see vma_has_changed() and handle_speculative_fault().
+	 */
+	vm_raw_write_begin(vma);
+	if (next)
+		vm_raw_write_begin(next);
+
 	if (next && !insert) {
 		struct vm_area_struct *exporter = NULL, *importer = NULL;
 
@@ -770,8 +833,12 @@
 
 			importer->anon_vma = exporter->anon_vma;
 			error = anon_vma_clone(importer, exporter);
-			if (error)
+			if (error) {
+				if (next && next != vma)
+					vm_raw_write_end(next);
+				vm_raw_write_end(vma);
 				return error;
+			}
 		}
 	}
 again:
@@ -817,17 +884,18 @@
 	}
 
 	if (start != vma->vm_start) {
-		vma->vm_start = start;
+		WRITE_ONCE(vma->vm_start, start);
 		start_changed = true;
 	}
 	if (end != vma->vm_end) {
-		vma->vm_end = end;
+		WRITE_ONCE(vma->vm_end, end);
 		end_changed = true;
 	}
-	vma->vm_pgoff = pgoff;
+	WRITE_ONCE(vma->vm_pgoff, pgoff);
 	if (adjust_next) {
-		next->vm_start += adjust_next << PAGE_SHIFT;
-		next->vm_pgoff += adjust_next;
+		WRITE_ONCE(next->vm_start,
+			   next->vm_start + (adjust_next << PAGE_SHIFT));
+		WRITE_ONCE(next->vm_pgoff, next->vm_pgoff + adjust_next);
 	}
 
 	if (root) {
@@ -892,15 +960,13 @@
 	}
 
 	if (remove_next) {
-		if (file) {
+		if (file)
 			uprobe_munmap(next, next->vm_start, next->vm_end);
-			fput(file);
-		}
 		if (next->anon_vma)
 			anon_vma_merge(vma, next);
 		mm->map_count--;
-		mpol_put(vma_policy(next));
-		kmem_cache_free(vm_area_cachep, next);
+		vm_raw_write_end(next);
+		put_vma(next);
 		/*
 		 * In mprotect's case 6 (see comments on vma_merge),
 		 * we must remove another next too. It would clutter
@@ -914,6 +980,8 @@
 			 * "vma->vm_next" gap must be updated.
 			 */
 			next = vma->vm_next;
+			if (next)
+				vm_raw_write_begin(next);
 		} else {
 			/*
 			 * For the scope of the comment "next" and
@@ -960,6 +1028,11 @@
 	if (insert && file)
 		uprobe_mmap(insert);
 
+	if (next && next != vma)
+		vm_raw_write_end(next);
+	if (!keep_locked)
+		vm_raw_write_end(vma);
+
 	validate_mm(mm);
 
 	return 0;
@@ -1099,13 +1172,13 @@
  * parameter) may establish ptes with the wrong permissions of NNNN
  * instead of the right permissions of XXXX.
  */
-struct vm_area_struct *vma_merge(struct mm_struct *mm,
+struct vm_area_struct *__vma_merge(struct mm_struct *mm,
 			struct vm_area_struct *prev, unsigned long addr,
 			unsigned long end, unsigned long vm_flags,
 			struct anon_vma *anon_vma, struct file *file,
 			pgoff_t pgoff, struct mempolicy *policy,
 			struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
-			const char __user *anon_name)
+			const char __user *anon_name, bool keep_locked)
 {
 	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
 	struct vm_area_struct *area, *next;
@@ -1155,10 +1228,11 @@
 							/* cases 1, 6 */
 			err = __vma_adjust(prev, prev->vm_start,
 					 next->vm_end, prev->vm_pgoff, NULL,
-					 prev);
+					 prev, keep_locked);
 		} else					/* cases 2, 5, 7 */
 			err = __vma_adjust(prev, prev->vm_start,
-					 end, prev->vm_pgoff, NULL, prev);
+					   end, prev->vm_pgoff, NULL, prev,
+					   keep_locked);
 		if (err)
 			return NULL;
 		khugepaged_enter_vma_merge(prev, vm_flags);
@@ -1176,10 +1250,12 @@
 					     anon_name)) {
 		if (prev && addr < prev->vm_end)	/* case 4 */
 			err = __vma_adjust(prev, prev->vm_start,
-					 addr, prev->vm_pgoff, NULL, next);
+					 addr, prev->vm_pgoff, NULL, next,
+					 keep_locked);
 		else {					/* cases 3, 8 */
 			err = __vma_adjust(area, addr, next->vm_end,
-					 next->vm_pgoff - pglen, NULL, next);
+					 next->vm_pgoff - pglen, NULL, next,
+					 keep_locked);
 			/*
 			 * In case 3 area is already equal to next and
 			 * this is a noop, but in case 8 "area" has
@@ -1672,7 +1748,7 @@
 	vma->vm_flags = vm_flags;
 	vma->vm_page_prot = vm_get_page_prot(vm_flags);
 	vma->vm_pgoff = pgoff;
-	INIT_LIST_HEAD(&vma->anon_vma_chain);
+	INIT_VMA(vma);
 
 	if (file) {
 		if (vm_flags & VM_DENYWRITE) {
@@ -1725,13 +1801,15 @@
 out:
 	perf_event_mmap(vma);
 
+	vm_write_begin(vma);
 	vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
 	if (vm_flags & VM_LOCKED) {
 		if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
 					vma == get_gate_vma(current->mm)))
 			mm->locked_vm += (len >> PAGE_SHIFT);
 		else
-			vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
+			WRITE_ONCE(vma->vm_flags,
+				   vma->vm_flags & VM_LOCKED_CLEAR_MASK);
 	}
 
 	if (file)
@@ -1744,9 +1822,10 @@
 	 * then new mapped in-place (which must be aimed as
 	 * a completely new data area).
 	 */
-	vma->vm_flags |= VM_SOFTDIRTY;
+	WRITE_ONCE(vma->vm_flags, vma->vm_flags | VM_SOFTDIRTY);
 
 	vma_set_page_prot(vma);
+	vm_write_end(vma);
 
 	return addr;
 
@@ -2118,15 +2197,11 @@
 EXPORT_SYMBOL(get_unmapped_area);
 
 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
-struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
+static struct vm_area_struct *__find_vma(struct mm_struct *mm,
+					 unsigned long addr)
 {
 	struct rb_node *rb_node;
-	struct vm_area_struct *vma;
-
-	/* Check the cache first. */
-	vma = vmacache_find(mm, addr);
-	if (likely(vma))
-		return vma;
+	struct vm_area_struct *vma = NULL;
 
 	rb_node = mm->mm_rb.rb_node;
 
@@ -2144,13 +2219,40 @@
 			rb_node = rb_node->rb_right;
 	}
 
+	return vma;
+}
+
+struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
+{
+	struct vm_area_struct *vma;
+
+	/* Check the cache first. */
+	vma = vmacache_find(mm, addr);
+	if (likely(vma))
+		return vma;
+
+	vma = __find_vma(mm, addr);
 	if (vma)
 		vmacache_update(addr, vma);
 	return vma;
 }
-
 EXPORT_SYMBOL(find_vma);
 
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+struct vm_area_struct *get_vma(struct mm_struct *mm, unsigned long addr)
+{
+	struct vm_area_struct *vma = NULL;
+
+	read_lock(&mm->mm_rb_lock);
+	vma = __find_vma(mm, addr);
+	if (vma)
+		atomic_inc(&vma->vm_ref_count);
+	read_unlock(&mm->mm_rb_lock);
+
+	return vma;
+}
+#endif
+
 /*
  * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
  */
@@ -2380,8 +2482,8 @@
 					mm->locked_vm += grow;
 				vm_stat_account(mm, vma->vm_flags, grow);
 				anon_vma_interval_tree_pre_update_vma(vma);
-				vma->vm_start = address;
-				vma->vm_pgoff -= grow;
+				WRITE_ONCE(vma->vm_start, address);
+				WRITE_ONCE(vma->vm_pgoff, vma->vm_pgoff - grow);
 				anon_vma_interval_tree_post_update_vma(vma);
 				vma_gap_update(vma);
 				spin_unlock(&mm->page_table_lock);
@@ -2523,7 +2625,7 @@
 	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
 	vma->vm_prev = NULL;
 	do {
-		vma_rb_erase(vma, &mm->mm_rb);
+		vma_rb_erase(vma, mm);
 		mm->map_count--;
 		tail_vma = vma;
 		vma = vma->vm_next;
@@ -2563,7 +2665,7 @@
 	/* most fields are the same, copy all, and then fixup */
 	*new = *vma;
 
-	INIT_LIST_HEAD(&new->anon_vma_chain);
+	INIT_VMA(new);
 
 	if (new_below)
 		new->vm_end = addr;
@@ -2920,7 +3022,7 @@
 		return -ENOMEM;
 	}
 
-	INIT_LIST_HEAD(&vma->anon_vma_chain);
+	INIT_VMA(vma);
 	vma->vm_mm = mm;
 	vma->vm_start = addr;
 	vma->vm_end = addr + len;
@@ -3083,9 +3185,21 @@
 
 	if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
 		return NULL;	/* should never get here */
-	new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
-			    vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
-			    vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
+
+	/* There are 3 cases to manage here:
+	 *     AAAA            AAAA              AAAA              AAAA
+	 * PPPP....      PPPP......NNNN      PPPP....NNNN      PP........NN
+	 * PPPPPPPP(A)   PPPP..NNNNNNNN(B)   PPPPPPPPPPPP(1)       NULL
+	 *                                   PPPPPPPPNNNN(2)
+	 *                                   PPPPNNNNNNNN(3)
+	 *
+	 * new_vma == prev in cases A, 1, 2
+	 * new_vma == next in cases B, 3
+	 */
+	new_vma = __vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
+			      vma->anon_vma, vma->vm_file, pgoff,
+			      vma_policy(vma), vma->vm_userfaultfd_ctx,
+				vma_get_anon_name(vma), true);
 	if (new_vma) {
 		/*
 		 * Source vma may have been merged into new_vma
@@ -3118,13 +3232,22 @@
 		new_vma->vm_pgoff = pgoff;
 		if (vma_dup_policy(vma, new_vma))
 			goto out_free_vma;
-		INIT_LIST_HEAD(&new_vma->anon_vma_chain);
+		INIT_VMA(new_vma);
 		if (anon_vma_clone(new_vma, vma))
 			goto out_free_mempol;
 		if (new_vma->vm_file)
 			get_file(new_vma->vm_file);
 		if (new_vma->vm_ops && new_vma->vm_ops->open)
 			new_vma->vm_ops->open(new_vma);
+		/*
+		 * As the VMA is linked right now, it may be hit by the
+		 * speculative page fault handler. But we don't want it
+		 * to start mapping pages in this area until the caller has
+		 * potentially moved the ptes from the source VMA. To prevent
+		 * that we protect it right now, and let the caller unprotect
+		 * it once the move is done.
+		 */
+		vm_raw_write_begin(new_vma);
 		vma_link(mm, new_vma, prev, rb_link, rb_parent);
 		*need_rmap_locks = false;
 	}
@@ -3256,7 +3379,7 @@
 	if (unlikely(vma == NULL))
 		return ERR_PTR(-ENOMEM);
 
-	INIT_LIST_HEAD(&vma->anon_vma_chain);
+	INIT_VMA(vma);
 	vma->vm_mm = mm;
 	vma->vm_start = addr;
 	vma->vm_end = addr + len;
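Taken together, the mmap.c hunks above give VMAs a reference count and a lookup path (get_vma()) that does not need the mmap_sem. The lifetime rule they establish is sketched below: a VMA obtained from get_vma() cannot be freed until the matching put_vma(), even if it is unmapped concurrently. This is a sketch only; the helper name is hypothetical.

#include <linux/mm.h>

/*
 * Sketch of the get_vma()/put_vma() pairing: read a field of the VMA
 * covering @addr without taking the mmap_sem.
 */
static unsigned long vma_start_lockless(struct mm_struct *mm,
					unsigned long addr)
{
	struct vm_area_struct *vma;
	unsigned long start;

	vma = get_vma(mm, addr);	/* reference taken under mm_rb_lock */
	if (!vma)
		return 0;
	start = READ_ONCE(vma->vm_start);
	put_vma(vma);			/* may free the VMA on last reference */
	return start;
}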
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 1f2c969..60b16418 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -329,12 +329,14 @@
 	 * vm_flags and vm_page_prot are protected by the mmap_sem
 	 * held in write mode.
 	 */
-	vma->vm_flags = newflags;
+	vm_write_begin(vma);
+	WRITE_ONCE(vma->vm_flags, newflags);
 	dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot);
 	vma_set_page_prot(vma);
 
 	change_protection(vma, start, end, vma->vm_page_prot,
 			  dirty_accountable, 0);
+	vm_write_end(vma);
 
 	/*
 	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
diff --git a/mm/mremap.c b/mm/mremap.c
index 1597671..2302762 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -289,6 +289,14 @@
 	if (!new_vma)
 		return -ENOMEM;
 
+	/* new_vma is returned protected by copy_vma, to prevent a speculative
+	 * page fault from being handled in the destination area before we move
+	 * the ptes. Now, we must also protect the source VMA since we don't
+	 * want pages to be mapped behind our back while we are copying the PTEs.
+	 */
+	if (vma != new_vma)
+		vm_raw_write_begin(vma);
+
 	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
 				     need_rmap_locks);
 	if (moved_len < old_len) {
@@ -305,6 +313,8 @@
 		 */
 		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
 				 true);
+		if (vma != new_vma)
+			vm_raw_write_end(vma);
 		vma = new_vma;
 		old_len = new_len;
 		old_addr = new_addr;
@@ -312,7 +322,10 @@
 	} else {
 		arch_remap(mm, old_addr, old_addr + old_len,
 			   new_addr, new_addr + new_len);
+		if (vma != new_vma)
+			vm_raw_write_end(vma);
 	}
+	vm_raw_write_end(new_vma);
 
 	/* Conceal VM_ACCOUNT so old reservation is not undone */
 	if (vm_flags & VM_ACCOUNT) {
diff --git a/mm/nommu.c b/mm/nommu.c
index 44265e0..d033ee8 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1243,7 +1243,7 @@
 	region->vm_flags = vm_flags;
 	region->vm_pgoff = pgoff;
 
-	INIT_LIST_HEAD(&vma->anon_vma_chain);
+	INIT_VMA(vma);
 	vma->vm_flags = vm_flags;
 	vma->vm_pgoff = pgoff;
 
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index e6fbdf4..ca18dc0 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2506,13 +2506,13 @@
 	if (mapping && mapping_cap_account_dirty(mapping)) {
 		struct inode *inode = mapping->host;
 		struct bdi_writeback *wb;
-		bool locked;
+		struct wb_lock_cookie cookie = {};
 
-		wb = unlocked_inode_to_wb_begin(inode, &locked);
+		wb = unlocked_inode_to_wb_begin(inode, &cookie);
 		current->nr_dirtied--;
 		dec_node_page_state(page, NR_DIRTIED);
 		dec_wb_stat(wb, WB_DIRTIED);
-		unlocked_inode_to_wb_end(inode, locked);
+		unlocked_inode_to_wb_end(inode, &cookie);
 	}
 }
 EXPORT_SYMBOL(account_page_redirty);
@@ -2618,15 +2618,15 @@
 	if (mapping_cap_account_dirty(mapping)) {
 		struct inode *inode = mapping->host;
 		struct bdi_writeback *wb;
-		bool locked;
+		struct wb_lock_cookie cookie = {};
 
 		lock_page_memcg(page);
-		wb = unlocked_inode_to_wb_begin(inode, &locked);
+		wb = unlocked_inode_to_wb_begin(inode, &cookie);
 
 		if (TestClearPageDirty(page))
 			account_page_cleaned(page, mapping, wb);
 
-		unlocked_inode_to_wb_end(inode, locked);
+		unlocked_inode_to_wb_end(inode, &cookie);
 		unlock_page_memcg(page);
 	} else {
 		ClearPageDirty(page);
@@ -2658,7 +2658,7 @@
 	if (mapping && mapping_cap_account_dirty(mapping)) {
 		struct inode *inode = mapping->host;
 		struct bdi_writeback *wb;
-		bool locked;
+		struct wb_lock_cookie cookie = {};
 
 		/*
 		 * Yes, Virginia, this is indeed insane.
@@ -2695,7 +2695,7 @@
 		 * always locked coming in here, so we get the desired
 		 * exclusion.
 		 */
-		wb = unlocked_inode_to_wb_begin(inode, &locked);
+		wb = unlocked_inode_to_wb_begin(inode, &cookie);
 		if (TestClearPageDirty(page)) {
 			mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
 			dec_node_page_state(page, NR_FILE_DIRTY);
@@ -2703,7 +2703,7 @@
 			dec_wb_stat(wb, WB_RECLAIMABLE);
 			ret = 1;
 		}
-		unlocked_inode_to_wb_end(inode, locked);
+		unlocked_inode_to_wb_end(inode, &cookie);
 		return ret;
 	}
 	return TestClearPageDirty(page);
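
The page-writeback.c changes switch unlocked_inode_to_wb_begin()/end() from a bare bool to a struct wb_lock_cookie, so the end() side gets back everything begin() had to remember (whether it actually took the lock and, in the kernel, the saved IRQ state). A minimal userspace sketch of the cookie idea, with illustrative names (lock_cookie, wb_begin, wb_end) and a pthread mutex standing in for the irq-safe lock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct lock_cookie {
	bool locked;		/* did begin() actually take the lock? */
	/* the kernel version also stashes the saved IRQ flags here */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Take the lock only when needed; record the decision in the cookie
 * instead of a loose bool that every caller has to thread through. */
static void wb_begin(bool needs_lock, struct lock_cookie *cookie)
{
	cookie->locked = needs_lock;
	if (cookie->locked)
		pthread_mutex_lock(&list_lock);
}

static void wb_end(struct lock_cookie *cookie)
{
	if (cookie->locked)
		pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct lock_cookie cookie = {};	/* same '= {}' idiom as the patch */

	wb_begin(true, &cookie);
	puts("critical section with lock held");
	wb_end(&cookie);

	wb_begin(false, &cookie);	/* fast path: nothing to undo */
	puts("critical section without lock");
	wb_end(&cookie);
	return 0;
}
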
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b74f30e..1345072 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6851,9 +6851,10 @@
 			    mult_frac(zone->managed_pages,
 				      watermark_scale_factor, 10000));
 
-		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + low + min;
-		zone->watermark[WMARK_HIGH] =
-					min_wmark_pages(zone) + low + min * 2;
+		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) +
+					low + min;
+		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) +
+					low + min * 2;
 
 		spin_unlock_irqrestore(&zone->lock, flags);
 	}
diff --git a/mm/percpu.c b/mm/percpu.c
index f014ceb..3794cfc 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -70,6 +70,7 @@
 #include <linux/vmalloc.h>
 #include <linux/workqueue.h>
 #include <linux/kmemleak.h>
+#include <linux/sched.h>
 
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
diff --git a/mm/rmap.c b/mm/rmap.c
index 4d19dd1..24470a6 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1266,7 +1266,7 @@
 }
 
 /**
- * page_add_new_anon_rmap - add pte mapping to a new anonymous page
+ * __page_add_new_anon_rmap - add pte mapping to a new anonymous page
  * @page:	the page to add the mapping to
  * @vma:	the vm area in which the mapping is added
  * @address:	the user virtual address mapped
@@ -1276,12 +1276,11 @@
  * This means the inc-and-test can be bypassed.
  * Page does not have to be locked.
  */
-void page_add_new_anon_rmap(struct page *page,
+void __page_add_new_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address, bool compound)
 {
 	int nr = compound ? hpage_nr_pages(page) : 1;
 
-	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
 	__SetPageSwapBacked(page);
 	if (compound) {
 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
diff --git a/mm/slab.c b/mm/slab.c
index 1f82d16..c59844d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4096,7 +4096,8 @@
 	next_reap_node();
 out:
 	/* Set up the next iteration */
-	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
+	schedule_delayed_work_on(smp_processor_id(), work,
+				round_jiffies_relative(REAPTIMEOUT_AC));
 }
 
 #ifdef CONFIG_SLABINFO
diff --git a/mm/swap.c b/mm/swap.c
index 4dcf852..5827225 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -208,9 +208,10 @@
 {
 	int *pgmoved = arg;
 
-	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-		enum lru_list lru = page_lru_base_type(page);
-		list_move_tail(&page->lru, &lruvec->lists[lru]);
+	if (PageLRU(page) && !PageUnevictable(page)) {
+		del_page_from_lru_list(page, lruvec, page_lru(page));
+		ClearPageActive(page);
+		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
 		(*pgmoved)++;
 	}
 }
@@ -234,7 +235,7 @@
  */
 void rotate_reclaimable_page(struct page *page)
 {
-	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
+	if (!PageLocked(page) && !PageDirty(page) &&
 	    !PageUnevictable(page) && PageLRU(page)) {
 		struct pagevec *pvec;
 		unsigned long flags;
@@ -467,12 +468,12 @@
  * directly back onto it's zone's unevictable list, it does NOT use a
  * per cpu pagevec.
  */
-void lru_cache_add_active_or_unevictable(struct page *page,
-					 struct vm_area_struct *vma)
+void __lru_cache_add_active_or_unevictable(struct page *page,
+					   unsigned long vma_flags)
 {
 	VM_BUG_ON_PAGE(PageLRU(page), page);
 
-	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
+	if (likely((vma_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
 		SetPageActive(page);
 		lru_cache_add(page);
 		return;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 5ac5846..35f882d 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -467,6 +467,10 @@
  * the readahead.
  *
  * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
+ * This is needed to ensure the VMA will not be freed behind our back. In
+ * the case of the speculative page fault handler, this cannot happen, even
+ * if we don't hold the mmap_sem. Callees are assumed to take care of
+ * reading the VMA's fields using READ_ONCE() to get consistent values.
  */
 struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
 			struct vm_area_struct *vma, unsigned long addr)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 9cf2595..7b439c9 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2313,6 +2313,10 @@
 	maxpages = swp_offset(pte_to_swp_entry(
 			swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
 	last_page = swap_header->info.last_page;
+	if (!last_page) {
+		pr_warn("Empty swap-file\n");
+		return 0;
+	}
 	if (last_page > maxpages) {
 		pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
 			maxpages << (PAGE_SHIFT - 10),
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ed89128..7b3865c 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -657,7 +657,8 @@
 
 	log = fls(num_online_cpus());
 
-	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
+	return log * (1UL * CONFIG_VMAP_LAZY_PURGING_FACTOR *
+					1024 * 1024 / PAGE_SIZE);
 }
 
 static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
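
The vmalloc change replaces the hard-coded 32MB-per-log2(cpus) budget with a Kconfig-selectable CONFIG_VMAP_LAZY_PURGING_FACTOR (in MB). A quick worked calculation of the lazy-purge budget for a hypothetical 8-CPU system with 4KB pages, leaving the factor at 32 so it matches the old behaviour (the fls_ul helper is only an illustration of fls()):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define LAZY_FACTOR_MB	32UL	/* CONFIG_VMAP_LAZY_PURGING_FACTOR */

/* position of the highest set bit, like fls() */
static unsigned long fls_ul(unsigned long x)
{
	unsigned long r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned long cpus = 8;
	unsigned long log = fls_ul(cpus);	/* fls(8) == 4 */
	unsigned long pages = log * (LAZY_FACTOR_MB * 1024 * 1024 / PAGE_SIZE);

	/* 4 * 32MB / 4KB = 32768 lazily-freed pages before a purge */
	printf("lazy_max_pages = %lu\n", pages);
	return 0;
}
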
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 658d62c..4daac9a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -87,6 +87,7 @@
 	/* The highest zone to isolate pages for reclaim from */
 	enum zone_type reclaim_idx;
 
+	/* Writepage batching in laptop mode; RECLAIM_WRITE */
 	unsigned int may_writepage:1;
 
 	/* Can mapped pages be reclaimed? */
@@ -1057,6 +1058,15 @@
 		 *    throttling so we could easily OOM just because too many
 		 *    pages are in writeback and there is nothing else to
 		 *    reclaim. Wait for the writeback to complete.
+		 *
+		 * In cases 1) and 2) we activate the pages to get them out of
+		 * the way while we continue scanning for clean pages on the
+		 * inactive list and refilling from the active list. The
+		 * observation here is that waiting for disk writes is more
+		 * expensive than potentially causing reloads down the line.
+		 * Since they're marked for immediate reclaim, they won't put
+		 * memory pressure on the cache working set any longer than it
+		 * takes to write them to disk.
 		 */
 		if (PageWriteback(page)) {
 			/* Case 1 above */
@@ -1064,7 +1074,7 @@
 			    PageReclaim(page) &&
 			    (pgdat && test_bit(PGDAT_WRITEBACK, &pgdat->flags))) {
 				nr_immediate++;
-				goto keep_locked;
+				goto activate_locked;
 
 			/* Case 2 above */
 			} else if (sane_reclaim(sc) ||
@@ -1082,7 +1092,7 @@
 				 */
 				SetPageReclaim(page);
 				nr_writeback++;
-				goto keep_locked;
+				goto activate_locked;
 
 			/* Case 3 above */
 			} else {
@@ -1153,14 +1163,18 @@
 
 		if (PageDirty(page)) {
 			/*
-			 * Only kswapd can writeback filesystem pages to
-			 * avoid risk of stack overflow but only writeback
-			 * if many dirty pages have been encountered.
+			 * Only kswapd can writeback filesystem pages
+			 * to avoid risk of stack overflow. But avoid
+			 * injecting inefficient single-page IO into
+			 * flusher writeback as much as possible: only
+			 * write pages when we've encountered many
+			 * dirty pages, and when we've already scanned
+			 * the rest of the LRU for clean pages and see
+			 * the same dirty pages again (PageReclaim).
 			 */
 			if (page_is_file_cache(page) &&
-					(!current_is_kswapd() ||
-					(pgdat &&
-					 !test_bit(PGDAT_DIRTY, &pgdat->flags)))) {
+			    (!current_is_kswapd() || !PageReclaim(page) ||
+			     !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
 				/*
 				 * Immediately reclaim when written back.
 				 * Similar in principal to deactivate_page()
@@ -1170,7 +1184,7 @@
 				inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
 				SetPageReclaim(page);
 
-				goto keep_locked;
+				goto activate_locked;
 			}
 
 			if (references == PAGEREF_RECLAIM_CLEAN)
@@ -1416,31 +1430,34 @@
 	 * wants to isolate pages it will be able to operate on without
 	 * blocking - clean pages for the most part.
 	 *
-	 * ISOLATE_CLEAN means that only clean pages should be isolated. This
-	 * is used by reclaim when it is cannot write to backing storage
-	 *
 	 * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants to pages
 	 * that it is possible to migrate without blocking
 	 */
-	if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) {
+	if (mode & ISOLATE_ASYNC_MIGRATE) {
 		/* All the caller can do on PageWriteback is block */
 		if (PageWriteback(page))
 			return ret;
 
 		if (PageDirty(page)) {
 			struct address_space *mapping;
-
-			/* ISOLATE_CLEAN means only clean pages */
-			if (mode & ISOLATE_CLEAN)
-				return ret;
+			bool migrate_dirty;
 
 			/*
 			 * Only pages without mappings or that have a
 			 * ->migratepage callback are possible to migrate
-			 * without blocking
+			 * without blocking. However, we can be racing with
+			 * truncation so it's necessary to lock the page
+			 * to stabilise the mapping as truncation holds
+			 * the page lock until after the page is removed
+			 * from the page cache.
 			 */
+			if (!trylock_page(page))
+				return ret;
+
 			mapping = page_mapping(page);
-			if (mapping && !mapping->a_ops->migratepage)
+			migrate_dirty = mapping && mapping->a_ops->migratepage;
+			unlock_page(page);
+			if (!migrate_dirty)
 				return ret;
 		}
 	}
@@ -1831,8 +1848,6 @@
 
 	if (!sc->may_unmap)
 		isolate_mode |= ISOLATE_UNMAPPED;
-	if (!sc->may_writepage)
-		isolate_mode |= ISOLATE_CLEAN;
 
 	spin_lock_irq(&pgdat->lru_lock);
 
@@ -1894,6 +1909,20 @@
 		set_bit(PGDAT_WRITEBACK, &pgdat->flags);
 
 	/*
+	 * If dirty pages are scanned that are not queued for IO, it
+	 * implies that flushers are not doing their job. This can
+	 * happen when memory pressure pushes dirty pages to the end of
+	 * the LRU before the dirty limits are breached and the dirty
+	 * data has expired. It can also happen when the proportion of
+	 * dirty pages grows not through writes but through memory
+	 * pressure reclaiming all the clean cache. And in some cases,
+	 * the flushers simply cannot keep up with the allocation
+	 * rate. Nudge the flusher threads in case they are asleep.
+	 */
+	if (nr_unqueued_dirty == nr_taken)
+		wakeup_flusher_threads(0, WB_REASON_VMSCAN);
+
+	/*
 	 * Legacy memcg will stall in page writeback so avoid forcibly
 	 * stalling here.
 	 */
@@ -1905,12 +1934,7 @@
 		if (nr_dirty && nr_dirty == nr_congested)
 			set_bit(PGDAT_CONGESTED, &pgdat->flags);
 
-		/*
-		 * If dirty pages are scanned that are not queued for IO, it
-		 * implies that flushers are not keeping up. In this case, flag
-		 * the pgdat PGDAT_DIRTY and kswapd will start writing pages from
-		 * reclaim context.
-		 */
+		/* Allow kswapd to start writing pages during reclaim. */
 		if (nr_unqueued_dirty == nr_taken)
 			set_bit(PGDAT_DIRTY, &pgdat->flags);
 
@@ -2020,8 +2044,6 @@
 
 	if (!sc->may_unmap)
 		isolate_mode |= ISOLATE_UNMAPPED;
-	if (!sc->may_writepage)
-		isolate_mode |= ISOLATE_CLEAN;
 
 	spin_lock_irq(&pgdat->lru_lock);
 
@@ -2823,8 +2845,6 @@
 					  struct scan_control *sc)
 {
 	int initial_priority = sc->priority;
-	unsigned long total_scanned = 0;
-	unsigned long writeback_threshold;
 retry:
 	delayacct_freepages_start();
 
@@ -2837,7 +2857,6 @@
 		sc->nr_scanned = 0;
 		shrink_zones(zonelist, sc);
 
-		total_scanned += sc->nr_scanned;
 		if (sc->nr_reclaimed >= sc->nr_to_reclaim)
 			break;
 
@@ -2850,20 +2869,6 @@
 		 */
 		if (sc->priority < DEF_PRIORITY - 2)
 			sc->may_writepage = 1;
-
-		/*
-		 * Try to write back as many pages as we just scanned.  This
-		 * tends to cause slow streaming writers to write data to the
-		 * disk smoothly, at the dirtying rate, which is nice.   But
-		 * that's undesirable in laptop mode, where we *want* lumpy
-		 * writeout.  So in laptop mode, write out the whole world.
-		 */
-		writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
-		if (total_scanned > writeback_threshold) {
-			wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
-						WB_REASON_TRY_TO_FREE_PAGES);
-			sc->may_writepage = 1;
-		}
 	} while (--sc->priority >= 0);
 
 	delayacct_freepages_end();
@@ -3023,7 +3028,7 @@
 	unsigned long nr_reclaimed;
 	struct scan_control sc = {
 		.nr_to_reclaim = SWAP_CLUSTER_MAX,
-		.gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
+		.gfp_mask = memalloc_noio_flags(gfp_mask),
 		.reclaim_idx = gfp_zone(gfp_mask),
 		.order = order,
 		.nodemask = nodemask,
@@ -3038,12 +3043,12 @@
 	 * 1 is returned so that the page allocator does not OOM kill at this
 	 * point.
 	 */
-	if (throttle_direct_reclaim(gfp_mask, zonelist, nodemask))
+	if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))
 		return 1;
 
 	trace_mm_vmscan_direct_reclaim_begin(order,
 				sc.may_writepage,
-				gfp_mask,
+				sc.gfp_mask,
 				sc.reclaim_idx);
 
 	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
@@ -3827,16 +3832,15 @@
 	const unsigned long nr_pages = 1 << order;
 	struct task_struct *p = current;
 	struct reclaim_state reclaim_state;
-	int classzone_idx = gfp_zone(gfp_mask);
 	struct scan_control sc = {
 		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
-		.gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
+		.gfp_mask = memalloc_noio_flags(gfp_mask),
 		.order = order,
 		.priority = NODE_RECLAIM_PRIORITY,
 		.may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
 		.may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
 		.may_swap = 1,
-		.reclaim_idx = classzone_idx,
+		.reclaim_idx = gfp_zone(gfp_mask),
 	};
 
 	cond_resched();
@@ -3846,7 +3850,7 @@
 	 * and RECLAIM_UNMAP.
 	 */
 	p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
-	lockdep_set_current_reclaim_state(gfp_mask);
+	lockdep_set_current_reclaim_state(sc.gfp_mask);
 	reclaim_state.reclaimed_slab = 0;
 	p->reclaim_state = &reclaim_state;
 
@@ -3926,7 +3930,13 @@
  */
 int page_evictable(struct page *page)
 {
-	return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
+	int ret;
+
+	/* Prevent address_space of inode and swap cache from being freed */
+	rcu_read_lock();
+	ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
+	rcu_read_unlock();
+	return ret;
 }
 
 #ifdef CONFIG_SHMEM
diff --git a/mm/vmstat.c b/mm/vmstat.c
index f97fb09..3d128da 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1088,7 +1088,10 @@
 	"vmacache_find_hits",
 	"vmacache_full_flushes",
 #endif
-#endif /* CONFIG_VM_EVENTS_COUNTERS */
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	"speculative_pgfault"
+#endif
+#endif /* CONFIG_VM_EVENT_COUNTERS */
 };
 #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
 
@@ -1356,8 +1359,6 @@
 			return zone == compare;
 	}
 
-	/* The zone must be somewhere! */
-	WARN_ON_ONCE(1);
 	return false;
 }
 
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 7671441..fb3e2a5 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -29,6 +29,7 @@
 #include <linux/net_tstamp.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
+#include <linux/phy.h>
 #include <net/arp.h>
 #include <net/switchdev.h>
 
@@ -658,8 +659,11 @@
 {
 	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	const struct ethtool_ops *ops = vlan->real_dev->ethtool_ops;
+	struct phy_device *phydev = vlan->real_dev->phydev;
 
-	if (ops->get_ts_info) {
+	if (phydev && phydev->drv && phydev->drv->ts_info) {
+		 return phydev->drv->ts_info(phydev, info);
+	} else if (ops->get_ts_info) {
 		return ops->get_ts_info(vlan->real_dev, info);
 	} else {
 		info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 5d26938..1e84c52 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -41,6 +41,9 @@
 #include <linux/module.h>
 #include <linux/init.h>
 
+/* Hardening for Spectre-v1 */
+#include <linux/nospec.h>
+
 #include "lec.h"
 #include "lec_arpc.h"
 #include "resources.h"
@@ -697,8 +700,10 @@
 	bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc));
 	if (bytes_left != 0)
 		pr_info("copy from user failed for %d bytes\n", bytes_left);
-	if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF ||
-	    !dev_lec[ioc_data.dev_num])
+	if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF)
+		return -EINVAL;
+	ioc_data.dev_num = array_index_nospec(ioc_data.dev_num, MAX_LEC_ITF);
+	if (!dev_lec[ioc_data.dev_num])
 		return -EINVAL;
 	vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL);
 	if (!vpriv)
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index e2d18d0..946f1c2 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -2705,7 +2705,7 @@
 	struct batadv_neigh_ifinfo *router_ifinfo = NULL;
 	struct batadv_neigh_node *router;
 	struct batadv_gw_node *curr_gw;
-	int ret = -EINVAL;
+	int ret = 0;
 	void *hdr;
 
 	router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index e79f6f0..ed4ddf2 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -920,7 +920,7 @@
 	struct batadv_neigh_ifinfo *router_ifinfo = NULL;
 	struct batadv_neigh_node *router;
 	struct batadv_gw_node *curr_gw;
-	int ret = -EINVAL;
+	int ret = 0;
 	void *hdr;
 
 	router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 5419b12..582e276 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -2149,22 +2149,25 @@
 {
 	struct batadv_bla_claim *claim;
 	int idx = 0;
+	int ret = 0;
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(claim, head, hash_entry) {
 		if (idx++ < *idx_skip)
 			continue;
-		if (batadv_bla_claim_dump_entry(msg, portid, seq,
-						primary_if, claim)) {
+
+		ret = batadv_bla_claim_dump_entry(msg, portid, seq,
+						  primary_if, claim);
+		if (ret) {
 			*idx_skip = idx - 1;
 			goto unlock;
 		}
 	}
 
-	*idx_skip = idx;
+	*idx_skip = 0;
 unlock:
 	rcu_read_unlock();
-	return 0;
+	return ret;
 }
 
 /**
@@ -2379,22 +2382,25 @@
 {
 	struct batadv_bla_backbone_gw *backbone_gw;
 	int idx = 0;
+	int ret = 0;
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
 		if (idx++ < *idx_skip)
 			continue;
-		if (batadv_bla_backbone_dump_entry(msg, portid, seq,
-						   primary_if, backbone_gw)) {
+
+		ret = batadv_bla_backbone_dump_entry(msg, portid, seq,
+						     primary_if, backbone_gw);
+		if (ret) {
 			*idx_skip = idx - 1;
 			goto unlock;
 		}
 	}
 
-	*idx_skip = idx;
+	*idx_skip = 0;
 unlock:
 	rcu_read_unlock();
-	return 0;
+	return ret;
 }
 
 /**
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index e257efd..df7c6a0 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -391,7 +391,7 @@
 		   batadv_arp_hw_src(skb, hdr_size), &ip_src,
 		   batadv_arp_hw_dst(skb, hdr_size), &ip_dst);
 
-	if (hdr_size == 0)
+	if (hdr_size < sizeof(struct batadv_unicast_packet))
 		return;
 
 	unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 0934730..57215e3 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -276,7 +276,8 @@
 	/* Move the existing MAC header to just before the payload. (Override
 	 * the fragment header.)
 	 */
-	skb_pull_rcsum(skb_out, hdr_size);
+	skb_pull(skb_out, hdr_size);
+	skb_out->ip_summed = CHECKSUM_NONE;
 	memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
 	skb_set_mac_header(skb_out, -ETH_HLEN);
 	skb_reset_network_header(skb_out);
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index de055d6..ed9aaf3 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -715,6 +715,9 @@
 
 	vid = batadv_get_vid(skb, 0);
 
+	if (is_multicast_ether_addr(ethhdr->h_dest))
+		goto out;
+
 	orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
 						 ethhdr->h_dest, vid);
 	if (!orig_dst_node)
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index 13661f4..5a2aac1 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -527,8 +527,8 @@
 		bat_priv->mcast.enabled = true;
 	}
 
-	return !(mcast_data.flags &
-		 (BATADV_MCAST_WANT_ALL_IPV4 | BATADV_MCAST_WANT_ALL_IPV6));
+	return !(mcast_data.flags & BATADV_MCAST_WANT_ALL_IPV4 &&
+		 mcast_data.flags & BATADV_MCAST_WANT_ALL_IPV6);
 }
 
 /**
@@ -769,8 +769,8 @@
 batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv,
 			      struct ethhdr *ethhdr)
 {
-	return batadv_transtable_search(bat_priv, ethhdr->h_source,
-					ethhdr->h_dest, BATADV_NO_FLAGS);
+	return batadv_transtable_search(bat_priv, NULL, ethhdr->h_dest,
+					BATADV_NO_FLAGS);
 }
 
 /**
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 7e8dc64..8b98609 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -724,6 +724,7 @@
 /**
  * batadv_reroute_unicast_packet - update the unicast header for re-routing
  * @bat_priv: the bat priv with all the soft interface information
+ * @skb: unicast packet to process
  * @unicast_packet: the unicast header to be updated
  * @dst_addr: the payload destination
  * @vid: VLAN identifier
@@ -735,7 +736,7 @@
  * Return: true if the packet header has been updated, false otherwise
  */
 static bool
-batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
+batadv_reroute_unicast_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
 			      struct batadv_unicast_packet *unicast_packet,
 			      u8 *dst_addr, unsigned short vid)
 {
@@ -764,8 +765,10 @@
 	}
 
 	/* update the packet header */
+	skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
 	ether_addr_copy(unicast_packet->dest, orig_addr);
 	unicast_packet->ttvn = orig_ttvn;
+	skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
 
 	ret = true;
 out:
@@ -806,7 +809,7 @@
 	 * the packet to
 	 */
 	if (batadv_tt_local_client_is_roaming(bat_priv, ethhdr->h_dest, vid)) {
-		if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
+		if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet,
 						  ethhdr->h_dest, vid))
 			batadv_dbg_ratelimited(BATADV_DBG_TT,
 					       bat_priv,
@@ -852,7 +855,7 @@
 	 * destination can possibly be updated and forwarded towards the new
 	 * target host
 	 */
-	if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
+	if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet,
 					  ethhdr->h_dest, vid)) {
 		batadv_dbg_ratelimited(BATADV_DBG_TT, bat_priv,
 				       "Rerouting unicast packet to %pM (dst=%pM): TTVN mismatch old_ttvn=%u new_ttvn=%u\n",
@@ -875,12 +878,14 @@
 	if (!primary_if)
 		return false;
 
+	/* update the packet header */
+	skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
 	ether_addr_copy(unicast_packet->dest, primary_if->net_dev->dev_addr);
+	unicast_packet->ttvn = curr_ttvn;
+	skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
 
 	batadv_hardif_put(primary_if);
 
-	unicast_packet->ttvn = curr_ttvn;
-
 	return true;
 }
 
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 49e16b6..84c1b38 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -448,13 +448,7 @@
 
 	/* skb->dev & skb->pkt_type are set here */
 	skb->protocol = eth_type_trans(skb, soft_iface);
-
-	/* should not be necessary anymore as we use skb_pull_rcsum()
-	 * TODO: please verify this and remove this TODO
-	 * -- Dec 21st 2009, Simon Wunderlich
-	 */
-
-	/* skb->ip_summed = CHECKSUM_UNNECESSARY; */
+	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
 
 	batadv_inc_counter(bat_priv, BATADV_CNT_RX);
 	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index dc59eae..cc06149 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -749,18 +749,31 @@
 }
 
 static void hci_req_add_le_create_conn(struct hci_request *req,
-				       struct hci_conn *conn)
+				       struct hci_conn *conn,
+				       bdaddr_t *direct_rpa)
 {
 	struct hci_cp_le_create_conn cp;
 	struct hci_dev *hdev = conn->hdev;
 	u8 own_addr_type;
 
-	/* Update random address, but set require_privacy to false so
-	 * that we never connect with an non-resolvable address.
+	/* If a direct address was provided, use it instead of the current
+	 * address.
 	 */
-	if (hci_update_random_address(req, false, conn_use_rpa(conn),
-				      &own_addr_type))
-		return;
+	if (direct_rpa) {
+		if (bacmp(&req->hdev->random_addr, direct_rpa))
+			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
+								direct_rpa);
+
+		/* direct address is always RPA */
+		own_addr_type = ADDR_LE_DEV_RANDOM;
+	} else {
+		/* Update random address, but set require_privacy to false so
+		 * that we never connect with a non-resolvable address.
+		 */
+		if (hci_update_random_address(req, false, conn_use_rpa(conn),
+					      &own_addr_type))
+			return;
+	}
 
 	memset(&cp, 0, sizeof(cp));
 
@@ -825,7 +838,7 @@
 
 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
 				u8 dst_type, u8 sec_level, u16 conn_timeout,
-				u8 role)
+				u8 role, bdaddr_t *direct_rpa)
 {
 	struct hci_conn_params *params;
 	struct hci_conn *conn;
@@ -940,7 +953,7 @@
 		hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
 	}
 
-	hci_req_add_le_create_conn(&req, conn);
+	hci_req_add_le_create_conn(&req, conn, direct_rpa);
 
 create_conn:
 	err = hci_req_run(&req, create_le_conn_complete);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 3ac89e9..4bd72d2 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -548,6 +548,7 @@
 {
 	struct hci_dev *hdev = req->hdev;
 	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+	bool changed = false;
 
 	/* If Connectionless Slave Broadcast master role is supported
 	 * enable all necessary events for it.
@@ -557,6 +558,7 @@
 		events[1] |= 0x80;	/* Synchronization Train Complete */
 		events[2] |= 0x10;	/* Slave Page Response Timeout */
 		events[2] |= 0x20;	/* CSB Channel Map Change */
+		changed = true;
 	}
 
 	/* If Connectionless Slave Broadcast slave role is supported
@@ -567,13 +569,24 @@
 		events[2] |= 0x02;	/* CSB Receive */
 		events[2] |= 0x04;	/* CSB Timeout */
 		events[2] |= 0x08;	/* Truncated Page Complete */
+		changed = true;
 	}
 
 	/* Enable Authenticated Payload Timeout Expired event if supported */
-	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
+	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
 		events[2] |= 0x80;
+		changed = true;
+	}
 
-	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
+	/* Some Broadcom based controllers indicate support for Set Event
+	 * Mask Page 2 command, but then actually do not support it. Since
+	 * the default value is all bits set to zero, the command is only
+	 * required if the event mask has to be changed. In case no change
+	 * to the event mask is needed, skip this command.
+	 */
+	if (changed)
+		hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
+			    sizeof(events), events);
 }
 
 static int hci_init3_req(struct hci_request *req, unsigned long opt)
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index e17aacb..d2f9eb1 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -4646,7 +4646,8 @@
 /* This function requires the caller holds hdev->lock */
 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
 					      bdaddr_t *addr,
-					      u8 addr_type, u8 adv_type)
+					      u8 addr_type, u8 adv_type,
+					      bdaddr_t *direct_rpa)
 {
 	struct hci_conn *conn;
 	struct hci_conn_params *params;
@@ -4697,7 +4698,8 @@
 	}
 
 	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
-			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
+			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
+			      direct_rpa);
 	if (!IS_ERR(conn)) {
 		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
 		 * by higher layer that tried to connect, if no then
@@ -4807,8 +4809,13 @@
 		bdaddr_type = irk->addr_type;
 	}
 
-	/* Check if we have been requested to connect to this device */
-	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
+	/* Check if we have been requested to connect to this device.
+	 *
+	 * direct_addr is set only for directed advertising reports (it is NULL
+	 * for advertising reports) and is already verified to be an RPA above.
+	 */
+	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
+								direct_addr);
 	if (conn && type == LE_ADV_IND) {
 		/* Store report for later inclusion by
 		 * mgmt_device_connected
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 1fc0764..1811f8e 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -431,8 +431,8 @@
 		del_timer(&session->timer);
 }
 
-static void hidp_process_report(struct hidp_session *session,
-				int type, const u8 *data, int len, int intr)
+static void hidp_process_report(struct hidp_session *session, int type,
+				const u8 *data, unsigned int len, int intr)
 {
 	if (len > HID_MAX_BUFFER_SIZE)
 		len = HID_MAX_BUFFER_SIZE;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 2bbca23..1fc23cb 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -7148,7 +7148,7 @@
 			hcon = hci_connect_le(hdev, dst, dst_type,
 					      chan->sec_level,
 					      HCI_LE_CONN_TIMEOUT,
-					      HCI_ROLE_SLAVE);
+					      HCI_ROLE_SLAVE, NULL);
 		else
 			hcon = hci_connect_le_scan(hdev, dst, dst_type,
 						   chan->sec_level,
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 658c900..ead4d1b 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -2233,8 +2233,14 @@
 	else
 		sec_level = authreq_to_seclevel(auth);
 
-	if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK))
+	if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK)) {
+		/* If the link is already encrypted with sufficient security we
+		 * still need to refresh the encryption as per Core Spec 5.0
+		 * Vol 3, Part H 2.4.6
+		 */
+		smp_ltk_encrypt(conn, hcon->sec_level);
 		return 0;
+	}
 
 	if (sec_level > hcon->pending_sec_level)
 		hcon->pending_sec_level = sec_level;
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 9218931..f57de0a 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -504,8 +504,8 @@
 	if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit)
 		return -ELOOP;
 
-	/* Device is already being bridged */
-	if (br_port_exists(dev))
+	/* Device has master upper dev */
+	if (netdev_master_upper_dev_get(dev))
 		return -EBUSY;
 
 	/* No bridging devices that dislike that (e.g. wireless) */
diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c
index 9637a68..9adf162 100644
--- a/net/bridge/netfilter/ebt_among.c
+++ b/net/bridge/netfilter/ebt_among.c
@@ -177,6 +177,28 @@
 	return w && w->poolsize >= (INT_MAX / sizeof(struct ebt_mac_wormhash_tuple));
 }
 
+static bool wormhash_offset_invalid(int off, unsigned int len)
+{
+	if (off == 0) /* not present */
+		return false;
+
+	if (off < (int)sizeof(struct ebt_among_info) ||
+	    off % __alignof__(struct ebt_mac_wormhash))
+		return true;
+
+	off += sizeof(struct ebt_mac_wormhash);
+
+	return off > len;
+}
+
+static bool wormhash_sizes_valid(const struct ebt_mac_wormhash *wh, int a, int b)
+{
+	if (a == 0)
+		a = sizeof(struct ebt_among_info);
+
+	return ebt_mac_wormhash_size(wh) + a == b;
+}
+
 static int ebt_among_mt_check(const struct xt_mtchk_param *par)
 {
 	const struct ebt_among_info *info = par->matchinfo;
@@ -189,6 +211,10 @@
 	if (expected_length > em->match_size)
 		return -EINVAL;
 
+	if (wormhash_offset_invalid(info->wh_dst_ofs, em->match_size) ||
+	    wormhash_offset_invalid(info->wh_src_ofs, em->match_size))
+		return -EINVAL;
+
 	wh_dst = ebt_among_wh_dst(info);
 	if (poolsize_invalid(wh_dst))
 		return -EINVAL;
@@ -201,6 +227,14 @@
 	if (poolsize_invalid(wh_src))
 		return -EINVAL;
 
+	if (info->wh_src_ofs < info->wh_dst_ofs) {
+		if (!wormhash_sizes_valid(wh_src, info->wh_src_ofs, info->wh_dst_ofs))
+			return -EINVAL;
+	} else {
+		if (!wormhash_sizes_valid(wh_dst, info->wh_dst_ofs, info->wh_src_ofs))
+			return -EINVAL;
+	}
+
 	expected_length += ebt_mac_wormhash_size(wh_src);
 
 	if (em->match_size != EBT_ALIGN(expected_length)) {
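
wormhash_offset_invalid() and wormhash_sizes_valid() above reject user-controlled offsets before any pointer arithmetic is done on the match blob. A small userspace sketch of the same validation shape (blob_header, region and the field names are hypothetical): the offset must lie past the header, be properly aligned, and leave room for the structure it claims to point at.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct blob_header {		/* stand-in for ebt_among_info */
	int dst_ofs;		/* 0 means "not present" */
	int src_ofs;
};

struct region {			/* stand-in for ebt_mac_wormhash */
	unsigned int poolsize;
};

static bool offset_invalid(int off, size_t len)
{
	if (off == 0)				/* not present */
		return false;
	if (off < (int)sizeof(struct blob_header) ||
	    off % _Alignof(struct region))
		return true;
	return (size_t)off + sizeof(struct region) > len;
}

static bool blob_valid(const unsigned char *blob, size_t len)
{
	struct blob_header h;

	if (len < sizeof(h))
		return false;
	memcpy(&h, blob, sizeof(h));
	if (offset_invalid(h.dst_ofs, len) || offset_invalid(h.src_ofs, len))
		return false;
	return true;		/* only now is it safe to follow the offsets */
}

int main(void)
{
	unsigned char blob[64] = {0};
	struct blob_header h = { .dst_ofs = sizeof(h), .src_ofs = 0 };

	memcpy(blob, &h, sizeof(h));
	printf("in-range offset accepted: %s\n",
	       blob_valid(blob, sizeof(blob)) ? "yes" : "no");

	h.dst_ofs = 1000;			/* points far past the buffer */
	memcpy(blob, &h, sizeof(h));
	printf("out-of-range offset rejected: %s\n",
	       blob_valid(blob, sizeof(blob)) ? "no, bug" : "yes");
	return 0;
}
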
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 5a89a4a..0a9222e 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1625,7 +1625,8 @@
 	int off = ebt_compat_match_offset(match, m->match_size);
 	compat_uint_t msize = m->match_size - off;
 
-	BUG_ON(off >= m->match_size);
+	if (WARN_ON(off >= m->match_size))
+		return -EINVAL;
 
 	if (copy_to_user(cm->u.name, match->name,
 	    strlen(match->name) + 1) || put_user(msize, &cm->match_size))
@@ -1652,7 +1653,8 @@
 	int off = xt_compat_target_offset(target);
 	compat_uint_t tsize = t->target_size - off;
 
-	BUG_ON(off >= t->target_size);
+	if (WARN_ON(off >= t->target_size))
+		return -EINVAL;
 
 	if (copy_to_user(cm->u.name, target->name,
 	    strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
@@ -1880,7 +1882,8 @@
 	if (state->buf_kern_start == NULL)
 		goto count_only;
 
-	BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
+	if (WARN_ON(state->buf_kern_offset + sz > state->buf_kern_len))
+		return -EINVAL;
 
 	memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
 
@@ -1893,7 +1896,8 @@
 {
 	char *b = state->buf_kern_start;
 
-	BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
+	if (WARN_ON(b && state->buf_kern_offset > state->buf_kern_len))
+		return -EINVAL;
 
 	if (b != NULL && sz > 0)
 		memset(b + state->buf_kern_offset, 0, sz);
@@ -1970,8 +1974,10 @@
 	pad = XT_ALIGN(size_kern) - size_kern;
 
 	if (pad > 0 && dst) {
-		BUG_ON(state->buf_kern_len <= pad);
-		BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
+		if (WARN_ON(state->buf_kern_len <= pad))
+			return -EINVAL;
+		if (WARN_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad))
+			return -EINVAL;
 		memset(dst + size_kern, 0, pad);
 	}
 	return off + match_size;
@@ -2021,7 +2027,8 @@
 		if (ret < 0)
 			return ret;
 
-		BUG_ON(ret < match32->match_size);
+		if (WARN_ON(ret < match32->match_size))
+			return -EINVAL;
 		growth += ret - match32->match_size;
 		growth += ebt_compat_entry_padsize();
 
@@ -2090,8 +2097,12 @@
 	 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
 	 */
 	for (i = 0; i < 4 ; ++i) {
-		if (offsets[i] >= *total)
+		if (offsets[i] > *total)
 			return -EINVAL;
+
+		if (i < 3 && offsets[i] == *total)
+			return -EINVAL;
+
 		if (i == 0)
 			continue;
 		if (offsets[i-1] > offsets[i])
@@ -2130,7 +2141,8 @@
 
 	startoff = state->buf_user_offset - startoff;
 
-	BUG_ON(*total < startoff);
+	if (WARN_ON(*total < startoff))
+		return -EINVAL;
 	*total -= startoff;
 	return 0;
 }
@@ -2257,7 +2269,8 @@
 	state.buf_kern_len = size64;
 
 	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
-	BUG_ON(ret < 0);	/* parses same data again */
+	if (WARN_ON(ret < 0))
+		goto out_unlock;
 
 	vfree(entries_tmp);
 	tmp.entries_size = size64;
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 25a30be..98ea28d 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -2512,6 +2512,11 @@
 	int ret = 1;
 
 	dout("try_write start %p state %lu\n", con, con->state);
+	if (con->state != CON_STATE_PREOPEN &&
+	    con->state != CON_STATE_CONNECTING &&
+	    con->state != CON_STATE_NEGOTIATING &&
+	    con->state != CON_STATE_OPEN)
+		return 0;
 
 more:
 	dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
@@ -2537,6 +2542,8 @@
 	}
 
 more_kvec:
+	BUG_ON(!con->sock);
+
 	/* kvec data queued? */
 	if (con->out_kvec_left) {
 		ret = write_partial_kvec(con);
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index a8effc8..5004810 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -209,6 +209,14 @@
 	__open_session(monc);
 }
 
+static void un_backoff(struct ceph_mon_client *monc)
+{
+	monc->hunt_mult /= 2; /* reduce by 50% */
+	if (monc->hunt_mult < 1)
+		monc->hunt_mult = 1;
+	dout("%s hunt_mult now %d\n", __func__, monc->hunt_mult);
+}
+
 /*
  * Reschedule delayed work timer.
  */
@@ -955,6 +963,7 @@
 		if (!monc->hunting) {
 			ceph_con_keepalive(&monc->con);
 			__validate_auth(monc);
+			un_backoff(monc);
 		}
 
 		if (is_auth) {
@@ -1114,9 +1123,8 @@
 		dout("%s found mon%d\n", __func__, monc->cur_mon);
 		monc->hunting = false;
 		monc->had_a_connection = true;
-		monc->hunt_mult /= 2; /* reduce by 50% */
-		if (monc->hunt_mult < 1)
-			monc->hunt_mult = 1;
+		un_backoff(monc);
+		__schedule_delayed(monc);
 	}
 }
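
un_backoff() centralises the "decay the hunt multiplier on any sign of life" step so it can run both after a successful hunt and from the keepalive path. A toy model of the multiplier behaviour (hunt_mult and HUNT_MAX are illustrative; the kernel caps and schedules differently):

#include <stdio.h>

#define HUNT_MAX 32		/* cap for the backoff multiplier */

static int hunt_mult = 1;

static void hunt_failed(void)	/* connection attempt timed out */
{
	if (hunt_mult < HUNT_MAX)
		hunt_mult *= 2;
}

static void un_backoff(void)	/* monitor answered: reduce by 50% */
{
	hunt_mult /= 2;
	if (hunt_mult < 1)
		hunt_mult = 1;
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		hunt_failed();
	printf("after 4 failures: hunt_mult=%d\n", hunt_mult);	/* 16 */

	for (int i = 0; i < 2; i++)
		un_backoff();
	printf("after 2 healthy ticks: hunt_mult=%d\n", hunt_mult);	/* 4 */
	return 0;
}
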
 
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index d3f6c26..255c0a0 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -295,6 +295,7 @@
 		u32 yes;
 		struct crush_rule *r;
 
+		err = -EINVAL;
 		ceph_decode_32_safe(p, end, yes, bad);
 		if (!yes) {
 			dout("crush_decode NO rule %d off %x %p to %p\n",
diff --git a/net/compat.c b/net/compat.c
index a96fd2f..73671e6 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -372,7 +372,8 @@
 	    optname == SO_ATTACH_REUSEPORT_CBPF)
 		return do_set_attach_filter(sock, level, optname,
 					    optval, optlen);
-	if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
+	if (!COMPAT_USE_64BIT_TIME &&
+	    (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
 		return do_set_sock_timeout(sock, level, optname, optval, optlen);
 
 	return sock_setsockopt(sock, level, optname, optval, optlen);
@@ -437,7 +438,8 @@
 static int compat_sock_getsockopt(struct socket *sock, int level, int optname,
 				char __user *optval, int __user *optlen)
 {
-	if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
+	if (!COMPAT_USE_64BIT_TIME &&
+	    (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
 		return do_get_sock_timeout(sock, level, optname, optval, optlen);
 	return sock_getsockopt(sock, level, optname, optval, optlen);
 }
diff --git a/net/core/dev.c b/net/core/dev.c
index fe1dc21..c6a8932 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -995,7 +995,7 @@
 {
 	if (*name == '\0')
 		return false;
-	if (strlen(name) >= IFNAMSIZ)
+	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
 		return false;
 	if (!strcmp(name, ".") || !strcmp(name, ".."))
 		return false;
@@ -2669,7 +2669,7 @@
 		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
 			return 0;
 
-		eth = (struct ethhdr *)skb_mac_header(skb);
+		eth = (struct ethhdr *)skb->data;
 		type = eth->h_proto;
 	}
 
@@ -2873,7 +2873,7 @@
 }
 EXPORT_SYMBOL(passthru_features_check);
 
-static netdev_features_t dflt_features_check(const struct sk_buff *skb,
+static netdev_features_t dflt_features_check(struct sk_buff *skb,
 					     struct net_device *dev,
 					     netdev_features_t features)
 {
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index c0548d2..e3e6a3e 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -57,8 +57,8 @@
 		return -EINVAL;
 
 	list_for_each_entry(ha, &list->list, list) {
-		if (!memcmp(ha->addr, addr, addr_len) &&
-		    ha->type == addr_type) {
+		if (ha->type == addr_type &&
+		    !memcmp(ha->addr, addr, addr_len)) {
 			if (global) {
 				/* check if addr is already used as global */
 				if (ha->global_use)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index eecad95..59c1581 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -54,7 +54,8 @@
 static void neigh_timer_handler(unsigned long arg);
 static void __neigh_notify(struct neighbour *n, int type, int flags);
 static void neigh_update_notify(struct neighbour *neigh);
-static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
+static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
+				    struct net_device *dev);
 
 static unsigned int neigh_probe_enable;
 #ifdef CONFIG_PROC_FS
@@ -255,8 +256,7 @@
 {
 	write_lock_bh(&tbl->lock);
 	neigh_flush_dev(tbl, dev);
-	pneigh_ifdown(tbl, dev);
-	write_unlock_bh(&tbl->lock);
+	pneigh_ifdown_and_unlock(tbl, dev);
 
 	del_timer_sync(&tbl->proxy_timer);
 	pneigh_queue_purge(&tbl->proxy_queue);
@@ -646,9 +646,10 @@
 	return -ENOENT;
 }
 
-static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
+static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
+				    struct net_device *dev)
 {
-	struct pneigh_entry *n, **np;
+	struct pneigh_entry *n, **np, *freelist = NULL;
 	u32 h;
 
 	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
@@ -656,16 +657,23 @@
 		while ((n = *np) != NULL) {
 			if (!dev || n->dev == dev) {
 				*np = n->next;
-				if (tbl->pdestructor)
-					tbl->pdestructor(n);
-				if (n->dev)
-					dev_put(n->dev);
-				kfree(n);
+				n->next = freelist;
+				freelist = n;
 				continue;
 			}
 			np = &n->next;
 		}
 	}
+	write_unlock_bh(&tbl->lock);
+	while ((n = freelist)) {
+		freelist = n->next;
+		n->next = NULL;
+		if (tbl->pdestructor)
+			tbl->pdestructor(n);
+		if (n->dev)
+			dev_put(n->dev);
+		kfree(n);
+	}
 	return -ENOENT;
 }
 
@@ -881,9 +889,12 @@
 	now = jiffies;
 	next = now + HZ;
 
-	if (!(state & NUD_IN_TIMER))
-		goto out;
-
+	if (!(state & NUD_IN_TIMER)) {
+		if (neigh_probe_enable && (state & NUD_STALE))
+			neigh_dbg(2, "neigh %pK is still alive\n", neigh);
+		else
+			goto out;
+	}
 	if (state & NUD_REACHABLE) {
 		if (time_before_eq(now,
 				   neigh->confirmed + neigh->parms->reachable_time)) {
@@ -1139,10 +1150,6 @@
 		lladdr = neigh->ha;
 	}
 
-	if (new & NUD_CONNECTED)
-		neigh->confirmed = jiffies;
-	neigh->updated = jiffies;
-
 	/* If entry was valid and address is not changed,
 	   do not change entry state, if new one is STALE.
 	 */
@@ -1164,11 +1171,24 @@
 		}
 	}
 
+	/* Update timestamps only once we know we will make a change to the
+	 * neighbour entry. Otherwise we risk moving the locktime window with
+	 * noop updates and ignoring relevant ARP updates.
+	 */
+	if (new != old || lladdr != neigh->ha) {
+		if (new & NUD_CONNECTED)
+			neigh->confirmed = jiffies;
+		neigh->updated = jiffies;
+	}
+
 	if (new != old) {
 		neigh_del_timer(neigh);
 		if (new & NUD_PROBE)
 			atomic_set(&neigh->probes, 0);
-		if (new & NUD_IN_TIMER)
+		if (new & NUD_IN_TIMER || (
+			neigh_probe_enable &&
+			(neigh->tbl->family == AF_INET6) &&
+			(new & NUD_STALE)))
 			neigh_add_timer(neigh, (jiffies +
 						((new & NUD_REACHABLE) ?
 						 neigh->parms->reachable_time :
@@ -2293,12 +2313,16 @@
 
 	err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL);
 	if (!err) {
-		if (tb[NDA_IFINDEX])
+		if (tb[NDA_IFINDEX]) {
+			if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
+				return -EINVAL;
 			filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
-
-		if (tb[NDA_MASTER])
+		}
+		if (tb[NDA_MASTER]) {
+			if (nla_len(tb[NDA_MASTER]) != sizeof(u32))
+				return -EINVAL;
 			filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
-
+		}
 		if (filter_idx || filter_master_idx)
 			flags |= NLM_F_DUMP_FILTERED;
 	}
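
pneigh_ifdown_and_unlock() unhooks the matching entries while tbl->lock is held but defers the destructor/dev_put/kfree work until after the lock is dropped, by chaining the victims onto a local freelist. A userspace sketch of that pattern with a pthread mutex (node, table, destroy_node are illustrative names):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int key;
	struct node *next;
};

struct table {
	pthread_mutex_t lock;
	struct node *head;
};

static void destroy_node(struct node *n)
{
	/* may sleep or take other locks in real life, so keep it out of 'lock' */
	printf("destroying %d\n", n->key);
	free(n);
}

/* Remove all entries matching 'key' and unlock; destruction happens after
 * the lock is released, exactly like pneigh_ifdown_and_unlock(). */
static void remove_matching_and_unlock(struct table *t, int key)
{
	struct node **np = &t->head, *n, *freelist = NULL;

	while ((n = *np) != NULL) {
		if (n->key == key) {
			*np = n->next;		/* unhook under the lock */
			n->next = freelist;	/* park on a local freelist */
			freelist = n;
			continue;
		}
		np = &n->next;
	}
	pthread_mutex_unlock(&t->lock);

	while ((n = freelist)) {		/* heavy work, lock dropped */
		freelist = n->next;
		destroy_node(n);
	}
}

int main(void)
{
	struct table t = { .lock = PTHREAD_MUTEX_INITIALIZER, .head = NULL };

	for (int i = 0; i < 4; i++) {
		struct node *n = malloc(sizeof(*n));

		n->key = i % 2;
		n->next = t.head;
		t.head = n;
	}
	pthread_mutex_lock(&t.lock);
	remove_matching_and_unlock(&t, 1);	/* frees the two key==1 nodes */
	return 0;
}
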
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index b7efe2f..04fd04c 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -312,6 +312,25 @@
 	goto out;
 }
 
+static int __net_init net_defaults_init_net(struct net *net)
+{
+	net->core.sysctl_somaxconn = SOMAXCONN;
+	return 0;
+}
+
+static struct pernet_operations net_defaults_ops = {
+	.init = net_defaults_init_net,
+};
+
+static __init int net_defaults_init(void)
+{
+	if (register_pernet_subsys(&net_defaults_ops))
+		panic("Cannot initialize net default settings");
+
+	return 0;
+}
+
+core_initcall(net_defaults_init);
 
 #ifdef CONFIG_NET_NS
 static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 457f882..9b2d611 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -666,7 +666,7 @@
 	int err;
 
 	rtnl_lock();
-	if (np->dev_name) {
+	if (np->dev_name[0]) {
 		struct net *net = current->nsproxy->net_ns;
 		ndev = __dev_get_by_name(net, np->dev_name);
 	}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 2cc4a1b..89f0fbc 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -909,6 +909,7 @@
 	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
 	n->cloned = 1;
 	n->nohdr = 0;
+	n->peeked = 0;
 	n->destructor = NULL;
 	C(tail);
 	C(end);
@@ -2621,7 +2622,8 @@
 {
 	int pos = skb_headlen(skb);
 
-	skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
+	skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags &
+				      SKBTX_SHARED_FRAG;
 	if (len < pos)	/* Split line is inside header. */
 		skb_split_inside_header(skb, skb1, len, pos);
 	else		/* Second chunk has no header, nothing to copy. */
@@ -3234,8 +3236,8 @@
 		skb_copy_from_linear_data_offset(head_skb, offset,
 						 skb_put(nskb, hsize), hsize);
 
-		skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags &
-			SKBTX_SHARED_FRAG;
+		skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
+					      SKBTX_SHARED_FRAG;
 
 		while (pos < offset + len) {
 			if (i >= nfrags) {
@@ -3481,24 +3483,18 @@
 						NULL);
 }
 
-/**
- *	skb_to_sgvec - Fill a scatter-gather list from a socket buffer
- *	@skb: Socket buffer containing the buffers to be mapped
- *	@sg: The scatter-gather list to map into
- *	@offset: The offset into the buffer's contents to start mapping
- *	@len: Length of buffer space to be mapped
- *
- *	Fill the specified scatter-gather list with mappings/pointers into a
- *	region of the buffer space attached to a socket buffer.
- */
 static int
-__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
+__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
+	       unsigned int recursion_level)
 {
 	int start = skb_headlen(skb);
 	int i, copy = start - offset;
 	struct sk_buff *frag_iter;
 	int elt = 0;
 
+	if (unlikely(recursion_level >= 24))
+		return -EMSGSIZE;
+
 	if (copy > 0) {
 		if (copy > len)
 			copy = len;
@@ -3517,6 +3513,8 @@
 		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
 		if ((copy = end - offset) > 0) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+			if (unlikely(elt && sg_is_last(&sg[elt - 1])))
+				return -EMSGSIZE;
 
 			if (copy > len)
 				copy = len;
@@ -3531,16 +3529,22 @@
 	}
 
 	skb_walk_frags(skb, frag_iter) {
-		int end;
+		int end, ret;
 
 		WARN_ON(start > offset + len);
 
 		end = start + frag_iter->len;
 		if ((copy = end - offset) > 0) {
+			if (unlikely(elt && sg_is_last(&sg[elt - 1])))
+				return -EMSGSIZE;
+
 			if (copy > len)
 				copy = len;
-			elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
-					      copy);
+			ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start,
+					      copy, recursion_level + 1);
+			if (unlikely(ret < 0))
+				return ret;
+			elt += ret;
 			if ((len -= copy) == 0)
 				return elt;
 			offset += copy;
@@ -3551,6 +3555,31 @@
 	return elt;
 }
 
+/**
+ *	skb_to_sgvec - Fill a scatter-gather list from a socket buffer
+ *	@skb: Socket buffer containing the buffers to be mapped
+ *	@sg: The scatter-gather list to map into
+ *	@offset: The offset into the buffer's contents to start mapping
+ *	@len: Length of buffer space to be mapped
+ *
+ *	Fill the specified scatter-gather list with mappings/pointers into a
+ *	region of the buffer space attached to a socket buffer. Returns either
+ *	the number of scatterlist items used, or -EMSGSIZE if the contents
+ *	could not fit.
+ */
+int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
+{
+	int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
+
+	if (nsg <= 0)
+		return nsg;
+
+	sg_mark_end(&sg[nsg - 1]);
+
+	return nsg;
+}
+EXPORT_SYMBOL_GPL(skb_to_sgvec);
+
 /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only map skb to given
  * sglist without mark the sg which contain last skb data as the end.
  * So the caller can mannipulate sg list as will when padding new data after
@@ -3573,19 +3602,11 @@
 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
 			int offset, int len)
 {
-	return __skb_to_sgvec(skb, sg, offset, len);
+	return __skb_to_sgvec(skb, sg, offset, len, 0);
 }
 EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
 
-int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
-{
-	int nsg = __skb_to_sgvec(skb, sg, offset, len);
 
-	sg_mark_end(&sg[nsg - 1]);
-
-	return nsg;
-}
-EXPORT_SYMBOL_GPL(skb_to_sgvec);
 
 /**
  *	skb_cow_data - Check that a socket buffer's data buffers are writable
@@ -3868,7 +3889,8 @@
 		return;
 
 	if (tsonly) {
-		skb_shinfo(skb)->tx_flags = skb_shinfo(orig_skb)->tx_flags;
+		skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags &
+					     SKBTX_ANY_TSTAMP;
 		skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
 	}
 
@@ -4459,13 +4481,18 @@
 
 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
 {
+	int mac_len;
+
 	if (skb_cow(skb, skb_headroom(skb)) < 0) {
 		kfree_skb(skb);
 		return NULL;
 	}
 
-	memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN,
-		2 * ETH_ALEN);
+	mac_len = skb->data - skb_mac_header(skb);
+	if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) {
+		memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
+			mac_len - VLAN_HLEN - ETH_TLEN);
+	}
 	skb->mac_header += VLAN_HLEN;
 	return skb;
 }
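
__skb_to_sgvec() now carries a recursion_level and bails out with -EMSGSIZE instead of blowing the kernel stack or overrunning the caller's scatterlist on deeply nested frag lists. A userspace sketch of the same guard on a nested structure walk (the buf type is illustrative; the 24-level cap mirrors the patch, and the allocations are deliberately not freed to keep the sketch short):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_NESTING 24		/* same cap the patch uses */

struct buf {
	int len;
	struct buf *frags;	/* children, like an skb frag_list */
	struct buf *next;	/* sibling in the parent's frag list */
};

/* Count the segments a buffer tree would map to, refusing to recurse
 * more than MAX_NESTING levels deep. */
static int count_segments(const struct buf *b, unsigned int level)
{
	int elt = 0;

	if (level >= MAX_NESTING)
		return -EMSGSIZE;

	if (b->len > 0)
		elt++;

	for (const struct buf *it = b->frags; it; it = it->next) {
		int ret = count_segments(it, level + 1);

		if (ret < 0)
			return ret;	/* propagate the error, don't truncate */
		elt += ret;
	}
	return elt;
}

int main(void)
{
	/* build a chain nested deeper than the cap */
	struct buf *top = calloc(1, sizeof(*top)), *cur = top;

	top->len = 1;
	for (int i = 0; i < 30; i++) {
		struct buf *child = calloc(1, sizeof(*child));

		child->len = 1;
		cur->frags = child;
		cur = child;
	}
	printf("count = %d (expect -EMSGSIZE = %d)\n",
	       count_segments(top, 0), -EMSGSIZE);
	return 0;
}
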
diff --git a/net/core/sock.c b/net/core/sock.c
index 1d88335..0e82197 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1465,7 +1465,7 @@
 
 static void __sk_free(struct sock *sk)
 {
-	if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt))
+	if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
 		sock_diag_broadcast_destroy(sk);
 	else
 		sk_destruct(sk);
diff --git a/net/core/sockev_nlmcast.c b/net/core/sockev_nlmcast.c
index 04f61fc..f238edb 100644
--- a/net/core/sockev_nlmcast.c
+++ b/net/core/sockev_nlmcast.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015, 2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015, 2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -36,6 +36,8 @@
 
 static void _sockev_event(unsigned long event, __u8 *evstr, int buflen)
 {
+	memset(evstr, 0, buflen);
+
 	switch (event) {
 	case SOCKEV_SOCKET:
 		strlcpy(evstr, "SOCKEV_SOCKET", buflen);
@@ -67,14 +69,17 @@
 	struct nlmsghdr *nlh;
 	struct sknlsockevmsg *smsg;
 	struct socket *sock;
+	struct sock *sk;
 
 	sock = (struct socket *)data;
-	if (socknlmsgsk == 0)
-		goto done;
-	if ((!socknlmsgsk) || (!sock) || (!sock->sk))
+	if (!socknlmsgsk || !sock)
 		goto done;
 
-	if (sock->sk->sk_family != AF_INET && sock->sk->sk_family != AF_INET6)
+	sk = sock->sk;
+	if (!sk)
+		goto done;
+
+	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
 		goto done;
 
 	if (event != SOCKEV_BIND && event != SOCKEV_LISTEN)
@@ -95,12 +100,11 @@
 	smsg = nlmsg_data(nlh);
 	smsg->pid = current->pid;
 	_sockev_event(event, smsg->event, sizeof(smsg->event));
-	smsg->skfamily = sock->sk->sk_family;
-	smsg->skstate = sock->sk->sk_state;
-	smsg->skprotocol = sock->sk->sk_protocol;
-	smsg->sktype = sock->sk->sk_type;
-	smsg->skflags = sock->sk->sk_flags;
-
+	smsg->skfamily = sk->sk_family;
+	smsg->skstate = sk->sk_state;
+	smsg->skprotocol = sk->sk_protocol;
+	smsg->sktype = sk->sk_type;
+	smsg->skflags = sk->sk_flags;
 	nlmsg_notify(socknlmsgsk, skb, 0, SKNLGRP_SOCKEV, 0, GFP_KERNEL);
 done:
 	return 0;
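
_sockev_event() now zeroes the destination buffer before the strlcpy(), so the fixed-size event field copied to userspace over netlink never carries stale bytes past the NUL. A userspace sketch of the idiom (strlcpy is replaced by snprintf since it is not in ISO C):

#include <stdio.h>
#include <string.h>

#define EVSTR_LEN 32

/* Fill a fixed-size record field: clear it first so the bytes after the
 * terminating NUL are deterministic when the whole struct is copied out. */
static void set_event(char *evstr, size_t buflen, const char *name)
{
	memset(evstr, 0, buflen);
	snprintf(evstr, buflen, "%s", name);
}

int main(void)
{
	char ev[EVSTR_LEN];

	memset(ev, 0xAA, sizeof(ev));		/* simulate stale data */
	set_event(ev, sizeof(ev), "SOCKEV_BIND");

	/* every byte after the string is now 0, not 0xAA */
	for (size_t i = strlen(ev) + 1; i < sizeof(ev); i++)
		if ((unsigned char)ev[i] != 0)
			return 1;
	puts("no stale bytes leaked");
	return 0;
}
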
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 1b46190..546ba76 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -438,8 +438,6 @@
 {
 	struct ctl_table *tbl;
 
-	net->core.sysctl_somaxconn = SOMAXCONN;
-
 	tbl = netns_core_table;
 	if (!net_eq(net, &init_net)) {
 		tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 7753681..86a2ed0 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -126,6 +126,16 @@
 						  DCCPF_SEQ_WMAX));
 }
 
+static void dccp_tasklet_schedule(struct sock *sk)
+{
+	struct tasklet_struct *t = &dccp_sk(sk)->dccps_xmitlet;
+
+	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
+		sock_hold(sk);
+		__tasklet_schedule(t);
+	}
+}
+
 static void ccid2_hc_tx_rto_expire(unsigned long data)
 {
 	struct sock *sk = (struct sock *)data;
@@ -166,7 +176,7 @@
 
 	/* if we were blocked before, we may now send cwnd=1 packet */
 	if (sender_was_blocked)
-		tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
+		dccp_tasklet_schedule(sk);
 	/* restart backed-off timer */
 	sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
 out:
@@ -706,7 +716,7 @@
 done:
 	/* check if incoming Acks allow pending packets to be sent */
 	if (sender_was_blocked && !ccid2_cwnd_network_limited(hc))
-		tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
+		dccp_tasklet_schedule(sk);
 	dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
 }
 
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 8c7799cd..6697b18 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -620,6 +620,7 @@
 	ireq = inet_rsk(req);
 	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
 	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
+	ireq->ir_mark = inet_request_mark(sk, skb);
 	ireq->ireq_family = AF_INET;
 	ireq->ir_iif = sk->sk_bound_dev_if;
 
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 28e8252..6cbcf39 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -349,6 +349,7 @@
 	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
 	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
 	ireq->ireq_family = AF_INET6;
+	ireq->ir_mark = inet_request_mark(sk, skb);
 
 	if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
 	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index 3a2c340..2a952cb 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -230,12 +230,12 @@
 	else
 		dccp_write_xmit(sk);
 	bh_unlock_sock(sk);
+	sock_put(sk);
 }
 
 static void dccp_write_xmit_timer(unsigned long data)
 {
 	dccp_write_xmitlet(data);
-	sock_put((struct sock *)data);
 }
 
 void dccp_init_xmit_timers(struct sock *sk)
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index e1d4d89..f025276 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -25,6 +25,7 @@
 #include <linux/moduleparam.h>
 #include <linux/slab.h>
 #include <linux/string.h>
+#include <linux/ratelimit.h>
 #include <linux/kernel.h>
 #include <linux/keyctl.h>
 #include <linux/err.h>
@@ -91,9 +92,9 @@
 
 			next_opt = memchr(opt, '#', end - opt) ?: end;
 			opt_len = next_opt - opt;
-			if (!opt_len) {
-				printk(KERN_WARNING
-				       "Empty option to dns_resolver key\n");
+			if (opt_len <= 0 || opt_len > 128) {
+				pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
+						    opt_len);
 				return -EINVAL;
 			}
 
@@ -127,10 +128,8 @@
 			}
 
 		bad_option_value:
-			printk(KERN_WARNING
-			       "Option '%*.*s' to dns_resolver key:"
-			       " bad/missing value\n",
-			       opt_nlen, opt_nlen, opt);
+			pr_warn_ratelimited("Option '%*.*s' to dns_resolver key: bad/missing value\n",
+					    opt_nlen, opt_nlen, opt);
 			return -EINVAL;
 		} while (opt = next_opt + 1, opt < end);
 	}
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index 4ebe2aa..04b5450 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -324,8 +324,7 @@
 	unsigned long irqflags;
 
 	frame->is_supervision = is_supervision_frame(port->hsr, skb);
-	frame->node_src = hsr_get_node(&port->hsr->node_db, skb,
-				       frame->is_supervision);
+	frame->node_src = hsr_get_node(port, skb, frame->is_supervision);
 	if (frame->node_src == NULL)
 		return -1; /* Unknown node and !is_supervision, or no mem */
 
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index 7ea9258..284a9b8 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -158,9 +158,10 @@
 
 /* Get the hsr_node from which 'skb' was sent.
  */
-struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb,
+struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
 			      bool is_sup)
 {
+	struct list_head *node_db = &port->hsr->node_db;
 	struct hsr_node *node;
 	struct ethhdr *ethhdr;
 	u16 seq_out;
@@ -186,7 +187,11 @@
 		 */
 		seq_out = hsr_get_skb_sequence_nr(skb) - 1;
 	} else {
-		WARN_ONCE(1, "%s: Non-HSR frame\n", __func__);
+		/* This is also called for frames from the master port, so
+		 * warn only for non-master ports.
+		 */
+		if (port->type != HSR_PT_MASTER)
+			WARN_ONCE(1, "%s: Non-HSR frame\n", __func__);
 		seq_out = HSR_SEQNR_START;
 	}
 
diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
index 438b40f..4e04f0e 100644
--- a/net/hsr/hsr_framereg.h
+++ b/net/hsr/hsr_framereg.h
@@ -18,7 +18,7 @@
 
 struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
 			      u16 seq_out);
-struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb,
+struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
 			      bool is_sup);
 void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
 			  struct hsr_port *port);
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index e0bd013..bf5d26d 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -304,12 +304,12 @@
 	skb->sk  = sk;
 	skb->protocol = htons(ETH_P_IEEE802154);
 
-	dev_put(dev);
-
 	err = dev_queue_xmit(skb);
 	if (err > 0)
 		err = net_xmit_errno(err);
 
+	dev_put(dev);
+
 	return err ?: size;
 
 out_skb:
@@ -693,12 +693,12 @@
 	skb->sk  = sk;
 	skb->protocol = htons(ETH_P_IEEE802154);
 
-	dev_put(dev);
-
 	err = dev_queue_xmit(skb);
 	if (err > 0)
 		err = net_xmit_errno(err);
 
+	dev_put(dev);
+
 	return err ?: size;
 
 out_skb:
diff --git a/net/ipc_router/Kconfig b/net/ipc_router/Kconfig
index 30cd45a..9121667 100644
--- a/net/ipc_router/Kconfig
+++ b/net/ipc_router/Kconfig
@@ -23,3 +23,13 @@
 	  once configured with the security rules will ensure that the
 	  sender of the message to a service belongs to the relevant
 	  Linux group as configured by the security script.
+
+config IPC_ROUTER_NODE_ID
+	depends on IPC_ROUTER
+	int "IPC router local NODE ID"
+	default 1
+	help
+	  This option allows the IPC Router NODE ID to be configured.
+	  The node ID defined here is used as the local NODE ID by the
+	  IPC Router core and is published to the other nodes present
+	  in the network.
diff --git a/net/ipc_router/ipc_router_core.c b/net/ipc_router/ipc_router_core.c
index 3403108..1a6b539 100644
--- a/net/ipc_router/ipc_router_core.c
+++ b/net/ipc_router/ipc_router_core.c
@@ -137,6 +137,7 @@
 	struct msm_ipc_router_xprt *xprt;
 	u32 remote_node_id;
 	u32 initialized;
+	u32 hello_sent;
 	struct list_head pkt_list;
 	struct wakeup_source ws;
 	struct mutex rx_lock_lhb2; /* lock for xprt rx operations */
@@ -2494,12 +2495,36 @@
 	}
 }
 
+static int send_hello_msg(struct msm_ipc_router_xprt_info *xprt_info)
+{
+	int rc = 0;
+	union rr_control_msg ctl;
+
+	if (!xprt_info->hello_sent) {
+		xprt_info->hello_sent = 1;
+		/* Send a HELLO message */
+		memset(&ctl, 0, sizeof(ctl));
+		ctl.hello.cmd = IPC_ROUTER_CTRL_CMD_HELLO;
+		ctl.hello.checksum = IPC_ROUTER_HELLO_MAGIC;
+		ctl.hello.versions = (uint32_t)IPC_ROUTER_VER_BITMASK;
+		ctl.hello.checksum = ipc_router_calc_checksum(&ctl);
+		rc = ipc_router_send_ctl_msg(xprt_info, &ctl,
+					     IPC_ROUTER_DUMMY_DEST_NODE);
+		if (rc < 0) {
+			xprt_info->hello_sent = 0;
+			IPC_RTR_ERR("%s: Error sending HELLO message\n",
+				    __func__);
+			return rc;
+		}
+	}
+	return rc;
+}
+
 static int process_hello_msg(struct msm_ipc_router_xprt_info *xprt_info,
 			     union rr_control_msg *msg,
 			     struct rr_header_v1 *hdr)
 {
 	int i, rc = 0;
-	union rr_control_msg ctl;
 	struct msm_ipc_routing_table_entry *rt_entry;
 
 	if (!hdr)
@@ -2514,19 +2539,10 @@
 	kref_put(&rt_entry->ref, ipc_router_release_rtentry);
 
 	do_version_negotiation(xprt_info, msg);
-	/* Send a reply HELLO message */
-	memset(&ctl, 0, sizeof(ctl));
-	ctl.hello.cmd = IPC_ROUTER_CTRL_CMD_HELLO;
-	ctl.hello.checksum = IPC_ROUTER_HELLO_MAGIC;
-	ctl.hello.versions = (u32)IPC_ROUTER_VER_BITMASK;
-	ctl.hello.checksum = ipc_router_calc_checksum(&ctl);
-	rc = ipc_router_send_ctl_msg(xprt_info, &ctl,
-				     IPC_ROUTER_DUMMY_DEST_NODE);
-	if (rc < 0) {
-		IPC_RTR_ERR("%s: Error sending reply HELLO message\n",
-			    __func__);
+	rc = send_hello_msg(xprt_info);
+	if (rc < 0)
 		return rc;
-	}
+
 	xprt_info->initialized = 1;
 
 	/* Send list of servers from the local node and from nodes
@@ -4068,6 +4084,7 @@
 
 	xprt_info->xprt = xprt;
 	xprt_info->initialized = 0;
+	xprt_info->hello_sent = 0;
 	xprt_info->remote_node_id = -1;
 	INIT_LIST_HEAD(&xprt_info->pkt_list);
 	mutex_init(&xprt_info->rx_lock_lhb2);
@@ -4109,6 +4126,7 @@
 	up_write(&routing_table_lock_lha3);
 
 	xprt->priv = xprt_info;
+	send_hello_msg(xprt_info);
 
 	return 0;
 }
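
Note on the ipc_router hunks above: the new send_hello_msg() helper makes the HELLO transmission idempotent per transport. The hello_sent flag is set before sending and cleared again on failure, so the message goes out at most once per link while a failed attempt can still be retried (from xprt registration or from process_hello_msg()). A compact sketch of that one-shot-with-retry shape, with a stubbed-out send and invented names:

    #include <stdbool.h>
    #include <stdio.h>

    struct link {
        bool hello_sent;
        int fail_next;      /* test knob for the stubbed send below */
    };

    /* Stand-in for the real control-message send. */
    static int do_send_hello(struct link *l)
    {
        return l->fail_next-- > 0 ? -1 : 0;
    }

    /* Send HELLO at most once per link; clear the flag on failure so a
     * later caller can retry, as send_hello_msg() does above. */
    static int send_hello_once(struct link *l)
    {
        int rc = 0;

        if (!l->hello_sent) {
            l->hello_sent = true;
            rc = do_send_hello(l);
            if (rc < 0)
                l->hello_sent = false;
        }
        return rc;
    }

    int main(void)
    {
        struct link l = { .fail_next = 1 };
        int a = send_hello_once(&l);   /* -1: first attempt fails      */
        int b = send_hello_once(&l);   /*  0: retried and succeeds     */
        int c = send_hello_once(&l);   /*  0: already sent, no-op      */

        printf("%d %d %d\n", a, b, c);
        return 0;
    }
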
diff --git a/net/ipc_router/ipc_router_private.h b/net/ipc_router/ipc_router_private.h
index 3ec9818..6e0c4be 100644
--- a/net/ipc_router/ipc_router_private.h
+++ b/net/ipc_router/ipc_router_private.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2016, 2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -37,7 +37,7 @@
 
 #define IPC_ROUTER_ADDRESS			0x0000FFFF
 
-#define IPC_ROUTER_NID_LOCAL			1
+#define IPC_ROUTER_NID_LOCAL	CONFIG_IPC_ROUTER_NODE_ID
 #define MAX_IPC_PKT_SIZE 66000
 
 #define IPC_ROUTER_LOW_RX_QUOTA		5
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 22377c8..e8f8623 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -220,7 +220,9 @@
 	ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
 
 	sg_init_table(sg, nfrags + sglists);
-	skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+	if (unlikely(err < 0))
+		goto out_free;
 
 	if (x->props.flags & XFRM_STATE_ESN) {
 		/* Attach seqhi sg right after packet payload */
@@ -393,7 +395,9 @@
 	skb_push(skb, ihl);
 
 	sg_init_table(sg, nfrags + sglists);
-	skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+	if (unlikely(err < 0))
+		goto out_free;
 
 	if (x->props.flags & XFRM_STATE_ESN) {
 		/* Attach seqhi sg right after packet payload */
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index e60517e..8cae791 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -437,7 +437,7 @@
 	/*unsigned long now; */
 	struct net *net = dev_net(dev);
 
-	rt = ip_route_output(net, sip, tip, 0, 0);
+	rt = ip_route_output(net, sip, tip, 0, l3mdev_master_ifindex_rcu(dev));
 	if (IS_ERR(rt))
 		return 1;
 	if (rt->dst.dev != dev) {
@@ -658,6 +658,7 @@
 	unsigned char *arp_ptr;
 	struct rtable *rt;
 	unsigned char *sha;
+	unsigned char *tha = NULL;
 	__be32 sip, tip;
 	u16 dev_type = dev->type;
 	int addr_type;
@@ -729,6 +730,7 @@
 		break;
 #endif
 	default:
+		tha = arp_ptr;
 		arp_ptr += dev->addr_len;
 	}
 	memcpy(&tip, arp_ptr, 4);
@@ -847,8 +849,18 @@
 		   It is possible, that this option should be enabled for some
 		   devices (strip is candidate)
 		 */
-		is_garp = arp->ar_op == htons(ARPOP_REQUEST) && tip == sip &&
-			  addr_type == RTN_UNICAST;
+		is_garp = tip == sip && addr_type == RTN_UNICAST;
+
+		/* Unsolicited ARP _replies_ also require target hwaddr to be
+		 * the same as source.
+		 */
+		if (is_garp && arp->ar_op == htons(ARPOP_REPLY))
+			is_garp =
+				/* IPv4 over IEEE 1394 doesn't provide target
+				 * hardware address field in its ARP payload.
+				 */
+				tha &&
+				!memcmp(tha, sha, dev->addr_len);
 
 		if (!n &&
 		    ((arp->ar_op == htons(ARPOP_REPLY)  &&
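
Note on the arp_process() hunk above: gratuitous-ARP detection is tightened so that any packet with tip == sip (and a unicast source) may be a GARP, but ARP replies must additionally carry a target hardware address equal to the sender hardware address, and that field must exist at all (it does not for IPv4 over IEEE 1394). A stand-alone predicate with the same decision logic; the struct and names are invented for illustration, and the kernel's unicast address-type check is omitted here:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    #define ARPOP_REQUEST 1
    #define ARPOP_REPLY   2

    struct garp_check {
        uint16_t op;          /* ARPOP_REQUEST or ARPOP_REPLY            */
        uint32_t sip, tip;    /* sender / target protocol addresses      */
        const uint8_t *sha;   /* sender hardware address                 */
        const uint8_t *tha;   /* target hardware address, may be NULL    */
        size_t addr_len;
    };

    /* Mirrors the is_garp logic above: tip == sip is required, and
     * replies must also carry a target hwaddr equal to the sender's. */
    static bool is_gratuitous_arp(const struct garp_check *c)
    {
        bool garp = c->tip == c->sip;

        if (garp && c->op == ARPOP_REPLY)
            garp = c->tha && !memcmp(c->tha, c->sha, c->addr_len);

        return garp;
    }

    int main(void)
    {
        uint8_t sha[6] = {1, 2, 3, 4, 5, 6}, tha[6] = {1, 2, 3, 4, 5, 6};
        struct garp_check c = { ARPOP_REPLY, 0x0a000001, 0x0a000001,
                                sha, tha, 6 };

        return is_gratuitous_arp(&c) ? 0 : 1;
    }
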
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 20fb25e..3d8021d 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -268,10 +268,11 @@
 	esph->spi = x->id.spi;
 
 	sg_init_table(sg, nfrags);
-	skb_to_sgvec(skb, sg,
-		     (unsigned char *)esph - skb->data,
-		     assoclen + ivlen + clen + alen);
-
+	err = skb_to_sgvec(skb, sg,
+		           (unsigned char *)esph - skb->data,
+		           assoclen + ivlen + clen + alen);
+	if (unlikely(err < 0))
+		goto error;
 	aead_request_set_crypt(req, sg, sg, ivlen + clen, iv);
 	aead_request_set_ad(req, assoclen);
 
@@ -481,7 +482,9 @@
 	}
 
 	sg_init_table(sg, nfrags);
-	skb_to_sgvec(skb, sg, 0, skb->len);
+	err = skb_to_sgvec(skb, sg, 0, skb->len);
+	if (unlikely(err < 0))
+		goto out;
 
 	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
 	aead_request_set_ad(req, assoclen);
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 7e7b7a3..e1be244 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1611,18 +1611,20 @@
 	bool first = false;
 
 	for_nexthops(fi) {
+		if (net->ipv4.sysctl_fib_multipath_use_neigh) {
+			if (!fib_good_nh(nh))
+				continue;
+			if (!first) {
+				res->nh_sel = nhsel;
+				first = true;
+			}
+		}
+
 		if (hash > atomic_read(&nh->nh_upper_bound))
 			continue;
 
-		if (!net->ipv4.sysctl_fib_multipath_use_neigh ||
-		    fib_good_nh(nh)) {
-			res->nh_sel = nhsel;
-			return;
-		}
-		if (!first) {
-			res->nh_sel = nhsel;
-			first = true;
-		}
+		res->nh_sel = nhsel;
+		return;
 	} endfor_nexthops(fi);
 }
 #endif
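
Note on the fib_select_multipath() hunk above: the rework changes the order of the checks. When fib_multipath_use_neigh is enabled, nexthops without a usable neighbour are skipped entirely (remembering the first healthy one as a fallback), and only then is the hash compared against the nexthop's hash-threshold upper bound. A simplified, self-contained version of that selection loop, with made-up types in place of the kernel's fib structures:

    #include <stdbool.h>
    #include <stdio.h>

    struct nexthop {
        int upper_bound;   /* hash-threshold upper bound for this nexthop */
        bool neigh_ok;     /* does it have a usable neighbour entry?      */
    };

    /* Return the index of the chosen nexthop, or the first healthy one
     * (or 0) when no upper bound covers the hash. */
    static int select_multipath(const struct nexthop *nh, int n,
                                int hash, bool use_neigh)
    {
        int sel = 0;
        bool first = false;

        for (int i = 0; i < n; i++) {
            if (use_neigh) {
                if (!nh[i].neigh_ok)
                    continue;           /* skip dead nexthops outright  */
                if (!first) {
                    sel = i;            /* remember first healthy one   */
                    first = true;
                }
            }
            if (hash > nh[i].upper_bound)
                continue;
            return i;                   /* hash falls in this bucket    */
        }
        return sel;
    }

    int main(void)
    {
        struct nexthop nh[] = {
            { .upper_bound = 100, .neigh_ok = false },
            { .upper_bound = 200, .neigh_ok = true  },
        };

        /* hash 50 would normally pick nexthop 0, but its neighbour is
         * dead, so the healthy nexthop 1 is chosen instead */
        printf("selected nexthop %d\n", select_multipath(nh, 2, 50, true));
        return 0;
    }
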
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index ddcd56c..a6b34ac 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -182,6 +182,7 @@
 		tw->tw_dport	    = inet->inet_dport;
 		tw->tw_family	    = sk->sk_family;
 		tw->tw_reuse	    = sk->sk_reuse;
+		tw->tw_reuseport    = sk->sk_reuseport;
 		tw->tw_hash	    = sk->sk_hash;
 		tw->tw_ipv6only	    = 0;
 		tw->tw_transparent  = inet->transparent;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index e60f9fa..d0bd98f 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1098,7 +1098,8 @@
 		if (copy > length)
 			copy = length;
 
-		if (!(rt->dst.dev->features&NETIF_F_SG)) {
+		if (!(rt->dst.dev->features&NETIF_F_SG) &&
+		    skb_tailroom(skb) >= copy) {
 			unsigned int off;
 
 			off = skb->len;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 96536a0..e1271e75 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -253,13 +253,14 @@
 	struct net_device *dev;
 	char name[IFNAMSIZ];
 
-	if (parms->name[0])
-		strlcpy(name, parms->name, IFNAMSIZ);
-	else {
-		if (strlen(ops->kind) > (IFNAMSIZ - 3)) {
-			err = -E2BIG;
+	err = -E2BIG;
+	if (parms->name[0]) {
+		if (!dev_valid_name(parms->name))
 			goto failed;
-		}
+		strlcpy(name, parms->name, IFNAMSIZ);
+	} else {
+		if (strlen(ops->kind) > (IFNAMSIZ - 3))
+			goto failed;
 		strlcpy(name, ops->kind, IFNAMSIZ);
 		strncat(name, "%d", 2);
 	}
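
Note on the tunnel hunks in this merge (ip_tunnel, ip6_gre, ip6_tnl, vti6, sit): each one now runs a user-supplied interface name through dev_valid_name() before copying it. Roughly, that helper rejects empty names, names that do not fit in IFNAMSIZ, "." and "..", and names containing '/', ':' or whitespace. A user-space approximation of that validation, for illustration only; the exact upstream rules may differ in detail:

    #include <ctype.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define IFNAMSIZ 16

    /* Rough approximation of dev_valid_name(). */
    static bool valid_ifname(const char *name)
    {
        if (name[0] == '\0')
            return false;
        if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
            return false;               /* does not fit, including NUL */
        if (!strcmp(name, ".") || !strcmp(name, ".."))
            return false;
        for (; *name; name++) {
            if (*name == '/' || *name == ':' ||
                isspace((unsigned char)*name))
                return false;
        }
        return true;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               valid_ifname("tun0"),          /* 1                      */
               valid_ifname("evil/../name"),  /* 0: contains '/'        */
               valid_ifname(""));             /* 0: empty               */
        return 0;
    }
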
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index b120b9b..cbff0d6 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -396,7 +396,6 @@
 	memcpy(dev->dev_addr, &iph->saddr, 4);
 	memcpy(dev->broadcast, &iph->daddr, 4);
 
-	dev->hard_header_len	= LL_MAX_HEADER + sizeof(struct iphdr);
 	dev->mtu		= ETH_DATA_LEN;
 	dev->flags		= IFF_NOARP;
 	dev->addr_len		= 4;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 27089f5..742a343 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1929,6 +1929,20 @@
 	struct net *net = dev_net(skb->dev);
 	int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
 	struct mr_table *mrt;
+	struct net_device *dev;
+
+	/* skb->dev passed in is the loX master dev for vrfs.
+	 * As there are no vifs associated with loopback devices,
+	 * get the proper interface that does have a vif associated with it.
+	 */
+	dev = skb->dev;
+	if (netif_is_l3_master(skb->dev)) {
+		dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
+		if (!dev) {
+			kfree_skb(skb);
+			return -ENODEV;
+		}
+	}
 
 	/* Packet is looped back after forward, it should not be
 	 * forwarded second time, but still can be delivered locally.
@@ -1966,7 +1980,7 @@
 	/* already under rcu_read_lock() */
 	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
 	if (!cache) {
-		int vif = ipmr_find_vif(mrt, skb->dev);
+		int vif = ipmr_find_vif(mrt, dev);
 
 		if (vif >= 0)
 			cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
@@ -1986,7 +2000,7 @@
 		}
 
 		read_lock(&mrt_lock);
-		vif = ipmr_find_vif(mrt, skb->dev);
+		vif = ipmr_find_vif(mrt, dev);
 		if (vif >= 0) {
 			int err2 = ipmr_cache_unresolved(mrt, vif, skb);
 			read_unlock(&mrt_lock);
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index 574f7eb..ac8342d 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -252,16 +252,16 @@
 	if (set_h245_addr(skb, protoff, data, dataoff, taddr,
 			  &ct->tuplehash[!dir].tuple.dst.u3,
 			  htons((port & htons(1)) ? nated_port + 1 :
-						    nated_port)) == 0) {
-		/* Save ports */
-		info->rtp_port[i][dir] = rtp_port;
-		info->rtp_port[i][!dir] = htons(nated_port);
-	} else {
+						    nated_port))) {
 		nf_ct_unexpect_related(rtp_exp);
 		nf_ct_unexpect_related(rtcp_exp);
 		return -1;
 	}
 
+	/* Save ports */
+	info->rtp_port[i][dir] = rtp_port;
+	info->rtp_port[i][!dir] = htons(nated_port);
+
 	/* Success */
 	pr_debug("nf_nat_h323: expect RTP %pI4:%hu->%pI4:%hu\n",
 		 &rtp_exp->tuple.src.u3.ip,
@@ -370,15 +370,15 @@
 	/* Modify signal */
 	if (set_h225_addr(skb, protoff, data, dataoff, taddr,
 			  &ct->tuplehash[!dir].tuple.dst.u3,
-			  htons(nated_port)) == 0) {
-		/* Save ports */
-		info->sig_port[dir] = port;
-		info->sig_port[!dir] = htons(nated_port);
-	} else {
+			  htons(nated_port))) {
 		nf_ct_unexpect_related(exp);
 		return -1;
 	}
 
+	/* Save ports */
+	info->sig_port[dir] = port;
+	info->sig_port[!dir] = htons(nated_port);
+
 	pr_debug("nf_nat_q931: expect H.245 %pI4:%hu->%pI4:%hu\n",
 		 &exp->tuple.src.u3.ip,
 		 ntohs(exp->tuple.src.u.tcp.port),
@@ -462,24 +462,27 @@
 	/* Modify signal */
 	if (set_h225_addr(skb, protoff, data, 0, &taddr[idx],
 			  &ct->tuplehash[!dir].tuple.dst.u3,
-			  htons(nated_port)) == 0) {
-		/* Save ports */
-		info->sig_port[dir] = port;
-		info->sig_port[!dir] = htons(nated_port);
-
-		/* Fix for Gnomemeeting */
-		if (idx > 0 &&
-		    get_h225_addr(ct, *data, &taddr[0], &addr, &port) &&
-		    (ntohl(addr.ip) & 0xff000000) == 0x7f000000) {
-			set_h225_addr(skb, protoff, data, 0, &taddr[0],
-				      &ct->tuplehash[!dir].tuple.dst.u3,
-				      info->sig_port[!dir]);
-		}
-	} else {
+			  htons(nated_port))) {
 		nf_ct_unexpect_related(exp);
 		return -1;
 	}
 
+	/* Save ports */
+	info->sig_port[dir] = port;
+	info->sig_port[!dir] = htons(nated_port);
+
+	/* Fix for Gnomemeeting */
+	if (idx > 0 &&
+	    get_h225_addr(ct, *data, &taddr[0], &addr, &port) &&
+	    (ntohl(addr.ip) & 0xff000000) == 0x7f000000) {
+		if (set_h225_addr(skb, protoff, data, 0, &taddr[0],
+				  &ct->tuplehash[!dir].tuple.dst.u3,
+				  info->sig_port[!dir])) {
+			nf_ct_unexpect_related(exp);
+			return -1;
+		}
+	}
+
 	/* Success */
 	pr_debug("nf_nat_ras: expect Q.931 %pI4:%hu->%pI4:%hu\n",
 		 &exp->tuple.src.u3.ip,
@@ -550,9 +553,9 @@
 	}
 
 	/* Modify signal */
-	if (!set_h225_addr(skb, protoff, data, dataoff, taddr,
-			   &ct->tuplehash[!dir].tuple.dst.u3,
-			   htons(nated_port)) == 0) {
+	if (set_h225_addr(skb, protoff, data, dataoff, taddr,
+			  &ct->tuplehash[!dir].tuple.dst.u3,
+			  htons(nated_port))) {
 		nf_ct_unexpect_related(exp);
 		return -1;
 	}
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 93bfadf..8fa153c 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -775,8 +775,10 @@
 	ipc.addr = faddr = daddr;
 
 	if (ipc.opt && ipc.opt->opt.srr) {
-		if (!daddr)
-			return -EINVAL;
+		if (!daddr) {
+			err = -EINVAL;
+			goto out_free;
+		}
 		faddr = ipc.opt->opt.faddr;
 	}
 	tos = get_rttos(&ipc, inet);
@@ -842,6 +844,7 @@
 
 out:
 	ip_rt_put(rt);
+out_free:
 	if (free)
 		kfree(ipc.opt);
 	if (!err) {
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index e83fdf6..583967b 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -621,6 +621,7 @@
 static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
 {
 	rt->rt_pmtu = fnhe->fnhe_pmtu;
+	rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
 	rt->dst.expires = fnhe->fnhe_expires;
 
 	if (fnhe->fnhe_gw) {
@@ -631,7 +632,7 @@
 }
 
 static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
-				  u32 pmtu, unsigned long expires)
+				  u32 pmtu, bool lock, unsigned long expires)
 {
 	struct fnhe_hash_bucket *hash;
 	struct fib_nh_exception *fnhe;
@@ -668,8 +669,10 @@
 			fnhe->fnhe_genid = genid;
 		if (gw)
 			fnhe->fnhe_gw = gw;
-		if (pmtu)
+		if (pmtu) {
 			fnhe->fnhe_pmtu = pmtu;
+			fnhe->fnhe_mtu_locked = lock;
+		}
 		fnhe->fnhe_expires = max(1UL, expires);
 		/* Update all cached dsts too */
 		rt = rcu_dereference(fnhe->fnhe_rth_input);
@@ -693,6 +696,7 @@
 		fnhe->fnhe_daddr = daddr;
 		fnhe->fnhe_gw = gw;
 		fnhe->fnhe_pmtu = pmtu;
+		fnhe->fnhe_mtu_locked = lock;
 		fnhe->fnhe_expires = expires;
 
 		/* Exception created; mark the cached routes for the nexthop
@@ -774,7 +778,8 @@
 				struct fib_nh *nh = &FIB_RES_NH(res);
 
 				update_or_create_fnhe(nh, fl4->daddr, new_gw,
-						0, jiffies + ip_rt_gc_timeout);
+						0, false,
+						jiffies + ip_rt_gc_timeout);
 			}
 			if (kill_route)
 				rt->dst.obsolete = DST_OBSOLETE_KILL;
@@ -987,15 +992,18 @@
 {
 	struct dst_entry *dst = &rt->dst;
 	struct fib_result res;
+	bool lock = false;
 
-	if (dst_metric_locked(dst, RTAX_MTU))
+	if (ip_mtu_locked(dst))
 		return;
 
 	if (ipv4_mtu(dst) < mtu)
 		return;
 
-	if (mtu < ip_rt_min_pmtu)
+	if (mtu < ip_rt_min_pmtu) {
+		lock = true;
 		mtu = ip_rt_min_pmtu;
+	}
 
 	if (rt->rt_pmtu == mtu &&
 	    time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
@@ -1005,7 +1013,7 @@
 	if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
 		struct fib_nh *nh = &FIB_RES_NH(res);
 
-		update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
+		update_or_create_fnhe(nh, fl4->daddr, 0, mtu, lock,
 				      jiffies + ip_rt_mtu_expires);
 	}
 	rcu_read_unlock();
@@ -1262,7 +1270,7 @@
 
 	mtu = READ_ONCE(dst->dev->mtu);
 
-	if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
+	if (unlikely(ip_mtu_locked(dst))) {
 		if (rt->rt_uses_gateway && mtu > 576)
 			mtu = 576;
 	}
@@ -1487,6 +1495,7 @@
 		rt->rt_is_input = 0;
 		rt->rt_iif = 0;
 		rt->rt_pmtu = 0;
+		rt->rt_mtu_locked = 0;
 		rt->rt_gateway = 0;
 		rt->rt_uses_gateway = 0;
 		rt->rt_table_id = 0;
@@ -2410,6 +2419,7 @@
 		rt->rt_is_input = ort->rt_is_input;
 		rt->rt_iif = ort->rt_iif;
 		rt->rt_pmtu = ort->rt_pmtu;
+		rt->rt_mtu_locked = ort->rt_mtu_locked;
 
 		rt->rt_genid = rt_genid_ipv4(net);
 		rt->rt_flags = ort->rt_flags;
@@ -2512,6 +2522,8 @@
 	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
 	if (rt->rt_pmtu && expires)
 		metrics[RTAX_MTU - 1] = rt->rt_pmtu;
+	if (rt->rt_mtu_locked && expires)
+		metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
 	if (rtnetlink_put_metrics(skb, metrics) < 0)
 		goto nla_put_failure;
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index af0b324..fdfaaf0 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1146,7 +1146,8 @@
 	lock_sock(sk);
 
 	flags = msg->msg_flags;
-	if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect)) {
+	if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect) &&
+	    !tp->repair) {
 		err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
 		if (err == -EINPROGRESS && copied_syn > 0)
 			goto out;
@@ -2558,7 +2559,7 @@
 	case TCP_REPAIR_QUEUE:
 		if (!tp->repair)
 			err = -EPERM;
-		else if (val < TCP_QUEUES_NR)
+		else if ((unsigned int)val < TCP_QUEUES_NR)
 			tp->repair_queue = val;
 		else
 			err = -EINVAL;
@@ -2697,8 +2698,10 @@
 
 #ifdef CONFIG_TCP_MD5SIG
 	case TCP_MD5SIG:
-		/* Read the IP->Key mappings from userspace */
-		err = tp->af_specific->md5_parse(sk, optval, optlen);
+		if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
+			err = tp->af_specific->md5_parse(sk, optval, optlen);
+		else
+			err = -EINVAL;
 		break;
 #endif
 	case TCP_USER_TIMEOUT:
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 8ec6053..91698595 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -773,7 +773,9 @@
 			}
 		}
 	}
-	bbr->idle_restart = 0;
+	/* Restart after idle ends only once we process a new S/ACK for data */
+	if (rs->delivered > 0)
+		bbr->idle_restart = 0;
 }
 
 static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index c8e6d86..95ca887 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -6,7 +6,7 @@
  * The algorithm is described in:
  * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm
  *  for High-Speed Networks"
- * http://www.ifp.illinois.edu/~srikant/Papers/liubassri06perf.pdf
+ * http://tamerbasar.csl.illinois.edu/LiuBasarSrikantPerfEvalArtJun2008.pdf
  *
  * Implemented from description in paper and ns-2 simulation.
  * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org>
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index c14c213..c2ad59d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -116,6 +116,7 @@
 #define FLAG_DSACKING_ACK	0x800 /* SACK blocks contained D-SACK info */
 #define FLAG_SACK_RENEGING	0x2000 /* snd_una advanced to a sacked seq */
 #define FLAG_UPDATE_TS_RECENT	0x4000 /* tcp_replace_ts_recent() */
+#define FLAG_NO_CHALLENGE_ACK	0x8000 /* do not call tcp_send_challenge_ack()	*/
 
 #define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)
 #define FLAG_NOT_DUP		(FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
@@ -3619,7 +3620,8 @@
 	if (before(ack, prior_snd_una)) {
 		/* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */
 		if (before(ack, prior_snd_una - tp->max_window)) {
-			tcp_send_challenge_ack(sk, skb);
+			if (!(flag & FLAG_NO_CHALLENGE_ACK))
+				tcp_send_challenge_ack(sk, skb);
 			return -1;
 		}
 		goto old_ack;
@@ -3942,11 +3944,8 @@
 	int length = (th->doff << 2) - sizeof(*th);
 	const u8 *ptr = (const u8 *)(th + 1);
 
-	/* If the TCP option is too short, we can short cut */
-	if (length < TCPOLEN_MD5SIG)
-		return NULL;
-
-	while (length > 0) {
+	/* If not enough data remaining, we can short cut */
+	while (length >= TCPOLEN_MD5SIG) {
 		int opcode = *ptr++;
 		int opsize;
 
@@ -5971,13 +5970,17 @@
 
 	/* step 5: check the ACK field */
 	acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
-				      FLAG_UPDATE_TS_RECENT) > 0;
+				      FLAG_UPDATE_TS_RECENT |
+				      FLAG_NO_CHALLENGE_ACK) > 0;
 
+	if (!acceptable) {
+		if (sk->sk_state == TCP_SYN_RECV)
+			return 1;	/* send one RST */
+		tcp_send_challenge_ack(sk, skb);
+		goto discard;
+	}
 	switch (sk->sk_state) {
 	case TCP_SYN_RECV:
-		if (!acceptable)
-			return 1;
-
 		if (!tp->srtt_us)
 			tcp_synack_rtt_meas(sk, req);
 
@@ -6047,14 +6050,6 @@
 		 * our SYNACK so stop the SYNACK timer.
 		 */
 		if (req) {
-			/* Return RST if ack_seq is invalid.
-			 * Note that RFC793 only says to generate a
-			 * DUPACK for it but for TCP Fast Open it seems
-			 * better to treat this case like TCP_SYN_RECV
-			 * above.
-			 */
-			if (!acceptable)
-				return 1;
 			/* We no longer need the request sock. */
 			reqsk_fastopen_remove(sk, req, false);
 			tcp_rearm_rto(sk);
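
Note on the tcp_parse_md5sig_option() hunk above: the early "option too short" bail-out is replaced by a loop condition that re-checks the remaining length on every iteration, which is the safe shape for any TLV walk. A generic, self-contained option walker in the same style; the option layout here is the usual kind/length/value encoding and the constants are made up, not tied to any particular header:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define OPT_EOL    0   /* end of option list               */
    #define OPT_NOP    1   /* one-byte padding                 */
    #define OPT_TARGET 19  /* the option kind we search for    */
    #define OPT_MINLEN 18  /* smallest size OPT_TARGET may have */

    /* Return a pointer to the first OPT_TARGET option, or NULL.  The loop
     * condition keeps at least a full target option's worth of data in
     * view, so a truncated or hostile option block cannot walk off the
     * end of the buffer. */
    static const uint8_t *find_option(const uint8_t *p, int length)
    {
        while (length >= OPT_MINLEN) {
            int kind = *p++;
            int size;

            if (kind == OPT_EOL)
                return NULL;
            if (kind == OPT_NOP) {
                length--;
                continue;
            }
            size = *p++;
            if (size < 2 || size > length)
                return NULL;             /* malformed length */
            if (kind == OPT_TARGET && size == OPT_MINLEN)
                return p - 2;            /* points at the kind byte */
            p += size - 2;
            length -= size;
        }
        return NULL;
    }

    int main(void)
    {
        const uint8_t opts[] = { OPT_NOP, OPT_NOP, OPT_TARGET, 18,
                                 0, 0, 0, 0, 0, 0, 0, 0,
                                 0, 0, 0, 0, 0, 0, 0, 0 };

        printf("found at offset %td\n",
               find_option(opts, (int)sizeof(opts)) - opts);   /* 2 */
        return 0;
    }
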
diff --git a/net/ipv4/tcp_nv.c b/net/ipv4/tcp_nv.c
index e45e2c4..37a3cb9 100644
--- a/net/ipv4/tcp_nv.c
+++ b/net/ipv4/tcp_nv.c
@@ -338,7 +338,7 @@
 		 */
 		cwnd_by_slope = (u32)
 			div64_u64(((u64)ca->nv_rtt_max_rate) * ca->nv_min_rtt,
-				  (u64)(80000 * tp->mss_cache));
+				  80000ULL * tp->mss_cache);
 		max_win = cwnd_by_slope + nv_pad;
 
 		/* If cwnd > max_win, decrease cwnd
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 16a473a..70c7212 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2691,8 +2691,10 @@
 		return -EBUSY;
 
 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
-		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
-			BUG();
+		if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
+			WARN_ON_ONCE(1);
+			return -EINVAL;
+		}
 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
 			return -ENOMEM;
 	}
@@ -3236,6 +3238,7 @@
 	sock_reset_flag(sk, SOCK_DONE);
 	tp->snd_wnd = 0;
 	tcp_init_wl(tp, 0);
+	tcp_write_queue_purge(sk);
 	tp->snd_una = tp->write_seq;
 	tp->snd_sml = tp->write_seq;
 	tp->snd_up = tp->write_seq;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 5af27b9..885cc39 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -987,8 +987,10 @@
 	sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags);
 
 	if (ipc.opt && ipc.opt->opt.srr) {
-		if (!daddr)
-			return -EINVAL;
+		if (!daddr) {
+			err = -EINVAL;
+			goto out_free;
+		}
 		faddr = ipc.opt->opt.faddr;
 		connected = 0;
 	}
@@ -1096,6 +1098,7 @@
 
 out:
 	ip_rt_put(rt);
+out_free:
 	if (free)
 		kfree(ipc.opt);
 	if (!err)
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 7f9a8df..e62d76c9 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -101,6 +101,7 @@
 	xdst->u.rt.rt_gateway = rt->rt_gateway;
 	xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway;
 	xdst->u.rt.rt_pmtu = rt->rt_pmtu;
+	xdst->u.rt.rt_mtu_locked = rt->rt_mtu_locked;
 	xdst->u.rt.rt_table_id = rt->rt_table_id;
 	INIT_LIST_HEAD(&xdst->u.rt.rt_uncached);
 
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index a5c1ff3..9077060 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -994,7 +994,10 @@
 	INIT_HLIST_NODE(&ifa->addr_lst);
 	ifa->scope = scope;
 	ifa->prefix_len = pfxlen;
-	ifa->flags = flags | IFA_F_TENTATIVE;
+	ifa->flags = flags;
+	/* No need to add the TENTATIVE flag for addresses with NODAD */
+	if (!(flags & IFA_F_NODAD))
+		ifa->flags |= IFA_F_TENTATIVE;
 	ifa->valid_lft = valid_lft;
 	ifa->prefered_lft = prefered_lft;
 	ifa->cstamp = ifa->tstamp = jiffies;
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 189eb10..e742c4d 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -423,7 +423,9 @@
 	ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
 
 	sg_init_table(sg, nfrags + sglists);
-	skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+	if (unlikely(err < 0))
+		goto out_free;
 
 	if (x->props.flags & XFRM_STATE_ESN) {
 		/* Attach seqhi sg right after packet payload */
@@ -603,7 +605,9 @@
 	ip6h->hop_limit   = 0;
 
 	sg_init_table(sg, nfrags + sglists);
-	skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+	if (unlikely(err < 0))
+		goto out_free;
 
 	if (x->props.flags & XFRM_STATE_ESN) {
 		/* Attach seqhi sg right after packet payload */
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index cbcdd5d..44a2010 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -248,9 +248,11 @@
 	esph->spi = x->id.spi;
 
 	sg_init_table(sg, nfrags);
-	skb_to_sgvec(skb, sg,
-		     (unsigned char *)esph - skb->data,
-		     assoclen + ivlen + clen + alen);
+	err = skb_to_sgvec(skb, sg,
+		           (unsigned char *)esph - skb->data,
+		           assoclen + ivlen + clen + alen);
+	if (unlikely(err < 0))
+		goto error;
 
 	aead_request_set_crypt(req, sg, sg, ivlen + clen, iv);
 	aead_request_set_ad(req, assoclen);
@@ -423,7 +425,9 @@
 	}
 
 	sg_init_table(sg, nfrags);
-	skb_to_sgvec(skb, sg, 0, skb->len);
+	ret = skb_to_sgvec(skb, sg, 0, skb->len);
+	if (unlikely(ret < 0))
+		goto out;
 
 	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
 	aead_request_set_ad(req, assoclen);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 21bd54f..a88aff0 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -319,11 +319,13 @@
 	if (t || !create)
 		return t;
 
-	if (parms->name[0])
+	if (parms->name[0]) {
+		if (!dev_valid_name(parms->name))
+			return NULL;
 		strlcpy(name, parms->name, IFNAMSIZ);
-	else
+	} else {
 		strcpy(name, "ip6gre%d");
-
+	}
 	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
 			   ip6gre_tunnel_setup);
 	if (!dev)
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 7e24ced..0b5a75b 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -365,6 +365,11 @@
 static inline int ip6_forward_finish(struct net *net, struct sock *sk,
 				     struct sk_buff *skb)
 {
+	struct dst_entry *dst = skb_dst(skb);
+
+	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
+	__IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
+
 	return dst_output(net, sk, skb);
 }
 
@@ -558,8 +563,6 @@
 
 	hdr->hop_limit--;
 
-	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
-	__IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
 	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
 		       net, NULL, skb, skb->dev, dst->dev,
 		       ip6_forward_finish);
@@ -1299,7 +1302,7 @@
 			     const struct sockcm_cookie *sockc)
 {
 	struct sk_buff *skb, *skb_prev = NULL;
-	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
+	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu, pmtu;
 	int exthdrlen = 0;
 	int dst_exthdrlen = 0;
 	int hh_len;
@@ -1335,6 +1338,12 @@
 		      sizeof(struct frag_hdr) : 0) +
 		     rt->rt6i_nfheader_len;
 
+	/* as per RFC 7112 section 5, the entire IPv6 Header Chain must fit
+	 * the first fragment
+	 */
+	if (headersize + transhdrlen > mtu)
+		goto emsgsize;
+
 	if (cork->length + length > mtu - headersize && ipc6->dontfrag &&
 	    (sk->sk_protocol == IPPROTO_UDP ||
 	     sk->sk_protocol == IPPROTO_RAW)) {
@@ -1350,9 +1359,8 @@
 
 	if (cork->length + length > maxnonfragsize - headersize) {
 emsgsize:
-		ipv6_local_error(sk, EMSGSIZE, fl6,
-				 mtu - headersize +
-				 sizeof(struct ipv6hdr));
+		pmtu = max_t(int, mtu - headersize + sizeof(struct ipv6hdr), 0);
+		ipv6_local_error(sk, EMSGSIZE, fl6, pmtu);
 		return -EMSGSIZE;
 	}
 
@@ -1545,7 +1553,8 @@
 		if (copy > length)
 			copy = length;
 
-		if (!(rt->dst.dev->features&NETIF_F_SG)) {
+		if (!(rt->dst.dev->features&NETIF_F_SG) &&
+		    skb_tailroom(skb) >= copy) {
 			unsigned int off;
 
 			off = skb->len;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 8ff62e4..5603410 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -298,13 +298,16 @@
 	struct net_device *dev;
 	struct ip6_tnl *t;
 	char name[IFNAMSIZ];
-	int err = -ENOMEM;
+	int err = -E2BIG;
 
-	if (p->name[0])
+	if (p->name[0]) {
+		if (!dev_valid_name(p->name))
+			goto failed;
 		strlcpy(name, p->name, IFNAMSIZ);
-	else
+	} else {
 		sprintf(name, "ip6tnl%%d");
-
+	}
+	err = -ENOMEM;
 	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
 			   ip6_tnl_dev_setup);
 	if (!dev)
@@ -1097,6 +1100,9 @@
 
 	if (!dst) {
 route_lookup:
+		/* add dsfield to flowlabel for route lookup */
+		fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel);
+
 		dst = ip6_route_output(net, NULL, fl6);
 
 		if (dst->error)
@@ -1970,14 +1976,14 @@
 {
 	struct net *net = dev_net(dev);
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
-	struct ip6_tnl *nt, *t;
 	struct ip_tunnel_encap ipencap;
+	struct ip6_tnl *nt, *t;
+	int err;
 
 	nt = netdev_priv(dev);
 
 	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
-		int err = ip6_tnl_encap_setup(nt, &ipencap);
-
+		err = ip6_tnl_encap_setup(nt, &ipencap);
 		if (err < 0)
 			return err;
 	}
@@ -1993,7 +1999,11 @@
 			return -EEXIST;
 	}
 
-	return ip6_tnl_create2(dev);
+	err = ip6_tnl_create2(dev);
+	if (!err && tb[IFLA_MTU])
+		ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
+
+	return err;
 }
 
 static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 2e3f4f1..5ae1681 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -212,10 +212,13 @@
 	char name[IFNAMSIZ];
 	int err;
 
-	if (p->name[0])
+	if (p->name[0]) {
+		if (!dev_valid_name(p->name))
+			goto failed;
 		strlcpy(name, p->name, IFNAMSIZ);
-	else
+	} else {
 		sprintf(name, "ip6_vti%%d");
+	}
 
 	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, vti6_dev_setup);
 	if (!dev)
@@ -626,7 +629,6 @@
 {
 	struct net_device *dev = t->dev;
 	struct __ip6_tnl_parm *p = &t->parms;
-	struct net_device *tdev = NULL;
 
 	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
 	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
@@ -639,25 +641,6 @@
 		dev->flags |= IFF_POINTOPOINT;
 	else
 		dev->flags &= ~IFF_POINTOPOINT;
-
-	if (p->flags & IP6_TNL_F_CAP_XMIT) {
-		int strict = (ipv6_addr_type(&p->raddr) &
-			      (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
-		struct rt6_info *rt = rt6_lookup(t->net,
-						 &p->raddr, &p->laddr,
-						 p->link, strict);
-
-		if (rt)
-			tdev = rt->dst.dev;
-		ip6_rt_put(rt);
-	}
-
-	if (!tdev && p->link)
-		tdev = __dev_get_by_index(t->net, p->link);
-
-	if (tdev)
-		dev->mtu = max_t(int, tdev->mtu - dev->hard_header_len,
-				 IPV6_MIN_MTU);
 }
 
 /**
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 0d54cc5..0fbc5ba 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -847,6 +847,9 @@
 	struct fib6_node *fn;
 	struct rt6_info *rt;
 
+	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
+		flags &= ~RT6_LOOKUP_F_IFACE;
+
 	read_lock_bh(&table->tb6_lock);
 	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
 restart:
@@ -2773,6 +2776,7 @@
 
 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
 	[RTA_GATEWAY]           = { .len = sizeof(struct in6_addr) },
+	[RTA_PREFSRC]		= { .len = sizeof(struct in6_addr) },
 	[RTA_OIF]               = { .type = NLA_U32 },
 	[RTA_IIF]		= { .type = NLA_U32 },
 	[RTA_PRIORITY]          = { .type = NLA_U32 },
@@ -2783,6 +2787,7 @@
 	[RTA_ENCAP]		= { .type = NLA_NESTED },
 	[RTA_EXPIRES]		= { .type = NLA_U32 },
 	[RTA_UID]		= { .type = NLA_U32 },
+	[RTA_TABLE]		= { .type = NLA_U32 },
 };
 
 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index d4d84da..ae0485d 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -244,11 +244,13 @@
 	if (!create)
 		goto failed;
 
-	if (parms->name[0])
+	if (parms->name[0]) {
+		if (!dev_valid_name(parms->name))
+			goto failed;
 		strlcpy(name, parms->name, IFNAMSIZ);
-	else
+	} else {
 		strcpy(name, "sit%d");
-
+	}
 	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
 			   ipip6_tunnel_setup);
 	if (!dev)
@@ -657,6 +659,7 @@
 		if (iptunnel_pull_header(skb, 0, htons(ETH_P_IPV6),
 		    !net_eq(tunnel->net, dev_net(tunnel->dev))))
 			goto out;
+		iph = ip_hdr(skb);
 
 		err = IP_ECN_decapsulate(iph, skb);
 		if (unlikely(err)) {
@@ -1569,6 +1572,13 @@
 	if (err < 0)
 		return err;
 
+	if (tb[IFLA_MTU]) {
+		u32 mtu = nla_get_u32(tb[IFLA_MTU]);
+
+		if (mtu >= IPV6_MIN_MTU && mtu <= 0xFFF8 - dev->hard_header_len)
+			dev->mtu = mtu;
+	}
+
 #ifdef CONFIG_IPV6_SIT_6RD
 	if (ipip6_netlink_6rd_parms(data, &ip6rd))
 		err = ipip6_tunnel_update_6rd(nt, &ip6rd);
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 63e6d08..cc306de 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1424,6 +1424,7 @@
 	 */
 	if (csk->sk_user_data) {
 		write_unlock_bh(&csk->sk_callback_lock);
+		strp_stop(&psock->strp);
 		strp_done(&psock->strp);
 		kmem_cache_free(kcm_psockp, psock);
 		err = -EALREADY;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 6482b00..15150b4 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3305,7 +3305,7 @@
 		p += pol->sadb_x_policy_len*8;
 		sec_ctx = (struct sadb_x_sec_ctx *)p;
 		if (len < pol->sadb_x_policy_len*8 +
-		    sec_ctx->sadb_x_sec_len) {
+		    sec_ctx->sadb_x_sec_len*8) {
 			*dir = -EINVAL;
 			goto out;
 		}
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 163f1fa..9b214f3 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -590,6 +590,13 @@
 	lock_sock(sk);
 
 	error = -EINVAL;
+
+	if (sockaddr_len != sizeof(struct sockaddr_pppol2tp) &&
+	    sockaddr_len != sizeof(struct sockaddr_pppol2tpv3) &&
+	    sockaddr_len != sizeof(struct sockaddr_pppol2tpin6) &&
+	    sockaddr_len != sizeof(struct sockaddr_pppol2tpv3in6))
+		goto end;
+
 	if (sp->sa_protocol != PX_PROTO_OL2TP)
 		goto end;
 
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index db916cf..85aae8c 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -197,9 +197,19 @@
 		llc->laddr.lsap, llc->daddr.lsap);
 	if (!llc_send_disc(sk))
 		llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
-	if (!sock_flag(sk, SOCK_ZAPPED))
+	if (!sock_flag(sk, SOCK_ZAPPED)) {
+		struct llc_sap *sap = llc->sap;
+
+		/* Hold this for release_sock(), so that llc_backlog_rcv()
+		 * could still use it.
+		 */
+		llc_sap_hold(sap);
 		llc_sap_remove_socket(llc->sap, sk);
-	release_sock(sk);
+		release_sock(sk);
+		llc_sap_put(sap);
+	} else {
+		release_sock(sk);
+	}
 	if (llc->dev)
 		dev_put(llc->dev);
 	sock_put(sk);
@@ -309,6 +319,8 @@
 	int rc = -EINVAL;
 
 	dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
+
+	lock_sock(sk);
 	if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)))
 		goto out;
 	rc = -EAFNOSUPPORT;
@@ -380,6 +392,7 @@
 out_put:
 	llc_sap_put(sap);
 out:
+	release_sock(sk);
 	return rc;
 }
 
@@ -913,6 +926,9 @@
 	if (size > llc->dev->mtu)
 		size = llc->dev->mtu;
 	copied = size - hdrlen;
+	rc = -EINVAL;
+	if (copied < 0)
+		goto release;
 	release_sock(sk);
 	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
 	lock_sock(sk);
diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
index ea225bd..4b60f68 100644
--- a/net/llc/llc_c_ac.c
+++ b/net/llc/llc_c_ac.c
@@ -389,7 +389,7 @@
 	llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR);
 	rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
 	if (likely(!rc)) {
-		llc_conn_send_pdu(sk, skb);
+		rc = llc_conn_send_pdu(sk, skb);
 		llc_conn_ac_inc_vs_by_1(sk, skb);
 	}
 	return rc;
@@ -916,7 +916,7 @@
 	llc_pdu_init_as_i_cmd(skb, llc->ack_pf, llc->vS, llc->vR);
 	rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
 	if (likely(!rc)) {
-		llc_conn_send_pdu(sk, skb);
+		rc = llc_conn_send_pdu(sk, skb);
 		llc_conn_ac_inc_vs_by_1(sk, skb);
 	}
 	return rc;
@@ -935,14 +935,17 @@
 int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb)
 {
 	struct llc_sock *llc = llc_sk(sk);
+	int ret;
 
 	if (llc->ack_must_be_send) {
-		llc_conn_ac_send_i_rsp_f_set_ackpf(sk, skb);
+		ret = llc_conn_ac_send_i_rsp_f_set_ackpf(sk, skb);
 		llc->ack_must_be_send = 0 ;
 		llc->ack_pf = 0;
-	} else
-		llc_conn_ac_send_i_cmd_p_set_0(sk, skb);
-	return 0;
+	} else {
+		ret = llc_conn_ac_send_i_cmd_p_set_0(sk, skb);
+	}
+
+	return ret;
 }
 
 /**
@@ -1096,14 +1099,7 @@
 
 int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb)
 {
-	struct llc_sock *llc = llc_sk(sk);
-
-	del_timer(&llc->pf_cycle_timer.timer);
-	del_timer(&llc->ack_timer.timer);
-	del_timer(&llc->rej_sent_timer.timer);
-	del_timer(&llc->busy_state_timer.timer);
-	llc->ack_must_be_send = 0;
-	llc->ack_pf = 0;
+	llc_sk_stop_all_timers(sk, false);
 	return 0;
 }
 
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 8bc5a1b..79c346f 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -30,7 +30,7 @@
 #endif
 
 static int llc_find_offset(int state, int ev_type);
-static void llc_conn_send_pdus(struct sock *sk);
+static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *skb);
 static int llc_conn_service(struct sock *sk, struct sk_buff *skb);
 static int llc_exec_conn_trans_actions(struct sock *sk,
 				       struct llc_conn_state_trans *trans,
@@ -193,11 +193,11 @@
 	return rc;
 }
 
-void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
+int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
 {
 	/* queue PDU to send to MAC layer */
 	skb_queue_tail(&sk->sk_write_queue, skb);
-	llc_conn_send_pdus(sk);
+	return llc_conn_send_pdus(sk, skb);
 }
 
 /**
@@ -255,7 +255,7 @@
 	if (howmany_resend > 0)
 		llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
 	/* any PDUs to re-send are queued up; start sending to MAC */
-	llc_conn_send_pdus(sk);
+	llc_conn_send_pdus(sk, NULL);
 out:;
 }
 
@@ -296,7 +296,7 @@
 	if (howmany_resend > 0)
 		llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
 	/* any PDUs to re-send are queued up; start sending to MAC */
-	llc_conn_send_pdus(sk);
+	llc_conn_send_pdus(sk, NULL);
 out:;
 }
 
@@ -340,12 +340,16 @@
 /**
  *	llc_conn_send_pdus - Sends queued PDUs
  *	@sk: active connection
+ *	@hold_skb: the skb held by the caller, or NULL if the caller does not care

  *
- *	Sends queued pdus to MAC layer for transmission.
+ *	Sends queued pdus to MAC layer for transmission. When @hold_skb is
+ *	NULL, always return 0. Otherwise, return 0 if @hold_skb is sent
+ *	successfully, or 1 for failure.
  */
-static void llc_conn_send_pdus(struct sock *sk)
+static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *hold_skb)
 {
 	struct sk_buff *skb;
+	int ret = 0;
 
 	while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) {
 		struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
@@ -357,10 +361,20 @@
 			skb_queue_tail(&llc_sk(sk)->pdu_unack_q, skb);
 			if (!skb2)
 				break;
-			skb = skb2;
+			dev_queue_xmit(skb2);
+		} else {
+			bool is_target = skb == hold_skb;
+			int rc;
+
+			if (is_target)
+				skb_get(skb);
+			rc = dev_queue_xmit(skb);
+			if (is_target)
+				ret = rc;
 		}
-		dev_queue_xmit(skb);
 	}
+
+	return ret;
 }
 
 /**
@@ -951,6 +965,26 @@
 	return sk;
 }
 
+void llc_sk_stop_all_timers(struct sock *sk, bool sync)
+{
+	struct llc_sock *llc = llc_sk(sk);
+
+	if (sync) {
+		del_timer_sync(&llc->pf_cycle_timer.timer);
+		del_timer_sync(&llc->ack_timer.timer);
+		del_timer_sync(&llc->rej_sent_timer.timer);
+		del_timer_sync(&llc->busy_state_timer.timer);
+	} else {
+		del_timer(&llc->pf_cycle_timer.timer);
+		del_timer(&llc->ack_timer.timer);
+		del_timer(&llc->rej_sent_timer.timer);
+		del_timer(&llc->busy_state_timer.timer);
+	}
+
+	llc->ack_must_be_send = 0;
+	llc->ack_pf = 0;
+}
+
 /**
  *	llc_sk_free - Frees a LLC socket
  *	@sk - socket to free
@@ -963,7 +997,7 @@
 
 	llc->state = LLC_CONN_OUT_OF_SVC;
 	/* Stop all (possibly) running timers */
-	llc_conn_ac_stop_all_timers(sk, NULL);
+	llc_sk_stop_all_timers(sk, true);
 #ifdef DEBUG_LLC_CONN_ALLOC
 	printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __func__,
 		skb_queue_len(&llc->pdu_unack_q),
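
Note on the llc_conn.c hunks above: llc_conn_send_pdu() now propagates the transmit status of the specific skb the caller queued. llc_conn_send_pdus() still drains the whole write queue, but takes an extra reference on the caller's skb before handing it to dev_queue_xmit() and returns that one skb's result. The same "flush everything, report on one item" shape in plain C; the queue and transmit function below are stand-ins:

    #include <stdio.h>

    #define QLEN 8

    struct queue {
        int items[QLEN];
        int head, tail;
    };

    static int dequeue(struct queue *q, int *out)
    {
        if (q->head == q->tail)
            return 0;
        *out = q->items[q->head++ % QLEN];
        return 1;
    }

    /* Stand-in for dev_queue_xmit(): pretend odd items fail. */
    static int transmit(int item)
    {
        return (item & 1) ? -1 : 0;
    }

    /* Drain the queue, but report the transmit status only for @watched;
     * every other item's status is ignored, as in llc_conn_send_pdus(). */
    static int flush_queue(struct queue *q, int watched)
    {
        int item, ret = 0;

        while (dequeue(q, &item)) {
            int rc = transmit(item);

            if (item == watched)
                ret = rc;
        }
        return ret;
    }

    int main(void)
    {
        struct queue q = { .items = { 2, 3, 4 }, .head = 0, .tail = 3 };

        printf("status for 3: %d\n", flush_queue(&q, 3));   /* -1 */
        return 0;
    }
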
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index efa2a2f..d7801f6 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2341,10 +2341,17 @@
 	struct ieee80211_sub_if_data *sdata;
 	enum nl80211_tx_power_setting txp_type = type;
 	bool update_txp_type = false;
+	bool has_monitor = false;
 
 	if (wdev) {
 		sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
 
+		if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
+			sdata = rtnl_dereference(local->monitor_sdata);
+			if (!sdata)
+				return -EOPNOTSUPP;
+		}
+
 		switch (type) {
 		case NL80211_TX_POWER_AUTOMATIC:
 			sdata->user_power_level = IEEE80211_UNSET_POWER_LEVEL;
@@ -2383,15 +2390,34 @@
 
 	mutex_lock(&local->iflist_mtx);
 	list_for_each_entry(sdata, &local->interfaces, list) {
+		if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
+			has_monitor = true;
+			continue;
+		}
 		sdata->user_power_level = local->user_power_level;
 		if (txp_type != sdata->vif.bss_conf.txpower_type)
 			update_txp_type = true;
 		sdata->vif.bss_conf.txpower_type = txp_type;
 	}
-	list_for_each_entry(sdata, &local->interfaces, list)
+	list_for_each_entry(sdata, &local->interfaces, list) {
+		if (sdata->vif.type == NL80211_IFTYPE_MONITOR)
+			continue;
 		ieee80211_recalc_txpower(sdata, update_txp_type);
+	}
 	mutex_unlock(&local->iflist_mtx);
 
+	if (has_monitor) {
+		sdata = rtnl_dereference(local->monitor_sdata);
+		if (sdata) {
+			sdata->user_power_level = local->user_power_level;
+			if (txp_type != sdata->vif.bss_conf.txpower_type)
+				update_txp_type = true;
+			sdata->vif.bss_conf.txpower_type = txp_type;
+
+			ieee80211_recalc_txpower(sdata, update_txp_type);
+		}
+	}
+
 	return 0;
 }
 
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 09f77e4..49c8a9c 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -164,7 +164,8 @@
 	if (WARN_ON_ONCE(sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE ||
 			 sdata->vif.type == NL80211_IFTYPE_NAN ||
 			 (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
-			  !sdata->vif.mu_mimo_owner)))
+			  !sdata->vif.mu_mimo_owner &&
+			  !(changed & BSS_CHANGED_TXPOWER))))
 		return;
 
 	if (!check_sdata_in_driver(sdata))
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index d31818e..a5acaf1 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -427,7 +427,7 @@
 	case NL80211_CHAN_WIDTH_5:
 	case NL80211_CHAN_WIDTH_10:
 		cfg80211_chandef_create(&chandef, cbss->channel,
-					NL80211_CHAN_WIDTH_20_NOHT);
+					NL80211_CHAN_NO_HT);
 		chandef.width = sdata->u.ibss.chandef.width;
 		break;
 	case NL80211_CHAN_WIDTH_80:
@@ -439,7 +439,7 @@
 	default:
 		/* fall back to 20 MHz for unsupported modes */
 		cfg80211_chandef_create(&chandef, cbss->channel,
-					NL80211_CHAN_WIDTH_20_NOHT);
+					NL80211_CHAN_NO_HT);
 		break;
 	}
 
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 0519edb..973adf3 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -4332,6 +4332,10 @@
 	if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data))
 		return -EINVAL;
 
+	/* If a reconfig is happening, bail out */
+	if (local->in_reconfig)
+		return -EBUSY;
+
 	if (assoc) {
 		rcu_read_lock();
 		have_sta = sta_info_get(sdata, cbss->bssid);
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index dbceb42..e6096df 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -173,9 +173,11 @@
 		/* try default if specific alg requested but not found */
 		ops = ieee80211_try_rate_control_ops_get(ieee80211_default_rc_algo);
 
-	/* try built-in one if specific alg requested but not found */
-	if (!ops && strlen(CONFIG_MAC80211_RC_DEFAULT))
+	/* Note: check for > 0 is intentional to avoid clang warning */
+	if (!ops && (strlen(CONFIG_MAC80211_RC_DEFAULT) > 0))
+		/* try built-in one if specific alg requested but not found */
 		ops = ieee80211_try_rate_control_ops_get(CONFIG_MAC80211_RC_DEFAULT);
+
 	kernel_param_unlock(THIS_MODULE);
 
 	return ops;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 404284a..474655a 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -3907,7 +3907,7 @@
 	if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS |
 					      IEEE80211_FCTL_TODS)) !=
 	    fast_rx->expected_ds_bits)
-		goto drop;
+		return false;
 
 	/* assign the key to drop unencrypted frames (later)
 	 * and strip the IV/MIC if necessary
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index 97f4c9d..9249712 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -8,6 +8,7 @@
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2008, Intel Corporation
  * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ * Copyright (C) 2018        Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -27,7 +28,7 @@
 				 u32 sta_flags, u8 *bssid,
 				 struct ieee80211_csa_ie *csa_ie)
 {
-	enum nl80211_band new_band;
+	enum nl80211_band new_band = current_band;
 	int new_freq;
 	u8 new_chan_no;
 	struct ieee80211_channel *new_chan;
@@ -53,15 +54,13 @@
 				elems->ext_chansw_ie->new_operating_class,
 				&new_band)) {
 			sdata_info(sdata,
-				   "cannot understand ECSA IE operating class %d, disconnecting\n",
+				   "cannot understand ECSA IE operating class, %d, ignoring\n",
 				   elems->ext_chansw_ie->new_operating_class);
-			return -EINVAL;
 		}
 		new_chan_no = elems->ext_chansw_ie->new_ch_num;
 		csa_ie->count = elems->ext_chansw_ie->count;
 		csa_ie->mode = elems->ext_chansw_ie->mode;
 	} else if (elems->ch_switch_ie) {
-		new_band = current_band;
 		new_chan_no = elems->ch_switch_ie->new_ch_num;
 		csa_ie->count = elems->ch_switch_ie->count;
 		csa_ie->mode = elems->ch_switch_ie->mode;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 1ecf3d0..892c392 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -313,7 +313,7 @@
 
 	if (ieee80211_hw_check(hw, USES_RSS)) {
 		sta->pcpu_rx_stats =
-			alloc_percpu(struct ieee80211_sta_rx_stats);
+			alloc_percpu_gfp(struct ieee80211_sta_rx_stats, gfp);
 		if (!sta->pcpu_rx_stats)
 			goto free;
 	}
@@ -433,6 +433,7 @@
 	if (sta->sta.txq[0])
 		kfree(to_txq_info(sta->sta.txq[0]));
 free:
+	free_percpu(sta->pcpu_rx_stats);
 #ifdef CONFIG_MAC80211_MESH
 	kfree(sta->mesh);
 #endif
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 74d1195..c5f2350 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2393,11 +2393,7 @@
 			strlcpy(cfg.mcast_ifn, dm->mcast_ifn,
 				sizeof(cfg.mcast_ifn));
 			cfg.syncid = dm->syncid;
-			rtnl_lock();
-			mutex_lock(&ipvs->sync_mutex);
 			ret = start_sync_thread(ipvs, &cfg, dm->state);
-			mutex_unlock(&ipvs->sync_mutex);
-			rtnl_unlock();
 		} else {
 			mutex_lock(&ipvs->sync_mutex);
 			ret = stop_sync_thread(ipvs, dm->state);
@@ -3495,12 +3491,8 @@
 	if (ipvs->mixed_address_family_dests > 0)
 		return -EINVAL;
 
-	rtnl_lock();
-	mutex_lock(&ipvs->sync_mutex);
 	ret = start_sync_thread(ipvs, &c,
 				nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
-	mutex_unlock(&ipvs->sync_mutex);
-	rtnl_unlock();
 	return ret;
 }
 
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 9350530..5fbf4b2 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -48,6 +48,7 @@
 #include <linux/kthread.h>
 #include <linux/wait.h>
 #include <linux/kernel.h>
+#include <linux/sched.h>
 
 #include <asm/unaligned.h>		/* Used for ntoh_seq and hton_seq */
 
@@ -1359,15 +1360,9 @@
 /*
 *      Specify default interface for outgoing multicasts
  */
-static int set_mcast_if(struct sock *sk, char *ifname)
+static int set_mcast_if(struct sock *sk, struct net_device *dev)
 {
-	struct net_device *dev;
 	struct inet_sock *inet = inet_sk(sk);
-	struct net *net = sock_net(sk);
-
-	dev = __dev_get_by_name(net, ifname);
-	if (!dev)
-		return -ENODEV;
 
 	if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
 		return -EINVAL;
@@ -1395,19 +1390,14 @@
  *      in the in_addr structure passed in as a parameter.
  */
 static int
-join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
+join_mcast_group(struct sock *sk, struct in_addr *addr, struct net_device *dev)
 {
-	struct net *net = sock_net(sk);
 	struct ip_mreqn mreq;
-	struct net_device *dev;
 	int ret;
 
 	memset(&mreq, 0, sizeof(mreq));
 	memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr));
 
-	dev = __dev_get_by_name(net, ifname);
-	if (!dev)
-		return -ENODEV;
 	if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
 		return -EINVAL;
 
@@ -1422,15 +1412,10 @@
 
 #ifdef CONFIG_IP_VS_IPV6
 static int join_mcast_group6(struct sock *sk, struct in6_addr *addr,
-			     char *ifname)
+			     struct net_device *dev)
 {
-	struct net *net = sock_net(sk);
-	struct net_device *dev;
 	int ret;
 
-	dev = __dev_get_by_name(net, ifname);
-	if (!dev)
-		return -ENODEV;
 	if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
 		return -EINVAL;
 
@@ -1442,24 +1427,18 @@
 }
 #endif
 
-static int bind_mcastif_addr(struct socket *sock, char *ifname)
+static int bind_mcastif_addr(struct socket *sock, struct net_device *dev)
 {
-	struct net *net = sock_net(sock->sk);
-	struct net_device *dev;
 	__be32 addr;
 	struct sockaddr_in sin;
 
-	dev = __dev_get_by_name(net, ifname);
-	if (!dev)
-		return -ENODEV;
-
 	addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
 	if (!addr)
 		pr_err("You probably need to specify IP address on "
 		       "multicast interface.\n");
 
 	IP_VS_DBG(7, "binding socket with (%s) %pI4\n",
-		  ifname, &addr);
+		  dev->name, &addr);
 
 	/* Now bind the socket with the address of multicast interface */
 	sin.sin_family	     = AF_INET;
@@ -1492,7 +1471,8 @@
 /*
  *      Set up sending multicast socket over UDP
  */
-static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
+static int make_send_sock(struct netns_ipvs *ipvs, int id,
+			  struct net_device *dev, struct socket **sock_ret)
 {
 	/* multicast addr */
 	union ipvs_sockaddr mcast_addr;
@@ -1504,9 +1484,10 @@
 				  IPPROTO_UDP, &sock);
 	if (result < 0) {
 		pr_err("Error during creation of socket; terminating\n");
-		return ERR_PTR(result);
+		goto error;
 	}
-	result = set_mcast_if(sock->sk, ipvs->mcfg.mcast_ifn);
+	*sock_ret = sock;
+	result = set_mcast_if(sock->sk, dev);
 	if (result < 0) {
 		pr_err("Error setting outbound mcast interface\n");
 		goto error;
@@ -1521,7 +1502,7 @@
 		set_sock_size(sock->sk, 1, result);
 
 	if (AF_INET == ipvs->mcfg.mcast_af)
-		result = bind_mcastif_addr(sock, ipvs->mcfg.mcast_ifn);
+		result = bind_mcastif_addr(sock, dev);
 	else
 		result = 0;
 	if (result < 0) {
@@ -1537,19 +1518,18 @@
 		goto error;
 	}
 
-	return sock;
+	return 0;
 
 error:
-	sock_release(sock);
-	return ERR_PTR(result);
+	return result;
 }
 
 
 /*
  *      Set up receiving multicast socket over UDP
  */
-static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
-					int ifindex)
+static int make_receive_sock(struct netns_ipvs *ipvs, int id,
+			     struct net_device *dev, struct socket **sock_ret)
 {
 	/* multicast addr */
 	union ipvs_sockaddr mcast_addr;
@@ -1561,8 +1541,9 @@
 				  IPPROTO_UDP, &sock);
 	if (result < 0) {
 		pr_err("Error during creation of socket; terminating\n");
-		return ERR_PTR(result);
+		goto error;
 	}
+	*sock_ret = sock;
 	/* it is equivalent to the REUSEADDR option in user-space */
 	sock->sk->sk_reuse = SK_CAN_REUSE;
 	result = sysctl_sync_sock_size(ipvs);
@@ -1570,7 +1551,7 @@
 		set_sock_size(sock->sk, 0, result);
 
 	get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id);
-	sock->sk->sk_bound_dev_if = ifindex;
+	sock->sk->sk_bound_dev_if = dev->ifindex;
 	result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen);
 	if (result < 0) {
 		pr_err("Error binding to the multicast addr\n");
@@ -1581,21 +1562,20 @@
 #ifdef CONFIG_IP_VS_IPV6
 	if (ipvs->bcfg.mcast_af == AF_INET6)
 		result = join_mcast_group6(sock->sk, &mcast_addr.in6.sin6_addr,
-					   ipvs->bcfg.mcast_ifn);
+					   dev);
 	else
 #endif
 		result = join_mcast_group(sock->sk, &mcast_addr.in.sin_addr,
-					  ipvs->bcfg.mcast_ifn);
+					  dev);
 	if (result < 0) {
 		pr_err("Error joining to the multicast group\n");
 		goto error;
 	}
 
-	return sock;
+	return 0;
 
 error:
-	sock_release(sock);
-	return ERR_PTR(result);
+	return result;
 }
 
 
@@ -1780,13 +1760,12 @@
 int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
 		      int state)
 {
-	struct ip_vs_sync_thread_data *tinfo;
+	struct ip_vs_sync_thread_data *tinfo = NULL;
 	struct task_struct **array = NULL, *task;
-	struct socket *sock;
 	struct net_device *dev;
 	char *name;
 	int (*threadfn)(void *data);
-	int id, count, hlen;
+	int id = 0, count, hlen;
 	int result = -ENOMEM;
 	u16 mtu, min_mtu;
 
@@ -1794,6 +1773,18 @@
 	IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n",
 		  sizeof(struct ip_vs_sync_conn_v0));
 
+	/* Do not hold one mutex and then block on another */
+	for (;;) {
+		rtnl_lock();
+		if (mutex_trylock(&ipvs->sync_mutex))
+			break;
+		rtnl_unlock();
+		mutex_lock(&ipvs->sync_mutex);
+		if (rtnl_trylock())
+			break;
+		mutex_unlock(&ipvs->sync_mutex);
+	}
+
 	if (!ipvs->sync_state) {
 		count = clamp(sysctl_sync_ports(ipvs), 1, IPVS_SYNC_PORTS_MAX);
 		ipvs->threads_mask = count - 1;
@@ -1812,7 +1803,8 @@
 	dev = __dev_get_by_name(ipvs->net, c->mcast_ifn);
 	if (!dev) {
 		pr_err("Unknown mcast interface: %s\n", c->mcast_ifn);
-		return -ENODEV;
+		result = -ENODEV;
+		goto out_early;
 	}
 	hlen = (AF_INET6 == c->mcast_af) ?
 	       sizeof(struct ipv6hdr) + sizeof(struct udphdr) :
@@ -1829,26 +1821,30 @@
 		c->sync_maxlen = mtu - hlen;
 
 	if (state == IP_VS_STATE_MASTER) {
+		result = -EEXIST;
 		if (ipvs->ms)
-			return -EEXIST;
+			goto out_early;
 
 		ipvs->mcfg = *c;
 		name = "ipvs-m:%d:%d";
 		threadfn = sync_thread_master;
 	} else if (state == IP_VS_STATE_BACKUP) {
+		result = -EEXIST;
 		if (ipvs->backup_threads)
-			return -EEXIST;
+			goto out_early;
 
 		ipvs->bcfg = *c;
 		name = "ipvs-b:%d:%d";
 		threadfn = sync_thread_backup;
 	} else {
-		return -EINVAL;
+		result = -EINVAL;
+		goto out_early;
 	}
 
 	if (state == IP_VS_STATE_MASTER) {
 		struct ipvs_master_sync_state *ms;
 
+		result = -ENOMEM;
 		ipvs->ms = kzalloc(count * sizeof(ipvs->ms[0]), GFP_KERNEL);
 		if (!ipvs->ms)
 			goto out;
@@ -1864,39 +1860,38 @@
 	} else {
 		array = kzalloc(count * sizeof(struct task_struct *),
 				GFP_KERNEL);
+		result = -ENOMEM;
 		if (!array)
 			goto out;
 	}
 
-	tinfo = NULL;
 	for (id = 0; id < count; id++) {
-		if (state == IP_VS_STATE_MASTER)
-			sock = make_send_sock(ipvs, id);
-		else
-			sock = make_receive_sock(ipvs, id, dev->ifindex);
-		if (IS_ERR(sock)) {
-			result = PTR_ERR(sock);
-			goto outtinfo;
-		}
+		result = -ENOMEM;
 		tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
 		if (!tinfo)
-			goto outsocket;
+			goto out;
 		tinfo->ipvs = ipvs;
-		tinfo->sock = sock;
+		tinfo->sock = NULL;
 		if (state == IP_VS_STATE_BACKUP) {
 			tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen,
 					     GFP_KERNEL);
 			if (!tinfo->buf)
-				goto outtinfo;
+				goto out;
 		} else {
 			tinfo->buf = NULL;
 		}
 		tinfo->id = id;
+		if (state == IP_VS_STATE_MASTER)
+			result = make_send_sock(ipvs, id, dev, &tinfo->sock);
+		else
+			result = make_receive_sock(ipvs, id, dev, &tinfo->sock);
+		if (result < 0)
+			goto out;
 
 		task = kthread_run(threadfn, tinfo, name, ipvs->gen, id);
 		if (IS_ERR(task)) {
 			result = PTR_ERR(task);
-			goto outtinfo;
+			goto out;
 		}
 		tinfo = NULL;
 		if (state == IP_VS_STATE_MASTER)
@@ -1913,20 +1908,20 @@
 	ipvs->sync_state |= state;
 	spin_unlock_bh(&ipvs->sync_buff_lock);
 
+	mutex_unlock(&ipvs->sync_mutex);
+	rtnl_unlock();
+
 	/* increase the module use count */
 	ip_vs_use_count_inc();
 
 	return 0;
 
-outsocket:
-	sock_release(sock);
-
-outtinfo:
-	if (tinfo) {
-		sock_release(tinfo->sock);
-		kfree(tinfo->buf);
-		kfree(tinfo);
-	}
+out:
+	/* We do not need the RTNL lock anymore; release it here so that
+	 * sock_release below and in the kthreads can use rtnl_lock
+	 * to leave the mcast group.
+	 */
+	rtnl_unlock();
 	count = id;
 	while (count-- > 0) {
 		if (state == IP_VS_STATE_MASTER)
@@ -1934,13 +1929,23 @@
 		else
 			kthread_stop(array[count]);
 	}
-	kfree(array);
-
-out:
 	if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
 		kfree(ipvs->ms);
 		ipvs->ms = NULL;
 	}
+	mutex_unlock(&ipvs->sync_mutex);
+	if (tinfo) {
+		if (tinfo->sock)
+			sock_release(tinfo->sock);
+		kfree(tinfo->buf);
+		kfree(tinfo);
+	}
+	kfree(array);
+	return result;
+
+out_early:
+	mutex_unlock(&ipvs->sync_mutex);
+	rtnl_unlock();
 	return result;
 }
 
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index f66c08f..18e96a2 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1621,7 +1621,6 @@
 	struct nf_conntrack_tuple_hash *h;
 	struct nf_conn *ct;
 	struct hlist_nulls_node *n;
-	int cpu;
 	spinlock_t *lockp;
 
 	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
@@ -1643,18 +1642,6 @@
 		cond_resched();
 	}
 
-	for_each_possible_cpu(cpu) {
-		struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
-
-		spin_lock_bh(&pcpu->lock);
-		hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
-			ct = nf_ct_tuplehash_to_ctrack(h);
-			if (iter(ct, data))
-				set_bit(IPS_DYING_BIT, &ct->status);
-		}
-		spin_unlock_bh(&pcpu->lock);
-		cond_resched();
-	}
 	return NULL;
 found:
 	atomic_inc(&ct->ct_general.use);
@@ -1663,6 +1650,34 @@
 	return ct;
 }
 
+static void
+__nf_ct_unconfirmed_destroy(struct net *net)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct nf_conntrack_tuple_hash *h;
+		struct hlist_nulls_node *n;
+		struct ct_pcpu *pcpu;
+
+		pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
+
+		spin_lock_bh(&pcpu->lock);
+		hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
+			struct nf_conn *ct;
+
+			ct = nf_ct_tuplehash_to_ctrack(h);
+
+			/* We cannot call iter() on the unconfirmed list; the
+			 * owning cpu can reallocate ct->ext at any time.
+			 */
+			set_bit(IPS_DYING_BIT, &ct->status);
+		}
+		spin_unlock_bh(&pcpu->lock);
+		cond_resched();
+	}
+}
+
 void nf_ct_iterate_cleanup(struct net *net,
 			   int (*iter)(struct nf_conn *i, void *data),
 			   void *data, u32 portid, int report)
@@ -1675,6 +1690,10 @@
 	if (atomic_read(&net->ct.count) == 0)
 		return;
 
+	__nf_ct_unconfirmed_destroy(net);
+
+	synchronize_net();
+
 	while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
 		/* Time to push up daises... */
 
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 3c37253..c9ca529 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -894,8 +894,13 @@
 	}
 out:
 	local_bh_enable();
-	if (last)
+	if (last) {
+		/* An nf ct hash resize happened; now clear the leftover. */
+		if ((struct nf_conn *)cb->args[1] == last)
+			cb->args[1] = 0;
+
 		nf_ct_put(last);
+	}
 
 	while (i) {
 		i--;
@@ -1012,9 +1017,8 @@
 
 static int
 ctnetlink_parse_tuple(const struct nlattr * const cda[],
-		      struct nf_conntrack_tuple *tuple,
-		      enum ctattr_type type, u_int8_t l3num,
-		      struct nf_conntrack_zone *zone)
+		      struct nf_conntrack_tuple *tuple, u32 type,
+		      u_int8_t l3num, struct nf_conntrack_zone *zone)
 {
 	struct nlattr *tb[CTA_TUPLE_MAX+1];
 	int err;
@@ -2424,7 +2428,7 @@
 
 static int ctnetlink_exp_dump_tuple(struct sk_buff *skb,
 				    const struct nf_conntrack_tuple *tuple,
-				    enum ctattr_expect type)
+				    u32 type)
 {
 	struct nlattr *nest_parms;
 
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index fa3ef25..762f31f 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2200,41 +2200,46 @@
 	}
 
 	if (nlh->nlmsg_flags & NLM_F_REPLACE) {
-		if (nft_is_active_next(net, old_rule)) {
-			trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
-						   old_rule);
-			if (trans == NULL) {
-				err = -ENOMEM;
-				goto err2;
-			}
-			nft_deactivate_next(net, old_rule);
-			chain->use--;
-			list_add_tail_rcu(&rule->list, &old_rule->list);
-		} else {
+		if (!nft_is_active_next(net, old_rule)) {
 			err = -ENOENT;
 			goto err2;
 		}
-	} else if (nlh->nlmsg_flags & NLM_F_APPEND)
-		if (old_rule)
-			list_add_rcu(&rule->list, &old_rule->list);
-		else
-			list_add_tail_rcu(&rule->list, &chain->rules);
-	else {
-		if (old_rule)
-			list_add_tail_rcu(&rule->list, &old_rule->list);
-		else
-			list_add_rcu(&rule->list, &chain->rules);
-	}
+		trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
+					   old_rule);
+		if (trans == NULL) {
+			err = -ENOMEM;
+			goto err2;
+		}
+		nft_deactivate_next(net, old_rule);
+		chain->use--;
 
-	if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
-		err = -ENOMEM;
-		goto err3;
+		if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
+			err = -ENOMEM;
+			goto err2;
+		}
+
+		list_add_tail_rcu(&rule->list, &old_rule->list);
+	} else {
+		if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
+			err = -ENOMEM;
+			goto err2;
+		}
+
+		if (nlh->nlmsg_flags & NLM_F_APPEND) {
+			if (old_rule)
+				list_add_rcu(&rule->list, &old_rule->list);
+			else
+				list_add_tail_rcu(&rule->list, &chain->rules);
+		} else {
+			if (old_rule)
+				list_add_tail_rcu(&rule->list, &old_rule->list);
+			else
+				list_add_rcu(&rule->list, &chain->rules);
+		}
 	}
 	chain->use++;
 	return 0;
 
-err3:
-	list_del_rcu(&rule->list);
 err2:
 	nf_tables_rule_destroy(&ctx, rule);
 err1:
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 7ad1a86..59be898 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -367,6 +367,36 @@
 	return buf;
 }
 
+/**
+ * xt_check_proc_name - check that name is suitable for /proc file creation
+ *
+ * @name: file name candidate
+ * @size: length of buffer
+ *
+ * Some x_tables modules wish to create a file in /proc.
+ * This function makes sure that the name is suitable for this
+ * purpose: it checks that the name is NUL terminated and isn't a
+ * 'special' name, like "..".
+ *
+ * Returns a negative number on error or 0 if the name is usable.
+ */
+int xt_check_proc_name(const char *name, unsigned int size)
+{
+	if (name[0] == '\0')
+		return -EINVAL;
+
+	if (strnlen(name, size) == size)
+		return -ENAMETOOLONG;
+
+	if (strcmp(name, ".") == 0 ||
+	    strcmp(name, "..") == 0 ||
+	    strchr(name, '/'))
+		return -EINVAL;
+
+	return 0;
+}
+EXPORT_SYMBOL(xt_check_proc_name);
+
 int xt_check_match(struct xt_mtchk_param *par,
 		   unsigned int size, u_int8_t proto, bool inv_proto)
 {
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index b89b688..a1a29cd 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -794,8 +794,9 @@
 	struct hashlimit_cfg2 cfg = {};
 	int ret;
 
-	if (info->name[sizeof(info->name) - 1] != '\0')
-		return -EINVAL;
+	ret = xt_check_proc_name(info->name, sizeof(info->name));
+	if (ret)
+		return ret;
 
 	ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
 
@@ -809,9 +810,11 @@
 static int hashlimit_mt_check(const struct xt_mtchk_param *par)
 {
 	struct xt_hashlimit_mtinfo2 *info = par->matchinfo;
+	int ret;
 
-	if (info->name[sizeof(info->name) - 1] != '\0')
-		return -EINVAL;
+	ret = xt_check_proc_name(info->name, sizeof(info->name));
+	if (ret)
+		return ret;
 
 	return hashlimit_mt_check_common(par, &info->hinfo, &info->cfg,
 					 info->name, 2);
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index e3b7a09..79d7ad6 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -361,9 +361,9 @@
 			info->hit_count, XT_RECENT_MAX_NSTAMPS - 1);
 		return -EINVAL;
 	}
-	if (info->name[0] == '\0' ||
-	    strnlen(info->name, XT_RECENT_NAME_LEN) == XT_RECENT_NAME_LEN)
-		return -EINVAL;
+	ret = xt_check_proc_name(info->name, sizeof(info->name));
+	if (ret)
+		return ret;
 
 	if (ip_pkt_list_tot && info->hit_count < ip_pkt_list_tot)
 		nstamp_mask = roundup_pow_of_two(ip_pkt_list_tot) - 1;
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 4528cff..a123d0d 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -1469,6 +1469,16 @@
 		iface = rcu_dereference(netlbl_unlhsh_def);
 	if (iface == NULL || !iface->valid)
 		goto unlabel_getattr_nolabel;
+
+#if IS_ENABLED(CONFIG_IPV6)
+	/* When resolving a fallback label, check the sk_buff version as
+	 * it is possible (e.g. SCTP) to have family = PF_INET6 while
+	 * receiving ip_hdr(skb)->version = 4.
+	 */
+	if (family == PF_INET6 && ip_hdr(skb)->version == 4)
+		family = PF_INET;
+#endif /* IPv6 */
+
 	switch (family) {
 	case PF_INET: {
 		struct iphdr *hdr4;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index c1f59a0..15e6e7b 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1054,6 +1054,9 @@
 	if (addr->sa_family != AF_NETLINK)
 		return -EINVAL;
 
+	if (alen < sizeof(struct sockaddr_nl))
+		return -EINVAL;
+
 	if ((nladdr->nl_groups || nladdr->nl_pid) &&
 	    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
 		return -EPERM;
@@ -1792,6 +1795,8 @@
 
 	if (msg->msg_namelen) {
 		err = -EINVAL;
+		if (msg->msg_namelen < sizeof(struct sockaddr_nl))
+			goto out;
 		if (addr->nl_family != AF_NETLINK)
 			goto out;
 		dst_portid = addr->nl_pid;
diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
index c5959ce..3f26611 100644
--- a/net/nfc/llcp_commands.c
+++ b/net/nfc/llcp_commands.c
@@ -149,6 +149,10 @@
 
 	pr_debug("uri: %s, len: %zu\n", uri, uri_len);
 
+	/* sdreq->tlv_len is u8, takes uri_len, + 3 for header, + 1 for NULL */
+	if (WARN_ON_ONCE(uri_len > U8_MAX - 4))
+		return NULL;
+
 	sdreq = kzalloc(sizeof(struct nfc_llcp_sdp_tlv), GFP_KERNEL);
 	if (sdreq == NULL)
 		return NULL;
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 102c681..dbf74af 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -68,7 +68,8 @@
 };
 
 static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = {
-	[NFC_SDP_ATTR_URI] = { .type = NLA_STRING },
+	[NFC_SDP_ATTR_URI] = { .type = NLA_STRING,
+			       .len = U8_MAX - 4 },
 	[NFC_SDP_ATTR_SAP] = { .type = NLA_U8 },
 };
 
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 4663939..f135814 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -906,6 +906,36 @@
 	return 0;
 }
 
+/* Trim the skb to the length specified by the IP/IPv6 header,
+ * removing any trailing lower-layer padding. This prepares the skb
+ * for higher-layer processing that assumes skb->len excludes padding
+ * (such as nf_ip_checksum). The caller needs to pull the skb to the
+ * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
+ */
+static int ovs_skb_network_trim(struct sk_buff *skb)
+{
+	unsigned int len;
+	int err;
+
+	switch (skb->protocol) {
+	case htons(ETH_P_IP):
+		len = ntohs(ip_hdr(skb)->tot_len);
+		break;
+	case htons(ETH_P_IPV6):
+		len = sizeof(struct ipv6hdr)
+			+ ntohs(ipv6_hdr(skb)->payload_len);
+		break;
+	default:
+		len = skb->len;
+	}
+
+	err = pskb_trim_rcsum(skb, len);
+	if (err)
+		kfree_skb(skb);
+
+	return err;
+}
+
 /* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
  * value if 'skb' is freed.
  */
@@ -920,6 +950,10 @@
 	nh_ofs = skb_network_offset(skb);
 	skb_pull_rcsum(skb, nh_ofs);
 
+	err = ovs_skb_network_trim(skb);
+	if (err)
+		return err;
+
 	if (key->ip.frag != OVS_FRAG_TYPE_NONE) {
 		err = handle_fragments(net, key, info->zone.id, skb);
 		if (err)
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 1668916..326945d 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -1296,13 +1296,10 @@
 
 	/* The nlattr stream should already have been validated */
 	nla_for_each_nested(nla, attr, rem) {
-		if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED) {
-			if (tbl[nla_type(nla)].next)
-				tbl = tbl[nla_type(nla)].next;
-			nlattr_set(nla, val, tbl);
-		} else {
+		if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED)
+			nlattr_set(nla, val, tbl[nla_type(nla)].next ? : tbl);
+		else
 			memset(nla_data(nla), val, nla_len(nla));
-		}
 
 		if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE)
 			*(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 267db0d..430a7c7 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -333,11 +333,11 @@
 	skb_set_queue_mapping(skb, queue_index);
 }
 
-/* register_prot_hook must be invoked with the po->bind_lock held,
+/* __register_prot_hook must be invoked through register_prot_hook
  * or from a context in which asynchronous accesses to the packet
  * socket is not possible (packet_create()).
  */
-static void register_prot_hook(struct sock *sk)
+static void __register_prot_hook(struct sock *sk)
 {
 	struct packet_sock *po = pkt_sk(sk);
 
@@ -352,8 +352,13 @@
 	}
 }
 
-/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
- * held.   If the sync parameter is true, we will temporarily drop
+static void register_prot_hook(struct sock *sk)
+{
+	lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
+	__register_prot_hook(sk);
+}
+
+/* If the sync parameter is true, we will temporarily drop
  * the po->bind_lock and do a synchronize_net to make sure no
  * asynchronous packet processing paths still refer to the elements
  * of po->prot_hook.  If the sync parameter is false, it is the
@@ -363,6 +368,8 @@
 {
 	struct packet_sock *po = pkt_sk(sk);
 
+	lockdep_assert_held_once(&po->bind_lock);
+
 	po->running = 0;
 
 	if (po->fanout)
@@ -1685,7 +1692,7 @@
 		match->flags = flags;
 		INIT_LIST_HEAD(&match->list);
 		spin_lock_init(&match->lock);
-		atomic_set(&match->sk_ref, 0);
+		refcount_set(&match->sk_ref, 0);
 		fanout_init_data(match);
 		match->prot_hook.type = po->prot_hook.type;
 		match->prot_hook.dev = po->prot_hook.dev;
@@ -1702,19 +1709,19 @@
 	    match->prot_hook.type == po->prot_hook.type &&
 	    match->prot_hook.dev == po->prot_hook.dev) {
 		err = -ENOSPC;
-		if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
+		if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
 			__dev_remove_pack(&po->prot_hook);
 			po->fanout = match;
 			po->rollover = rollover;
 			rollover = NULL;
-			atomic_inc(&match->sk_ref);
+			refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
 			__fanout_link(sk, po);
 			err = 0;
 		}
 	}
 	spin_unlock(&po->bind_lock);
 
-	if (err && !atomic_read(&match->sk_ref)) {
+	if (err && !refcount_read(&match->sk_ref)) {
 		list_del(&match->list);
 		kfree(match);
 	}
@@ -1740,7 +1747,7 @@
 	if (f) {
 		po->fanout = NULL;
 
-		if (atomic_dec_and_test(&f->sk_ref))
+		if (refcount_dec_and_test(&f->sk_ref))
 			list_del(&f->list);
 		else
 			f = NULL;
@@ -2903,13 +2910,15 @@
 	if (skb == NULL)
 		goto out_unlock;
 
-	skb_set_network_header(skb, reserve);
+	skb_reset_network_header(skb);
 
 	err = -EINVAL;
 	if (sock->type == SOCK_DGRAM) {
 		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
 		if (unlikely(offset < 0))
 			goto out_free;
+	} else if (reserve) {
+		skb_push(skb, reserve);
 	}
 
 	/* Returns -EFAULT on error */
@@ -3017,6 +3026,7 @@
 
 	packet_flush_mclist(sk);
 
+	lock_sock(sk);
 	if (po->rx_ring.pg_vec) {
 		memset(&req_u, 0, sizeof(req_u));
 		packet_set_ring(sk, &req_u, 1, 0);
@@ -3026,6 +3036,7 @@
 		memset(&req_u, 0, sizeof(req_u));
 		packet_set_ring(sk, &req_u, 1, 1);
 	}
+	release_sock(sk);
 
 	f = fanout_release(sk);
 
@@ -3259,7 +3270,7 @@
 
 	if (proto) {
 		po->prot_hook.type = proto;
-		register_prot_hook(sk);
+		__register_prot_hook(sk);
 	}
 
 	mutex_lock(&net->packet.sklist_lock);
@@ -3654,6 +3665,7 @@
 		union tpacket_req_u req_u;
 		int len;
 
+		lock_sock(sk);
 		switch (po->tp_version) {
 		case TPACKET_V1:
 		case TPACKET_V2:
@@ -3664,12 +3676,17 @@
 			len = sizeof(req_u.req3);
 			break;
 		}
-		if (optlen < len)
-			return -EINVAL;
-		if (copy_from_user(&req_u.req, optval, len))
-			return -EFAULT;
-		return packet_set_ring(sk, &req_u, 0,
-			optname == PACKET_TX_RING);
+		if (optlen < len) {
+			ret = -EINVAL;
+		} else {
+			if (copy_from_user(&req_u.req, optval, len))
+				ret = -EFAULT;
+			else
+				ret = packet_set_ring(sk, &req_u, 0,
+						    optname == PACKET_TX_RING);
+		}
+		release_sock(sk);
+		return ret;
 	}
 	case PACKET_COPY_THRESH:
 	{
@@ -3735,12 +3752,18 @@
 
 		if (optlen != sizeof(val))
 			return -EINVAL;
-		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
-			return -EBUSY;
 		if (copy_from_user(&val, optval, sizeof(val)))
 			return -EFAULT;
-		po->tp_loss = !!val;
-		return 0;
+
+		lock_sock(sk);
+		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+			ret = -EBUSY;
+		} else {
+			po->tp_loss = !!val;
+			ret = 0;
+		}
+		release_sock(sk);
+		return ret;
 	}
 	case PACKET_AUXDATA:
 	{
@@ -3751,7 +3774,9 @@
 		if (copy_from_user(&val, optval, sizeof(val)))
 			return -EFAULT;
 
+		lock_sock(sk);
 		po->auxdata = !!val;
+		release_sock(sk);
 		return 0;
 	}
 	case PACKET_ORIGDEV:
@@ -3763,7 +3788,9 @@
 		if (copy_from_user(&val, optval, sizeof(val)))
 			return -EFAULT;
 
+		lock_sock(sk);
 		po->origdev = !!val;
+		release_sock(sk);
 		return 0;
 	}
 	case PACKET_VNET_HDR:
@@ -3772,15 +3799,20 @@
 
 		if (sock->type != SOCK_RAW)
 			return -EINVAL;
-		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
-			return -EBUSY;
 		if (optlen < sizeof(val))
 			return -EINVAL;
 		if (copy_from_user(&val, optval, sizeof(val)))
 			return -EFAULT;
 
-		po->has_vnet_hdr = !!val;
-		return 0;
+		lock_sock(sk);
+		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+			ret = -EBUSY;
+		} else {
+			po->has_vnet_hdr = !!val;
+			ret = 0;
+		}
+		release_sock(sk);
+		return ret;
 	}
 	case PACKET_TIMESTAMP:
 	{
@@ -3818,11 +3850,17 @@
 
 		if (optlen != sizeof(val))
 			return -EINVAL;
-		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
-			return -EBUSY;
 		if (copy_from_user(&val, optval, sizeof(val)))
 			return -EFAULT;
-		po->tp_tx_has_off = !!val;
+
+		lock_sock(sk);
+		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+			ret = -EBUSY;
+		} else {
+			po->tp_tx_has_off = !!val;
+			ret = 0;
+		}
+		release_sock(sk);
-		return 0;
+		return ret;
 	}
 	case PACKET_QDISC_BYPASS:
@@ -4219,7 +4257,6 @@
 	/* Added to avoid minimal code churn */
 	struct tpacket_req *req = &req_u->req;
 
-	lock_sock(sk);
 	/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
 	if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
 		net_warn_ratelimited("Tx-ring is not supported.\n");
@@ -4355,7 +4392,6 @@
 	if (pg_vec)
 		free_pg_vec(pg_vec, order, req->tp_block_nr);
 out:
-	release_sock(sk);
 	return err;
 }
 
diff --git a/net/packet/internal.h b/net/packet/internal.h
index d55bfc3..b8d5618 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -1,6 +1,8 @@
 #ifndef __PACKET_INTERNAL_H__
 #define __PACKET_INTERNAL_H__
 
+#include <linux/refcount.h>
+
 struct packet_mclist {
 	struct packet_mclist	*next;
 	int			ifindex;
@@ -86,7 +88,7 @@
 	struct list_head	list;
 	struct sock		*arr[PACKET_FANOUT_MAX];
 	spinlock_t		lock;
-	atomic_t		sk_ref;
+	refcount_t		sk_ref;
 	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
 };
 
@@ -109,10 +111,12 @@
 	int			copy_thresh;
 	spinlock_t		bind_lock;
 	struct mutex		pg_vec_lock;
-	unsigned int		running:1,	/* prot_hook is attached*/
-				auxdata:1,
+	unsigned int		running;	/* bind_lock must be held */
+	unsigned int		auxdata:1,	/* writer must hold sock lock */
 				origdev:1,
-				has_vnet_hdr:1;
+				has_vnet_hdr:1,
+				tp_loss:1,
+				tp_tx_has_off:1;
 	int			pressure;
 	int			ifindex;	/* bound device		*/
 	__be16			num;
@@ -122,8 +126,6 @@
 	enum tpacket_versions	tp_version;
 	unsigned int		tp_hdrlen;
 	unsigned int		tp_reserve;
-	unsigned int		tp_loss:1;
-	unsigned int		tp_tx_has_off:1;
 	unsigned int		tp_tstamp;
 	struct net_device __rcu	*cached_dev;
 	int			(*xmit)(struct sk_buff *skb);
diff --git a/net/qrtr/smd.c b/net/qrtr/smd.c
index 0d11132..ff0112b 100644
--- a/net/qrtr/smd.c
+++ b/net/qrtr/smd.c
@@ -116,5 +116,6 @@
 
 module_qcom_smd_driver(qcom_smd_qrtr_driver);
 
+MODULE_ALIAS("rpmsg:IPCRTR");
 MODULE_DESCRIPTION("Qualcomm IPC-Router SMD interface driver");
 MODULE_LICENSE("GPL v2");
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 095f6ce..adb53ae 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -114,6 +114,7 @@
 			  rs, &addr, (int)ntohs(*port));
 			break;
 		} else {
+			rs->rs_bound_addr = 0;
 			rds_sock_put(rs);
 			ret = -ENOMEM;
 			break;
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 5680d90..0efb3d2 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -336,7 +336,8 @@
 	/* Create a CMA ID and try to bind it. This catches both
 	 * IB and iWARP capable NICs.
 	 */
-	cm_id = rdma_create_id(&init_net, NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
+	cm_id = rdma_create_id(&init_net, rds_rdma_cm_event_handler,
+			       NULL, RDMA_PS_TCP, IB_QPT_RC);
 	if (IS_ERR(cm_id))
 		return PTR_ERR(cm_id);
 
diff --git a/net/rds/send.c b/net/rds/send.c
index ef53d164..50241d3 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 Oracle.  All rights reserved.
+ * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -983,10 +983,15 @@
 	if (conn->c_npaths == 0 && hash != 0) {
 		rds_send_ping(conn);
 
-		if (conn->c_npaths == 0) {
-			wait_event_interruptible(conn->c_hs_waitq,
-						 (conn->c_npaths != 0));
-		}
+		/* The underlying connection is not up yet.  We need to wait
+		 * until it is up to be sure that the non-zero c_path can be
+		 * used.  But if we are interrupted, we have to use the zero
+		 * c_path in case the connection ends up being non-MP capable.
+		 */
+		if (conn->c_npaths == 0)
+			if (wait_event_interruptible(conn->c_hs_waitq,
+						     conn->c_npaths != 0))
+				hash = 0;
 		if (conn->c_npaths == 1)
 			hash = 0;
 	}
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index 76c01cb..d6d8b34 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -138,13 +138,18 @@
 
 	ret = rfkill_register(rfkill->rfkill_dev);
 	if (ret < 0)
-		return ret;
+		goto err_destroy;
 
 	platform_set_drvdata(pdev, rfkill);
 
 	dev_info(&pdev->dev, "%s device registered.\n", rfkill->name);
 
 	return 0;
+
+err_destroy:
+	rfkill_destroy(rfkill->rfkill_dev);
+
+	return ret;
 }
 
 static int rfkill_gpio_remove(struct platform_device *pdev)
diff --git a/net/rmnet_data/rmnet_data_vnd.c b/net/rmnet_data/rmnet_data_vnd.c
index c4ef460..3603c5e 100644
--- a/net/rmnet_data/rmnet_data_vnd.c
+++ b/net/rmnet_data/rmnet_data_vnd.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -578,7 +578,7 @@
 		LOGE("Failed to to register netdev [%s]", dev->name);
 		free_netdev(dev);
 		*new_device = 0;
-		rc = RMNET_CONFIG_UNKNOWN_ERROR;
+		return RMNET_CONFIG_UNKNOWN_ERROR;
 	} else {
 		rmnet_devices[id] = dev;
 		*new_device = dev;
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 1060d14..f3ac85a 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -1166,16 +1166,19 @@
 			goto discard_unlock;
 
 		if (sp->hdr.callNumber == chan->last_call) {
-			/* For the previous service call, if completed successfully, we
-			 * discard all further packets.
-			 */
-			if (rxrpc_conn_is_service(conn) &&
-			    (chan->last_type == RXRPC_PACKET_TYPE_ACK ||
-			     sp->hdr.type == RXRPC_PACKET_TYPE_ABORT))
+			if (chan->call ||
+			    sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
 				goto discard_unlock;
 
-			/* But otherwise we need to retransmit the final packet from
-			 * data cached in the connection record.
+			/* For the previous service call, if completed
+			 * successfully, we discard all further packets.
+			 */
+			if (rxrpc_conn_is_service(conn) &&
+			    chan->last_type == RXRPC_PACKET_TYPE_ACK)
+				goto discard_unlock;
+
+			/* But otherwise we need to retransmit the final packet
+			 * from data cached in the connection record.
 			 */
 			rxrpc_post_packet_to_conn(conn, skb);
 			goto out_unlock;
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index c29362d..3e52b7fd 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -493,9 +493,10 @@
 			ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
 				       sizeof(unsigned int), &id32);
 		} else {
+			unsigned long idl = call->user_call_ID;
+
 			ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
-				       sizeof(unsigned long),
-				       &call->user_call_ID);
+				       sizeof(unsigned long), &idl);
 		}
 		if (ret < 0)
 			goto error;
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 4374e7b..90df95e 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -229,7 +229,9 @@
 	len &= ~(call->conn->size_align - 1);
 
 	sg_init_table(sg, nsg);
-	skb_to_sgvec(skb, sg, 0, len);
+	err = skb_to_sgvec(skb, sg, 0, len);
+	if (unlikely(err < 0))
+		goto out;
 	skcipher_request_set_crypt(req, sg, sg, len, iv.x);
 	crypto_skcipher_encrypt(req);
 
@@ -325,7 +327,7 @@
 	struct sk_buff *trailer;
 	u32 data_size, buf;
 	u16 check;
-	int nsg;
+	int nsg, ret;
 
 	_enter("");
 
@@ -342,7 +344,9 @@
 		goto nomem;
 
 	sg_init_table(sg, nsg);
-	skb_to_sgvec(skb, sg, offset, 8);
+	ret = skb_to_sgvec(skb, sg, offset, 8);
+	if (unlikely(ret < 0))
+		return ret;
 
 	/* start the decryption afresh */
 	memset(&iv, 0, sizeof(iv));
@@ -405,7 +409,7 @@
 	struct sk_buff *trailer;
 	u32 data_size, buf;
 	u16 check;
-	int nsg;
+	int nsg, ret;
 
 	_enter(",{%d}", skb->len);
 
@@ -429,7 +433,12 @@
 	}
 
 	sg_init_table(sg, nsg);
-	skb_to_sgvec(skb, sg, offset, len);
+	ret = skb_to_sgvec(skb, sg, offset, len);
+	if (unlikely(ret < 0)) {
+		if (sg != _sg)
+			kfree(sg);
+		return ret;
+	}
 
 	/* decrypt from the session key */
 	token = call->conn->params.key->payload.data[0];
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index b214a4d..1de27c3 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -78,7 +78,9 @@
 	spin_lock_bh(&call->lock);
 
 	if (call->state < RXRPC_CALL_COMPLETE) {
-		call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS;
+		call->rxtx_annotations[ix] =
+			(call->rxtx_annotations[ix] & RXRPC_TX_ANNO_LAST) |
+			RXRPC_TX_ANNO_RETRANS;
 		if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
 			rxrpc_queue_call(call);
 	}
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index f311732..67adb4e 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -95,8 +95,10 @@
 				continue;
 
 			nest = nla_nest_start(skb, n_i);
-			if (nest == NULL)
+			if (nest == NULL) {
+				index--;
 				goto nla_put_failure;
+			}
 			err = tcf_action_dump_1(skb, p, 0, 0);
 			if (err < 0) {
 				index--;
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 1d39600..40496f3 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -245,10 +245,14 @@
 
 static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
 {
-	if (cfg->is_ebpf)
-		bpf_prog_put(cfg->filter);
-	else
-		bpf_prog_destroy(cfg->filter);
+	struct bpf_prog *filter = cfg->filter;
+
+	if (filter) {
+		if (cfg->is_ebpf)
+			bpf_prog_put(filter);
+		else
+			bpf_prog_destroy(filter);
+	}
 
 	kfree(cfg->bpf_ops);
 	kfree(cfg->bpf_name);
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 95c463c..235db2c 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -634,7 +634,7 @@
 		}
 	}
 
-	return 0;
+	return -ENOENT;
 }
 
 struct ifeheadr {
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index f85313d..bd8e86c 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -192,7 +192,8 @@
 	struct tcf_skbmod_params  *p;
 
 	p = rcu_dereference_protected(d->skbmod_p, 1);
-	kfree_rcu(p, rcu);
+	if (p)
+		kfree_rcu(p, rcu);
 }
 
 static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index b6e3abe..901fb8b 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -196,11 +196,12 @@
 	struct tcf_tunnel_key_params *params;
 
 	params = rcu_dereference_protected(t->params, 1);
+	if (params) {
+		if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
+			dst_release(&params->tcft_enc_metadata->dst);
 
-	if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
-		dst_release(&params->tcft_enc_metadata->dst);
-
-	kfree_rcu(params, rcu);
+		kfree_rcu(params, rcu);
+	}
 }
 
 static int tunnel_key_dump_addresses(struct sk_buff *skb,
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 18e7524..b57b4de 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -128,6 +128,28 @@
 	return f->next == &detached;
 }
 
+static bool fq_flow_is_throttled(const struct fq_flow *f)
+{
+	return f->next == &throttled;
+}
+
+static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
+{
+	if (head->first)
+		head->last->next = flow;
+	else
+		head->first = flow;
+	head->last = flow;
+	flow->next = NULL;
+}
+
+static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
+{
+	rb_erase(&f->rate_node, &q->delayed);
+	q->throttled_flows--;
+	fq_flow_add_tail(&q->old_flows, f);
+}
+
 static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
 {
 	struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
@@ -155,15 +177,6 @@
 
 static struct kmem_cache *fq_flow_cachep __read_mostly;
 
-static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
-{
-	if (head->first)
-		head->last->next = flow;
-	else
-		head->first = flow;
-	head->last = flow;
-	flow->next = NULL;
-}
 
 /* limit number of collected flows per round */
 #define FQ_GC_MAX 8
@@ -267,6 +280,8 @@
 				     f->socket_hash != sk->sk_hash)) {
 				f->credit = q->initial_quantum;
 				f->socket_hash = sk->sk_hash;
+				if (fq_flow_is_throttled(f))
+					fq_flow_unset_throttled(q, f);
 				f->time_next_packet = 0ULL;
 			}
 			return f;
@@ -430,9 +445,7 @@
 			q->time_next_delayed_flow = f->time_next_packet;
 			break;
 		}
-		rb_erase(p, &q->delayed);
-		q->throttled_flows--;
-		fq_flow_add_tail(&q->old_flows, f);
+		fq_flow_unset_throttled(q, f);
 	}
 }
 
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index f10d339..738c55e 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1006,9 +1006,10 @@
 	struct sctp_endpoint *ep;
 	struct sctp_chunk *chunk;
 	struct sctp_inq *inqueue;
-	int state;
 	sctp_subtype_t subtype;
+	int first_time = 1;	/* is this the first time through the loop */
 	int error = 0;
+	int state;
 
 	/* The association should be held so we should be safe. */
 	ep = asoc->ep;
@@ -1019,6 +1020,30 @@
 		state = asoc->state;
 		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
 
+		/* If the first chunk in the packet is AUTH, do special
+		 * processing specified in Section 6.3 of the SCTP-AUTH spec.
+		 */
+		if (first_time && subtype.chunk == SCTP_CID_AUTH) {
+			struct sctp_chunkhdr *next_hdr;
+
+			next_hdr = sctp_inq_peek(inqueue);
+			if (!next_hdr)
+				goto normal;
+
+			/* If the next chunk is COOKIE-ECHO, skip the AUTH
+			 * chunk while saving a pointer to it so we can do
+			 * Authentication later (during cookie-echo
+			 * processing).
+			 */
+			if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
+				chunk->auth_chunk = skb_clone(chunk->skb,
+							      GFP_ATOMIC);
+				chunk->auth = 1;
+				continue;
+			}
+		}
+
+normal:
 		/* SCTP-AUTH, Section 6.3:
 		 *    The receiver has a list of chunk types which it expects
 		 *    to be received only after an AUTH-chunk.  This list has
@@ -1057,6 +1082,9 @@
 		/* If there is an error on chunk, discard this packet. */
 		if (error && chunk)
 			chunk->pdiscard = 1;
+
+		if (first_time)
+			first_time = 0;
 	}
 	sctp_association_put(asoc);
 }
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index f731de3..e06083c 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -217,7 +217,7 @@
 	skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
 	chunk->subh.v = NULL; /* Subheader is no longer valid.  */
 
-	if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <
+	if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <=
 	    skb_tail_pointer(chunk->skb)) {
 		/* This is not a singleton */
 		chunk->singleton = 0;
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 11f69d4..f4d5efb 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -521,44 +521,47 @@
 	addr->v6.sin6_scope_id = 0;
 }
 
+static int __sctp_v6_cmp_addr(const union sctp_addr *addr1,
+			      const union sctp_addr *addr2)
+{
+	if (addr1->sa.sa_family != addr2->sa.sa_family) {
+		if (addr1->sa.sa_family == AF_INET &&
+		    addr2->sa.sa_family == AF_INET6 &&
+		    ipv6_addr_v4mapped(&addr2->v6.sin6_addr) &&
+		    addr2->v6.sin6_addr.s6_addr32[3] ==
+		    addr1->v4.sin_addr.s_addr)
+			return 1;
+
+		if (addr2->sa.sa_family == AF_INET &&
+		    addr1->sa.sa_family == AF_INET6 &&
+		    ipv6_addr_v4mapped(&addr1->v6.sin6_addr) &&
+		    addr1->v6.sin6_addr.s6_addr32[3] ==
+		    addr2->v4.sin_addr.s_addr)
+			return 1;
+
+		return 0;
+	}
+
+	if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr))
+		return 0;
+
+	/* If this is a linklocal address, compare the scope_id. */
+	if ((ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
+	    addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
+	    addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)
+		return 0;
+
+	return 1;
+}
+
 /* Compare addresses exactly.
  * v4-mapped-v6 is also in consideration.
  */
 static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
 			    const union sctp_addr *addr2)
 {
-	if (addr1->sa.sa_family != addr2->sa.sa_family) {
-		if (addr1->sa.sa_family == AF_INET &&
-		    addr2->sa.sa_family == AF_INET6 &&
-		    ipv6_addr_v4mapped(&addr2->v6.sin6_addr)) {
-			if (addr2->v6.sin6_port == addr1->v4.sin_port &&
-			    addr2->v6.sin6_addr.s6_addr32[3] ==
-			    addr1->v4.sin_addr.s_addr)
-				return 1;
-		}
-		if (addr2->sa.sa_family == AF_INET &&
-		    addr1->sa.sa_family == AF_INET6 &&
-		    ipv6_addr_v4mapped(&addr1->v6.sin6_addr)) {
-			if (addr1->v6.sin6_port == addr2->v4.sin_port &&
-			    addr1->v6.sin6_addr.s6_addr32[3] ==
-			    addr2->v4.sin_addr.s_addr)
-				return 1;
-		}
-		return 0;
-	}
-	if (addr1->v6.sin6_port != addr2->v6.sin6_port)
-		return 0;
-	if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr))
-		return 0;
-	/* If this is a linklocal address, compare the scope_id. */
-	if (ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) {
-		if (addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
-		    (addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)) {
-			return 0;
-		}
-	}
-
-	return 1;
+	return __sctp_v6_cmp_addr(addr1, addr2) &&
+	       addr1->v6.sin6_port == addr2->v6.sin6_port;
 }
 
 /* Initialize addr struct to INADDR_ANY. */
@@ -727,8 +730,10 @@
 			sctp_v6_map_v4(addr);
 	}
 
-	if (addr->sa.sa_family == AF_INET)
+	if (addr->sa.sa_family == AF_INET) {
+		memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
 		return sizeof(struct sockaddr_in);
+	}
 	return sizeof(struct sockaddr_in6);
 }
 
@@ -842,8 +847,8 @@
 			       const union sctp_addr *addr2,
 			       struct sctp_sock *opt)
 {
-	struct sctp_af *af1, *af2;
 	struct sock *sk = sctp_opt2sk(opt);
+	struct sctp_af *af1, *af2;
 
 	af1 = sctp_get_af_specific(addr1->sa.sa_family);
 	af2 = sctp_get_af_specific(addr2->sa.sa_family);
@@ -859,10 +864,10 @@
 	if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2))
 		return 1;
 
-	if (addr1->sa.sa_family != addr2->sa.sa_family)
-		return 0;
+	if (addr1->sa.sa_family == AF_INET && addr2->sa.sa_family == AF_INET)
+		return addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr;
 
-	return af1->cmp_addr(addr1, addr2);
+	return __sctp_v6_cmp_addr(addr1, addr2);
 }
 
 /* Verify that the provided sockaddr looks bindable.   Common verification,
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 8ec20a6..bfd0686 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -144,10 +144,8 @@
 				     void *arg,
 				     sctp_cmd_seq_t *commands);
 
-static sctp_ierror_t sctp_sf_authenticate(struct net *net,
-				    const struct sctp_endpoint *ep,
+static sctp_ierror_t sctp_sf_authenticate(
 				    const struct sctp_association *asoc,
-				    const sctp_subtype_t type,
 				    struct sctp_chunk *chunk);
 
 static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net,
@@ -615,6 +613,38 @@
 	return SCTP_DISPOSITION_CONSUME;
 }
 
+static bool sctp_auth_chunk_verify(struct net *net, struct sctp_chunk *chunk,
+				   const struct sctp_association *asoc)
+{
+	struct sctp_chunk auth;
+
+	if (!chunk->auth_chunk)
+		return true;
+
+	/* SCTP-AUTH:  auth_chunk pointer is only set when the cookie-echo
+	 * is supposed to be authenticated and we have to do delayed
+	 * authentication.  We've just recreated the association using
+	 * the information in the cookie and now it's much easier to
+	 * do the authentication.
+	 */
+
+	/* Make sure that we and the peer are AUTH capable */
+	if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
+		return false;
+
+	/* set-up our fake chunk so that we can process it */
+	auth.skb = chunk->auth_chunk;
+	auth.asoc = chunk->asoc;
+	auth.sctp_hdr = chunk->sctp_hdr;
+	auth.chunk_hdr = (struct sctp_chunkhdr *)
+				skb_push(chunk->auth_chunk,
+					 sizeof(struct sctp_chunkhdr));
+	skb_pull(chunk->auth_chunk, sizeof(struct sctp_chunkhdr));
+	auth.transport = chunk->transport;
+
+	return sctp_sf_authenticate(asoc, &auth) == SCTP_IERROR_NO_ERROR;
+}
+
 /*
  * Respond to a normal COOKIE ECHO chunk.
  * We are the side that is being asked for an association.
@@ -751,36 +781,9 @@
 	if (error)
 		goto nomem_init;
 
-	/* SCTP-AUTH:  auth_chunk pointer is only set when the cookie-echo
-	 * is supposed to be authenticated and we have to do delayed
-	 * authentication.  We've just recreated the association using
-	 * the information in the cookie and now it's much easier to
-	 * do the authentication.
-	 */
-	if (chunk->auth_chunk) {
-		struct sctp_chunk auth;
-		sctp_ierror_t ret;
-
-		/* Make sure that we and the peer are AUTH capable */
-		if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
-			sctp_association_free(new_asoc);
-			return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
-		}
-
-		/* set-up our fake chunk so that we can process it */
-		auth.skb = chunk->auth_chunk;
-		auth.asoc = chunk->asoc;
-		auth.sctp_hdr = chunk->sctp_hdr;
-		auth.chunk_hdr = (sctp_chunkhdr_t *)skb_push(chunk->auth_chunk,
-					    sizeof(sctp_chunkhdr_t));
-		skb_pull(chunk->auth_chunk, sizeof(sctp_chunkhdr_t));
-		auth.transport = chunk->transport;
-
-		ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth);
-		if (ret != SCTP_IERROR_NO_ERROR) {
-			sctp_association_free(new_asoc);
-			return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
-		}
+	if (!sctp_auth_chunk_verify(net, chunk, new_asoc)) {
+		sctp_association_free(new_asoc);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 	}
 
 	repl = sctp_make_cookie_ack(new_asoc, chunk);
@@ -1717,13 +1720,15 @@
 			       GFP_ATOMIC))
 		goto nomem;
 
+	if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
+		return SCTP_DISPOSITION_DISCARD;
+
 	/* Make sure no new addresses are being added during the
 	 * restart.  Though this is a pretty complicated attack
 	 * since you'd have to get inside the cookie.
 	 */
-	if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands)) {
+	if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands))
 		return SCTP_DISPOSITION_CONSUME;
-	}
 
 	/* If the endpoint is in the SHUTDOWN-ACK-SENT state and recognizes
 	 * the peer has restarted (Action A), it MUST NOT setup a new
@@ -1828,6 +1833,9 @@
 			       GFP_ATOMIC))
 		goto nomem;
 
+	if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
+		return SCTP_DISPOSITION_DISCARD;
+
 	/* Update the content of current association.  */
 	sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
@@ -1920,6 +1928,9 @@
 	 * a COOKIE ACK.
 	 */
 
+	if (!sctp_auth_chunk_verify(net, chunk, asoc))
+		return SCTP_DISPOSITION_DISCARD;
+
 	/* Don't accidentally move back into established state. */
 	if (asoc->state < SCTP_STATE_ESTABLISHED) {
 		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
@@ -1959,7 +1970,7 @@
 		}
 	}
 
-	repl = sctp_make_cookie_ack(new_asoc, chunk);
+	repl = sctp_make_cookie_ack(asoc, chunk);
 	if (!repl)
 		goto nomem;
 
@@ -3981,10 +3992,8 @@
  *
  * The return value is the disposition of the chunk.
  */
-static sctp_ierror_t sctp_sf_authenticate(struct net *net,
-				    const struct sctp_endpoint *ep,
+static sctp_ierror_t sctp_sf_authenticate(
 				    const struct sctp_association *asoc,
-				    const sctp_subtype_t type,
 				    struct sctp_chunk *chunk)
 {
 	struct sctp_authhdr *auth_hdr;
@@ -4083,7 +4092,7 @@
 						  commands);
 
 	auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
-	error = sctp_sf_authenticate(net, ep, asoc, type, chunk);
+	error = sctp_sf_authenticate(asoc, chunk);
 	switch (error) {
 	case SCTP_IERROR_AUTH_BAD_HMAC:
 		/* Generate the ERROR chunk and discard the rest
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 8cdd6bb..78f3805 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -335,11 +335,14 @@
 	if (!opt->pf->af_supported(addr->sa.sa_family, opt))
 		return NULL;
 
-	/* V4 mapped address are really of AF_INET family */
-	if (addr->sa.sa_family == AF_INET6 &&
-	    ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
-	    !opt->pf->af_supported(AF_INET, opt))
-		return NULL;
+	if (addr->sa.sa_family == AF_INET6) {
+		if (len < SIN6_LEN_RFC2133)
+			return NULL;
+		/* V4 mapped address are really of AF_INET family */
+		if (ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
+		    !opt->pf->af_supported(AF_INET, opt))
+			return NULL;
+	}
 
 	/* If we get this far, af is valid. */
 	af = sctp_get_af_specific(addr->sa.sa_family);
@@ -1519,7 +1522,7 @@
 
 	pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);
 
-	lock_sock(sk);
+	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
 	sk->sk_shutdown = SHUTDOWN_MASK;
 	sk->sk_state = SCTP_SS_CLOSING;
 
@@ -1569,7 +1572,7 @@
 	 * held and that should be grabbed before socket lock.
 	 */
 	spin_lock_bh(&net->sctp.addr_wq_lock);
-	bh_lock_sock(sk);
+	bh_lock_sock_nested(sk);
 
 	/* Hold the sock, since sk_common_release() will put sock_put()
 	 * and we have just a little more cleanup.
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index bea0005..6825e05 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -723,7 +723,6 @@
 	return event;
 
 fail_mark:
-	sctp_chunk_put(chunk);
 	kfree_skb(skb);
 fail:
 	return NULL;
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index b5c279b..bbee334 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -59,7 +59,7 @@
 	strp->rx_stopped = 1;
 
 	/* Report an error on the lower socket */
-	csk->sk_err = err;
+	csk->sk_err = -err;
 	csk->sk_error_report(csk);
 }
 
@@ -285,9 +285,9 @@
 					strp_start_rx_timer(strp);
 				}
 
+				rxm->accum_len += cand_len;
 				strp->rx_need_bytes = rxm->strp.full_len -
 						       rxm->accum_len;
-				rxm->accum_len += cand_len;
 				rxm->early_eaten = cand_len;
 				STRP_STATS_ADD(strp->stats.rx_bytes, cand_len);
 				desc->count = 0; /* Stop reading socket */
@@ -310,6 +310,7 @@
 		/* Hurray, we have a new message! */
 		del_timer(&strp->rx_msg_timer);
 		strp->rx_skb_head = NULL;
+		strp->rx_need_bytes = 0;
 		STRP_STATS_INCR(strp->stats.rx_msgs);
 
 		/* Give skb to upper layer */
@@ -374,9 +375,7 @@
 		return;
 
 	if (strp->rx_need_bytes) {
-		if (strp_peek_len(strp) >= strp->rx_need_bytes)
-			strp->rx_need_bytes = 0;
-		else
+		if (strp_peek_len(strp) < strp->rx_need_bytes)
 			return;
 	}
 
@@ -422,7 +421,7 @@
 	/* Message assembly timed out */
 	STRP_STATS_INCR(strp->stats.rx_msg_timeouts);
 	lock_sock(strp->sk);
-	strp->cb.abort_parser(strp, ETIMEDOUT);
+	strp->cb.abort_parser(strp, -ETIMEDOUT);
 	release_sock(strp->sk);
 }
 
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 79aec90..4afd414 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -237,9 +237,6 @@
 
 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
 
-	err = crypto_ahash_init(req);
-	if (err)
-		goto out;
 	err = crypto_ahash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength);
 	if (err)
 		goto out;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 61a504f..34f9405 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -1375,6 +1375,7 @@
 	struct dentry *clnt_dir = pipe_dentry->d_parent;
 	struct dentry *gssd_dir = clnt_dir->d_parent;
 
+	dget(pipe_dentry);
 	__rpc_rmpipe(d_inode(clnt_dir), pipe_dentry);
 	__rpc_depopulate(clnt_dir, gssd_dummy_info_file, 0, 1);
 	__rpc_depopulate(gssd_dir, gssd_dummy_clnt_dir, 0, 1);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index d24d14e..1bf9153 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2384,7 +2384,12 @@
 	case -EHOSTUNREACH:
 	case -EADDRINUSE:
 	case -ENOBUFS:
-		/* retry with existing socket, after a delay */
+		/*
+		 * xs_tcp_force_close() wakes tasks with -EIO.
+		 * We need to wake them first to ensure the
+		 * correct error code.
+		 */
+		xprt_wake_pending_tasks(xprt, status);
 		xs_tcp_force_close(xprt);
 		goto out;
 	}
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index 3200059..9ba3c46 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -79,7 +79,8 @@
 
 const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
 	[TIPC_NLA_NET_UNSPEC]		= { .type = NLA_UNSPEC },
-	[TIPC_NLA_NET_ID]		= { .type = NLA_U32 }
+	[TIPC_NLA_NET_ID]		= { .type = NLA_U32 },
+	[TIPC_NLA_NET_ADDR]		= { .type = NLA_U32 },
 };
 
 const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 3181b07..c88874f 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -95,6 +95,9 @@
 
 	ASSERT_RTNL();
 
+	if (strlen(newname) > NL80211_WIPHY_NAME_MAXLEN)
+		return -EINVAL;
+
 	/* prohibit calling the thing phy%d when %d is not its number */
 	sscanf(newname, PHY_NAME "%d%n", &wiphy_idx, &taken);
 	if (taken == strlen(newname) && wiphy_idx != rdev->wiphy_idx) {
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
index 30cc249..f900f5c 100644
--- a/net/wireless/db.txt
+++ b/net/wireless/db.txt
@@ -37,10 +37,10 @@
 
 country AL: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
-	(5150 - 5250 @ 80), (23), AUTO-BW
-	(5250 - 5350 @ 80), (23), DFS, AUTO-BW
-	(5470 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 
 country AM: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -51,8 +51,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 
 country AR:
 	(2402 - 2482 @ 40), (36)
@@ -75,8 +75,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -105,8 +105,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 
 country BB: DFS-FCC
 	(2402 - 2482 @ 40), (20)
@@ -122,8 +122,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -138,8 +138,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -228,8 +228,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -282,8 +282,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -293,8 +293,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -308,8 +308,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -317,8 +317,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -353,8 +353,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -367,8 +367,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -382,8 +382,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -398,8 +398,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -407,8 +407,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -428,8 +428,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 
 country GH: DFS-FCC
 	(2402 - 2482 @ 40), (20)
@@ -460,8 +460,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -505,8 +505,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -521,8 +521,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -535,8 +535,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -562,8 +562,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -571,8 +571,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -662,8 +662,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -684,8 +684,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -693,8 +693,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -702,8 +702,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -718,22 +718,22 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 
 country MD: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 
 country ME: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 
 country MF: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -752,8 +752,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 
 country MN: DFS-FCC
 	(2402 - 2482 @ 40), (20)
@@ -780,8 +780,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 
 country MR: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -793,17 +793,17 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
-country MU: DFS-FCC
+country MU: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
-	(5170 - 5250 @ 80), (24), AUTO-BW
-	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
-	(5490 - 5730 @ 160), (24), DFS
-	(5735 - 5835 @ 80), (30)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 
 country MV: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -860,8 +860,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -869,8 +869,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -911,8 +911,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 
 country PG: DFS-FCC
 	(2402 - 2482 @ 40), (20)
@@ -938,8 +938,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -947,8 +947,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 
 country PR: DFS-FCC
 	(2402 - 2472 @ 40), (30)
@@ -968,8 +968,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -1006,8 +1006,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -1017,8 +1017,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
         (5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -1047,8 +1047,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -1065,8 +1065,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -1074,8 +1074,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -1134,8 +1134,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 
 country TT:
 	(2402 - 2482 @ 40), (20)
@@ -1209,8 +1209,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 
 country VE: DFS-FCC
 	(2402 - 2482 @ 40), (20)
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 76e308f..f00e7d3 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -4133,7 +4133,7 @@
 	struct nlattr *rate;
 	u32 bitrate;
 	u16 bitrate_compat;
-	enum nl80211_attrs rate_flg;
+	enum nl80211_rate_info rate_flg;
 
 	rate = nla_nest_start(msg, attr);
 	if (!rate)
@@ -14868,7 +14868,8 @@
 	if (!ft_event->target_ap)
 		return;
 
-	msg = nlmsg_new(100 + ft_event->ric_ies_len, GFP_KERNEL);
+	msg = nlmsg_new(100 + ft_event->ies_len + ft_event->ric_ies_len,
+			GFP_KERNEL);
 	if (!msg)
 		return;
 
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index bc0ebd4..9195f23 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2142,6 +2142,21 @@
 	reg_free_request(reg_request);
 }
 
+static void notify_self_managed_wiphys(struct regulatory_request *request)
+{
+	struct cfg80211_registered_device *rdev;
+	struct wiphy *wiphy;
+
+	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+		wiphy = &rdev->wiphy;
+		if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED &&
+		    request->initiator == NL80211_REGDOM_SET_BY_USER &&
+		    request->user_reg_hint_type ==
+				NL80211_USER_REG_HINT_CELL_BASE)
+			reg_call_notifier(wiphy, request);
+	}
+}
+
 static bool reg_only_self_managed_wiphys(void)
 {
 	struct cfg80211_registered_device *rdev;
@@ -2193,6 +2208,7 @@
 
 	spin_unlock(&reg_requests_lock);
 
+	notify_self_managed_wiphys(reg_request);
 	if (reg_only_self_managed_wiphys()) {
 		reg_free_request(reg_request);
 		return;
@@ -3073,17 +3089,26 @@
 
 void wiphy_regulatory_register(struct wiphy *wiphy)
 {
-	struct regulatory_request *lr;
+	struct regulatory_request *lr = get_last_request();
 
-	/* self-managed devices ignore external hints */
-	if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)
+	/* self-managed devices ignore beacon hints and country IE */
+	if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) {
 		wiphy->regulatory_flags |= REGULATORY_DISABLE_BEACON_HINTS |
 					   REGULATORY_COUNTRY_IE_IGNORE;
 
+		/*
+		 * The last request may have been received before this
+		 * registration call. Call the driver notifier if
+		 * initiator is USER and user type is CELL_BASE.
+		 */
+		if (lr->initiator == NL80211_REGDOM_SET_BY_USER &&
+		    lr->user_reg_hint_type == NL80211_USER_REG_HINT_CELL_BASE)
+			reg_call_notifier(wiphy, lr);
+	}
+
 	if (!reg_dev_ignore_cell_hint(wiphy))
 		reg_num_devs_support_basehint++;
 
-	lr = get_last_request();
 	wiphy_update_regulatory(wiphy, lr->initiator);
 }
 
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 29c5661..13ff407 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -549,7 +549,7 @@
 			    int offset, int len)
 {
 	struct skb_shared_info *sh = skb_shinfo(skb);
-	const skb_frag_t *frag = &sh->frags[-1];
+	const skb_frag_t *frag = &sh->frags[0];
 	struct page *frag_page;
 	void *frag_ptr;
 	int frag_len, frag_size;
@@ -562,10 +562,10 @@
 
 	while (offset >= frag_size) {
 		offset -= frag_size;
-		frag++;
 		frag_page = skb_frag_page(frag);
 		frag_ptr = skb_frag_address(frag);
 		frag_size = skb_frag_size(frag);
+		frag++;
 	}
 
 	frag_ptr += offset;
@@ -577,12 +577,12 @@
 	len -= cur_len;
 
 	while (len > 0) {
-		frag++;
 		frag_len = skb_frag_size(frag);
 		cur_len = min(len, frag_len);
 		__frame_add_frag(frame, skb_frag_page(frag),
 				 skb_frag_address(frag), cur_len, frag_len);
 		len -= cur_len;
+		frag++;
 	}
 }
 
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index f83b74d..0077216 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1790,32 +1790,40 @@
 
 static int __init x25_init(void)
 {
-	int rc = proto_register(&x25_proto, 0);
+	int rc;
 
-	if (rc != 0)
+	rc = proto_register(&x25_proto, 0);
+	if (rc)
 		goto out;
 
 	rc = sock_register(&x25_family_ops);
-	if (rc != 0)
+	if (rc)
 		goto out_proto;
 
 	dev_add_pack(&x25_packet_type);
 
 	rc = register_netdevice_notifier(&x25_dev_notifier);
-	if (rc != 0)
+	if (rc)
 		goto out_sock;
 
+	rc = x25_register_sysctl();
+	if (rc)
+		goto out_dev;
+
+	rc = x25_proc_init();
+	if (rc)
+		goto out_sysctl;
+
 	pr_info("Linux Version 0.2\n");
 
-	x25_register_sysctl();
-	rc = x25_proc_init();
-	if (rc != 0)
-		goto out_dev;
 out:
 	return rc;
+out_sysctl:
+	x25_unregister_sysctl();
 out_dev:
 	unregister_netdevice_notifier(&x25_dev_notifier);
 out_sock:
+	dev_remove_pack(&x25_packet_type);
 	sock_unregister(AF_X25);
 out_proto:
 	proto_unregister(&x25_proto);
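
The reworked x25_init() above follows the usual register-in-order, unwind-in-reverse-order idiom: the return values of x25_register_sysctl() and x25_proc_init() are now checked, and dev_remove_pack() is added to the unwind path. Below is a minimal, self-contained sketch of that goto-unwind ladder; register_a/b/c and the unregister_* helpers are stand-in names used only for illustration, not real kernel functions.

#include <stdio.h>

static int register_a(void) { return 0; }
static void unregister_a(void) { }
static int register_b(void) { return 0; }
static void unregister_b(void) { }
static int register_c(void) { return -1; }	/* simulate a failure */

static int subsystem_init(void)
{
	int rc;

	rc = register_a();
	if (rc)
		goto out;

	rc = register_b();
	if (rc)
		goto out_a;

	rc = register_c();
	if (rc)
		goto out_b;

	return 0;

out_b:
	unregister_b();		/* release in reverse order of registration */
out_a:
	unregister_a();
out:
	return rc;
}

int main(void)
{
	printf("init: %d\n", subsystem_init());	/* prints -1 after unwinding */
	return 0;
}
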
diff --git a/net/x25/sysctl_net_x25.c b/net/x25/sysctl_net_x25.c
index 4323952..703d46a 100644
--- a/net/x25/sysctl_net_x25.c
+++ b/net/x25/sysctl_net_x25.c
@@ -73,9 +73,12 @@
 	{ 0, },
 };
 
-void __init x25_register_sysctl(void)
+int __init x25_register_sysctl(void)
 {
 	x25_table_header = register_net_sysctl(&init_net, "net/x25", x25_table);
+	if (!x25_table_header)
+		return -ENOMEM;
+	return 0;
 }
 
 void x25_unregister_sysctl(void)
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index ccfdc71..a00ec71 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -283,7 +283,7 @@
 		struct crypto_comp *tfm;
 
 		/* This can be any valid CPU ID so we don't need locking. */
-		tfm = __this_cpu_read(*pos->tfms);
+		tfm = this_cpu_read(*pos->tfms);
 
 		if (!strcmp(crypto_comp_name(tfm), alg_name)) {
 			pos->users++;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 74f2e8f..3fbe584 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1197,6 +1197,7 @@
 
 	if (orig->aead) {
 		x->aead = xfrm_algo_aead_clone(orig->aead);
+		x->geniv = orig->geniv;
 		if (!x->aead)
 			goto error;
 	}
@@ -1246,6 +1247,8 @@
 	x->curlft.add_time = orig->curlft.add_time;
 	x->km.state = orig->km.state;
 	x->km.seq = orig->km.seq;
+	x->replay = orig->replay;
+	x->preplay = orig->preplay;
 
 	return x;
 
@@ -1883,6 +1886,11 @@
 	struct xfrm_mgr *km;
 	struct xfrm_policy *pol = NULL;
 
+#ifdef CONFIG_COMPAT
+	if (in_compat_syscall())
+		return -EOPNOTSUPP;
+#endif
+
 	if (!optval && !optlen) {
 		xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
 		xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 3b3455a..773c66b 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -121,22 +121,17 @@
 	struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
 	struct xfrm_replay_state_esn *rs;
 
-	if (p->flags & XFRM_STATE_ESN) {
-		if (!rt)
-			return -EINVAL;
-
-		rs = nla_data(rt);
-
-		if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
-			return -EINVAL;
-
-		if (nla_len(rt) < xfrm_replay_state_esn_len(rs) &&
-		    nla_len(rt) != sizeof(*rs))
-			return -EINVAL;
-	}
-
 	if (!rt)
-		return 0;
+		return (p->flags & XFRM_STATE_ESN) ? -EINVAL : 0;
+
+	rs = nla_data(rt);
+
+	if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
+		return -EINVAL;
+
+	if (nla_len(rt) < xfrm_replay_state_esn_len(rs) &&
+	    nla_len(rt) != sizeof(*rs))
+		return -EINVAL;
 
 	/* As only ESP and AH support ESN feature. */
 	if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH))
@@ -1729,10 +1724,6 @@
 	struct sk_buff *skb;
 	int err;
 
-	err = verify_policy_dir(dir);
-	if (err)
-		return ERR_PTR(err);
-
 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
@@ -2254,10 +2245,6 @@
 	int n = 0;
 	struct net *net = sock_net(skb->sk);
 
-	err = verify_policy_dir(pi->dir);
-	if (err)
-		return err;
-
 	if (attrs[XFRMA_MIGRATE] == NULL)
 		return -EINVAL;
 
@@ -2373,11 +2360,6 @@
 {
 	struct net *net = &init_net;
 	struct sk_buff *skb;
-	int err;
-
-	err = verify_policy_dir(dir);
-	if (err)
-		return err;
 
 	skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k), GFP_ATOMIC);
 	if (skb == NULL)
@@ -3038,11 +3020,6 @@
 
 static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
 {
-	int err;
-
-	err = verify_policy_dir(dir);
-	if (err)
-		return err;
 
 	switch (c->event) {
 	case XFRM_MSG_NEWPOLICY:
diff --git a/scripts/adjust_autoksyms.sh b/scripts/adjust_autoksyms.sh
index 8dc1918..564db35 100755
--- a/scripts/adjust_autoksyms.sh
+++ b/scripts/adjust_autoksyms.sh
@@ -83,6 +83,13 @@
 	depfile="include/config/ksym/${sympath}.h"
 	mkdir -p "$(dirname "$depfile")"
 	touch "$depfile"
+	# Filesystems with coarse time precision may create timestamps
+	# equal to the one from a file that was very recently built and that
+	# needs to be rebuilt. Let's guard against that by making sure our
+	# dep files are always newer than the first file we created here.
+	while [ ! "$depfile" -nt "$new_ksyms_file" ]; do
+		touch "$depfile"
+	done
 	echo $((count += 1))
 done | tail -1 )
 changed=${changed:-0}
diff --git a/scripts/build-all.py b/scripts/build-all.py
index bd468cd..4a60ebc 100755
--- a/scripts/build-all.py
+++ b/scripts/build-all.py
@@ -59,12 +59,8 @@
 
 def check_kernel():
     """Ensure that PWD is a kernel directory"""
-    have_defconfig = any([
-        os.path.isfile('arch/arm64/configs/msm_defconfig'),
-        os.path.isfile('arch/arm64/configs/sdm845_defconfig')])
-
-    if not all([os.path.isfile('MAINTAINERS'), have_defconfig]):
-        fail("This doesn't seem to be an MSM kernel dir")
+    if not os.path.isfile('MAINTAINERS'):
+        fail("This doesn't seem to be a kernel dir")
 
 def check_build():
     """Ensure that the build directory is present."""
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index cdfa754..0534378 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -3012,6 +3012,10 @@
 			} elsif ($line =~ /^\+.*\bEFI_GUID\s*\(/) {
 				$msg_type = "";
 
+			# Long copyright statements are another special case
+			} elsif ($rawline =~ /^\+.\*.*copyright.*\(c\).*$/i) {
+				$msg_type = "";
+
 			# Otherwise set the alternate message types
 
 			# a comment starts before $max_line_length
diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
index cbf4996..ed29bad 100644
--- a/scripts/kconfig/expr.c
+++ b/scripts/kconfig/expr.c
@@ -113,7 +113,7 @@
 		break;
 	case E_NOT:
 		expr_free(e->left.expr);
-		return;
+		break;
 	case E_EQUAL:
 	case E_GEQ:
 	case E_GTH:
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index aed678e..4a61636 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -364,6 +364,7 @@
 			menu->parent = parent;
 			last_menu = menu;
 		}
+		expr_free(basedep);
 		if (last_menu) {
 			parent->list = parent->next;
 			parent->next = last_menu->next;
diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y
index 71bf8bf..5122ed2 100644
--- a/scripts/kconfig/zconf.y
+++ b/scripts/kconfig/zconf.y
@@ -107,7 +107,27 @@
 %%
 input: nl start | start;
 
-start: mainmenu_stmt stmt_list | stmt_list;
+start: mainmenu_stmt stmt_list | no_mainmenu_stmt stmt_list;
+
+/* mainmenu entry */
+
+mainmenu_stmt: T_MAINMENU prompt nl
+{
+	menu_add_prompt(P_MENU, $2, NULL);
+};
+
+/* Default main menu, if there's no mainmenu entry */
+
+no_mainmenu_stmt: /* empty */
+{
+	/*
+	 * Hack: Keep the main menu title on the heap so we can safely free it
+	 * later regardless of whether it comes from the 'prompt' in
+	 * mainmenu_stmt or here
+	 */
+	menu_add_prompt(P_MENU, strdup("Linux Kernel Configuration"), NULL);
+};
+
 
 stmt_list:
 	  /* empty */
@@ -344,13 +364,6 @@
 	| if_block choice_stmt
 ;
 
-/* mainmenu entry */
-
-mainmenu_stmt: T_MAINMENU prompt nl
-{
-	menu_add_prompt(P_MENU, $2, NULL);
-};
-
 /* menu entry */
 
 menu: T_MENU prompt T_EOL
@@ -495,6 +508,7 @@
 
 void conf_parse(const char *name)
 {
+	const char *tmp;
 	struct symbol *sym;
 	int i;
 
@@ -502,7 +516,6 @@
 
 	sym_init();
 	_menu_init();
-	rootmenu.prompt = menu_add_prompt(P_MENU, "Linux Kernel Configuration", NULL);
 
 	if (getenv("ZCONF_DEBUG"))
 		zconfdebug = 1;
@@ -512,8 +525,10 @@
 	if (!modules_sym)
 		modules_sym = sym_find( "n" );
 
+	tmp = rootmenu.prompt->text;
 	rootmenu.prompt->text = _(rootmenu.prompt->text);
 	rootmenu.prompt->text = sym_expand_string_value(rootmenu.prompt->text);
+	free((char*)tmp);
 
 	menu_finalize(&rootmenu);
 	for_all_symbols(i, sym) {
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index 3c575cd0..0a2a737 100755
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -325,7 +325,7 @@
 
 # Build kernel header package
 (cd $srctree; find . -name Makefile\* -o -name Kconfig\* -o -name \*.pl) > "$objtree/debian/hdrsrcfiles"
-(cd $srctree; find arch/*/include include scripts -type f) >> "$objtree/debian/hdrsrcfiles"
+(cd $srctree; find arch/*/include include scripts -type f -o -type l) >> "$objtree/debian/hdrsrcfiles"
 (cd $srctree; find arch/$SRCARCH -name module.lds -o -name Kbuild.platforms -o -name Platform) >> "$objtree/debian/hdrsrcfiles"
 (cd $srctree; find $(find arch/$SRCARCH -name include -o -name scripts -type d) -type f) >> "$objtree/debian/hdrsrcfiles"
 if grep -q '^CONFIG_STACK_VALIDATION=y' $KCONFIG_CONFIG ; then
diff --git a/scripts/tags.sh b/scripts/tags.sh
index a2ff338..2a61db3 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -106,6 +106,7 @@
 		case "$i" in
 			*.[cS])
 				j=${i/\.[cS]/\.o}
+				j="${j#$tree}"
 				if [ -e $j ]; then
 					echo $i
 				fi
diff --git a/security/Kconfig b/security/Kconfig
index 2e68fa4..638afc8 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -6,6 +6,11 @@
 
 source security/keys/Kconfig
 
+if ARCH_QCOM
+source security/pfe/Kconfig
+endif
+
+
 config SECURITY_DMESG_RESTRICT
 	bool "Restrict unprivileged access to the kernel syslog"
 	default n
diff --git a/security/Makefile b/security/Makefile
index f2d71cd..79166ba 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -9,6 +9,7 @@
 subdir-$(CONFIG_SECURITY_APPARMOR)	+= apparmor
 subdir-$(CONFIG_SECURITY_YAMA)		+= yama
 subdir-$(CONFIG_SECURITY_LOADPIN)	+= loadpin
+subdir-$(CONFIG_ARCH_QCOM)	+= pfe
 
 # always enable default capabilities
 obj-y					+= commoncap.o
@@ -24,6 +25,7 @@
 obj-$(CONFIG_SECURITY_APPARMOR)		+= apparmor/
 obj-$(CONFIG_SECURITY_YAMA)		+= yama/
 obj-$(CONFIG_SECURITY_LOADPIN)		+= loadpin/
+obj-$(CONFIG_ARCH_QCOM)				+= pfe/
 obj-$(CONFIG_CGROUP_DEVICE)		+= device_cgroup.o
 
 # Object integrity file lists
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
index 4304372..95433ac 100644
--- a/security/integrity/digsig.c
+++ b/security/integrity/digsig.c
@@ -18,6 +18,7 @@
 #include <linux/cred.h>
 #include <linux/key-type.h>
 #include <linux/digsig.h>
+#include <linux/vmalloc.h>
 #include <crypto/public_key.h>
 #include <keys/system_keyring.h>
 
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index 38f2ed8..93f0917 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -78,6 +78,8 @@
 		       hash_algo_name[ima_hash_algo], rc);
 		return rc;
 	}
+	pr_info("Allocated hash algorithm: %s\n",
+		hash_algo_name[ima_hash_algo]);
 	return 0;
 }
 
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 2b3def1..a71f906 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -16,6 +16,9 @@
  *	implements the IMA hooks: ima_bprm_check, ima_file_mmap,
  *	and ima_file_check.
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/file.h>
 #include <linux/binfmts.h>
@@ -426,6 +429,16 @@
 
 	hash_setup(CONFIG_IMA_DEFAULT_HASH);
 	error = ima_init();
+
+	if (error && strcmp(hash_algo_name[ima_hash_algo],
+			    CONFIG_IMA_DEFAULT_HASH) != 0) {
+		pr_info("Allocating %s failed, going to use default hash algorithm %s\n",
+			hash_algo_name[ima_hash_algo], CONFIG_IMA_DEFAULT_HASH);
+		hash_setup_done = 0;
+		hash_setup(CONFIG_IMA_DEFAULT_HASH);
+		error = ima_init();
+	}
+
 	if (!error) {
 		ima_initialized = 1;
 		ima_update_policy_flag();
diff --git a/security/pfe/Kconfig b/security/pfe/Kconfig
new file mode 100644
index 0000000..0cd9e81
--- /dev/null
+++ b/security/pfe/Kconfig
@@ -0,0 +1,28 @@
+menu "Qualcomm Technologies, Inc Per File Encryption security device drivers"
+	depends on ARCH_QCOM
+
+config PFT
+	bool "Per-File-Tagger driver"
+	depends on SECURITY
+	default n
+	help
+		This driver is used for tagging enterprise files.
+		It is part of the Per-File-Encryption (PFE) feature.
+		The driver tags files when they are created by a
+		registered application.
+		Tagged files are encrypted using the dm-req-crypt driver.
+
+config PFK
+	bool "Per-File-Key driver"
+	depends on SECURITY
+	depends on SECURITY_SELINUX
+	default n
+	help
+		This driver is used for storing eCryptfs information
+		in the file node.
+		It is part of the eCryptfs hardware-enhanced solution
+		provided by Qualcomm Technologies, Inc.
+		The information is used when the file is later encrypted
+		using the ICE or dm crypto engine.
+
+endmenu
diff --git a/security/pfe/Makefile b/security/pfe/Makefile
new file mode 100644
index 0000000..4096aad
--- /dev/null
+++ b/security/pfe/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the MSM specific security device drivers.
+#
+
+ccflags-y += -Isecurity/selinux -Isecurity/selinux/include
+#ccflags-y += -Ifs/ext4
+ccflags-y += -Ifs/crypto
+
+obj-$(CONFIG_PFT) += pft.o
+obj-$(CONFIG_PFK) += pfk.o pfk_kc.o pfk_ice.o pfk_ext4.o pfk_f2fs.o
diff --git a/security/pfe/pfk.c b/security/pfe/pfk.c
new file mode 100644
index 0000000..740da32
--- /dev/null
+++ b/security/pfe/pfk.c
@@ -0,0 +1,533 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Per-File-Key (PFK).
+ *
+ * This driver is responsible for overall management of various
+ * Per File Encryption variants that work on top of or as part of different
+ * file systems.
+ *
+ * The driver has the following purposes:
+ * 1) Define priorities between PFEs if more than one is enabled
+ * 2) Extract key information from the inode
+ * 3) Load and manage various keys in the ICE HW engine
+ * 4) Be invoked from various layers in FS/BLOCK/STORAGE DRIVER that need
+ *    to make decisions about HW encryption management of the data
+ *    Some examples:
+ *	BLOCK LAYER: when it decides whether 2 chunks can be merged into
+ *	one encryption / decryption request sent to the HW
+ *
+ *	UFS DRIVER: when it needs to configure the ICE HW with a particular
+ *	key slot to be used for encryption / decryption
+ *
+ * PFE variants can differ in how the cryptographic info is stored inside
+ * the inode and in the actions taken upon file operations, but the common
+ * properties are described above
+ *
+ */
+
+
+/* Uncomment the line below to enable debug messages */
+/* #define DEBUG 1 */
+#define pr_fmt(fmt)	"pfk [%s]: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/printk.h>
+#include <linux/bio.h>
+#include <linux/security.h>
+#include <crypto/algapi.h>
+#include <crypto/ice.h>
+
+#include <linux/pfk.h>
+
+#include "pfk_kc.h"
+#include "objsec.h"
+#include "pfk_ice.h"
+#include "pfk_ext4.h"
+#include "pfk_f2fs.h"
+#include "pfk_internal.h"
+//#include "ext4.h"
+
+static bool pfk_ready;
+
+
+/* might be replaced by a table when more than one cipher is supported */
+#define PFK_SUPPORTED_KEY_SIZE 32
+#define PFK_SUPPORTED_SALT_SIZE 32
+
+/* Various PFE types and function tables to support each one of them */
+enum pfe_type {EXT4_CRYPT_PFE, F2FS_CRYPT_PFE, INVALID_PFE};
+
+typedef int (*pfk_parse_inode_type)(const struct bio *bio,
+	const struct inode *inode,
+	struct pfk_key_info *key_info,
+	enum ice_cryto_algo_mode *algo,
+	bool *is_pfe);
+
+typedef bool (*pfk_allow_merge_bio_type)(const struct bio *bio1,
+	const struct bio *bio2, const struct inode *inode1,
+	const struct inode *inode2);
+
+static const pfk_parse_inode_type pfk_parse_inode_ftable[] = {
+	/* EXT4_CRYPT_PFE */ &pfk_ext4_parse_inode,
+	/* F2FS_CRYPT_PFE */ &pfk_f2fs_parse_inode,
+};
+
+static const pfk_allow_merge_bio_type pfk_allow_merge_bio_ftable[] = {
+	/* EXT4_CRYPT_PFE */ &pfk_ext4_allow_merge_bio,
+	/* F2FS_CRYPT_PFE */ &pfk_f2fs_allow_merge_bio,
+};
+
+static void __exit pfk_exit(void)
+{
+	pfk_ready = false;
+	pfk_ext4_deinit();
+	pfk_f2fs_deinit();
+	pfk_kc_deinit();
+}
+
+static int __init pfk_init(void)
+{
+
+	int ret = 0;
+
+	ret = pfk_ext4_init();
+	if (ret != 0)
+		goto fail;
+
+	ret = pfk_f2fs_init();
+	if (ret != 0)
+		goto fail;
+
+	ret = pfk_kc_init();
+	if (ret != 0) {
+		pr_err("could init pfk key cache, error %d\n", ret);
+		pfk_ext4_deinit();
+		pfk_f2fs_deinit();
+		goto fail;
+	}
+
+	pfk_ready = true;
+	pr_info("Driver initialized successfully\n");
+
+	return 0;
+
+fail:
+	pr_err("Failed to init driver\n");
+	return -ENODEV;
+}
+
+/*
+ * If more than one type is supported simultaneously, this function will also
+ * set the priority between them
+ */
+static enum pfe_type pfk_get_pfe_type(const struct inode *inode)
+{
+	if (!inode)
+		return INVALID_PFE;
+
+	if (pfk_is_ext4_type(inode))
+		return EXT4_CRYPT_PFE;
+
+	if (pfk_is_f2fs_type(inode))
+		return F2FS_CRYPT_PFE;
+
+	return INVALID_PFE;
+}
+
+/**
+ * inode_to_filename() - get the filename from inode pointer.
+ * @inode: inode pointer
+ *
+ * it is used for debug prints.
+ *
+ * Return: filename string or "unknown".
+ */
+char *inode_to_filename(const struct inode *inode)
+{
+	struct dentry *dentry = NULL;
+	char *filename = NULL;
+
+	if (!inode)
+		return "NULL";
+
+	if (hlist_empty(&inode->i_dentry))
+		return "unknown";
+
+	dentry = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
+	filename = dentry->d_iname;
+
+	return filename;
+}
+
+/**
+ * pfk_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the driver is ready.
+ */
+static inline bool pfk_is_ready(void)
+{
+	return pfk_ready;
+}
+
+/**
+ * pfk_bio_get_inode() - get the inode from a bio.
+ * @bio: Pointer to BIO structure.
+ *
+ * Walk the bio struct links to get the inode.
+ * Please note that, in general, a bio may consist of several pages from
+ * several files, but in our case we always assume that all pages come
+ * from the same file, since our logic ensures it. That is why we only
+ * walk through the first page to look for inode.
+ *
+ * Return: pointer to the inode struct if successful, or NULL otherwise.
+ *
+ */
+static struct inode *pfk_bio_get_inode(const struct bio *bio)
+{
+	if (!bio)
+		return NULL;
+	if (!bio_has_data((struct bio *)bio))
+		return NULL;
+	if (!bio->bi_io_vec)
+		return NULL;
+	if (!bio->bi_io_vec->bv_page)
+		return NULL;
+
+	if (PageAnon(bio->bi_io_vec->bv_page)) {
+		struct inode *inode;
+
+		/* Using direct-io (O_DIRECT) without page cache */
+		inode = dio_bio_get_inode((struct bio *)bio);
+		pr_debug("inode on direct-io, inode = 0x%pK.\n", inode);
+
+		return inode;
+	}
+
+	if (!page_mapping(bio->bi_io_vec->bv_page))
+		return NULL;
+
+	if (!bio->bi_io_vec->bv_page->mapping->host)
+		return NULL;
+
+	return bio->bi_io_vec->bv_page->mapping->host;
+}
+
+/**
+ * pfk_key_size_to_key_type() - translate key size to key size enum
+ * @key_size: key size in bytes
+ * @key_size_type: pointer to store the output enum (can be null)
+ *
+ * return 0 in case of success, error otherwise (i.e. unsupported key size)
+ */
+int pfk_key_size_to_key_type(size_t key_size,
+	enum ice_crpto_key_size *key_size_type)
+{
+	/*
+	 *  currently only 32 bit key size is supported
+	 *  in the future, table with supported key sizes might
+	 *  be introduced
+	 */
+
+	if (key_size != PFK_SUPPORTED_KEY_SIZE) {
+		pr_err("not supported key size %zu\n", key_size);
+		return -EINVAL;
+	}
+
+	if (key_size_type)
+		*key_size_type = ICE_CRYPTO_KEY_SIZE_256;
+
+	return 0;
+}
+
+/*
+ * Checks whether the inode's superblock filesystem type matches fs_type
+ */
+bool pfe_is_inode_filesystem_type(const struct inode *inode,
+	const char *fs_type)
+{
+	if (!inode || !fs_type)
+		return false;
+
+	if (!inode->i_sb)
+		return false;
+
+	if (!inode->i_sb->s_type)
+		return false;
+
+	return (strcmp(inode->i_sb->s_type->name, fs_type) == 0);
+}
+
+/**
+ * pfk_get_key_for_bio() - get the encryption key to be used for a bio
+ *
+ * @bio: pointer to the BIO
+ * @key_info: pointer to the key information which will be filled in
+ * @algo_mode: optional pointer to the algorithm identifier which will be set
+ * @is_pfe: will be set to false if the BIO should be left unencrypted
+ *
+ * Return: 0 if a key is being used, otherwise a -errno value
+ */
+static int pfk_get_key_for_bio(const struct bio *bio,
+		struct pfk_key_info *key_info,
+		enum ice_cryto_algo_mode *algo_mode,
+		bool *is_pfe)
+{
+	const struct inode *inode;
+	enum pfe_type which_pfe;
+	const struct blk_encryption_key *key;
+
+	inode = pfk_bio_get_inode(bio);
+	which_pfe = pfk_get_pfe_type(inode);
+
+	if (which_pfe != INVALID_PFE) {
+		/* Encrypted file; override ->bi_crypt_key */
+		pr_debug("parsing inode %lu with PFE type %d\n",
+			 inode->i_ino, which_pfe);
+		return (*(pfk_parse_inode_ftable[which_pfe]))
+				(bio, inode, key_info, algo_mode, is_pfe);
+	}
+
+	/*
+	 * bio is not for an encrypted file.  Use ->bi_crypt_key if it was set.
+	 * Otherwise, don't encrypt/decrypt the bio.
+	 */
+	key = bio->bi_crypt_key;
+	if (!key) {
+		*is_pfe = false;
+		return -EINVAL;
+	}
+
+	/* Note: the "salt" is really just the second half of the XTS key. */
+	BUILD_BUG_ON(sizeof(key->raw) !=
+		     PFK_SUPPORTED_KEY_SIZE + PFK_SUPPORTED_SALT_SIZE);
+	key_info->key = &key->raw[0];
+	key_info->key_size = PFK_SUPPORTED_KEY_SIZE;
+	key_info->salt = &key->raw[PFK_SUPPORTED_KEY_SIZE];
+	key_info->salt_size = PFK_SUPPORTED_SALT_SIZE;
+	if (algo_mode)
+		*algo_mode = ICE_CRYPTO_ALGO_MODE_AES_XTS;
+	return 0;
+}
+
+
+/**
+ * pfk_load_key_start() - loads the PFE encryption key into the ICE.
+ *			  Can also be invoked from a non-PFE context, in
+ *			  which case it is not relevant and the is_pfe
+ *			  flag is set to false
+ *
+ * @bio: Pointer to the BIO structure
+ * @ice_setting: Pointer to ice setting structure that will be filled with
+ * ice configuration values, including the index to which the key was loaded
+ * @is_pfe: will be false if the inode is not relevant to PFE, in which case
+ * it should be treated as non-PFE by the block layer
+ *
+ * Returns the index where the key is stored in encryption hw and additional
+ * information that will be used later for configuration of the encryption hw.
+ *
+ * Must be followed by pfk_load_key_end when the key is no longer used by ICE
+ *
+ */
+int pfk_load_key_start(const struct bio *bio,
+		struct ice_crypto_setting *ice_setting, bool *is_pfe,
+		bool async)
+{
+	int ret = 0;
+	struct pfk_key_info key_info = {NULL, NULL, 0, 0};
+	enum ice_cryto_algo_mode algo_mode = ICE_CRYPTO_ALGO_MODE_AES_XTS;
+	enum ice_crpto_key_size key_size_type = 0;
+	u32 key_index = 0;
+
+	if (!is_pfe) {
+		pr_err("is_pfe is NULL\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * only a few errors below can indicate that
+	 * this function was not invoked within PFE context,
+	 * otherwise we will consider it PFE
+	 */
+	*is_pfe = true;
+
+	if (!pfk_is_ready())
+		return -ENODEV;
+
+	if (!ice_setting) {
+		pr_err("ice setting is NULL\n");
+		return -EINVAL;
+	}
+
+	ret = pfk_get_key_for_bio(bio, &key_info, &algo_mode, is_pfe);
+
+	if (ret != 0)
+		return ret;
+
+	ret = pfk_key_size_to_key_type(key_info.key_size, &key_size_type);
+	if (ret != 0)
+		return ret;
+
+	ret = pfk_kc_load_key_start(key_info.key, key_info.key_size,
+			key_info.salt, key_info.salt_size, &key_index, async);
+	if (ret) {
+		if (ret != -EBUSY && ret != -EAGAIN)
+			pr_err("start: could not load key into pfk key cache, error %d\n",
+					ret);
+
+		return ret;
+	}
+
+	ice_setting->key_size = key_size_type;
+	ice_setting->algo_mode = algo_mode;
+	/* hardcoded for now */
+	ice_setting->key_mode = ICE_CRYPTO_USE_LUT_SW_KEY;
+	ice_setting->key_index = key_index;
+
+	pr_debug("loaded key for file %s key_index %d\n",
+		inode_to_filename(pfk_bio_get_inode(bio)), key_index);
+
+	return 0;
+}
+
+/**
+ * pfk_load_key_end() - marks the PFE key as no longer used by ICE.
+ *			Can also be invoked from a non-PFE context, in
+ *			which case it is not relevant and the is_pfe
+ *			flag is set to false
+ *
+ * @bio: Pointer to the BIO structure
+ * @is_pfe: Pointer to is_pfe flag, which will be true if function was invoked
+ *			from PFE context
+ */
+int pfk_load_key_end(const struct bio *bio, bool *is_pfe)
+{
+	int ret = 0;
+	struct pfk_key_info key_info = {NULL, NULL, 0, 0};
+
+	if (!is_pfe) {
+		pr_err("is_pfe is NULL\n");
+		return -EINVAL;
+	}
+
+	/* only a few errors below can indicate that
+	 * this function was not invoked within PFE context,
+	 * otherwise we will consider it PFE
+	 */
+	*is_pfe = true;
+
+	if (!pfk_is_ready())
+		return -ENODEV;
+
+	ret = pfk_get_key_for_bio(bio, &key_info, NULL, is_pfe);
+	if (ret != 0)
+		return ret;
+
+	pfk_kc_load_key_end(key_info.key, key_info.key_size,
+		key_info.salt, key_info.salt_size);
+
+	pr_debug("finished using key for file %s\n",
+		inode_to_filename(pfk_bio_get_inode(bio)));
+
+	return 0;
+}
+
+/**
+ * pfk_allow_merge_bio() - Check if 2 BIOs can be merged.
+ * @bio1:	Pointer to first BIO structure.
+ * @bio2:	Pointer to second BIO structure.
+ *
+ * Prevent merging of BIOs from encrypted and non-encrypted
+ * files, or from files encrypted with different keys.
+ * Also prevent non-encrypted and encrypted data from the same file
+ * from being merged (the ecryptfs header, if stored inside the file,
+ * should remain non-encrypted).
+ * This API is called by the file system block layer.
+ *
+ * Return: true if the BIOs are allowed to be merged, false
+ * otherwise.
+ */
+bool pfk_allow_merge_bio(const struct bio *bio1, const struct bio *bio2)
+{
+	const struct blk_encryption_key *key1;
+	const struct blk_encryption_key *key2;
+	const struct inode *inode1;
+	const struct inode *inode2;
+	enum pfe_type which_pfe1;
+	enum pfe_type which_pfe2;
+
+	if (!pfk_is_ready())
+		return false;
+
+	if (!bio1 || !bio2)
+		return false;
+
+	if (bio1 == bio2)
+		return true;
+
+	key1 = bio1->bi_crypt_key;
+	key2 = bio2->bi_crypt_key;
+
+	inode1 = pfk_bio_get_inode(bio1);
+	inode2 = pfk_bio_get_inode(bio2);
+
+	which_pfe1 = pfk_get_pfe_type(inode1);
+	which_pfe2 = pfk_get_pfe_type(inode2);
+
+	/*
+	 * If one bio is for an encrypted file and the other is for a different
+	 * type of encrypted file or for blocks that are not part of an
+	 * encrypted file, do not merge.
+	 */
+	if (which_pfe1 != which_pfe2)
+		return false;
+
+	if (which_pfe1 != INVALID_PFE) {
+		/* Both bios are for the same type of encrypted file. */
+		return (*(pfk_allow_merge_bio_ftable[which_pfe1]))(bio1, bio2,
+			inode1, inode2);
+	}
+
+	/*
+	 * Neither bio is for an encrypted file.  Merge only if the default keys
+	 * are the same (or both are NULL).
+	 */
+	return key1 == key2 ||
+		(key1 && key2 &&
+		 !crypto_memneq(key1->raw, key2->raw, sizeof(key1->raw)));
+}
+
+/**
+ * Flush key table on storage core reset. During core reset, the key
+ * configuration is lost in ICE. We need to flush the cache so that the
+ * keys will be reconfigured for every subsequent transaction
+ */
+void pfk_clear_on_reset(void)
+{
+	if (!pfk_is_ready())
+		return;
+
+	pfk_kc_clear_on_reset();
+}
+
+module_init(pfk_init);
+module_exit(pfk_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Per-File-Key driver");
diff --git a/security/pfe/pfk_ext4.c b/security/pfe/pfk_ext4.c
new file mode 100644
index 0000000..0eb1225
--- /dev/null
+++ b/security/pfe/pfk_ext4.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Per-File-Key (PFK) - EXT4
+ *
+ * This driver is used for working with the EXT4 crypt extension
+ *
+ * The key information is stored in the inode by EXT4 when the file is first
+ * opened and will later be accessed by the Block Device Driver to actually
+ * load the key into the encryption hw.
+ *
+ * PFK exposes APIs for loading and removing keys from the encryption hw
+ * and also an API to determine whether 2 adjacent blocks can be aggregated
+ * by the Block Layer into one request to the encryption hw.
+ *
+ */
+
+
+/* Uncomment the line below to enable debug messages */
+/* #define DEBUG 1 */
+#define pr_fmt(fmt)	"pfk_ext4 [%s]: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/printk.h>
+
+#include "fscrypt_ice.h"
+#include "pfk_ext4.h"
+//#include "ext4_ice.h"
+
+static bool pfk_ext4_ready;
+
+/*
+ * pfk_ext4_deinit() - Deinit function, should be invoked by upper PFK layer
+ */
+void pfk_ext4_deinit(void)
+{
+	pfk_ext4_ready = false;
+}
+
+/*
+ * pfk_ext4_init() - Init function, should be invoked by upper PFK layer
+ */
+int __init pfk_ext4_init(void)
+{
+	pfk_ext4_ready = true;
+	pr_info("PFK EXT4 inited successfully\n");
+
+	return 0;
+}
+
+/**
+ * pfk_ext4_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the driver is ready.
+ */
+static inline bool pfk_ext4_is_ready(void)
+{
+	return pfk_ext4_ready;
+}
+
+/**
+ * pfk_ext4_dump_inode() - dumps all interesting info about inode to the screen
+ *
+ *
+ */
+/*
+ * static void pfk_ext4_dump_inode(const struct inode* inode)
+ * {
+ *	struct ext4_crypt_info *ci = ext4_encryption_info((struct inode*)inode);
+ *
+ *	pr_debug("dumping inode with address 0x%p\n", inode);
+ *	pr_debug("S_ISREG is %d\n", S_ISREG(inode->i_mode));
+ *	pr_debug("EXT4_INODE_ENCRYPT flag is %d\n",
+ *		ext4_test_inode_flag((struct inode*)inode, EXT4_INODE_ENCRYPT));
+ *	if (ci) {
+ *		pr_debug("crypt_info address 0x%p\n", ci);
+ *		pr_debug("ci->ci_data_mode %d\n", ci->ci_data_mode);
+ *	} else {
+ *		pr_debug("crypt_info is NULL\n");
+ *	}
+ * }
+ */
+
+/**
+ * pfk_is_ext4_type() - return true if inode belongs to ICE EXT4 PFE
+ * @inode: inode pointer
+ */
+bool pfk_is_ext4_type(const struct inode *inode)
+{
+	if (!pfe_is_inode_filesystem_type(inode, "ext4"))
+		return false;
+
+	return fscrypt_should_be_processed_by_ice(inode);
+}
+
+/**
+ * pfk_ext4_parse_cipher() - parse cipher from inode to enum
+ * @inode: inode
+ * @algo: pointer to store the output enum (can be null)
+ *
+ * return 0 in case of success, error otherwise (i.e. unsupported cipher)
+ */
+static int pfk_ext4_parse_cipher(const struct inode *inode,
+	enum ice_cryto_algo_mode *algo)
+{
+	/*
+	 * currently only AES XTS algo is supported
+	 * in the future, table with supported ciphers might
+	 * be introduced
+	 */
+
+	if (!inode)
+		return -EINVAL;
+
+	if (!fscrypt_is_aes_xts_cipher(inode)) {
+		pr_err("ext4 alghoritm is not supported by pfk\n");
+		return -EINVAL;
+	}
+
+	if (algo)
+		*algo = ICE_CRYPTO_ALGO_MODE_AES_XTS;
+
+	return 0;
+}
+
+
+int pfk_ext4_parse_inode(const struct bio *bio,
+	const struct inode *inode,
+	struct pfk_key_info *key_info,
+	enum ice_cryto_algo_mode *algo,
+	bool *is_pfe)
+{
+	int ret = 0;
+
+	if (!is_pfe)
+		return -EINVAL;
+
+	/*
+	 * only a few errors below can indicate that
+	 * this function was not invoked within PFE context,
+	 * otherwise we will consider it PFE
+	 */
+	*is_pfe = true;
+
+	if (!pfk_ext4_is_ready())
+		return -ENODEV;
+
+	if (!inode)
+		return -EINVAL;
+
+	if (!key_info)
+		return -EINVAL;
+
+	key_info->key = fscrypt_get_ice_encryption_key(inode);
+	if (!key_info->key) {
+		pr_err("could not parse key from ext4\n");
+		return -EINVAL;
+	}
+
+	key_info->key_size = fscrypt_get_ice_encryption_key_size(inode);
+	if (!key_info->key_size) {
+		pr_err("could not parse key size from ext4\n");
+		return -EINVAL;
+	}
+
+	key_info->salt = fscrypt_get_ice_encryption_salt(inode);
+	if (!key_info->salt) {
+		pr_err("could not parse salt from ext4\n");
+		return -EINVAL;
+	}
+
+	key_info->salt_size = fscrypt_get_ice_encryption_salt_size(inode);
+	if (!key_info->salt_size) {
+		pr_err("could not parse salt size from ext4\n");
+		return -EINVAL;
+	}
+
+	ret = pfk_ext4_parse_cipher(inode, algo);
+	if (ret != 0) {
+		pr_err("not supported cipher\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+bool pfk_ext4_allow_merge_bio(const struct bio *bio1,
+	const struct bio *bio2, const struct inode *inode1,
+	const struct inode *inode2)
+{
+	/* if there is no ext4 pfk, don't disallow merging blocks */
+	if (!pfk_ext4_is_ready())
+		return true;
+
+	if (!inode1 || !inode2)
+		return false;
+
+	return fscrypt_is_ice_encryption_info_equal(inode1, inode2);
+}
diff --git a/security/pfe/pfk_ext4.h b/security/pfe/pfk_ext4.h
new file mode 100644
index 0000000..c33232f
--- /dev/null
+++ b/security/pfe/pfk_ext4.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _PFK_EXT4_H_
+#define _PFK_EXT4_H_
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <crypto/ice.h>
+#include "pfk_internal.h"
+
+bool pfk_is_ext4_type(const struct inode *inode);
+
+int pfk_ext4_parse_inode(const struct bio *bio,
+	const struct inode *inode,
+	struct pfk_key_info *key_info,
+	enum ice_cryto_algo_mode *algo,
+	bool *is_pfe);
+
+bool pfk_ext4_allow_merge_bio(const struct bio *bio1,
+	const struct bio *bio2, const struct inode *inode1,
+	const struct inode *inode2);
+
+int __init pfk_ext4_init(void);
+
+void pfk_ext4_deinit(void);
+
+#endif /* _PFK_EXT4_H_ */
diff --git a/security/pfe/pfk_f2fs.c b/security/pfe/pfk_f2fs.c
new file mode 100644
index 0000000..8b9d515
--- /dev/null
+++ b/security/pfe/pfk_f2fs.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Per-File-Key (PFK) - f2fs
+ *
+ * This driver is used for working with the EXT4/F2FS crypt extension
+ *
+ * The key information is stored in the inode by EXT4/F2FS when the file is
+ * first opened and will later be accessed by the Block Device Driver to
+ * actually load the key into the encryption hw.
+ *
+ * PFK exposes APIs for loading and removing keys from the encryption hw
+ * and also an API to determine whether 2 adjacent blocks can be aggregated
+ * by the Block Layer into one request to the encryption hw.
+ *
+ */
+
+
+/* Uncomment the line below to enable debug messages */
+#define DEBUG 1
+#define pr_fmt(fmt)	"pfk_f2fs [%s]: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/printk.h>
+
+#include "fscrypt_ice.h"
+#include "pfk_f2fs.h"
+
+static bool pfk_f2fs_ready;
+
+/*
+ * pfk_f2fs_deinit() - Deinit function, should be invoked by upper PFK layer
+ */
+void pfk_f2fs_deinit(void)
+{
+	pfk_f2fs_ready = false;
+}
+
+/*
+ * pfk_f2fs_init() - Init function, should be invoked by upper PFK layer
+ */
+int __init pfk_f2fs_init(void)
+{
+	pfk_f2fs_ready = true;
+	pr_info("PFK F2FS inited successfully\n");
+
+	return 0;
+}
+
+/**
+ * pfk_f2fs_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the driver is ready.
+ */
+static inline bool pfk_f2fs_is_ready(void)
+{
+	return pfk_f2fs_ready;
+}
+
+/**
+ * pfk_is_f2fs_type() - return true if inode belongs to ICE F2FS PFE
+ * @inode: inode pointer
+ */
+bool pfk_is_f2fs_type(const struct inode *inode)
+{
+	if (!pfe_is_inode_filesystem_type(inode, "f2fs"))
+		return false;
+
+	return fscrypt_should_be_processed_by_ice(inode);
+}
+
+/**
+ * pfk_f2fs_parse_cipher() - parse cipher from inode to enum
+ * @inode: inode
+ * @algo: pointer to store the output enum (can be null)
+ *
+ * return 0 in case of success, error otherwise (i.e. unsupported cipher)
+ */
+static int pfk_f2fs_parse_cipher(const struct inode *inode,
+		enum ice_cryto_algo_mode *algo)
+{
+	/*
+	 * currently only AES XTS algo is supported
+	 * in the future, table with supported ciphers might
+	 * be introduced
+	 */
+	if (!inode)
+		return -EINVAL;
+
+	if (!fscrypt_is_aes_xts_cipher(inode)) {
+		pr_err("f2fs alghoritm is not supported by pfk\n");
+		return -EINVAL;
+	}
+
+	if (algo)
+		*algo = ICE_CRYPTO_ALGO_MODE_AES_XTS;
+
+	return 0;
+}
+
+
+int pfk_f2fs_parse_inode(const struct bio *bio,
+		const struct inode *inode,
+		struct pfk_key_info *key_info,
+		enum ice_cryto_algo_mode *algo,
+		bool *is_pfe)
+{
+	int ret = 0;
+
+	if (!is_pfe)
+		return -EINVAL;
+
+	/*
+	 * only a few errors below can indicate that
+	 * this function was not invoked within PFE context,
+	 * otherwise we will consider it PFE
+	 */
+	*is_pfe = true;
+
+	if (!pfk_f2fs_is_ready())
+		return -ENODEV;
+
+	if (!inode)
+		return -EINVAL;
+
+	if (!key_info)
+		return -EINVAL;
+
+	key_info->key = fscrypt_get_ice_encryption_key(inode);
+	if (!key_info->key) {
+		pr_err("could not parse key from f2fs\n");
+		return -EINVAL;
+	}
+
+	key_info->key_size = fscrypt_get_ice_encryption_key_size(inode);
+	if (!key_info->key_size) {
+		pr_err("could not parse key size from f2fs\n");
+		return -EINVAL;
+	}
+
+	key_info->salt = fscrypt_get_ice_encryption_salt(inode);
+	if (!key_info->salt) {
+		pr_err("could not parse salt from f2fs\n");
+		return -EINVAL;
+	}
+
+	key_info->salt_size = fscrypt_get_ice_encryption_salt_size(inode);
+	if (!key_info->salt_size) {
+		pr_err("could not parse salt size from f2fs\n");
+		return -EINVAL;
+	}
+
+	ret = pfk_f2fs_parse_cipher(inode, algo);
+	if (ret != 0) {
+		pr_err("not supported cipher\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+bool pfk_f2fs_allow_merge_bio(const struct bio *bio1,
+		const struct bio *bio2, const struct inode *inode1,
+		const struct inode *inode2)
+{
+	bool mergeable;
+
+	/* if there is no f2fs pfk, don't disallow merging blocks */
+	if (!pfk_f2fs_is_ready())
+		return true;
+
+	if (!inode1 || !inode2)
+		return false;
+
+	mergeable = fscrypt_is_ice_encryption_info_equal(inode1, inode2);
+	if (!mergeable)
+		return false;
+
+	/* ICE allows only consecutive iv_key stream. */
+	if (!bio_dun(bio1) && !bio_dun(bio2))
+		return true;
+	else if (!bio_dun(bio1) || !bio_dun(bio2))
+		return false;
+
+	return bio_end_dun(bio1) == bio_dun(bio2);
+}
diff --git a/security/pfe/pfk_f2fs.h b/security/pfe/pfk_f2fs.h
new file mode 100644
index 0000000..551d529
--- /dev/null
+++ b/security/pfe/pfk_f2fs.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _PFK_F2FS_H_
+#define _PFK_F2FS_H_
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <crypto/ice.h>
+#include "pfk_internal.h"
+
+bool pfk_is_f2fs_type(const struct inode *inode);
+
+int pfk_f2fs_parse_inode(const struct bio *bio,
+		const struct inode *inode,
+		struct pfk_key_info *key_info,
+		enum ice_cryto_algo_mode *algo,
+		bool *is_pfe);
+
+bool pfk_f2fs_allow_merge_bio(const struct bio *bio1,
+	const struct bio *bio2, const struct inode *inode1,
+	const struct inode *inode2);
+
+int __init pfk_f2fs_init(void);
+
+void pfk_f2fs_deinit(void);
+
+#endif /* _PFK_F2FS_H_ */
diff --git a/security/pfe/pfk_ice.c b/security/pfe/pfk_ice.c
new file mode 100644
index 0000000..e1d0100
--- /dev/null
+++ b/security/pfe/pfk_ice.c
@@ -0,0 +1,189 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/async.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <soc/qcom/scm.h>
+#include <linux/device-mapper.h>
+#include <soc/qcom/qseecomi.h>
+#include <crypto/ice.h>
+#include "pfk_ice.h"
+
+
+/**********************************/
+/** global definitions		 **/
+/**********************************/
+
+#define TZ_ES_SET_ICE_KEY 0x2
+#define TZ_ES_INVALIDATE_ICE_KEY 0x3
+
+/* index 0 and 1 is reserved for FDE */
+#define MIN_ICE_KEY_INDEX 2
+
+#define MAX_ICE_KEY_INDEX 31
+
+
+#define TZ_ES_SET_ICE_KEY_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_ES, TZ_ES_SET_ICE_KEY)
+
+
+#define TZ_ES_INVALIDATE_ICE_KEY_ID \
+		TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, \
+			TZ_SVC_ES, TZ_ES_INVALIDATE_ICE_KEY)
+
+
+#define TZ_ES_SET_ICE_KEY_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_5( \
+		TZ_SYSCALL_PARAM_TYPE_VAL, \
+		TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL, \
+		TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1( \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define ICE_KEY_SIZE 32
+#define ICE_SALT_SIZE 32
+
+static uint8_t ice_key[ICE_KEY_SIZE];
+static uint8_t ice_salt[ICE_SALT_SIZE];
+
+int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt,
+			char *storage_type)
+{
+	struct scm_desc desc = {0};
+	int ret, ret1;
+	char *tzbuf_key = (char *)ice_key;
+	char *tzbuf_salt = (char *)ice_salt;
+	char *s_type = storage_type;
+
+	uint32_t smc_id = 0;
+	u32 tzbuflen_key = sizeof(ice_key);
+	u32 tzbuflen_salt = sizeof(ice_salt);
+
+	if (index < MIN_ICE_KEY_INDEX || index > MAX_ICE_KEY_INDEX) {
+		pr_err("%s Invalid index %d\n", __func__, index);
+		return -EINVAL;
+	}
+	if (!key || !salt) {
+		pr_err("%s Invalid key/salt\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!tzbuf_key || !tzbuf_salt) {
+		pr_err("%s No Memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	if (s_type == NULL) {
+		pr_err("%s Invalid Storage type\n", __func__);
+		return -EINVAL;
+	}
+
+	memset(tzbuf_key, 0, tzbuflen_key);
+	memset(tzbuf_salt, 0, tzbuflen_salt);
+
+	memcpy(ice_key, key, tzbuflen_key);
+	memcpy(ice_salt, salt, tzbuflen_salt);
+
+	dmac_flush_range(tzbuf_key, tzbuf_key + tzbuflen_key);
+	dmac_flush_range(tzbuf_salt, tzbuf_salt + tzbuflen_salt);
+
+	smc_id = TZ_ES_SET_ICE_KEY_ID;
+
+	desc.arginfo = TZ_ES_SET_ICE_KEY_PARAM_ID;
+	desc.args[0] = index;
+	desc.args[1] = virt_to_phys(tzbuf_key);
+	desc.args[2] = tzbuflen_key;
+	desc.args[3] = virt_to_phys(tzbuf_salt);
+	desc.args[4] = tzbuflen_salt;
+
+	ret = qcom_ice_setup_ice_hw((const char *)s_type, true);
+
+	if (ret) {
+		pr_err("%s: could not enable clocks: %d\n", __func__, ret);
+		goto out;
+	}
+
+	ret = scm_call2_noretry(smc_id, &desc);
+
+	if (ret) {
+		pr_err("%s: Set Key Error: %d\n", __func__, ret);
+		if (ret == -EBUSY) {
+			if (qcom_ice_setup_ice_hw((const char *)s_type, false))
+				pr_err("%s: clock disable failed\n", __func__);
+			goto out;
+		}
+		/* Try to invalidate the key to keep ICE in proper state */
+		smc_id = TZ_ES_INVALIDATE_ICE_KEY_ID;
+		desc.arginfo = TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID;
+		desc.args[0] = index;
+		ret1 = scm_call2_noretry(smc_id, &desc);
+		if (ret1)
+			pr_err("%s: Invalidate Key Error: %d\n", __func__,
+					ret1);
+		goto out;
+	}
+	ret = qcom_ice_setup_ice_hw((const char *)s_type, false);
+
+out:
+	return ret;
+}
+
+int qti_pfk_ice_invalidate_key(uint32_t index, char *storage_type)
+{
+	struct scm_desc desc = {0};
+	int ret;
+
+	uint32_t smc_id = 0;
+
+	if (index < MIN_ICE_KEY_INDEX || index > MAX_ICE_KEY_INDEX) {
+		pr_err("%s Invalid index %d\n", __func__, index);
+		return -EINVAL;
+	}
+
+	if (storage_type == NULL) {
+		pr_err("%s Invalid Storage type\n", __func__);
+		return -EINVAL;
+	}
+
+	smc_id = TZ_ES_INVALIDATE_ICE_KEY_ID;
+
+	desc.arginfo = TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID;
+	desc.args[0] = index;
+
+	ret = qcom_ice_setup_ice_hw((const char *)storage_type, true);
+
+	if (ret) {
+		pr_err("%s: could not enable clocks: 0x%x\n", __func__, ret);
+		return ret;
+	}
+
+	ret = scm_call2_noretry(smc_id, &desc);
+
+	if (ret) {
+		pr_err("%s: Error: 0x%x\n", __func__, ret);
+		if (qcom_ice_setup_ice_hw((const char *)storage_type, false))
+			pr_err("%s: could not disable clocks\n", __func__);
+	} else {
+		ret = qcom_ice_setup_ice_hw((const char *)storage_type, false);
+	}
+
+	return ret;
+}
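
A minimal caller-side sketch of how these two wrappers pair up, including the clock setup they both perform internally. This is illustrative only and not part of the patch; the helper name, index, key, salt, and storage string are placeholders (both buffers are assumed to be 32 bytes, matching ICE_KEY_SIZE/ICE_SALT_SIZE).

/* Illustrative sketch only -- not part of this patch. */
#include <linux/types.h>
#include "pfk_ice.h"

static int example_program_ice_slot(void)
{
	uint8_t key[32] = {0};		/* placeholder 32-byte key (ICE_KEY_SIZE) */
	uint8_t salt[32] = {0};		/* placeholder 32-byte salt (ICE_SALT_SIZE) */
	char storage[] = "ufs";		/* or "sdcc", matching the ICE instance */
	uint32_t index = 2;		/* first non-FDE slot (MIN_ICE_KEY_INDEX) */
	int ret;

	ret = qti_pfk_ice_set_key(index, key, salt, storage);
	if (ret)
		return ret;	/* set_key already tries to invalidate on failure */

	/* ... issue I/O tagged with this ICE key index ... */

	return qti_pfk_ice_invalidate_key(index, storage);
}
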
diff --git a/security/pfe/pfk_ice.h b/security/pfe/pfk_ice.h
new file mode 100644
index 0000000..31772e7
--- /dev/null
+++ b/security/pfe/pfk_ice.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef PFK_ICE_H_
+#define PFK_ICE_H_
+
+/*
+ * PFK ICE
+ *
+ * ICE key configuration through SCM calls.
+ *
+ */
+
+#include <linux/types.h>
+
+int pfk_ice_init(void);
+int pfk_ice_deinit(void);
+
+int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt,
+			char *storage_type);
+int qti_pfk_ice_invalidate_key(uint32_t index, char *storage_type);
+
+
+#endif /* PFK_ICE_H_ */
diff --git a/security/pfe/pfk_internal.h b/security/pfe/pfk_internal.h
new file mode 100644
index 0000000..3214327
--- /dev/null
+++ b/security/pfe/pfk_internal.h
@@ -0,0 +1,34 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _PFK_INTERNAL_H_
+#define _PFK_INTERNAL_H_
+
+#include <linux/types.h>
+#include <crypto/ice.h>
+
+struct pfk_key_info {
+	const unsigned char *key;
+	const unsigned char *salt;
+	size_t key_size;
+	size_t salt_size;
+};
+
+int pfk_key_size_to_key_type(size_t key_size,
+	enum ice_crpto_key_size *key_size_type);
+
+bool pfe_is_inode_filesystem_type(const struct inode *inode,
+	const char *fs_type);
+
+char *inode_to_filename(const struct inode *inode);
+
+#endif /* _PFK_INTERNAL_H_ */
diff --git a/security/pfe/pfk_kc.c b/security/pfe/pfk_kc.c
new file mode 100644
index 0000000..eecc026
--- /dev/null
+++ b/security/pfe/pfk_kc.c
@@ -0,0 +1,906 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * PFK Key Cache
+ *
+ * Key Cache used internally in PFK.
+ * The purpose of the cache is to save access time to QSEE when loading keys.
+ * Currently the cache is the same size as the total number of keys that can
+ * be loaded to ICE. Since this number is relatively small, the algorithms for
+ * cache eviction are simple, linear and based on the last-usage timestamp, i.e.
+ * the node that will be evicted is the one with the oldest timestamp.
+ * Empty entries always have the oldest timestamp.
+ */
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <crypto/ice.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+
+#include "pfk_kc.h"
+#include "pfk_ice.h"
+
+
+/** the first available key index in the ICE engine */
+#define PFK_KC_STARTING_INDEX 2
+
+/** currently the only supported key and salt sizes */
+#define PFK_KC_KEY_SIZE 32
+#define PFK_KC_SALT_SIZE 32
+
+/** Table size */
+/* TODO replace by some constant from ice.h */
+#define PFK_KC_TABLE_SIZE ((32) - (PFK_KC_STARTING_INDEX))
+
+/** The maximum key and salt size */
+#define PFK_MAX_KEY_SIZE PFK_KC_KEY_SIZE
+#define PFK_MAX_SALT_SIZE PFK_KC_SALT_SIZE
+#define PFK_UFS "ufs"
+
+static DEFINE_SPINLOCK(kc_lock);
+static unsigned long flags;
+static bool kc_ready;
+static char *s_type = "sdcc";
+
+/**
+ * enum pfk_kc_entry_state - state of the entry inside kc table
+ *
+ * @FREE:		   entry is free
+ * @ACTIVE_ICE_PRELOAD:    entry is actively used by ICE engine
+			   and cannot be used by others. SCM call
+			   to load key to ICE is pending to be performed
+ * @ACTIVE_ICE_LOADED:     entry is actively used by ICE engine and
+			   cannot be used by others. SCM call to load the
+			   key to ICE was successfully executed and key is
+			   now loaded
+ * @INACTIVE_INVALIDATING: entry is being invalidated during file close
+			   and cannot be used by others until invalidation
+			   is complete
+ * @INACTIVE:		   entry's key is already loaded, but is not
+			   currently being used. It can be re-used for
+			   optimization and to avoid SCM call cost or
+			   it can be taken by another key if there are
+			   no FREE entries
+ * @SCM_ERROR:		   error occurred while scm call was performed to
+			   load the key to ICE
+ */
+enum pfk_kc_entry_state {
+	FREE,
+	ACTIVE_ICE_PRELOAD,
+	ACTIVE_ICE_LOADED,
+	INACTIVE_INVALIDATING,
+	INACTIVE,
+	SCM_ERROR
+};
+
+struct kc_entry {
+	 unsigned char key[PFK_MAX_KEY_SIZE];
+	 size_t key_size;
+
+	 unsigned char salt[PFK_MAX_SALT_SIZE];
+	 size_t salt_size;
+
+	 u64 time_stamp;
+	 u32 key_index;
+
+	 struct task_struct *thread_pending;
+
+	 enum pfk_kc_entry_state state;
+
+	 /* ref count for the number of requests in the HW queue for this key */
+	 int loaded_ref_cnt;
+	 int scm_error;
+};
+
+static struct kc_entry kc_table[PFK_KC_TABLE_SIZE];
+
+/**
+ * kc_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the key cache is ready.
+ */
+static inline bool kc_is_ready(void)
+{
+	return kc_ready;
+}
+
+static inline void kc_spin_lock(void)
+{
+	spin_lock_irqsave(&kc_lock, flags);
+}
+
+static inline void kc_spin_unlock(void)
+{
+	spin_unlock_irqrestore(&kc_lock, flags);
+}
+
+/**
+ * kc_entry_is_available() - checks whether the entry is available
+ *
+ * Return true if the entry is available, false otherwise (or if invalid)
+ * Should be invoked under spinlock
+ */
+static bool kc_entry_is_available(const struct kc_entry *entry)
+{
+	if (!entry)
+		return false;
+
+	return (entry->state == FREE || entry->state == INACTIVE);
+}
+
+/**
+ * kc_entry_wait_till_available() - waits till entry is available
+ *
+ * Returns 0 in case of success or -ERESTARTSYS if the wait was interrupted
+ * by a signal
+ *
+ * Should be invoked under spinlock
+ */
+static int kc_entry_wait_till_available(struct kc_entry *entry)
+{
+	int res = 0;
+
+	while (!kc_entry_is_available(entry)) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (signal_pending(current)) {
+			res = -ERESTARTSYS;
+			break;
+		}
+		/* assuming only one thread can try to invalidate
+		 * the same entry
+		 */
+		entry->thread_pending = current;
+		kc_spin_unlock();
+		schedule();
+		kc_spin_lock();
+	}
+	set_current_state(TASK_RUNNING);
+
+	return res;
+}
+
+/**
+ * kc_entry_start_invalidating() - moves the entry to state
+ *				   INACTIVE_INVALIDATING.
+ *				   If the entry is in use, waits until
+ *				   it becomes available.
+ * @entry: pointer to entry
+ *
+ * Return 0 in case of success, otherwise error
+ * Should be invoked under spinlock
+ */
+static int kc_entry_start_invalidating(struct kc_entry *entry)
+{
+	int res;
+
+	res = kc_entry_wait_till_available(entry);
+	if (res)
+		return res;
+
+	entry->state = INACTIVE_INVALIDATING;
+
+	return 0;
+}
+
+/**
+ * kc_entry_finish_invalidating() - moves the entry to state FREE so that
+ *				     tasks waiting for it can proceed
+ *
+ * @entry: pointer to entry
+ *
+ * Returns nothing; does nothing if the entry is not in the
+ * INACTIVE_INVALIDATING state
+ * Should be invoked under spinlock
+ */
+static void kc_entry_finish_invalidating(struct kc_entry *entry)
+{
+	if (!entry)
+		return;
+
+	if (entry->state != INACTIVE_INVALIDATING)
+		return;
+
+	entry->state = FREE;
+}
+
+/**
+ * kc_min_entry() - compare two entries to find one with minimal time
+ * @a: ptr to the first entry. If NULL the other entry will be returned
+ * @b: pointer to the second entry
+ *
+ * Return the entry with the older timestamp, or @b if @a is NULL
+ */
+static inline struct kc_entry *kc_min_entry(struct kc_entry *a,
+		struct kc_entry *b)
+{
+	if (!a)
+		return b;
+
+	if (time_before64(b->time_stamp, a->time_stamp))
+		return b;
+
+	return a;
+}
+
+/**
+ * kc_entry_at_index() - return entry at specific index
+ * @index: index of entry to be accessed
+ *
+ * Return entry
+ * Should be invoked under spinlock
+ */
+static struct kc_entry *kc_entry_at_index(int index)
+{
+	return &(kc_table[index]);
+}
+
+/**
+ * kc_find_key_at_index() - find kc entry starting at specific index
+ * @key: key to look for
+ * @key_size: the key size
+ * @salt: salt to look for
+ * @salt_size: the salt size
+ * @starting_index: index to start the search from; if an entry is found,
+ * it is updated with the index of that entry
+ *
+ * Return entry or NULL in case of error
+ * Should be invoked under spinlock
+ */
+static struct kc_entry *kc_find_key_at_index(const unsigned char *key,
+	size_t key_size, const unsigned char *salt, size_t salt_size,
+	int *starting_index)
+{
+	struct kc_entry *entry = NULL;
+	int i = 0;
+
+	for (i = *starting_index; i < PFK_KC_TABLE_SIZE; i++) {
+		entry = kc_entry_at_index(i);
+
+		if (salt != NULL) {
+			if (entry->salt_size != salt_size)
+				continue;
+
+			if (memcmp(entry->salt, salt, salt_size) != 0)
+				continue;
+		}
+
+		if (entry->key_size != key_size)
+			continue;
+
+		if (memcmp(entry->key, key, key_size) == 0) {
+			*starting_index = i;
+			return entry;
+		}
+	}
+
+	return NULL;
+}
+
+/**
+ * kc_find_key() - find kc entry
+ * @key: key to look for
+ * @key_size: the key size
+ * @salt: salt to look for
+ * @salt_size: the salt size
+ *
+ * Return entry or NULL in case of error
+ * Should be invoked under spinlock
+ */
+static struct kc_entry *kc_find_key(const unsigned char *key, size_t key_size,
+		const unsigned char *salt, size_t salt_size)
+{
+	int index = 0;
+
+	return kc_find_key_at_index(key, key_size, salt, salt_size, &index);
+}
+
+/**
+ * kc_find_oldest_entry_non_locked() - finds the entry with minimal timestamp
+ * that is not locked
+ *
+ * Returns entry with minimal timestamp. Empty entries have timestamp
+ * of 0, therefore they are returned first.
+ * If all the entries are locked, returns NULL
+ * Should be invoked under spinlock
+ */
+static struct kc_entry *kc_find_oldest_entry_non_locked(void)
+{
+	struct kc_entry *curr_min_entry = NULL;
+	struct kc_entry *entry = NULL;
+	int i = 0;
+
+	for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+		entry = kc_entry_at_index(i);
+
+		if (entry->state == FREE)
+			return entry;
+
+		if (entry->state == INACTIVE)
+			curr_min_entry = kc_min_entry(curr_min_entry, entry);
+	}
+
+	return curr_min_entry;
+}
+
+/**
+ * kc_update_timestamp() - updates timestamp of entry to current
+ *
+ * @entry: entry to update
+ *
+ */
+static void kc_update_timestamp(struct kc_entry *entry)
+{
+	if (!entry)
+		return;
+
+	entry->time_stamp = get_jiffies_64();
+}
+
+/**
+ * kc_clear_entry() - clear the key from entry and mark entry not in use
+ *
+ * @entry: pointer to entry
+ *
+ * Should be invoked under spinlock
+ */
+static void kc_clear_entry(struct kc_entry *entry)
+{
+	if (!entry)
+		return;
+
+	memset(entry->key, 0, entry->key_size);
+	memset(entry->salt, 0, entry->salt_size);
+
+	entry->key_size = 0;
+	entry->salt_size = 0;
+
+	entry->time_stamp = 0;
+	entry->scm_error = 0;
+
+	entry->state = FREE;
+
+	entry->loaded_ref_cnt = 0;
+	entry->thread_pending = NULL;
+}
+
+/**
+ * kc_update_entry() - replaces the key in given entry and
+ *			loads the new key to ICE
+ *
+ * @entry: entry to replace key in
+ * @key: key
+ * @key_size: key_size
+ * @salt: salt
+ * @salt_size: salt_size
+ *
+ * The previous key is securely wiped; the new one is loaded to ICE via an
+ * SCM call. The spinlock is dropped and re-acquired around that call.
+ * Should be invoked under spinlock
+ */
+static int kc_update_entry(struct kc_entry *entry, const unsigned char *key,
+	size_t key_size, const unsigned char *salt, size_t salt_size)
+{
+	int ret;
+
+	kc_clear_entry(entry);
+
+	memcpy(entry->key, key, key_size);
+	entry->key_size = key_size;
+
+	memcpy(entry->salt, salt, salt_size);
+	entry->salt_size = salt_size;
+
+	/* Mark entry as no longer free before releasing the lock */
+	entry->state = ACTIVE_ICE_PRELOAD;
+	kc_spin_unlock();
+
+	ret = qti_pfk_ice_set_key(entry->key_index, entry->key,
+			entry->salt, s_type);
+
+	kc_spin_lock();
+	return ret;
+}
+
+/**
+ * pfk_kc_init() - init function
+ *
+ * Return 0 in case of success, error otherwise
+ */
+int pfk_kc_init(void)
+{
+	int i = 0;
+	struct kc_entry *entry = NULL;
+
+	kc_spin_lock();
+	for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+		entry = kc_entry_at_index(i);
+		entry->key_index = PFK_KC_STARTING_INDEX + i;
+	}
+	kc_ready = true;
+	kc_spin_unlock();
+
+	return 0;
+}
+
+/**
+ * pfk_kc_deinit() - deinit function
+ *
+ * Return 0 in case of success, error otherwise
+ */
+int pfk_kc_deinit(void)
+{
+	int res = pfk_kc_clear();
+	kc_ready = false;
+
+	return res;
+}
+
+/**
+ * pfk_kc_load_key_start() - retrieve the key from cache or add it if
+ * it's not there and return the ICE hw key index in @key_index.
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ * @salt: pointer to the salt
+ * @salt_size: the size of the salt
+ * @key_index: the pointer to key_index where the output will be stored
+ * @async: whether scm calls are allowed in the caller context
+ *
+ * If key is present in cache, then the key_index will be retrieved from cache.
+ * If it is not present, the oldest entry from kc table will be evicted,
+ * the key will be loaded to ICE via QSEE to the index that is the evicted
+ * entry number and stored in cache.
+ * Entry that is going to be used is marked as being used, it will mark
+ * as not being used when ICE finishes using it and pfk_kc_load_key_end
+ * will be invoked.
+ * As QSEE calls can only be made from a non-atomic context, setting @async
+ * to 'false' specifies that it is ok to make the calls in the current
+ * context. When @async is set and an SCM call would be needed, -EAGAIN is
+ * returned and the caller should retry from a non-atomic context.
+ *
+ * Return 0 in case of success, error otherwise
+ */
+int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
+		const unsigned char *salt, size_t salt_size, u32 *key_index,
+		bool async)
+{
+	int ret = 0;
+	struct kc_entry *entry = NULL;
+	bool entry_exists = false;
+
+	if (!kc_is_ready())
+		return -ENODEV;
+
+	if (!key || !salt || !key_index) {
+		pr_err("%s key/salt/key_index NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	if (key_size != PFK_KC_KEY_SIZE) {
+		pr_err("unsupported key size %zu\n", key_size);
+		return -EINVAL;
+	}
+
+	if (salt_size != PFK_KC_SALT_SIZE) {
+		pr_err("unsupported salt size %zu\n", salt_size);
+		return -EINVAL;
+	}
+
+	kc_spin_lock();
+
+	entry = kc_find_key(key, key_size, salt, salt_size);
+	if (!entry) {
+		if (async) {
+			pr_debug("%s task will populate entry\n", __func__);
+			kc_spin_unlock();
+			return -EAGAIN;
+		}
+
+		entry = kc_find_oldest_entry_non_locked();
+		if (!entry) {
+			/* could not find a single non locked entry,
+			 * return EBUSY to upper layers so that the
+			 * request will be rescheduled
+			 */
+			kc_spin_unlock();
+			return -EBUSY;
+		}
+	} else {
+		entry_exists = true;
+	}
+
+	pr_debug("entry with index %d is in state %d\n",
+		entry->key_index, entry->state);
+
+	switch (entry->state) {
+	case (INACTIVE):
+		if (entry_exists) {
+			kc_update_timestamp(entry);
+			entry->state = ACTIVE_ICE_LOADED;
+
+			if (!strcmp(s_type, (char *)PFK_UFS)) {
+				if (async)
+					entry->loaded_ref_cnt++;
+			} else {
+				entry->loaded_ref_cnt++;
+			}
+			break;
+		}
+		/* fall through: reuse this inactive entry for the new key */
+	case (FREE):
+		ret = kc_update_entry(entry, key, key_size, salt, salt_size);
+		if (ret) {
+			entry->state = SCM_ERROR;
+			entry->scm_error = ret;
+			pr_err("%s: key load error (%d)\n", __func__, ret);
+		} else {
+			kc_update_timestamp(entry);
+			entry->state = ACTIVE_ICE_LOADED;
+
+			/*
+			 * In case of UFS only increase ref cnt for async calls,
+			 * sync calls from within work thread do not pass
+			 * requests further to HW
+			 */
+			if (!strcmp(s_type, (char *)PFK_UFS)) {
+				if (async)
+					entry->loaded_ref_cnt++;
+			} else {
+				entry->loaded_ref_cnt++;
+			}
+		}
+		break;
+	case (ACTIVE_ICE_PRELOAD):
+	case (INACTIVE_INVALIDATING):
+		ret = -EAGAIN;
+		break;
+	case (ACTIVE_ICE_LOADED):
+		kc_update_timestamp(entry);
+
+		if (!strcmp(s_type, (char *)PFK_UFS)) {
+			if (async)
+				entry->loaded_ref_cnt++;
+		} else {
+			entry->loaded_ref_cnt++;
+		}
+		break;
+	case (SCM_ERROR):
+		ret = entry->scm_error;
+		kc_clear_entry(entry);
+		entry->state = FREE;
+		break;
+	default:
+		pr_err("invalid state %d for entry with key index %d\n",
+			entry->state, entry->key_index);
+		ret = -EINVAL;
+	}
+
+	*key_index = entry->key_index;
+	kc_spin_unlock();
+
+	return ret;
+}
+
+/**
+ * pfk_kc_load_key_end() - finish the key loading process that was started
+ *			   by pfk_kc_load_key_start(), by marking the entry
+ *			   as no longer in use
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ * @salt: pointer to the salt
+ * @salt_size: the size of the salt
+ *
+ */
+void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
+		const unsigned char *salt, size_t salt_size)
+{
+	struct kc_entry *entry = NULL;
+	struct task_struct *tmp_pending = NULL;
+	int ref_cnt = 0;
+
+	if (!kc_is_ready())
+		return;
+
+	if (!key || !salt)
+		return;
+
+	if (key_size != PFK_KC_KEY_SIZE)
+		return;
+
+	if (salt_size != PFK_KC_SALT_SIZE)
+		return;
+
+	kc_spin_lock();
+
+	entry = kc_find_key(key, key_size, salt, salt_size);
+	if (!entry) {
+		kc_spin_unlock();
+		pr_err("internal error, there should be an entry to unlock\n");
+
+		return;
+	}
+	ref_cnt = --entry->loaded_ref_cnt;
+
+	if (ref_cnt < 0)
+		pr_err("internal error, ref count should never be negative\n");
+
+	if (!ref_cnt) {
+		entry->state = INACTIVE;
+		/*
+		 * wake-up invalidation if it's waiting
+		 * for the entry to be released
+		 */
+		if (entry->thread_pending) {
+			tmp_pending = entry->thread_pending;
+			entry->thread_pending = NULL;
+
+			kc_spin_unlock();
+			wake_up_process(tmp_pending);
+			return;
+		}
+	}
+
+	kc_spin_unlock();
+}
+
+/**
+ * pfk_kc_remove_key_with_salt() - remove the key from cache and ICE engine
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ * @salt: pointer to the salt
+ * @salt_size: the size of the salt
+ *
+ * Return 0 in case of success, error otherwise (also in case of a
+ * non-existing key)
+ */
+int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
+		const unsigned char *salt, size_t salt_size)
+{
+	struct kc_entry *entry = NULL;
+	int res = 0;
+
+	if (!kc_is_ready())
+		return -ENODEV;
+
+	if (!key)
+		return -EINVAL;
+
+	if (!salt)
+		return -EINVAL;
+
+	if (key_size != PFK_KC_KEY_SIZE)
+		return -EINVAL;
+
+	if (salt_size != PFK_KC_SALT_SIZE)
+		return -EINVAL;
+
+	kc_spin_lock();
+
+	entry = kc_find_key(key, key_size, salt, salt_size);
+	if (!entry) {
+		pr_debug("%s: key does not exist\n", __func__);
+		kc_spin_unlock();
+		return -EINVAL;
+	}
+
+	res = kc_entry_start_invalidating(entry);
+	if (res != 0) {
+		kc_spin_unlock();
+		return res;
+	}
+	kc_clear_entry(entry);
+
+	kc_spin_unlock();
+
+	qti_pfk_ice_invalidate_key(entry->key_index, s_type);
+
+	kc_spin_lock();
+	kc_entry_finish_invalidating(entry);
+	kc_spin_unlock();
+
+	return 0;
+}
+
+/**
+ * pfk_kc_remove_key() - remove the key from cache and from ICE engine
+ * when no salt is available. Only the key part is matched; if several
+ * entries match, all of them are removed
+ *
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ *
+ * Return 0 in case of success, error otherwise (also for non-existing key)
+ */
+int pfk_kc_remove_key(const unsigned char *key, size_t key_size)
+{
+	struct kc_entry *entry = NULL;
+	int index = 0;
+	int temp_indexes[PFK_KC_TABLE_SIZE] = {0};
+	int temp_indexes_size = 0;
+	int i = 0;
+	int res = 0;
+
+	if (!kc_is_ready())
+		return -ENODEV;
+
+	if (!key)
+		return -EINVAL;
+
+	if (key_size != PFK_KC_KEY_SIZE)
+		return -EINVAL;
+
+	memset(temp_indexes, -1, sizeof(temp_indexes));
+
+	kc_spin_lock();
+
+	entry = kc_find_key_at_index(key, key_size, NULL, 0, &index);
+	if (!entry) {
+		pr_err("%s: key does not exist\n", __func__);
+		kc_spin_unlock();
+		return -EINVAL;
+	}
+
+	res = kc_entry_start_invalidating(entry);
+	if (res != 0) {
+		kc_spin_unlock();
+		return res;
+	}
+
+	temp_indexes[temp_indexes_size++] = index;
+	kc_clear_entry(entry);
+
+	/* let's clean additional entries with the same key if there are any */
+	do {
+		index++;
+		entry = kc_find_key_at_index(key, key_size, NULL, 0, &index);
+		if (!entry)
+			break;
+
+		res = kc_entry_start_invalidating(entry);
+		if (res != 0) {
+			kc_spin_unlock();
+			goto out;
+		}
+
+		temp_indexes[temp_indexes_size++] = index;
+
+		kc_clear_entry(entry);
+
+	} while (true);
+
+	kc_spin_unlock();
+
+	temp_indexes_size--;
+	for (i = temp_indexes_size; i >= 0 ; i--)
+		qti_pfk_ice_invalidate_key(
+			kc_entry_at_index(temp_indexes[i])->key_index,
+					s_type);
+
+	/* fall through */
+	res = 0;
+
+out:
+	kc_spin_lock();
+	for (i = temp_indexes_size; i >= 0 ; i--)
+		kc_entry_finish_invalidating(
+				kc_entry_at_index(temp_indexes[i]));
+	kc_spin_unlock();
+
+	return res;
+}
+
+/**
+ * pfk_kc_clear() - clear the table and remove all keys from ICE
+ *
+ * Return 0 on success, error otherwise
+ *
+ */
+int pfk_kc_clear(void)
+{
+	struct kc_entry *entry = NULL;
+	int i = 0;
+	int res = 0;
+
+	if (!kc_is_ready())
+		return -ENODEV;
+
+	kc_spin_lock();
+	for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+		entry = kc_entry_at_index(i);
+		res = kc_entry_start_invalidating(entry);
+		if (res != 0) {
+			kc_spin_unlock();
+			goto out;
+		}
+		kc_clear_entry(entry);
+	}
+	kc_spin_unlock();
+
+	for (i = 0; i < PFK_KC_TABLE_SIZE; i++)
+		qti_pfk_ice_invalidate_key(kc_entry_at_index(i)->key_index,
+					s_type);
+
+	/* fall through */
+	res = 0;
+out:
+	kc_spin_lock();
+	for (i = 0; i < PFK_KC_TABLE_SIZE; i++)
+		kc_entry_finish_invalidating(kc_entry_at_index(i));
+	kc_spin_unlock();
+
+	return res;
+}
+
+/**
+ * pfk_kc_clear_on_reset() - clear the key cache table on an ICE reset
+ * The assumption is that at this point we don't have any pending transactions.
+ * Also, there is no need to clear keys from ICE itself.
+ *
+ * Returns nothing
+ *
+ */
+void pfk_kc_clear_on_reset(void)
+{
+	struct kc_entry *entry = NULL;
+	int i = 0;
+
+	if (!kc_is_ready())
+		return;
+
+	kc_spin_lock();
+	for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+		entry = kc_entry_at_index(i);
+		kc_clear_entry(entry);
+	}
+	kc_spin_unlock();
+}
+
+static int pfk_kc_find_storage_type(char **device)
+{
+	char boot[20] = {'\0'};
+	char *match = (char *)strnstr(saved_command_line,
+				"androidboot.bootdevice=",
+				strlen(saved_command_line));
+	if (match) {
+		memcpy(boot, (match + strlen("androidboot.bootdevice=")),
+			sizeof(boot) - 1);
+		if (strnstr(boot, PFK_UFS, strlen(boot)))
+			*device = PFK_UFS;
+
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int __init pfk_kc_pre_init(void)
+{
+	return pfk_kc_find_storage_type(&s_type);
+}
+
+static void __exit pfk_kc_exit(void)
+{
+	s_type = NULL;
+}
+
+module_init(pfk_kc_pre_init);
+module_exit(pfk_kc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Per-File-Key-KC driver");
diff --git a/security/pfe/pfk_kc.h b/security/pfe/pfk_kc.h
new file mode 100644
index 0000000..6adeee2
--- /dev/null
+++ b/security/pfe/pfk_kc.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef PFK_KC_H_
+#define PFK_KC_H_
+
+#include <linux/types.h>
+
+int pfk_kc_init(void);
+int pfk_kc_deinit(void);
+int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
+		const unsigned char *salt, size_t salt_size, u32 *key_index,
+		bool async);
+void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
+		const unsigned char *salt, size_t salt_size);
+int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
+		const unsigned char *salt, size_t salt_size);
+int pfk_kc_remove_key(const unsigned char *key, size_t key_size);
+int pfk_kc_clear(void);
+void pfk_kc_clear_on_reset(void);
+extern char *saved_command_line;
+
+
+#endif /* PFK_KC_H_ */
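
The pfk_kc_load_key_start() comment above describes a start/end protocol with an -EAGAIN retry path. A hedged caller-side sketch of that protocol follows; it is not part of the patch, and the helper names and the 32-byte sizes are assumptions (mirroring PFK_KC_KEY_SIZE/PFK_KC_SALT_SIZE).

/* Illustrative sketch only -- not part of this patch. */
#include <linux/errno.h>
#include <linux/types.h>
#include "pfk_kc.h"

/*
 * Hypothetical caller: look up (or load) the ICE slot for a 32-byte
 * key/salt pair before submitting a request to the block layer.
 */
static int example_get_ice_slot(const unsigned char *key,
				const unsigned char *salt,
				u32 *ice_index, bool atomic_ctx)
{
	int ret;

	ret = pfk_kc_load_key_start(key, 32, salt, 32, ice_index,
				    atomic_ctx /* async */);
	if (ret == -EAGAIN) {
		/*
		 * Key is not cached and SCM calls are not allowed here;
		 * the caller must retry from a non-atomic context.
		 */
		return ret;
	}
	return ret;
}

/* Hypothetical completion path: drop the reference taken above. */
static void example_put_ice_slot(const unsigned char *key,
				 const unsigned char *salt)
{
	pfk_kc_load_key_end(key, 32, salt, 32);
}
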
diff --git a/security/security.c b/security/security.c
index e43c50c..35c8dce 100644
--- a/security/security.c
+++ b/security/security.c
@@ -525,6 +525,14 @@
 }
 EXPORT_SYMBOL_GPL(security_inode_create);
 
+int security_inode_post_create(struct inode *dir, struct dentry *dentry,
+			       umode_t mode)
+{
+	if (unlikely(IS_PRIVATE(dir)))
+		return 0;
+	return call_int_hook(inode_post_create, 0, dir, dentry, mode);
+}
+
 int security_inode_link(struct dentry *old_dentry, struct inode *dir,
 			 struct dentry *new_dentry)
 {
@@ -1700,6 +1708,8 @@
 	.inode_init_security =
 		LIST_HEAD_INIT(security_hook_heads.inode_init_security),
 	.inode_create =	LIST_HEAD_INIT(security_hook_heads.inode_create),
+	.inode_post_create =
+		LIST_HEAD_INIT(security_hook_heads.inode_post_create),
 	.inode_link =	LIST_HEAD_INIT(security_hook_heads.inode_link),
 	.inode_unlink =	LIST_HEAD_INIT(security_hook_heads.inode_unlink),
 	.inode_symlink =
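
For context on the new inode_post_create hook, a hypothetical security module could register a handler roughly as sketched below. This is not part of the patch; the module and hook body are placeholders, and it assumes the security_add_hooks()/LSM_HOOK_INIT registration interface of this kernel generation.

/* Illustrative sketch only -- not part of this patch. */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/lsm_hooks.h>

static int example_inode_post_create(struct inode *dir,
				     struct dentry *dentry, umode_t mode)
{
	/* e.g. attach per-file-encryption state once the inode exists */
	return 0;
}

static struct security_hook_list example_hooks[] = {
	LSM_HOOK_INIT(inode_post_create, example_inode_post_create),
};

static int __init example_lsm_init(void)
{
	security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks));
	return 0;
}
security_initcall(example_lsm_init);
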
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 52f3c55..84d9a2e 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -348,26 +348,27 @@
 	struct avc_xperms_decision_node *xpd_node;
 	struct extended_perms_decision *xpd;
 
-	xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep, GFP_NOWAIT);
+	xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep,
+			GFP_NOWAIT | __GFP_NOWARN);
 	if (!xpd_node)
 		return NULL;
 
 	xpd = &xpd_node->xpd;
 	if (which & XPERMS_ALLOWED) {
 		xpd->allowed = kmem_cache_zalloc(avc_xperms_data_cachep,
-						GFP_NOWAIT);
+						GFP_NOWAIT | __GFP_NOWARN);
 		if (!xpd->allowed)
 			goto error;
 	}
 	if (which & XPERMS_AUDITALLOW) {
 		xpd->auditallow = kmem_cache_zalloc(avc_xperms_data_cachep,
-						GFP_NOWAIT);
+						GFP_NOWAIT | __GFP_NOWARN);
 		if (!xpd->auditallow)
 			goto error;
 	}
 	if (which & XPERMS_DONTAUDIT) {
 		xpd->dontaudit = kmem_cache_zalloc(avc_xperms_data_cachep,
-						GFP_NOWAIT);
+						GFP_NOWAIT | __GFP_NOWARN);
 		if (!xpd->dontaudit)
 			goto error;
 	}
@@ -395,7 +396,8 @@
 {
 	struct avc_xperms_node *xp_node;
 
-	xp_node = kmem_cache_zalloc(avc_xperms_cachep, GFP_NOWAIT);
+	xp_node = kmem_cache_zalloc(avc_xperms_cachep,
+			GFP_NOWAIT | __GFP_NOWARN);
 	if (!xp_node)
 		return xp_node;
 	INIT_LIST_HEAD(&xp_node->xpd_head);
@@ -548,7 +550,7 @@
 {
 	struct avc_node *node;
 
-	node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT);
+	node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT | __GFP_NOWARN);
 	if (!node)
 		goto out;
 
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index da939fd..5f3fa60 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -407,18 +407,6 @@
 	kfree(sbsec);
 }
 
-/* The file system's label must be initialized prior to use. */
-
-static const char *labeling_behaviors[7] = {
-	"uses xattr",
-	"uses transition SIDs",
-	"uses task SIDs",
-	"uses genfs_contexts",
-	"not configured for labeling",
-	"uses mountpoint labeling",
-	"uses native labeling",
-};
-
 static inline int inode_doinit(struct inode *inode)
 {
 	return inode_doinit_with_dentry(inode, NULL);
@@ -530,10 +518,6 @@
 		}
 	}
 
-	if (sbsec->behavior > ARRAY_SIZE(labeling_behaviors))
-		printk(KERN_ERR "SELinux: initialized (dev %s, type %s), unknown behavior\n",
-		       sb->s_id, sb->s_type->name);
-
 	sbsec->flags |= SE_SBINITIALIZED;
 	if (selinux_is_sblabel_mnt(sb))
 		sbsec->flags |= SBLABEL_MNT;
@@ -1108,10 +1092,9 @@
 		goto out_err;
 
 	opts->mnt_opts_flags = kcalloc(NUM_SEL_MNT_OPTS, sizeof(int), GFP_ATOMIC);
-	if (!opts->mnt_opts_flags) {
-		kfree(opts->mnt_opts);
+	if (!opts->mnt_opts_flags)
 		goto out_err;
-	}
+
 
 	if (fscontext) {
 		opts->mnt_opts[num_mnt_opts] = fscontext;
@@ -1134,6 +1117,7 @@
 	return 0;
 
 out_err:
+	security_free_mnt_opts(opts);
 	kfree(context);
 	kfree(defcontext);
 	kfree(fscontext);
@@ -2062,8 +2046,9 @@
 static inline u32 open_file_to_av(struct file *file)
 {
 	u32 av = file_to_av(file);
+	struct inode *inode = file_inode(file);
 
-	if (selinux_policycap_openperm)
+	if (selinux_policycap_openperm && inode->i_sb->s_magic != SOCKFS_MAGIC)
 		av |= FILE__OPEN;
 
 	return av;
@@ -3066,6 +3051,7 @@
 static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
 {
 	const struct cred *cred = current_cred();
+	struct inode *inode = d_backing_inode(dentry);
 	unsigned int ia_valid = iattr->ia_valid;
 	__u32 av = FILE__WRITE;
 
@@ -3081,8 +3067,10 @@
 			ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_TIMES_SET))
 		return dentry_has_perm(cred, dentry, FILE__SETATTR);
 
-	if (selinux_policycap_openperm && (ia_valid & ATTR_SIZE)
-			&& !(ia_valid & ATTR_FILE))
+	if (selinux_policycap_openperm &&
+	    inode->i_sb->s_magic != SOCKFS_MAGIC &&
+	    (ia_valid & ATTR_SIZE) &&
+	    !(ia_valid & ATTR_FILE))
 		av |= FILE__OPEN;
 
 	return dentry_has_perm(cred, dentry, av);
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index 43535cd..60cdcf4 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -25,8 +25,9 @@
 #include <linux/in.h>
 #include <linux/spinlock.h>
 #include <net/net_namespace.h>
-#include "flask.h"
-#include "avc.h"
+//#include "flask.h"
+//#include "avc.h"
+#include "security.h"
 
 struct task_security_struct {
 	u32 osid;		/* SID prior to last execve */
@@ -52,6 +53,8 @@
 	u32 sid;		/* SID of this object */
 	u16 sclass;		/* security class of this object */
 	unsigned char initialized;	/* initialization flag */
+	u32 tag;		/* Per-File-Encryption tag */
+	void *pfk_data; /* Per-File-Key data from ecryptfs */
 	struct mutex lock;
 };
 
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 308a286..b8e98c1 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -12,7 +12,6 @@
 #include <linux/dcache.h>
 #include <linux/magic.h>
 #include <linux/types.h>
-#include "flask.h"
 
 #define SECSID_NULL			0x00000000 /* unspecified SID */
 #define SECSID_WILD			0xffffffff /* wildcard SID */
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 66ea81c..a6b0970 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -154,7 +154,7 @@
 		}
 
 		k = 0;
-		while (p_in->perms && p_in->perms[k]) {
+		while (p_in->perms[k]) {
 			/* An empty permission string skips ahead */
 			if (!*p_in->perms[k]) {
 				k++;
@@ -1434,7 +1434,7 @@
 				      scontext_len, &context, def_sid);
 	if (rc == -EINVAL && force) {
 		context.str = str;
-		context.len = scontext_len;
+		context.len = strlen(str) + 1;
 		str = NULL;
 	} else if (rc)
 		goto out_unlock;
diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c
index 1fa7076..84ee29c 100644
--- a/sound/core/control_compat.c
+++ b/sound/core/control_compat.c
@@ -400,8 +400,7 @@
 	if (copy_from_user(&data->id, &data32->id, sizeof(data->id)) ||
 	    copy_from_user(&data->type, &data32->type, 3 * sizeof(u32)))
 		goto error;
-	if (get_user(data->owner, &data32->owner) ||
-	    get_user(data->type, &data32->type))
+	if (get_user(data->owner, &data32->owner))
 		goto error;
 	switch (data->type) {
 	case SNDRV_CTL_ELEM_TYPE_BOOLEAN:
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index 3e7c357..cfb8f58 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -834,8 +834,25 @@
 	return snd_pcm_hw_param_near(substream, params, SNDRV_PCM_HW_PARAM_RATE, best_rate, NULL);
 }
 
-static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
-				     bool trylock)
+/* parameter locking: returns immediately if tried during streaming */
+static int lock_params(struct snd_pcm_runtime *runtime)
+{
+	if (mutex_lock_interruptible(&runtime->oss.params_lock))
+		return -ERESTARTSYS;
+	if (atomic_read(&runtime->oss.rw_ref)) {
+		mutex_unlock(&runtime->oss.params_lock);
+		return -EBUSY;
+	}
+	return 0;
+}
+
+static void unlock_params(struct snd_pcm_runtime *runtime)
+{
+	mutex_unlock(&runtime->oss.params_lock);
+}
+
+/* call with params_lock held */
+static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	struct snd_pcm_hw_params *params, *sparams;
@@ -849,11 +866,8 @@
 	struct snd_mask sformat_mask;
 	struct snd_mask mask;
 
-	if (trylock) {
-		if (!(mutex_trylock(&runtime->oss.params_lock)))
-			return -EAGAIN;
-	} else if (mutex_lock_interruptible(&runtime->oss.params_lock))
-		return -EINTR;
+	if (!runtime->oss.params)
+		return 0;
 	sw_params = kzalloc(sizeof(*sw_params), GFP_KERNEL);
 	params = kmalloc(sizeof(*params), GFP_KERNEL);
 	sparams = kmalloc(sizeof(*sparams), GFP_KERNEL);
@@ -1079,6 +1093,23 @@
 	kfree(sw_params);
 	kfree(params);
 	kfree(sparams);
+	return err;
+}
+
+/* this one takes the lock by itself */
+static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
+				     bool trylock)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	int err;
+
+	if (trylock) {
+		if (!(mutex_trylock(&runtime->oss.params_lock)))
+			return -EAGAIN;
+	} else if (mutex_lock_interruptible(&runtime->oss.params_lock))
+		return -ERESTARTSYS;
+
+	err = snd_pcm_oss_change_params_locked(substream);
 	mutex_unlock(&runtime->oss.params_lock);
 	return err;
 }
@@ -1107,6 +1138,10 @@
 	return 0;
 }
 
+/* call with params_lock held */
+/* NOTE: this always calls PREPARE no matter whether
+ * runtime->oss.prepare is set or not
+ */
 static int snd_pcm_oss_prepare(struct snd_pcm_substream *substream)
 {
 	int err;
@@ -1131,8 +1166,6 @@
 	struct snd_pcm_runtime *runtime;
 	int err;
 
-	if (substream == NULL)
-		return 0;
 	runtime = substream->runtime;
 	if (runtime->oss.params) {
 		err = snd_pcm_oss_change_params(substream, false);
@@ -1140,6 +1173,29 @@
 			return err;
 	}
 	if (runtime->oss.prepare) {
+		if (mutex_lock_interruptible(&runtime->oss.params_lock))
+			return -ERESTARTSYS;
+		err = snd_pcm_oss_prepare(substream);
+		mutex_unlock(&runtime->oss.params_lock);
+		if (err < 0)
+			return err;
+	}
+	return 0;
+}
+
+/* call with params_lock held */
+static int snd_pcm_oss_make_ready_locked(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime;
+	int err;
+
+	runtime = substream->runtime;
+	if (runtime->oss.params) {
+		err = snd_pcm_oss_change_params_locked(substream);
+		if (err < 0)
+			return err;
+	}
+	if (runtime->oss.prepare) {
 		err = snd_pcm_oss_prepare(substream);
 		if (err < 0)
 			return err;
@@ -1361,19 +1417,21 @@
 static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const char __user *buf, size_t bytes)
 {
 	size_t xfer = 0;
-	ssize_t tmp;
+	ssize_t tmp = 0;
 	struct snd_pcm_runtime *runtime = substream->runtime;
 
 	if (atomic_read(&substream->mmap_count))
 		return -ENXIO;
 
-	if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
-		return tmp;
+	atomic_inc(&runtime->oss.rw_ref);
 	while (bytes > 0) {
 		if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
 			tmp = -ERESTARTSYS;
 			break;
 		}
+		tmp = snd_pcm_oss_make_ready_locked(substream);
+		if (tmp < 0)
+			goto err;
 		if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
 			tmp = bytes;
 			if (tmp + runtime->oss.buffer_used > runtime->oss.period_bytes)
@@ -1429,6 +1487,7 @@
 		}
 		tmp = 0;
 	}
+	atomic_dec(&runtime->oss.rw_ref);
 	return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
 }
 
@@ -1468,19 +1527,21 @@
 static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __user *buf, size_t bytes)
 {
 	size_t xfer = 0;
-	ssize_t tmp;
+	ssize_t tmp = 0;
 	struct snd_pcm_runtime *runtime = substream->runtime;
 
 	if (atomic_read(&substream->mmap_count))
 		return -ENXIO;
 
-	if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
-		return tmp;
+	atomic_inc(&runtime->oss.rw_ref);
 	while (bytes > 0) {
 		if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
 			tmp = -ERESTARTSYS;
 			break;
 		}
+		tmp = snd_pcm_oss_make_ready_locked(substream);
+		if (tmp < 0)
+			goto err;
 		if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
 			if (runtime->oss.buffer_used == 0) {
 				tmp = snd_pcm_oss_read2(substream, runtime->oss.buffer, runtime->oss.period_bytes, 1);
@@ -1521,6 +1582,7 @@
 		}
 		tmp = 0;
 	}
+	atomic_dec(&runtime->oss.rw_ref);
 	return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
 }
 
@@ -1536,10 +1598,12 @@
 			continue;
 		runtime = substream->runtime;
 		snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
+		mutex_lock(&runtime->oss.params_lock);
 		runtime->oss.prepare = 1;
 		runtime->oss.buffer_used = 0;
 		runtime->oss.prev_hw_ptr_period = 0;
 		runtime->oss.period_ptr = 0;
+		mutex_unlock(&runtime->oss.params_lock);
 	}
 	return 0;
 }
@@ -1625,9 +1689,13 @@
 			goto __direct;
 		if ((err = snd_pcm_oss_make_ready(substream)) < 0)
 			return err;
+		atomic_inc(&runtime->oss.rw_ref);
+		if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
+			atomic_dec(&runtime->oss.rw_ref);
+			return -ERESTARTSYS;
+		}
 		format = snd_pcm_oss_format_from(runtime->oss.format);
 		width = snd_pcm_format_physical_width(format);
-		mutex_lock(&runtime->oss.params_lock);
 		if (runtime->oss.buffer_used > 0) {
 #ifdef OSS_DEBUG
 			pcm_dbg(substream->pcm, "sync: buffer_used\n");
@@ -1637,10 +1705,8 @@
 						   runtime->oss.buffer + runtime->oss.buffer_used,
 						   size);
 			err = snd_pcm_oss_sync1(substream, runtime->oss.period_bytes);
-			if (err < 0) {
-				mutex_unlock(&runtime->oss.params_lock);
-				return err;
-			}
+			if (err < 0)
+				goto unlock;
 		} else if (runtime->oss.period_ptr > 0) {
 #ifdef OSS_DEBUG
 			pcm_dbg(substream->pcm, "sync: period_ptr\n");
@@ -1650,10 +1716,8 @@
 						   runtime->oss.buffer,
 						   size * 8 / width);
 			err = snd_pcm_oss_sync1(substream, size);
-			if (err < 0) {
-				mutex_unlock(&runtime->oss.params_lock);
-				return err;
-			}
+			if (err < 0)
+				goto unlock;
 		}
 		/*
 		 * The ALSA's period might be a bit large than OSS one.
@@ -1684,7 +1748,11 @@
 				snd_pcm_lib_writev(substream, buffers, size);
 			}
 		}
+unlock:
 		mutex_unlock(&runtime->oss.params_lock);
+		atomic_dec(&runtime->oss.rw_ref);
+		if (err < 0)
+			return err;
 		/*
 		 * finish sync: drain the buffer
 		 */
@@ -1695,7 +1763,9 @@
 		substream->f_flags = saved_f_flags;
 		if (err < 0)
 			return err;
+		mutex_lock(&runtime->oss.params_lock);
 		runtime->oss.prepare = 1;
+		mutex_unlock(&runtime->oss.params_lock);
 	}
 
 	substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE];
@@ -1706,8 +1776,10 @@
 		err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
 		if (err < 0)
 			return err;
+		mutex_lock(&runtime->oss.params_lock);
 		runtime->oss.buffer_used = 0;
 		runtime->oss.prepare = 1;
+		mutex_unlock(&runtime->oss.params_lock);
 	}
 	return 0;
 }
@@ -1719,6 +1791,8 @@
 	for (idx = 1; idx >= 0; --idx) {
 		struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
 		struct snd_pcm_runtime *runtime;
+		int err;
+
 		if (substream == NULL)
 			continue;
 		runtime = substream->runtime;
@@ -1726,10 +1800,14 @@
 			rate = 1000;
 		else if (rate > 192000)
 			rate = 192000;
+		err = lock_params(runtime);
+		if (err < 0)
+			return err;
 		if (runtime->oss.rate != rate) {
 			runtime->oss.params = 1;
 			runtime->oss.rate = rate;
 		}
+		unlock_params(runtime);
 	}
 	return snd_pcm_oss_get_rate(pcm_oss_file);
 }
@@ -1754,13 +1832,19 @@
 	for (idx = 1; idx >= 0; --idx) {
 		struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
 		struct snd_pcm_runtime *runtime;
+		int err;
+
 		if (substream == NULL)
 			continue;
 		runtime = substream->runtime;
+		err = lock_params(runtime);
+		if (err < 0)
+			return err;
 		if (runtime->oss.channels != channels) {
 			runtime->oss.params = 1;
 			runtime->oss.channels = channels;
 		}
+		unlock_params(runtime);
 	}
 	return snd_pcm_oss_get_channels(pcm_oss_file);
 }
@@ -1833,6 +1917,7 @@
 static int snd_pcm_oss_set_format(struct snd_pcm_oss_file *pcm_oss_file, int format)
 {
 	int formats, idx;
+	int err;
 	
 	if (format != AFMT_QUERY) {
 		formats = snd_pcm_oss_get_formats(pcm_oss_file);
@@ -1846,10 +1931,14 @@
 			if (substream == NULL)
 				continue;
 			runtime = substream->runtime;
+			err = lock_params(runtime);
+			if (err < 0)
+				return err;
 			if (runtime->oss.format != format) {
 				runtime->oss.params = 1;
 				runtime->oss.format = format;
 			}
+			unlock_params(runtime);
 		}
 	}
 	return snd_pcm_oss_get_format(pcm_oss_file);
@@ -1869,8 +1958,6 @@
 {
 	struct snd_pcm_runtime *runtime;
 
-	if (substream == NULL)
-		return 0;
 	runtime = substream->runtime;
 	if (subdivide == 0) {
 		subdivide = runtime->oss.subdivision;
@@ -1894,9 +1981,17 @@
 
 	for (idx = 1; idx >= 0; --idx) {
 		struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
+		struct snd_pcm_runtime *runtime;
+
 		if (substream == NULL)
 			continue;
-		if ((err = snd_pcm_oss_set_subdivide1(substream, subdivide)) < 0)
+		runtime = substream->runtime;
+		err = lock_params(runtime);
+		if (err < 0)
+			return err;
+		err = snd_pcm_oss_set_subdivide1(substream, subdivide);
+		unlock_params(runtime);
+		if (err < 0)
 			return err;
 	}
 	return err;
@@ -1906,8 +2001,6 @@
 {
 	struct snd_pcm_runtime *runtime;
 
-	if (substream == NULL)
-		return 0;
 	runtime = substream->runtime;
 	if (runtime->oss.subdivision || runtime->oss.fragshift)
 		return -EINVAL;
@@ -1927,9 +2020,17 @@
 
 	for (idx = 1; idx >= 0; --idx) {
 		struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
+		struct snd_pcm_runtime *runtime;
+
 		if (substream == NULL)
 			continue;
-		if ((err = snd_pcm_oss_set_fragment1(substream, val)) < 0)
+		runtime = substream->runtime;
+		err = lock_params(runtime);
+		if (err < 0)
+			return err;
+		err = snd_pcm_oss_set_fragment1(substream, val);
+		unlock_params(runtime);
+		if (err < 0)
 			return err;
 	}
 	return err;
@@ -2013,6 +2114,9 @@
 	}
       	if (psubstream) {
       		runtime = psubstream->runtime;
+		cmd = 0;
+		if (mutex_lock_interruptible(&runtime->oss.params_lock))
+			return -ERESTARTSYS;
 		if (trigger & PCM_ENABLE_OUTPUT) {
 			if (runtime->oss.trigger)
 				goto _skip1;
@@ -2030,13 +2134,19 @@
 			cmd = SNDRV_PCM_IOCTL_DROP;
 			runtime->oss.prepare = 1;
 		}
-		err = snd_pcm_kernel_ioctl(psubstream, cmd, NULL);
-		if (err < 0)
-			return err;
-	}
  _skip1:
+		mutex_unlock(&runtime->oss.params_lock);
+		if (cmd) {
+			err = snd_pcm_kernel_ioctl(psubstream, cmd, NULL);
+			if (err < 0)
+				return err;
+		}
+	}
 	if (csubstream) {
       		runtime = csubstream->runtime;
+		cmd = 0;
+		if (mutex_lock_interruptible(&runtime->oss.params_lock))
+			return -ERESTARTSYS;
 		if (trigger & PCM_ENABLE_INPUT) {
 			if (runtime->oss.trigger)
 				goto _skip2;
@@ -2051,11 +2161,14 @@
 			cmd = SNDRV_PCM_IOCTL_DROP;
 			runtime->oss.prepare = 1;
 		}
-		err = snd_pcm_kernel_ioctl(csubstream, cmd, NULL);
-		if (err < 0)
-			return err;
-	}
  _skip2:
+		mutex_unlock(&runtime->oss.params_lock);
+		if (cmd) {
+			err = snd_pcm_kernel_ioctl(csubstream, cmd, NULL);
+			if (err < 0)
+				return err;
+		}
+	}
 	return 0;
 }
 
@@ -2307,6 +2420,7 @@
 	runtime->oss.maxfrags = 0;
 	runtime->oss.subdivision = 0;
 	substream->pcm_release = snd_pcm_oss_release_substream;
+	atomic_set(&runtime->oss.rw_ref, 0);
 }
 
 static int snd_pcm_oss_release_file(struct snd_pcm_oss_file *pcm_oss_file)
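
The locking pattern this patch introduces for the OSS parameter setters is easier to see in isolation. A hedged sketch follows; the helper is hypothetical and assumes it is compiled inside pcm_oss.c, where lock_params()/unlock_params() are file-local. It mirrors the snd_pcm_oss_set_rate() change above.

/* Illustrative sketch only -- not part of this patch. */
static int example_set_oss_rate(struct snd_pcm_runtime *runtime, int rate)
{
	int err;

	err = lock_params(runtime);	/* -EBUSY while oss.rw_ref is held */
	if (err < 0)
		return err;
	if (runtime->oss.rate != rate) {
		runtime->oss.params = 1;	/* defer the real hw_params work */
		runtime->oss.rate = rate;
	}
	unlock_params(runtime);
	return 0;
}
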
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 48f6aee..d79a04e 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -28,6 +28,7 @@
 #include <sound/core.h>
 #include <sound/minors.h>
 #include <sound/pcm.h>
+#include <sound/timer.h>
 #include <sound/control.h>
 #include <sound/info.h>
 
@@ -1036,8 +1037,13 @@
 	snd_free_pages((void*)runtime->control,
 		       PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)));
 	kfree(runtime->hw_constraints.rules);
-	kfree(runtime);
+	/* Avoid concurrent access to runtime via PCM timer interface */
+	if (substream->timer)
+		spin_lock_irq(&substream->timer->lock);
 	substream->runtime = NULL;
+	if (substream->timer)
+		spin_unlock_irq(&substream->timer->lock);
+	kfree(runtime);
 	put_pid(substream->pid);
 	substream->pid = NULL;
 	substream->pstr->substream_opened--;
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
index e1512ae..0c81e26 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -426,6 +426,8 @@
 		return -ENOTTY;
 	if (substream->stream != dir)
 		return -EINVAL;
+	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN)
+		return -EBADFD;
 
 	if ((ch = substream->runtime->channels) > 128)
 		return -EINVAL;
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 8e5649a..2df7e6b 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -2765,6 +2765,7 @@
 	sync_ptr.s.status.hw_ptr = status->hw_ptr;
 	sync_ptr.s.status.tstamp = status->tstamp;
 	sync_ptr.s.status.suspended_state = status->suspended_state;
+	sync_ptr.s.status.audio_tstamp = status->audio_tstamp;
 	snd_pcm_stream_unlock_irq(substream);
 	if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
 		return -EFAULT;
@@ -3460,7 +3461,7 @@
 					 area,
 					 substream->runtime->dma_area,
 					 substream->runtime->dma_addr,
-					 area->vm_end - area->vm_start);
+					 substream->runtime->dma_bytes);
 #endif /* CONFIG_X86 */
 	/* mmap with fault handler */
 	area->vm_ops = &snd_pcm_vm_ops_data_fault;
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 5143801..180261d 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -976,9 +976,9 @@
 	struct snd_rawmidi_runtime *runtime = substream->runtime;
 	unsigned long appl_ptr;
 
-	spin_lock_irqsave(&runtime->lock, flags);
 	if (userbuf)
 		mutex_lock(&runtime->realloc_mutex);
+	spin_lock_irqsave(&runtime->lock, flags);
 	while (count > 0 && runtime->avail) {
 		count1 = runtime->buffer_size - runtime->appl_ptr;
 		if (count1 > count)
diff --git a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c
index f69764d..e30e30b 100644
--- a/sound/core/rawmidi_compat.c
+++ b/sound/core/rawmidi_compat.c
@@ -36,8 +36,6 @@
 	struct snd_rawmidi_params params;
 	unsigned int val;
 
-	if (rfile->output == NULL)
-		return -EINVAL;
 	if (get_user(params.stream, &src->stream) ||
 	    get_user(params.buffer_size, &src->buffer_size) ||
 	    get_user(params.avail_min, &src->avail_min) ||
@@ -46,8 +44,12 @@
 	params.no_active_sensing = val;
 	switch (params.stream) {
 	case SNDRV_RAWMIDI_STREAM_OUTPUT:
+		if (!rfile->output)
+			return -EINVAL;
 		return snd_rawmidi_output_params(rfile->output, &params);
 	case SNDRV_RAWMIDI_STREAM_INPUT:
+		if (!rfile->input)
+			return -EINVAL;
 		return snd_rawmidi_input_params(rfile->input, &params);
 	}
 	return -EINVAL;
@@ -67,16 +69,18 @@
 	int err;
 	struct snd_rawmidi_status status;
 
-	if (rfile->output == NULL)
-		return -EINVAL;
 	if (get_user(status.stream, &src->stream))
 		return -EFAULT;
 
 	switch (status.stream) {
 	case SNDRV_RAWMIDI_STREAM_OUTPUT:
+		if (!rfile->output)
+			return -EINVAL;
 		err = snd_rawmidi_output_status(rfile->output, &status);
 		break;
 	case SNDRV_RAWMIDI_STREAM_INPUT:
+		if (!rfile->input)
+			return -EINVAL;
 		err = snd_rawmidi_input_status(rfile->input, &status);
 		break;
 	default:
@@ -112,16 +116,18 @@
 	int err;
 	struct snd_rawmidi_status status;
 
-	if (rfile->output == NULL)
-		return -EINVAL;
 	if (get_user(status.stream, &src->stream))
 		return -EFAULT;
 
 	switch (status.stream) {
 	case SNDRV_RAWMIDI_STREAM_OUTPUT:
+		if (!rfile->output)
+			return -EINVAL;
 		err = snd_rawmidi_output_status(rfile->output, &status);
 		break;
 	case SNDRV_RAWMIDI_STREAM_INPUT:
+		if (!rfile->input)
+			return -EINVAL;
 		err = snd_rawmidi_input_status(rfile->input, &status);
 		break;
 	default:
diff --git a/sound/core/seq/oss/seq_oss_event.c b/sound/core/seq/oss/seq_oss_event.c
index c390886..86ca584 100644
--- a/sound/core/seq/oss/seq_oss_event.c
+++ b/sound/core/seq/oss/seq_oss_event.c
@@ -26,6 +26,7 @@
 #include <sound/seq_oss_legacy.h>
 #include "seq_oss_readq.h"
 #include "seq_oss_writeq.h"
+#include <linux/nospec.h>
 
 
 /*
@@ -287,10 +288,10 @@
 {
 	struct seq_oss_synthinfo *info;
 
-	if (!snd_seq_oss_synth_is_valid(dp, dev))
+	info = snd_seq_oss_synth_info(dp, dev);
+	if (!info)
 		return -ENXIO;
 
-	info = &dp->synths[dev];
 	switch (info->arg.event_passing) {
 	case SNDRV_SEQ_OSS_PROCESS_EVENTS:
 		if (! info->ch || ch < 0 || ch >= info->nr_voices) {
@@ -298,6 +299,7 @@
 			return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev);
 		}
 
+		ch = array_index_nospec(ch, info->nr_voices);
 		if (note == 255 && info->ch[ch].note >= 0) {
 			/* volume control */
 			int type;
@@ -347,10 +349,10 @@
 {
 	struct seq_oss_synthinfo *info;
 
-	if (!snd_seq_oss_synth_is_valid(dp, dev))
+	info = snd_seq_oss_synth_info(dp, dev);
+	if (!info)
 		return -ENXIO;
 
-	info = &dp->synths[dev];
 	switch (info->arg.event_passing) {
 	case SNDRV_SEQ_OSS_PROCESS_EVENTS:
 		if (! info->ch || ch < 0 || ch >= info->nr_voices) {
@@ -358,6 +360,7 @@
 			return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev);
 		}
 
+		ch = array_index_nospec(ch, info->nr_voices);
 		if (info->ch[ch].note >= 0) {
 			note = info->ch[ch].note;
 			info->ch[ch].vel = 0;
@@ -381,7 +384,7 @@
 static int
 set_note_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int note, int vel, struct snd_seq_event *ev)
 {
-	if (! snd_seq_oss_synth_is_valid(dp, dev))
+	if (!snd_seq_oss_synth_info(dp, dev))
 		return -ENXIO;
 	
 	ev->type = type;
@@ -399,7 +402,7 @@
 static int
 set_control_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int param, int val, struct snd_seq_event *ev)
 {
-	if (! snd_seq_oss_synth_is_valid(dp, dev))
+	if (!snd_seq_oss_synth_info(dp, dev))
 		return -ENXIO;
 	
 	ev->type = type;
diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
index b30b213..9debd1b 100644
--- a/sound/core/seq/oss/seq_oss_midi.c
+++ b/sound/core/seq/oss/seq_oss_midi.c
@@ -29,6 +29,7 @@
 #include "../seq_lock.h"
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/nospec.h>
 
 
 /*
@@ -315,6 +316,7 @@
 {
 	if (dev < 0 || dev >= dp->max_mididev)
 		return NULL;
+	dev = array_index_nospec(dev, dp->max_mididev);
 	return get_mdev(dev);
 }
 
diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
index cd0e0eb..278ebb9 100644
--- a/sound/core/seq/oss/seq_oss_synth.c
+++ b/sound/core/seq/oss/seq_oss_synth.c
@@ -26,6 +26,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/nospec.h>
 
 /*
  * constants
@@ -339,17 +340,13 @@
 	dp->max_synthdev = 0;
 }
 
-/*
- * check if the specified device is MIDI mapped device
- */
-static int
-is_midi_dev(struct seq_oss_devinfo *dp, int dev)
+static struct seq_oss_synthinfo *
+get_synthinfo_nospec(struct seq_oss_devinfo *dp, int dev)
 {
 	if (dev < 0 || dev >= dp->max_synthdev)
-		return 0;
-	if (dp->synths[dev].is_midi)
-		return 1;
-	return 0;
+		return NULL;
+	dev = array_index_nospec(dev, SNDRV_SEQ_OSS_MAX_SYNTH_DEVS);
+	return &dp->synths[dev];
 }
 
 /*
@@ -359,14 +356,20 @@
 get_synthdev(struct seq_oss_devinfo *dp, int dev)
 {
 	struct seq_oss_synth *rec;
-	if (dev < 0 || dev >= dp->max_synthdev)
+	struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev);
+
+	if (!info)
 		return NULL;
-	if (! dp->synths[dev].opened)
+	if (!info->opened)
 		return NULL;
-	if (dp->synths[dev].is_midi)
-		return &midi_synth_dev;
-	if ((rec = get_sdev(dev)) == NULL)
-		return NULL;
+	if (info->is_midi) {
+		rec = &midi_synth_dev;
+		snd_use_lock_use(&rec->use_lock);
+	} else {
+		rec = get_sdev(dev);
+		if (!rec)
+			return NULL;
+	}
 	if (! rec->opened) {
 		snd_use_lock_free(&rec->use_lock);
 		return NULL;
@@ -402,10 +405,8 @@
 	struct seq_oss_synth *rec;
 	struct seq_oss_synthinfo *info;
 
-	if (snd_BUG_ON(dev < 0 || dev >= dp->max_synthdev))
-		return;
-	info = &dp->synths[dev];
-	if (! info->opened)
+	info = get_synthinfo_nospec(dp, dev);
+	if (!info || !info->opened)
 		return;
 	if (info->sysex)
 		info->sysex->len = 0; /* reset sysex */
@@ -454,12 +455,14 @@
 			    const char __user *buf, int p, int c)
 {
 	struct seq_oss_synth *rec;
+	struct seq_oss_synthinfo *info;
 	int rc;
 
-	if (dev < 0 || dev >= dp->max_synthdev)
+	info = get_synthinfo_nospec(dp, dev);
+	if (!info)
 		return -ENXIO;
 
-	if (is_midi_dev(dp, dev))
+	if (info->is_midi)
 		return 0;
 	if ((rec = get_synthdev(dp, dev)) == NULL)
 		return -ENXIO;
@@ -467,24 +470,25 @@
 	if (rec->oper.load_patch == NULL)
 		rc = -ENXIO;
 	else
-		rc = rec->oper.load_patch(&dp->synths[dev].arg, fmt, buf, p, c);
+		rc = rec->oper.load_patch(&info->arg, fmt, buf, p, c);
 	snd_use_lock_free(&rec->use_lock);
 	return rc;
 }
 
 /*
- * check if the device is valid synth device
+ * check if the device is valid synth device and return the synth info
  */
-int
-snd_seq_oss_synth_is_valid(struct seq_oss_devinfo *dp, int dev)
+struct seq_oss_synthinfo *
+snd_seq_oss_synth_info(struct seq_oss_devinfo *dp, int dev)
 {
 	struct seq_oss_synth *rec;
+
 	rec = get_synthdev(dp, dev);
 	if (rec) {
 		snd_use_lock_free(&rec->use_lock);
-		return 1;
+		return get_synthinfo_nospec(dp, dev);
 	}
-	return 0;
+	return NULL;
 }
 
 
@@ -499,16 +503,18 @@
 	int i, send;
 	unsigned char *dest;
 	struct seq_oss_synth_sysex *sysex;
+	struct seq_oss_synthinfo *info;
 
-	if (! snd_seq_oss_synth_is_valid(dp, dev))
+	info = snd_seq_oss_synth_info(dp, dev);
+	if (!info)
 		return -ENXIO;
 
-	sysex = dp->synths[dev].sysex;
+	sysex = info->sysex;
 	if (sysex == NULL) {
 		sysex = kzalloc(sizeof(*sysex), GFP_KERNEL);
 		if (sysex == NULL)
 			return -ENOMEM;
-		dp->synths[dev].sysex = sysex;
+		info->sysex = sysex;
 	}
 
 	send = 0;
@@ -553,10 +559,12 @@
 int
 snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev)
 {
-	if (! snd_seq_oss_synth_is_valid(dp, dev))
+	struct seq_oss_synthinfo *info = snd_seq_oss_synth_info(dp, dev);
+
+	if (!info)
 		return -EINVAL;
-	snd_seq_oss_fill_addr(dp, ev, dp->synths[dev].arg.addr.client,
-			      dp->synths[dev].arg.addr.port);
+	snd_seq_oss_fill_addr(dp, ev, info->arg.addr.client,
+			      info->arg.addr.port);
 	return 0;
 }
 
@@ -568,16 +576,18 @@
 snd_seq_oss_synth_ioctl(struct seq_oss_devinfo *dp, int dev, unsigned int cmd, unsigned long addr)
 {
 	struct seq_oss_synth *rec;
+	struct seq_oss_synthinfo *info;
 	int rc;
 
-	if (is_midi_dev(dp, dev))
+	info = get_synthinfo_nospec(dp, dev);
+	if (!info || info->is_midi)
 		return -ENXIO;
 	if ((rec = get_synthdev(dp, dev)) == NULL)
 		return -ENXIO;
 	if (rec->oper.ioctl == NULL)
 		rc = -ENXIO;
 	else
-		rc = rec->oper.ioctl(&dp->synths[dev].arg, cmd, addr);
+		rc = rec->oper.ioctl(&info->arg, cmd, addr);
 	snd_use_lock_free(&rec->use_lock);
 	return rc;
 }
@@ -589,7 +599,10 @@
 int
 snd_seq_oss_synth_raw_event(struct seq_oss_devinfo *dp, int dev, unsigned char *data, struct snd_seq_event *ev)
 {
-	if (! snd_seq_oss_synth_is_valid(dp, dev) || is_midi_dev(dp, dev))
+	struct seq_oss_synthinfo *info;
+
+	info = snd_seq_oss_synth_info(dp, dev);
+	if (!info || info->is_midi)
 		return -ENXIO;
 	ev->type = SNDRV_SEQ_EVENT_OSS;
 	memcpy(ev->data.raw8.d, data, 8);
diff --git a/sound/core/seq/oss/seq_oss_synth.h b/sound/core/seq/oss/seq_oss_synth.h
index 74ac55f..a63f9e2 100644
--- a/sound/core/seq/oss/seq_oss_synth.h
+++ b/sound/core/seq/oss/seq_oss_synth.h
@@ -37,7 +37,8 @@
 void snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev);
 int snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt,
 				 const char __user *buf, int p, int c);
-int snd_seq_oss_synth_is_valid(struct seq_oss_devinfo *dp, int dev);
+struct seq_oss_synthinfo *snd_seq_oss_synth_info(struct seq_oss_devinfo *dp,
+						 int dev);
 int snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
 			    struct snd_seq_event *ev);
 int snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev);
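
The seq_oss_synth changes above, like the later hunks touching opl3_synth.c, hpimsginit.c, hpioctl.c, hda_hwdep.c, hdspm.c and rme9652.c, all apply the same Spectre-v1 hardening pattern: bounds-check a user-controlled index, then clamp it with array_index_nospec() before the array dereference so a mispredicted branch cannot be used to speculatively read out of bounds. A minimal sketch of the pattern follows; my_table and MY_TABLE_SIZE are illustrative placeholders, not part of this patch.

#include <linux/errno.h>
#include <linux/nospec.h>

#define MY_TABLE_SIZE	16
static int my_table[MY_TABLE_SIZE];

static int my_table_read(int idx)
{
	/* Conventional bounds check first */
	if (idx < 0 || idx >= MY_TABLE_SIZE)
		return -EINVAL;
	/* Clamp so the index stays in range even under speculative execution */
	idx = array_index_nospec(idx, MY_TABLE_SIZE);
	return my_table[idx];
}
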
diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
index 2007649..8bdc4c9 100644
--- a/sound/core/seq/seq_virmidi.c
+++ b/sound/core/seq/seq_virmidi.c
@@ -174,12 +174,12 @@
 			}
 			return;
 		}
+		spin_lock_irqsave(&substream->runtime->lock, flags);
 		if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
 			if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
-				return;
+				goto out;
 			vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
 		}
-		spin_lock_irqsave(&substream->runtime->lock, flags);
 		while (1) {
 			count = __snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
 			if (count <= 0)
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 7622551..d3b6260 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -547,7 +547,7 @@
 	else
 		timeri->flags |= SNDRV_TIMER_IFLG_PAUSED;
 	snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
-			  SNDRV_TIMER_EVENT_CONTINUE);
+			  SNDRV_TIMER_EVENT_PAUSE);
  unlock:
 	spin_unlock_irqrestore(&timer->lock, flags);
 	return result;
@@ -569,7 +569,7 @@
 		list_del_init(&timeri->ack_list);
 		list_del_init(&timeri->active_list);
 		snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
-				  SNDRV_TIMER_EVENT_CONTINUE);
+				  SNDRV_TIMER_EVENT_PAUSE);
 		spin_unlock(&timeri->timer->lock);
 	}
 	spin_unlock_irqrestore(&slave_active_lock, flags);
diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
index 6c58e6f..7c6ef87 100644
--- a/sound/core/vmaster.c
+++ b/sound/core/vmaster.c
@@ -68,10 +68,13 @@
 		return -ENOMEM;
 	uctl->id = slave->slave.id;
 	err = slave->slave.get(&slave->slave, uctl);
+	if (err < 0)
+		goto error;
 	for (ch = 0; ch < slave->info.count; ch++)
 		slave->vals[ch] = uctl->value.integer.value[ch];
+ error:
 	kfree(uctl);
-	return 0;
+	return err < 0 ? err : 0;
 }
 
 /* get the slave ctl info and save the initial values */
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index dc91002..847f703 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -296,6 +296,8 @@
 		cable->pause |= stream;
 		loopback_timer_stop(dpcm);
 		spin_unlock(&cable->lock);
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+			loopback_active_notify(dpcm);
 		break;
 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
 	case SNDRV_PCM_TRIGGER_RESUME:
@@ -304,6 +306,8 @@
 		cable->pause &= ~stream;
 		loopback_timer_start(dpcm);
 		spin_unlock(&cable->lock);
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+			loopback_active_notify(dpcm);
 		break;
 	default:
 		return -EINVAL;
@@ -828,9 +832,11 @@
 {
 	struct loopback *loopback = snd_kcontrol_chip(kcontrol);
 	
+	mutex_lock(&loopback->cable_lock);
 	ucontrol->value.integer.value[0] =
 		loopback->setup[kcontrol->id.subdevice]
 			       [kcontrol->id.device].rate_shift;
+	mutex_unlock(&loopback->cable_lock);
 	return 0;
 }
 
@@ -862,9 +868,11 @@
 {
 	struct loopback *loopback = snd_kcontrol_chip(kcontrol);
 	
+	mutex_lock(&loopback->cable_lock);
 	ucontrol->value.integer.value[0] =
 		loopback->setup[kcontrol->id.subdevice]
 			       [kcontrol->id.device].notify;
+	mutex_unlock(&loopback->cable_lock);
 	return 0;
 }
 
@@ -876,12 +884,14 @@
 	int change = 0;
 
 	val = ucontrol->value.integer.value[0] ? 1 : 0;
+	mutex_lock(&loopback->cable_lock);
 	if (val != loopback->setup[kcontrol->id.subdevice]
 				[kcontrol->id.device].notify) {
 		loopback->setup[kcontrol->id.subdevice]
 			[kcontrol->id.device].notify = val;
 		change = 1;
 	}
+	mutex_unlock(&loopback->cable_lock);
 	return change;
 }
 
@@ -889,13 +899,18 @@
 			       struct snd_ctl_elem_value *ucontrol)
 {
 	struct loopback *loopback = snd_kcontrol_chip(kcontrol);
-	struct loopback_cable *cable = loopback->cables
-			[kcontrol->id.subdevice][kcontrol->id.device ^ 1];
+	struct loopback_cable *cable;
+
 	unsigned int val = 0;
 
-	if (cable != NULL)
-		val = (cable->running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) ?
-									1 : 0;
+	mutex_lock(&loopback->cable_lock);
+	cable = loopback->cables[kcontrol->id.subdevice][kcontrol->id.device ^ 1];
+	if (cable != NULL) {
+		unsigned int running = cable->running ^ cable->pause;
+
+		val = (running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) ? 1 : 0;
+	}
+	mutex_unlock(&loopback->cable_lock);
 	ucontrol->value.integer.value[0] = val;
 	return 0;
 }
@@ -938,9 +953,11 @@
 {
 	struct loopback *loopback = snd_kcontrol_chip(kcontrol);
 	
+	mutex_lock(&loopback->cable_lock);
 	ucontrol->value.integer.value[0] =
 		loopback->setup[kcontrol->id.subdevice]
 			       [kcontrol->id.device].rate;
+	mutex_unlock(&loopback->cable_lock);
 	return 0;
 }
 
@@ -960,9 +977,11 @@
 {
 	struct loopback *loopback = snd_kcontrol_chip(kcontrol);
 	
+	mutex_lock(&loopback->cable_lock);
 	ucontrol->value.integer.value[0] =
 		loopback->setup[kcontrol->id.subdevice]
 			       [kcontrol->id.device].channels;
+	mutex_unlock(&loopback->cable_lock);
 	return 0;
 }
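
The aloop changes above take loopback->cable_lock in the ctl get/put callbacks so that reads of the shared setup[] state (and the active-cable lookup) cannot race with cable creation and teardown. A sketch of the general shape of such a mutex-guarded ALSA control read, with a hypothetical chip structure standing in for the loopback device:

#include <linux/mutex.h>
#include <sound/control.h>

/* Hypothetical device state; 'lock' is assumed to be set up with
 * mutex_init() at probe time (not shown). */
struct my_chip {
	struct mutex lock;
	int rate_shift;
};

static int my_rate_shift_get(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol)
{
	struct my_chip *chip = snd_kcontrol_chip(kcontrol);

	/* Read the shared state under the same lock used by writers */
	mutex_lock(&chip->lock);
	ucontrol->value.integer.value[0] = chip->rate_shift;
	mutex_unlock(&chip->lock);
	return 0;
}
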
 
diff --git a/sound/drivers/opl3/opl3_synth.c b/sound/drivers/opl3/opl3_synth.c
index ddcc1a3..42920a2 100644
--- a/sound/drivers/opl3/opl3_synth.c
+++ b/sound/drivers/opl3/opl3_synth.c
@@ -21,6 +21,7 @@
 
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/nospec.h>
 #include <sound/opl3.h>
 #include <sound/asound_fm.h>
 
@@ -448,7 +449,7 @@
 {
 	unsigned short reg_side;
 	unsigned char op_offset;
-	unsigned char voice_offset;
+	unsigned char voice_offset, voice_op;
 
 	unsigned short opl3_reg;
 	unsigned char reg_val;
@@ -473,7 +474,9 @@
 		voice_offset = voice->voice - MAX_OPL2_VOICES;
 	}
 	/* Get register offset of operator */
-	op_offset = snd_opl3_regmap[voice_offset][voice->op];
+	voice_offset = array_index_nospec(voice_offset, MAX_OPL2_VOICES);
+	voice_op = array_index_nospec(voice->op, 4);
+	op_offset = snd_opl3_regmap[voice_offset][voice_op];
 
 	reg_val = 0x00;
 	/* Set amplitude modulation (tremolo) effect */
diff --git a/sound/firewire/dice/dice-stream.c b/sound/firewire/dice/dice-stream.c
index ec4db3a..257cfbf 100644
--- a/sound/firewire/dice/dice-stream.c
+++ b/sound/firewire/dice/dice-stream.c
@@ -425,7 +425,7 @@
 		err = init_stream(dice, AMDTP_IN_STREAM, i);
 		if (err < 0) {
 			for (; i >= 0; i--)
-				destroy_stream(dice, AMDTP_OUT_STREAM, i);
+				destroy_stream(dice, AMDTP_IN_STREAM, i);
 			goto end;
 		}
 	}
diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c
index 25e9f77..0d3d36f 100644
--- a/sound/firewire/dice/dice.c
+++ b/sound/firewire/dice/dice.c
@@ -14,7 +14,7 @@
 #define OUI_WEISS		0x001c6a
 #define OUI_LOUD		0x000ff2
 #define OUI_FOCUSRITE		0x00130e
-#define OUI_TCELECTRONIC	0x001486
+#define OUI_TCELECTRONIC	0x000166
 
 #define DICE_CATEGORY_ID	0x04
 #define WEISS_CATEGORY_ID	0x00
diff --git a/sound/pci/asihpi/hpimsginit.c b/sound/pci/asihpi/hpimsginit.c
index 7eb6171..a31a70d 100644
--- a/sound/pci/asihpi/hpimsginit.c
+++ b/sound/pci/asihpi/hpimsginit.c
@@ -23,6 +23,7 @@
 
 #include "hpi_internal.h"
 #include "hpimsginit.h"
+#include <linux/nospec.h>
 
 /* The actual message size for each object type */
 static u16 msg_size[HPI_OBJ_MAXINDEX + 1] = HPI_MESSAGE_SIZE_BY_OBJECT;
@@ -39,10 +40,12 @@
 {
 	u16 size;
 
-	if ((object > 0) && (object <= HPI_OBJ_MAXINDEX))
+	if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) {
+		object = array_index_nospec(object, HPI_OBJ_MAXINDEX + 1);
 		size = msg_size[object];
-	else
+	} else {
 		size = sizeof(*phm);
+	}
 
 	memset(phm, 0, size);
 	phm->size = size;
@@ -66,10 +69,12 @@
 {
 	u16 size;
 
-	if ((object > 0) && (object <= HPI_OBJ_MAXINDEX))
+	if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) {
+		object = array_index_nospec(object, HPI_OBJ_MAXINDEX + 1);
 		size = res_size[object];
-	else
+	} else {
 		size = sizeof(*phr);
+	}
 
 	memset(phr, 0, sizeof(*phr));
 	phr->size = size;
diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
index 7e3aa50..3ef9af5 100644
--- a/sound/pci/asihpi/hpioctl.c
+++ b/sound/pci/asihpi/hpioctl.c
@@ -33,6 +33,7 @@
 #include <linux/stringify.h>
 #include <linux/module.h>
 #include <linux/vmalloc.h>
+#include <linux/nospec.h>
 
 #ifdef MODULE_FIRMWARE
 MODULE_FIRMWARE("asihpi/dsp5000.bin");
@@ -182,7 +183,8 @@
 		struct hpi_adapter *pa = NULL;
 
 		if (hm->h.adapter_index < ARRAY_SIZE(adapters))
-			pa = &adapters[hm->h.adapter_index];
+			pa = &adapters[array_index_nospec(hm->h.adapter_index,
+							  ARRAY_SIZE(adapters))];
 
 		if (!pa || !pa->adapter || !pa->adapter->type) {
 			hpi_init_response(&hr->r0, hm->h.object,
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index 7f3b5ed..f7a492c 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -88,7 +88,6 @@
 config SND_HDA_CODEC_REALTEK
 	tristate "Build Realtek HD-audio codec support"
 	select SND_HDA_GENERIC
-	select INPUT
 	help
 	  Say Y or M here to include Realtek HD-audio codec support in
 	  snd-hda-intel driver, such as ALC880.
diff --git a/sound/pci/hda/hda_hwdep.c b/sound/pci/hda/hda_hwdep.c
index 57df06e..cc009a4 100644
--- a/sound/pci/hda/hda_hwdep.c
+++ b/sound/pci/hda/hda_hwdep.c
@@ -21,6 +21,7 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/compat.h>
+#include <linux/nospec.h>
 #include <sound/core.h>
 #include "hda_codec.h"
 #include "hda_local.h"
@@ -51,7 +52,16 @@
 	
 	if (get_user(verb, &arg->verb))
 		return -EFAULT;
-	res = get_wcaps(codec, verb >> 24);
+	/* open-code get_wcaps(verb>>24) with nospec */
+	verb >>= 24;
+	if (verb < codec->core.start_nid ||
+	    verb >= codec->core.start_nid + codec->core.num_nodes) {
+		res = 0;
+	} else {
+		verb -= codec->core.start_nid;
+		verb = array_index_nospec(verb, codec->core.num_nodes);
+		res = codec->wcaps[verb];
+	}
 	if (put_user(res, &arg->res))
 		return -EFAULT;
 	return 0;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 733b342..4e91120 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1514,7 +1514,8 @@
 		 */
 		u8 val;
 		pci_read_config_byte(chip->pci, 0x42, &val);
-		if (!(val & 0x80) && chip->pci->revision == 0x30)
+		if (!(val & 0x80) && (chip->pci->revision == 0x30 ||
+				      chip->pci->revision == 0x20))
 			snoop = false;
 	}
 
@@ -2060,6 +2061,8 @@
 	SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0),
 	/* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
 	SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
+	/* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
+	SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
 	/* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
 	SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
 	{}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index e2230be..39cd35f 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -329,6 +329,7 @@
 		break;
 	case 0x10ec0225:
 	case 0x10ec0233:
+	case 0x10ec0235:
 	case 0x10ec0236:
 	case 0x10ec0255:
 	case 0x10ec0256:
@@ -3494,6 +3495,7 @@
 	}
 }
 
+#if IS_REACHABLE(CONFIG_INPUT)
 static void gpio2_mic_hotkey_event(struct hda_codec *codec,
 				   struct hda_jack_callback *event)
 {
@@ -3626,6 +3628,10 @@
 		spec->kb_dev = NULL;
 	}
 }
+#else /* INPUT */
+#define alc280_fixup_hp_gpio2_mic_hotkey	NULL
+#define alc233_fixup_lenovo_line2_mic_hotkey	NULL
+#endif /* INPUT */
 
 static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
 				const struct hda_fixup *fix, int action)
@@ -6359,6 +6365,7 @@
 	case 0x10ec0298:
 		spec->codec_variant = ALC269_TYPE_ALC298;
 		break;
+	case 0x10ec0235:
 	case 0x10ec0255:
 		spec->codec_variant = ALC269_TYPE_ALC255;
 		break;
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 14bbf55..9899ef4 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -137,6 +137,7 @@
 #include <linux/pci.h>
 #include <linux/math64.h>
 #include <linux/io.h>
+#include <linux/nospec.h>
 
 #include <sound/core.h>
 #include <sound/control.h>
@@ -5692,40 +5693,43 @@
 		struct snd_pcm_channel_info *info)
 {
 	struct hdspm *hdspm = snd_pcm_substream_chip(substream);
+	unsigned int channel = info->channel;
 
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		if (snd_BUG_ON(info->channel >= hdspm->max_channels_out)) {
+		if (snd_BUG_ON(channel >= hdspm->max_channels_out)) {
 			dev_info(hdspm->card->dev,
 				 "snd_hdspm_channel_info: output channel out of range (%d)\n",
-				 info->channel);
+				 channel);
 			return -EINVAL;
 		}
 
-		if (hdspm->channel_map_out[info->channel] < 0) {
+		channel = array_index_nospec(channel, hdspm->max_channels_out);
+		if (hdspm->channel_map_out[channel] < 0) {
 			dev_info(hdspm->card->dev,
 				 "snd_hdspm_channel_info: output channel %d mapped out\n",
-				 info->channel);
+				 channel);
 			return -EINVAL;
 		}
 
-		info->offset = hdspm->channel_map_out[info->channel] *
+		info->offset = hdspm->channel_map_out[channel] *
 			HDSPM_CHANNEL_BUFFER_BYTES;
 	} else {
-		if (snd_BUG_ON(info->channel >= hdspm->max_channels_in)) {
+		if (snd_BUG_ON(channel >= hdspm->max_channels_in)) {
 			dev_info(hdspm->card->dev,
 				 "snd_hdspm_channel_info: input channel out of range (%d)\n",
-				 info->channel);
+				 channel);
 			return -EINVAL;
 		}
 
-		if (hdspm->channel_map_in[info->channel] < 0) {
+		channel = array_index_nospec(channel, hdspm->max_channels_in);
+		if (hdspm->channel_map_in[channel] < 0) {
 			dev_info(hdspm->card->dev,
 				 "snd_hdspm_channel_info: input channel %d mapped out\n",
-				 info->channel);
+				 channel);
 			return -EINVAL;
 		}
 
-		info->offset = hdspm->channel_map_in[info->channel] *
+		info->offset = hdspm->channel_map_in[channel] *
 			HDSPM_CHANNEL_BUFFER_BYTES;
 	}
 
diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
index 55172c6..a76b1f1 100644
--- a/sound/pci/rme9652/rme9652.c
+++ b/sound/pci/rme9652/rme9652.c
@@ -26,6 +26,7 @@
 #include <linux/pci.h>
 #include <linux/module.h>
 #include <linux/io.h>
+#include <linux/nospec.h>
 
 #include <sound/core.h>
 #include <sound/control.h>
@@ -2036,9 +2037,10 @@
 	if (snd_BUG_ON(info->channel >= RME9652_NCHANNELS))
 		return -EINVAL;
 
-	if ((chn = rme9652->channel_map[info->channel]) < 0) {
+	chn = rme9652->channel_map[array_index_nospec(info->channel,
+						      RME9652_NCHANNELS)];
+	if (chn < 0)
 		return -EINVAL;
-	}
 
 	info->offset = chn * RME9652_CHANNEL_BUFFER_BYTES;
 	info->first = 0;
diff --git a/sound/soc/au1x/ac97c.c b/sound/soc/au1x/ac97c.c
index 29a97d5..66d6c52 100644
--- a/sound/soc/au1x/ac97c.c
+++ b/sound/soc/au1x/ac97c.c
@@ -91,8 +91,8 @@
 	do {
 		mutex_lock(&ctx->lock);
 
-		tmo = 5;
-		while ((RD(ctx, AC97_STATUS) & STAT_CP) && tmo--)
+		tmo = 6;
+		while ((RD(ctx, AC97_STATUS) & STAT_CP) && --tmo)
 			udelay(21);	/* wait an ac97 frame time */
 		if (!tmo) {
 			pr_debug("ac97rd timeout #1\n");
@@ -105,7 +105,7 @@
 		 * poll, Forrest, poll...
 		 */
 		tmo = 0x10000;
-		while ((RD(ctx, AC97_STATUS) & STAT_CP) && tmo--)
+		while ((RD(ctx, AC97_STATUS) & STAT_CP) && --tmo)
 			asm volatile ("nop");
 		data = RD(ctx, AC97_CMDRESP);
 
diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
index 993bde2..7693e63 100644
--- a/sound/soc/codecs/ssm2602.c
+++ b/sound/soc/codecs/ssm2602.c
@@ -54,10 +54,17 @@
  * using 2 wire for device control, so we cache them instead.
  * There is no point in caching the reset register
  */
-static const u16 ssm2602_reg[SSM2602_CACHEREGNUM] = {
-	0x0097, 0x0097, 0x0079, 0x0079,
-	0x000a, 0x0008, 0x009f, 0x000a,
-	0x0000, 0x0000
+static const struct reg_default ssm2602_reg[SSM2602_CACHEREGNUM] = {
+	{ .reg = 0x00, .def = 0x0097 },
+	{ .reg = 0x01, .def = 0x0097 },
+	{ .reg = 0x02, .def = 0x0079 },
+	{ .reg = 0x03, .def = 0x0079 },
+	{ .reg = 0x04, .def = 0x000a },
+	{ .reg = 0x05, .def = 0x0008 },
+	{ .reg = 0x06, .def = 0x009f },
+	{ .reg = 0x07, .def = 0x000a },
+	{ .reg = 0x08, .def = 0x0000 },
+	{ .reg = 0x09, .def = 0x0000 }
 };
 
 
@@ -620,8 +627,8 @@
 	.volatile_reg = ssm2602_register_volatile,
 
 	.cache_type = REGCACHE_RBTREE,
-	.reg_defaults_raw = ssm2602_reg,
-	.num_reg_defaults_raw = ARRAY_SIZE(ssm2602_reg),
+	.reg_defaults = ssm2602_reg,
+	.num_reg_defaults = ARRAY_SIZE(ssm2602_reg),
 };
 EXPORT_SYMBOL_GPL(ssm2602_regmap_config);
 
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index 38bfd46..3ef1745 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -145,6 +145,13 @@
 
 	psr = ratio <= 256 * maxfp ? ESAI_xCCR_xPSR_BYPASS : ESAI_xCCR_xPSR_DIV8;
 
+	/* Do not loop-search if PM (1 ~ 256) alone can serve the ratio */
+	if (ratio <= 256) {
+		pm = ratio;
+		fp = 1;
+		goto out;
+	}
+
 	/* Set the max fluctuation -- 0.1% of the max devisor */
 	savesub = (psr ? 1 : 8)  * 256 * maxfp / 1000;
 
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index dd88c2c..48804e4 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -201,7 +201,7 @@
 	if (ret < 0)
 		return ret;
 
-	ret = asoc_simple_card_init_mic(rtd->card, &priv->hp_jack, PREFIX);
+	ret = asoc_simple_card_init_mic(rtd->card, &priv->mic_jack, PREFIX);
 	if (ret < 0)
 		return ret;
 
diff --git a/sound/soc/intel/atom/sst/sst_stream.c b/sound/soc/intel/atom/sst/sst_stream.c
index 4ccc80e..c798f8d 100644
--- a/sound/soc/intel/atom/sst/sst_stream.c
+++ b/sound/soc/intel/atom/sst/sst_stream.c
@@ -221,7 +221,7 @@
 		sst_free_block(sst_drv_ctx, block);
 out:
 	test_and_clear_bit(pvt_id, &sst_drv_ctx->pvt_id);
-	return 0;
+	return ret;
 }
 
 /*
diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
index 9052561..b1eb696 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5645.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
@@ -111,6 +111,7 @@
 	SND_SOC_DAPM_HP("Headphone", NULL),
 	SND_SOC_DAPM_MIC("Headset Mic", NULL),
 	SND_SOC_DAPM_MIC("Int Mic", NULL),
+	SND_SOC_DAPM_MIC("Int Analog Mic", NULL),
 	SND_SOC_DAPM_SPK("Ext Spk", NULL),
 	SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
 			platform_clock_control, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
@@ -121,6 +122,8 @@
 	{"IN1N", NULL, "Headset Mic"},
 	{"DMIC L1", NULL, "Int Mic"},
 	{"DMIC R1", NULL, "Int Mic"},
+	{"IN2P", NULL, "Int Analog Mic"},
+	{"IN2N", NULL, "Int Analog Mic"},
 	{"Headphone", NULL, "HPOL"},
 	{"Headphone", NULL, "HPOR"},
 	{"Ext Spk", NULL, "SPOL"},
@@ -134,6 +137,9 @@
 	{"Headphone", NULL, "Platform Clock"},
 	{"Headset Mic", NULL, "Platform Clock"},
 	{"Int Mic", NULL, "Platform Clock"},
+	{"Int Analog Mic", NULL, "Platform Clock"},
+	{"Int Analog Mic", NULL, "micbias1"},
+	{"Int Analog Mic", NULL, "micbias2"},
 	{"Ext Spk", NULL, "Platform Clock"},
 };
 
@@ -162,6 +168,7 @@
 	SOC_DAPM_PIN_SWITCH("Headphone"),
 	SOC_DAPM_PIN_SWITCH("Headset Mic"),
 	SOC_DAPM_PIN_SWITCH("Int Mic"),
+	SOC_DAPM_PIN_SWITCH("Int Analog Mic"),
 	SOC_DAPM_PIN_SWITCH("Ext Spk"),
 };
 
diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c
index 805b7f2..78472c9 100644
--- a/sound/soc/intel/skylake/skl-messages.c
+++ b/sound/soc/intel/skylake/skl-messages.c
@@ -331,7 +331,11 @@
 	if (skl->skl_sst->is_first_boot == true)
 		return 0;
 
+	/* disable dynamic clock gating during fw and lib download */
+	ctx->enable_miscbdcge(ctx->dev, false);
+
 	ret = skl_dsp_wake(ctx->dsp);
+	ctx->enable_miscbdcge(ctx->dev, true);
 	if (ret < 0)
 		return ret;
 
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index 58c7286..2fd213c 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -1191,7 +1191,11 @@
 			return -EIO;
 		}
 
+		/* disable dynamic clock gating during fw and lib download */
+		skl->skl_sst->enable_miscbdcge(platform->dev, false);
+
 		ret = ops->init_fw(platform->dev, skl->skl_sst);
+		skl->skl_sst->enable_miscbdcge(platform->dev, true);
 		if (ret < 0) {
 			dev_err(platform->dev, "Failed to boot first fw: %d\n", ret);
 			return ret;
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index 85324e6..2d14e37 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -642,8 +642,12 @@
 		tmp |= mod_slave;
 		break;
 	case SND_SOC_DAIFMT_CBS_CFS:
-		/* Set default source clock in Master mode */
-		if (i2s->rclk_srcrate == 0)
+		/*
+		 * Set default source clock in Master mode, only when the
+		 * CLK_I2S_RCLK_SRC clock is not exposed so we ensure any
+		 * clock configuration assigned in DT is not overwritten.
+		 */
+		if (i2s->rclk_srcrate == 0 && i2s->clk_data.clks == NULL)
 			i2s_set_sysclk(dai, SAMSUNG_I2S_RCLKSRC_0,
 							0, SND_SOC_CLOCK_IN);
 		break;
@@ -858,6 +862,11 @@
 		return 0;
 
 	if (!(i2s->quirks & QUIRK_NO_MUXPSR)) {
+		struct clk *rclksrc = i2s->clk_table[CLK_I2S_RCLK_SRC];
+
+		if (i2s->rclk_srcrate == 0 && rclksrc && !IS_ERR(rclksrc))
+			i2s->rclk_srcrate = clk_get_rate(rclksrc);
+
 		psr = i2s->rclk_srcrate / i2s->frmclk / rfs;
 		writel(((psr - 1) << 8) | PSR_PSREN, i2s->addr + I2SPSR);
 		dev_dbg(&i2s->pdev->dev,
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index fefa6ad..17e305b 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -552,6 +552,13 @@
 		struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
 		u32 *buf = (u32 *)(runtime->dma_area +
 				   rsnd_dai_pointer_offset(io, 0));
+		int shift = 0;
+
+		switch (runtime->sample_bits) {
+		case 32:
+			shift = 8;
+			break;
+		}
 
 		/*
 		 * 8/16/32 data can be assesse to TDR/RDR register
@@ -559,9 +566,9 @@
 		 * see rsnd_ssi_init()
 		 */
 		if (rsnd_io_is_play(io))
-			rsnd_mod_write(mod, SSITDR, *buf);
+			rsnd_mod_write(mod, SSITDR, (*buf) << shift);
 		else
-			*buf = rsnd_mod_read(mod, SSIRDR);
+			*buf = (rsnd_mod_read(mod, SSIRDR) >> shift);
 
 		elapsed = rsnd_dai_pointer_update(io, sizeof(*buf));
 	}
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index b761761..2840a14 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1073,7 +1073,8 @@
 	}
 	if (!rtd->platform) {
 		dev_err(card->dev, "ASoC: platform %s not registered\n",
-			dai_link->platform_name);
+			((platform_name) ? platform_name :
+			  dai_link->platform_of_node->full_name));
 		goto _err_defer;
 	}
 
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 8a758c9..d6b48c7 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -1180,6 +1180,9 @@
 			kfree(sm);
 			continue;
 		}
+
+		/* create any TLV data */
+		soc_tplg_create_tlv(tplg, &kc[i], &mc->hdr);
 	}
 	return kc;
 
diff --git a/sound/usb/line6/midi.c b/sound/usb/line6/midi.c
index d0fb2f2..4f4ebe9 100644
--- a/sound/usb/line6/midi.c
+++ b/sound/usb/line6/midi.c
@@ -125,7 +125,7 @@
 	}
 
 	usb_fill_int_urb(urb, line6->usbdev,
-			 usb_sndbulkpipe(line6->usbdev,
+	 * Invalidate current skb->hash.
 					 line6->properties->ep_ctrl_w),
 			 transfer_buffer, length, midi_sent, line6,
 			 line6->interval);
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index ce11cc9..1c5d099 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -975,6 +975,14 @@
 		}
 		break;
 
+	case USB_ID(0x0d8c, 0x0103):
+		if (!strcmp(kctl->id.name, "PCM Playback Volume")) {
+			usb_audio_info(chip,
+	 * u64 bpf_get_socket_cookie(skb)
+			cval->min = -256;
+		}
+		break;
+
 	case USB_ID(0x0471, 0x0101):
 	case USB_ID(0x0471, 0x0104):
 	case USB_ID(0x0471, 0x0105):
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index 9038b2e..eaa03ac 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -353,8 +353,11 @@
 /*
  * Dell usb dock with ALC4020 codec had a firmware problem where it got
  * screwed up when zero volume is passed; just skip it as a workaround
+ *
+ * Also the extension unit gives an access error, so skip it as well.
  */
 static const struct usbmix_name_map dell_alc4020_map[] = {
+	{ 4, NULL },	/* extension unit */
 	{ 16, NULL },
 	{ 19, NULL },
 	{ 0 }
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 1cd7f8b..da9fc08 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1153,28 +1153,32 @@
 	return false;
 }
 
-/* Marantz/Denon USB DACs need a vendor cmd to switch
+/* ITF-USB DSD based DACs need a vendor cmd to switch
  * between PCM and native DSD mode
+ * (2 altsets version)
  */
-static bool is_marantz_denon_dac(unsigned int id)
+static bool is_itf_usb_dsd_2alts_dac(unsigned int id)
 {
 	switch (id) {
 	case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */
 	case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
 	case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
+	case USB_ID(0x1852, 0x5065): /* Luxman DA-06 */
 		return true;
 	}
 	return false;
 }
 
-/* TEAC UD-501/UD-503/NT-503 USB DACs need a vendor cmd to switch
- * between PCM/DOP and native DSD mode
+/* ITF-USB DSD based DACs need a vendor cmd to switch
+ * between PCM and native DSD mode
+ * (3 altsets version)
  */
-static bool is_teac_dsd_dac(unsigned int id)
+static bool is_itf_usb_dsd_3alts_dac(unsigned int id)
 {
 	switch (id) {
 	case USB_ID(0x0644, 0x8043): /* TEAC UD-501/UD-503/NT-503 */
 	case USB_ID(0x0644, 0x8044): /* Esoteric D-05X */
+	case USB_ID(0x0644, 0x804a): /* TEAC UD-301 */
 		return true;
 	}
 	return false;
@@ -1186,7 +1190,7 @@
 	struct usb_device *dev = subs->dev;
 	int err;
 
-	if (is_marantz_denon_dac(subs->stream->chip->usb_id)) {
+	if (is_itf_usb_dsd_2alts_dac(subs->stream->chip->usb_id)) {
 		/* First switch to alt set 0, otherwise the mode switch cmd
 		 * will not be accepted by the DAC
 		 */
@@ -1207,7 +1211,7 @@
 			break;
 		}
 		mdelay(20);
-	} else if (is_teac_dsd_dac(subs->stream->chip->usb_id)) {
+	} else if (is_itf_usb_dsd_3alts_dac(subs->stream->chip->usb_id)) {
 		/* Vendor mode switch cmd is required. */
 		switch (fmt->altsetting) {
 		case 3: /* DSD mode (DSD_U32) requested */
@@ -1303,10 +1307,10 @@
 	    (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
 		mdelay(20);
 
-	/* Marantz/Denon devices with USB DAC functionality need a delay
+	/* ITF-USB DSD based DACs functionality need a delay
 	 * after each class compliant request
 	 */
-	if (is_marantz_denon_dac(chip->usb_id)
+	if (is_itf_usb_dsd_2alts_dac(chip->usb_id)
 	    && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
 		mdelay(20);
 
@@ -1370,14 +1374,14 @@
 		break;
 	}
 
-	/* Denon/Marantz devices with USB DAC functionality */
-	if (is_marantz_denon_dac(chip->usb_id)) {
+	/* ITF-USB DSD based DACs (2 altsets version) */
+	if (is_itf_usb_dsd_2alts_dac(chip->usb_id)) {
 		if (fp->altsetting == 2)
 			return SNDRV_PCM_FMTBIT_DSD_U32_BE;
 	}
 
-	/* TEAC devices with USB DAC functionality */
-	if (is_teac_dsd_dac(chip->usb_id)) {
+	/* ITF-USB DSD based DACs (3 altsets version) */
+	if (is_itf_usb_dsd_3alts_dac(chip->usb_id)) {
 		if (fp->altsetting == 3)
 			return SNDRV_PCM_FMTBIT_DSD_U32_BE;
 	}
diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h
index a2b3eb3..0b8cf31 100644
--- a/tools/arch/arm/include/uapi/asm/kvm.h
+++ b/tools/arch/arm/include/uapi/asm/kvm.h
@@ -84,6 +84,13 @@
 #define KVM_VGIC_V2_DIST_SIZE		0x1000
 #define KVM_VGIC_V2_CPU_SIZE		0x2000
 
+/* Supported VGICv3 address types  */
+#define KVM_VGIC_V3_ADDR_TYPE_DIST	2
+#define KVM_VGIC_V3_ADDR_TYPE_REDIST	3
+
+#define KVM_VGIC_V3_DIST_SIZE		SZ_64K
+#define KVM_VGIC_V3_REDIST_SIZE		(2 * SZ_64K)
+
 #define KVM_ARM_VCPU_POWER_OFF		0 /* CPU is started in OFF state */
 #define KVM_ARM_VCPU_PSCI_0_2		1 /* CPU uses PSCI v0.2 */
 
@@ -166,6 +173,12 @@
 #define KVM_REG_ARM_VFP_FPINST		0x1009
 #define KVM_REG_ARM_VFP_FPINST2		0x100A
 
+/* KVM-as-firmware specific pseudo-registers */
+#define KVM_REG_ARM_FW			(0x0014 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_FW_REG(r)		(KVM_REG_ARM | KVM_REG_SIZE_U64 | \
+					 KVM_REG_ARM_FW | ((r) & 0xffff))
+#define KVM_REG_ARM_PSCI_VERSION	KVM_REG_ARM_FW_REG(0)
+
 /* Device Control API: ARM VGIC */
 #define KVM_DEV_ARM_VGIC_GRP_ADDR	0
 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS	1
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
index 3051f86..702de7a 100644
--- a/tools/arch/arm64/include/uapi/asm/kvm.h
+++ b/tools/arch/arm64/include/uapi/asm/kvm.h
@@ -195,6 +195,12 @@
 #define KVM_REG_ARM_TIMER_CNT		ARM64_SYS_REG(3, 3, 14, 3, 2)
 #define KVM_REG_ARM_TIMER_CVAL		ARM64_SYS_REG(3, 3, 14, 0, 2)
 
+/* KVM-as-firmware specific pseudo-registers */
+#define KVM_REG_ARM_FW			(0x0014 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_FW_REG(r)		(KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
+					 KVM_REG_ARM_FW | ((r) & 0xffff))
+#define KVM_REG_ARM_PSCI_VERSION	KVM_REG_ARM_FW_REG(0)
+
 /* Device Control API: ARM VGIC */
 #define KVM_DEV_ARM_VGIC_GRP_ADDR	0
 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS	1
diff --git a/tools/arch/powerpc/include/uapi/asm/kvm.h b/tools/arch/powerpc/include/uapi/asm/kvm.h
index c93cf35..0fb1326 100644
--- a/tools/arch/powerpc/include/uapi/asm/kvm.h
+++ b/tools/arch/powerpc/include/uapi/asm/kvm.h
@@ -596,6 +596,7 @@
 #define KVM_REG_PPC_TM_VSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67)
 #define KVM_REG_PPC_TM_DSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68)
 #define KVM_REG_PPC_TM_TAR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69)
+#define KVM_REG_PPC_TM_XER	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x6a)
 
 /* PPC64 eXternal Interrupt Controller Specification */
 #define KVM_DEV_XICS_GRP_SOURCES	1	/* 64-bit source attributes */
diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h
index a2ffec4..81c02e1 100644
--- a/tools/arch/s390/include/uapi/asm/kvm.h
+++ b/tools/arch/s390/include/uapi/asm/kvm.h
@@ -197,6 +197,7 @@
 #define KVM_SYNC_VRS    (1UL << 6)
 #define KVM_SYNC_RICCB  (1UL << 7)
 #define KVM_SYNC_FPRS   (1UL << 8)
+#define KVM_SYNC_BPBC	(1UL << 10)
 /* definition of registers in kvm_run */
 struct kvm_sync_regs {
 	__u64 prefix;	/* prefix register */
@@ -217,7 +218,9 @@
 	};
 	__u8  reserved[512];	/* for future vector expansion */
 	__u32 fpc;		/* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
-	__u8 padding[52];	/* riccb needs to be 64byte aligned */
+	__u8 bpbc : 1;		/* bp mode */
+	__u8 reserved2 : 7;
+	__u8 padding1[51];	/* riccb needs to be 64byte aligned */
 	__u8 riccb[64];		/* runtime instrumentation controls block */
 };
 
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index f79669a..c278f27 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -12,7 +12,7 @@
 /*
  * Defines x86 CPU feature bits
  */
-#define NCAPINTS	18	/* N 32-bit words worth of info */
+#define NCAPINTS	19	/* N 32-bit words worth of info */
 #define NBUGINTS	1	/* N 32-bit bug flags */
 
 /*
@@ -189,17 +189,32 @@
 
 #define X86_FEATURE_CPB		( 7*32+ 2) /* AMD Core Performance Boost */
 #define X86_FEATURE_EPB		( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
+#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 4) /* Effectively INVPCID && CR4.PCIDE=1 */
 
 #define X86_FEATURE_HW_PSTATE	( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
 
-#define X86_FEATURE_INTEL_PT	( 7*32+15) /* Intel Processor Trace */
-#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
-#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_RETPOLINE	( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
+
+#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
+#define X86_FEATURE_SSBD	( 7*32+17) /* Speculative Store Bypass Disable */
+
+#define X86_FEATURE_RSB_CTXSW	( 7*32+19) /* "" Fill RSB on context switches */
 
 /* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
 #define X86_FEATURE_KAISER	( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
 
+#define X86_FEATURE_USE_IBPB	( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
+#define X86_FEATURE_USE_IBRS_FW	( 7*32+22) /* "" Use IBRS during runtime firmware calls */
+#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
+#define X86_FEATURE_LS_CFG_SSBD	( 7*32+24) /* "" AMD SSBD implementation */
+#define X86_FEATURE_IBRS	( 7*32+25) /* Indirect Branch Restricted Speculation */
+#define X86_FEATURE_IBPB	( 7*32+26) /* Indirect Branch Prediction Barrier */
+#define X86_FEATURE_STIBP	( 7*32+27) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_ZEN		( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
+
+
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW  ( 8*32+ 0) /* Intel TPR Shadow */
 #define X86_FEATURE_VNMI        ( 8*32+ 1) /* Intel Virtual NMI */
@@ -231,6 +246,7 @@
 #define X86_FEATURE_SMAP	( 9*32+20) /* Supervisor Mode Access Prevention */
 #define X86_FEATURE_CLFLUSHOPT	( 9*32+23) /* CLFLUSHOPT instruction */
 #define X86_FEATURE_CLWB	( 9*32+24) /* CLWB instruction */
+#define X86_FEATURE_INTEL_PT	( 9*32+25) /* Intel Processor Trace */
 #define X86_FEATURE_AVX512PF	( 9*32+26) /* AVX-512 Prefetch */
 #define X86_FEATURE_AVX512ER	( 9*32+27) /* AVX-512 Exponential and Reciprocal */
 #define X86_FEATURE_AVX512CD	( 9*32+28) /* AVX-512 Conflict Detection */
@@ -255,6 +271,10 @@
 /* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
 #define X86_FEATURE_CLZERO	(13*32+0) /* CLZERO instruction */
 #define X86_FEATURE_IRPERF	(13*32+1) /* Instructions Retired Count */
+#define X86_FEATURE_AMD_IBPB	(13*32+12) /* Indirect Branch Prediction Barrier */
+#define X86_FEATURE_AMD_IBRS	(13*32+14) /* Indirect Branch Restricted Speculation */
+#define X86_FEATURE_AMD_STIBP	(13*32+15) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_VIRT_SSBD	(13*32+25) /* Virtualized Speculative Store Bypass Disable */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
 #define X86_FEATURE_DTHERM	(14*32+ 0) /* Digital Thermal Sensor */
@@ -290,6 +310,16 @@
 #define X86_FEATURE_SUCCOR	(17*32+1) /* Uncorrectable error containment and recovery */
 #define X86_FEATURE_SMCA	(17*32+3) /* Scalable MCA */
 
+
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
+#define X86_FEATURE_AVX512_4VNNIW	(18*32+ 2) /* AVX-512 Neural Network Instructions */
+#define X86_FEATURE_AVX512_4FMAPS	(18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_PCONFIG		(18*32+18) /* Intel PCONFIG */
+#define X86_FEATURE_SPEC_CTRL		(18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+#define X86_FEATURE_INTEL_STIBP		(18*32+27) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_ARCH_CAPABILITIES	(18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+#define X86_FEATURE_SPEC_CTRL_SSBD	(18*32+31) /* "" Speculative Store Bypass Disable */
+
 /*
  * BUG word(s)
  */
@@ -314,4 +344,10 @@
 #define X86_BUG_NULL_SEG	X86_BUG(10) /* Nulling a selector preserves the base */
 #define X86_BUG_SWAPGS_FENCE	X86_BUG(11) /* SWAPGS without input dep on GS */
 #define X86_BUG_MONITOR		X86_BUG(12) /* IPI required to wake up remote CPU */
+#define X86_BUG_AMD_E400	X86_BUG(13) /* CPU is among the affected by Erratum 400 */
+#define X86_BUG_CPU_MELTDOWN	X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
+#define X86_BUG_SPECTRE_V1	X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
+#define X86_BUG_SPECTRE_V2	X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
+#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
+
 #endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
index 85599ad..1f8cca4 100644
--- a/tools/arch/x86/include/asm/disabled-features.h
+++ b/tools/arch/x86/include/asm/disabled-features.h
@@ -21,11 +21,13 @@
 # define DISABLE_K6_MTRR	(1<<(X86_FEATURE_K6_MTRR & 31))
 # define DISABLE_CYRIX_ARR	(1<<(X86_FEATURE_CYRIX_ARR & 31))
 # define DISABLE_CENTAUR_MCR	(1<<(X86_FEATURE_CENTAUR_MCR & 31))
+# define DISABLE_PCID		0
 #else
 # define DISABLE_VME		0
 # define DISABLE_K6_MTRR	0
 # define DISABLE_CYRIX_ARR	0
 # define DISABLE_CENTAUR_MCR	0
+# define DISABLE_PCID		(1<<(X86_FEATURE_PCID & 31))
 #endif /* CONFIG_X86_64 */
 
 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
@@ -43,7 +45,7 @@
 #define DISABLED_MASK1	0
 #define DISABLED_MASK2	0
 #define DISABLED_MASK3	(DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR)
-#define DISABLED_MASK4	0
+#define DISABLED_MASK4	(DISABLE_PCID)
 #define DISABLED_MASK5	0
 #define DISABLED_MASK6	0
 #define DISABLED_MASK7	0
@@ -57,6 +59,7 @@
 #define DISABLED_MASK15	0
 #define DISABLED_MASK16	(DISABLE_PKU|DISABLE_OSPKE)
 #define DISABLED_MASK17	0
-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
+#define DISABLED_MASK18	0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
 
 #endif /* _ASM_X86_DISABLED_FEATURES_H */
diff --git a/tools/arch/x86/include/asm/required-features.h b/tools/arch/x86/include/asm/required-features.h
index fac9a5c..6847d85 100644
--- a/tools/arch/x86/include/asm/required-features.h
+++ b/tools/arch/x86/include/asm/required-features.h
@@ -100,6 +100,7 @@
 #define REQUIRED_MASK15	0
 #define REQUIRED_MASK16	0
 #define REQUIRED_MASK17	0
-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
+#define REQUIRED_MASK18	0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
 
 #endif /* _ASM_X86_REQUIRED_FEATURES_H */
diff --git a/tools/include/asm-generic/bitops.h b/tools/include/asm-generic/bitops.h
index 653d1ba..0304600 100644
--- a/tools/include/asm-generic/bitops.h
+++ b/tools/include/asm-generic/bitops.h
@@ -13,6 +13,7 @@
  */
 
 #include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/__ffz.h>
 #include <asm-generic/bitops/fls.h>
 #include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/fls64.h>
diff --git a/tools/include/asm-generic/bitops/__ffz.h b/tools/include/asm-generic/bitops/__ffz.h
new file mode 100644
index 0000000..6744bd4
--- /dev/null
+++ b/tools/include/asm-generic/bitops/__ffz.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_GENERIC_BITOPS_FFZ_H_
+#define _ASM_GENERIC_BITOPS_FFZ_H_
+
+/*
+ * ffz - find first zero in word.
+ * @word: The word to search
+ *
+ * Undefined if no zero exists, so code should check against ~0UL first.
+ */
+#define ffz(x)  __ffs(~(x))
+
+#endif /* _ASM_GENERIC_BITOPS_FFZ_H_ */
diff --git a/tools/include/asm-generic/bitops/find.h b/tools/include/asm-generic/bitops/find.h
index 31f5154..5538ecd 100644
--- a/tools/include/asm-generic/bitops/find.h
+++ b/tools/include/asm-generic/bitops/find.h
@@ -15,6 +15,21 @@
 		size, unsigned long offset);
 #endif
 
+#ifndef find_next_zero_bit
+
+/**
+ * find_next_zero_bit - find the next cleared bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The bitmap size in bits
+ *
+ * Returns the bit number of the next zero bit
+ * If no bits are zero, returns @size.
+ */
+unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
+				 unsigned long offset);
+#endif
+
 #ifndef find_first_bit
 
 /**
@@ -30,4 +45,17 @@
 
 #endif /* find_first_bit */
 
+#ifndef find_first_zero_bit
+
+/**
+ * find_first_zero_bit - find the first cleared bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum number of bits to search
+ *
+ * Returns the bit number of the first cleared bit.
+ * If no bits are zero, returns @size.
+ */
+unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size);
+#endif
+
 #endif /*_TOOLS_LINUX_ASM_GENERIC_BITOPS_FIND_H_ */
diff --git a/tools/include/linux/atomic.h b/tools/include/linux/atomic.h
index 4e3d3d1..9f21fc2 100644
--- a/tools/include/linux/atomic.h
+++ b/tools/include/linux/atomic.h
@@ -3,4 +3,10 @@
 
 #include <asm/atomic.h>
 
+/* atomic_cmpxchg_relaxed */
+#ifndef atomic_cmpxchg_relaxed
+#define  atomic_cmpxchg_relaxed		atomic_cmpxchg
+#define  atomic_cmpxchg_release         atomic_cmpxchg
+#endif /* atomic_cmpxchg_relaxed */
+
 #endif /* __TOOLS_LINUX_ATOMIC_H */
diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h
index 43c1c50..eef41d5 100644
--- a/tools/include/linux/bitmap.h
+++ b/tools/include/linux/bitmap.h
@@ -35,6 +35,32 @@
 	}
 }
 
+static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
+{
+	unsigned int nlongs = BITS_TO_LONGS(nbits);
+	if (!small_const_nbits(nbits)) {
+		unsigned int len = (nlongs - 1) * sizeof(unsigned long);
+		memset(dst, 0xff,  len);
+	}
+	dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
+}
+
+static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
+{
+	if (small_const_nbits(nbits))
+		return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
+
+	return find_first_bit(src, nbits) == nbits;
+}
+
+static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
+{
+	if (small_const_nbits(nbits))
+		return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
+
+	return find_first_zero_bit(src, nbits) == nbits;
+}
+
 static inline int bitmap_weight(const unsigned long *src, int nbits)
 {
 	if (small_const_nbits(nbits))
diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h
index 49c929a..fc446343 100644
--- a/tools/include/linux/bitops.h
+++ b/tools/include/linux/bitops.h
@@ -39,6 +39,11 @@
 	     (bit) < (size);					\
 	     (bit) = find_next_bit((addr), (size), (bit) + 1))
 
+#define for_each_clear_bit(bit, addr, size) \
+	for ((bit) = find_first_zero_bit((addr), (size));       \
+	     (bit) < (size);                                    \
+	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
+
 /* same as for_each_set_bit() but use bit as value to start with */
 #define for_each_set_bit_from(bit, addr, size) \
 	for ((bit) = find_next_bit((addr), (size), (bit));	\
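
The for_each_clear_bit() iterator and the find_first_zero_bit()/find_next_zero_bit() declarations added above mirror the kernel bitmap API for tools/ userspace code. A small usage sketch, assuming the usual tools build setup (tools/include on the include path and tools/lib/find_bit.c linked in to provide the find_*_bit() implementations):

#include <linux/bitops.h>
#include <stdio.h>

int main(void)
{
	unsigned long map[1] = { 0x09 };	/* bits 0 and 3 set */
	unsigned int bit;

	/* Visits bits 1, 2, 4, 5, 6 and 7 of the first 8 bits */
	for_each_clear_bit(bit, map, 8)
		printf("bit %u is clear\n", bit);

	return 0;
}
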
diff --git a/tools/include/linux/bug.h b/tools/include/linux/bug.h
new file mode 100644
index 0000000..8e4a4f4
--- /dev/null
+++ b/tools/include/linux/bug.h
@@ -0,0 +1,10 @@
+#ifndef _TOOLS_PERF_LINUX_BUG_H
+#define _TOOLS_PERF_LINUX_BUG_H
+
+/* Force a compilation error if condition is true, but also produce a
+   result (of value 0 and type size_t), so the expression can be used
+   e.g. in a structure initializer (or where-ever else comma expressions
+   aren't permitted). */
+#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
+
+#endif	/* _TOOLS_PERF_LINUX_BUG_H */
diff --git a/tools/include/linux/compiler-gcc.h b/tools/include/linux/compiler-gcc.h
new file mode 100644
index 0000000..825d44f
--- /dev/null
+++ b/tools/include/linux/compiler-gcc.h
@@ -0,0 +1,21 @@
+#ifndef _TOOLS_LINUX_COMPILER_H_
+#error "Please don't include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead."
+#endif
+
+/*
+ * Common definitions for all gcc versions go here.
+ */
+#define GCC_VERSION (__GNUC__ * 10000		\
+		     + __GNUC_MINOR__ * 100	\
+		     + __GNUC_PATCHLEVEL__)
+
+#if GCC_VERSION >= 70000 && !defined(__CHECKER__)
+# define __fallthrough __attribute__ ((fallthrough))
+#endif
+
+#if GCC_VERSION >= 40300
+# define __compiletime_error(message) __attribute__((error(message)))
+#endif /* GCC_VERSION >= 40300 */
+
+/* &a[0] degrades to a pointer: a different type from an array */
+#define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
index d94179f..23299d7 100644
--- a/tools/include/linux/compiler.h
+++ b/tools/include/linux/compiler.h
@@ -1,6 +1,14 @@
 #ifndef _TOOLS_LINUX_COMPILER_H_
 #define _TOOLS_LINUX_COMPILER_H_
 
+#ifdef __GNUC__
+#include <linux/compiler-gcc.h>
+#endif
+
+#ifndef __compiletime_error
+# define __compiletime_error(message)
+#endif
+
 /* Optimization barrier */
 /* The "volatile" is due to gcc bugs */
 #define barrier() __asm__ __volatile__("": : :"memory")
@@ -9,6 +17,11 @@
 # define __always_inline	inline __attribute__((always_inline))
 #endif
 
+/* Are two types/vars the same type (ignoring qualifiers)? */
+#ifndef __same_type
+# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+#endif
+
 #ifdef __ANDROID__
 /*
  * FIXME: Big hammer to get rid of tons of:
@@ -21,6 +34,8 @@
 #endif
 
 #define __user
+#define __rcu
+#define __read_mostly
 
 #ifndef __attribute_const__
 # define __attribute_const__
@@ -50,6 +65,8 @@
 # define unlikely(x)		__builtin_expect(!!(x), 0)
 #endif
 
+#define uninitialized_var(x) x = *(&(x))
+
 #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
 
 #include <linux/types.h>
@@ -128,11 +145,7 @@
 
 
 #ifndef __fallthrough
-# if defined(__GNUC__) && __GNUC__ >= 7
-#  define __fallthrough __attribute__ ((fallthrough))
-# else
-#  define __fallthrough
-# endif
+# define __fallthrough
 #endif
 
 #endif /* _TOOLS_LINUX_COMPILER_H */
diff --git a/tools/include/linux/hashtable.h b/tools/include/linux/hashtable.h
index c65cc0a..251eabf 100644
--- a/tools/include/linux/hashtable.h
+++ b/tools/include/linux/hashtable.h
@@ -13,10 +13,6 @@
 #include <linux/hash.h>
 #include <linux/log2.h>
 
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
-
 #define DEFINE_HASHTABLE(name, bits)						\
 	struct hlist_head name[1 << (bits)] =					\
 			{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
diff --git a/tools/include/linux/kernel.h b/tools/include/linux/kernel.h
index 28607db..73ccc48 100644
--- a/tools/include/linux/kernel.h
+++ b/tools/include/linux/kernel.h
@@ -4,6 +4,11 @@
 #include <stdarg.h>
 #include <stddef.h>
 #include <assert.h>
+#include <linux/compiler.h>
+
+#ifndef UINT_MAX
+#define UINT_MAX	(~0U)
+#endif
 
 #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
 
@@ -72,6 +77,8 @@
 int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
 int scnprintf(char * buf, size_t size, const char * fmt, ...);
 
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
+
 /*
  * This looks more complex than it should be. But we need to
  * get the type for the ~ right in round_down (it needs to be
diff --git a/tools/include/linux/log2.h b/tools/include/linux/log2.h
index d5677d3..0325cef 100644
--- a/tools/include/linux/log2.h
+++ b/tools/include/linux/log2.h
@@ -12,6 +12,9 @@
 #ifndef _TOOLS_LINUX_LOG2_H
 #define _TOOLS_LINUX_LOG2_H
 
+#include <linux/bitops.h>
+#include <linux/types.h>
+
 /*
  * non-constant log of base 2 calculators
  * - the arch may override these in asm/bitops.h if they can be implemented
diff --git a/tools/include/linux/refcount.h b/tools/include/linux/refcount.h
new file mode 100644
index 0000000..a0177c1
--- /dev/null
+++ b/tools/include/linux/refcount.h
@@ -0,0 +1,151 @@
+#ifndef _TOOLS_LINUX_REFCOUNT_H
+#define _TOOLS_LINUX_REFCOUNT_H
+
+/*
+ * Variant of atomic_t specialized for reference counts.
+ *
+ * The interface matches the atomic_t interface (to aid in porting) but only
+ * provides the few functions one should use for reference counting.
+ *
+ * It differs in that the counter saturates at UINT_MAX and will not move once
+ * there. This avoids wrapping the counter and causing 'spurious'
+ * use-after-free issues.
+ *
+ * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
+ * and provide only what is strictly required for refcounts.
+ *
+ * The increments are fully relaxed; these will not provide ordering. The
+ * rationale is that whatever is used to obtain the object we're increasing the
+ * reference count on will provide the ordering. For locked data structures,
+ * its the lock acquire, for RCU/lockless data structures its the dependent
+ * load.
+ *
+ * Do note that inc_not_zero() provides a control dependency which will order
+ * future stores against the inc, this ensures we'll never modify the object
+ * if we did not in fact acquire a reference.
+ *
+ * The decrements will provide release order, such that all the prior loads and
+ * stores will be issued before, it also provides a control dependency, which
+ * will order us against the subsequent free().
+ *
+ * The control dependency is against the load of the cmpxchg (ll/sc) that
+ * succeeded. This means the stores aren't fully ordered, but this is fine
+ * because the 1->0 transition indicates no concurrency.
+ *
+ * Note that the allocator is responsible for ordering things between free()
+ * and alloc().
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+
+#ifdef NDEBUG
+#define REFCOUNT_WARN(cond, str) (void)(cond)
+#define __refcount_check
+#else
+#define REFCOUNT_WARN(cond, str) BUG_ON(cond)
+#define __refcount_check	__must_check
+#endif
+
+typedef struct refcount_struct {
+	atomic_t refs;
+} refcount_t;
+
+#define REFCOUNT_INIT(n)	{ .refs = ATOMIC_INIT(n), }
+
+static inline void refcount_set(refcount_t *r, unsigned int n)
+{
+	atomic_set(&r->refs, n);
+}
+
+static inline unsigned int refcount_read(const refcount_t *r)
+{
+	return atomic_read(&r->refs);
+}
+
+/*
+ * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_inc_not_zero(refcount_t *r)
+{
+	unsigned int old, new, val = atomic_read(&r->refs);
+
+	for (;;) {
+		new = val + 1;
+
+		if (!val)
+			return false;
+
+		if (unlikely(!new))
+			return true;
+
+		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
+		if (old == val)
+			break;
+
+		val = old;
+	}
+
+	REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+
+	return true;
+}
+
+/*
+ * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller already has a
+ * reference on the object, will WARN when this is not so.
+ */
+static inline void refcount_inc(refcount_t *r)
+{
+	REFCOUNT_WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
+}
+
+/*
+ * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
+ * decrement when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_sub_and_test(unsigned int i, refcount_t *r)
+{
+	unsigned int old, new, val = atomic_read(&r->refs);
+
+	for (;;) {
+		if (unlikely(val == UINT_MAX))
+			return false;
+
+		new = val - i;
+		if (new > val) {
+			REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+			return false;
+		}
+
+		old = atomic_cmpxchg_release(&r->refs, val, new);
+		if (old == val)
+			break;
+
+		val = old;
+	}
+
+	return !new;
+}
+
+static inline __refcount_check
+bool refcount_dec_and_test(refcount_t *r)
+{
+	return refcount_sub_and_test(1, r);
+}
+
+
+#endif /* _ATOMIC_LINUX_REFCOUNT_H */
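
A short sketch of how this tools-side refcount_t shim is intended to be used, mirroring the kernel API; the object type and free path below are hypothetical:

#include <linux/refcount.h>
#include <stdlib.h>

/* Hypothetical reference-counted object */
struct my_obj {
	refcount_t refs;
	/* ... payload ... */
};

static struct my_obj *my_obj_new(void)
{
	struct my_obj *obj = calloc(1, sizeof(*obj));

	if (obj)
		refcount_set(&obj->refs, 1);	/* caller holds one reference */
	return obj;
}

static void my_obj_get(struct my_obj *obj)
{
	refcount_inc(&obj->refs);		/* saturates at UINT_MAX */
}

static void my_obj_put(struct my_obj *obj)
{
	if (refcount_dec_and_test(&obj->refs))
		free(obj);			/* last reference dropped */
}
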
diff --git a/tools/include/linux/spinlock.h b/tools/include/linux/spinlock.h
new file mode 100644
index 0000000..58397dc
--- /dev/null
+++ b/tools/include/linux/spinlock.h
@@ -0,0 +1,5 @@
+#define spinlock_t		pthread_mutex_t
+#define DEFINE_SPINLOCK(x)	pthread_mutex_t x = PTHREAD_MUTEX_INITIALIZER;
+
+#define spin_lock_irqsave(x, f)		(void)f, pthread_mutex_lock(x)
+#define spin_unlock_irqrestore(x, f)	(void)f, pthread_mutex_unlock(x)
diff --git a/tools/include/linux/types.h b/tools/include/linux/types.h
index 8ebf627..77a28a2 100644
--- a/tools/include/linux/types.h
+++ b/tools/include/linux/types.h
@@ -7,6 +7,7 @@
 
 #define __SANE_USERSPACE_TYPES__	/* For PPC64, to get LL64 types */
 #include <asm/types.h>
+#include <asm/posix_types.h>
 
 struct page;
 struct kmem_cache;
@@ -42,11 +43,7 @@
 #else
 #define __bitwise__
 #endif
-#ifdef __CHECK_ENDIAN__
 #define __bitwise __bitwise__
-#else
-#define __bitwise
-#endif
 
 #define __force
 #define __user
diff --git a/tools/include/uapi/asm-generic/mman-common.h b/tools/include/uapi/asm-generic/mman-common.h
index 5827438..8c27db0 100644
--- a/tools/include/uapi/asm-generic/mman-common.h
+++ b/tools/include/uapi/asm-generic/mman-common.h
@@ -72,4 +72,9 @@
 #define MAP_HUGE_SHIFT	26
 #define MAP_HUGE_MASK	0x3f
 
+#define PKEY_DISABLE_ACCESS	0x1
+#define PKEY_DISABLE_WRITE	0x2
+#define PKEY_ACCESS_MASK	(PKEY_DISABLE_ACCESS |\
+				 PKEY_DISABLE_WRITE)
+
 #endif /* __ASM_GENERIC_MMAN_COMMON_H */
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 42dfbeb..a339bea 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -73,6 +73,8 @@
 	BPF_PROG_LOAD,
 	BPF_OBJ_PIN,
 	BPF_OBJ_GET,
+	BPF_PROG_ATTACH,
+	BPF_PROG_DETACH,
 };
 
 enum bpf_map_type {
@@ -95,8 +97,24 @@
 	BPF_PROG_TYPE_SCHED_ACT,
 	BPF_PROG_TYPE_TRACEPOINT,
 	BPF_PROG_TYPE_XDP,
+	BPF_PROG_TYPE_PERF_EVENT,
+	BPF_PROG_TYPE_CGROUP_SKB,
 };
 
+enum bpf_attach_type {
+	BPF_CGROUP_INET_INGRESS,
+	BPF_CGROUP_INET_EGRESS,
+	__MAX_BPF_ATTACH_TYPE
+};
+
+#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
+
+/* If BPF_F_ALLOW_OVERRIDE flag is used in BPF_PROG_ATTACH command
+ * to the given target_fd cgroup the descendent cgroup will be able to
+ * override effective bpf program that was inherited from this cgroup
+ */
+#define BPF_F_ALLOW_OVERRIDE	(1U << 0)
+
 #define BPF_PSEUDO_MAP_FD	1
 
 /* flags for BPF_MAP_UPDATE_ELEM command */
@@ -106,6 +124,10 @@
 
 #define BPF_F_NO_PREALLOC	(1U << 0)
 
+/* Flags for accessing BPF object */
+#define BPF_F_RDONLY		(1U << 3)
+#define BPF_F_WRONLY		(1U << 4)
+
 union bpf_attr {
 	struct { /* anonymous struct used by BPF_MAP_CREATE command */
 		__u32	map_type;	/* one of enum bpf_map_type */
@@ -139,6 +161,14 @@
 	struct { /* anonymous struct used by BPF_OBJ_* commands */
 		__aligned_u64	pathname;
 		__u32		bpf_fd;
+		__u32		file_flags;
+	};
+
+	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
+		__u32		target_fd;	/* container object to attach to */
+		__u32		attach_bpf_fd;	/* eBPF program to attach */
+		__u32		attach_type;
+		__u32		attach_flags;
 	};
 } __attribute__((aligned(8)));
 
@@ -376,40 +406,52 @@
 	BPF_FUNC_probe_write_user,
 
 	/**
-	 * int bpf_skb_change_tail(skb, len, flags)
-	 *     The helper will resize the skb to the given new size, to be used f.e.
-	 *     with control messages.
-	 *     @skb: pointer to skb
-	 *     @len: new skb length
-	 *     @flags: reserved
-	 *     Return: 0 on success or negative error
+	 * bpf_current_task_under_cgroup(map, index) - Check cgroup2 membership of current task
+	 * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
+	 * @index: index of the cgroup in the bpf_map
+	 * Return:
+	 *   == 0 current failed the cgroup2 descendant test
+	 *   == 1 current succeeded the cgroup2 descendant test
+	 *    < 0 error
+	 */
+	BPF_FUNC_current_task_under_cgroup,
+
+	/**
+	 * bpf_skb_change_tail(skb, len, flags)
+	 * The helper will resize the skb to the given new size,
+	 * to be used f.e. with control messages.
+	 * @skb: pointer to skb
+	 * @len: new skb length
+	 * @flags: reserved
+	 * Return: 0 on success or negative error
 	 */
 	BPF_FUNC_skb_change_tail,
 
 	/**
-	 * int bpf_skb_pull_data(skb, len)
-	 *     The helper will pull in non-linear data in case the skb is non-linear
-	 *     and not all of len are part of the linear section. Only needed for
-	 *     read/write with direct packet access.
-	 *     @skb: pointer to skb
-	 *     @len: len to make read/writeable
-	 *     Return: 0 on success or negative error
+	 * bpf_skb_pull_data(skb, len)
+	 * The helper will pull in non-linear data in case the
+	 * skb is non-linear and not all of len are part of the
+	 * linear section. Only needed for read/write with direct
+	 * packet access.
+	 * @skb: pointer to skb
+	 * @len: len to make read/writeable
+	 * Return: 0 on success or negative error
 	 */
 	BPF_FUNC_skb_pull_data,
 
 	/**
-	 * s64 bpf_csum_update(skb, csum)
-	 *     Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
-	 *     @skb: pointer to skb
-	 *     @csum: csum to add
-	 *     Return: csum on success or negative error
+	 * bpf_csum_update(skb, csum)
+	 * Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
+	 * @skb: pointer to skb
+	 * @csum: csum to add
+	 * Return: csum on success or negative error
 	 */
 	BPF_FUNC_csum_update,
 
 	/**
-	 * void bpf_set_hash_invalid(skb)
-	 *     Invalidate current skb->hash.
-	 *     @skb: pointer to skb
+	 * bpf_set_hash_invalid(skb)
+	 * Invalidate current skb->hash.
+	 * @skb: pointer to skb
 	 */
 	BPF_FUNC_set_hash_invalid,
 
@@ -457,12 +499,11 @@
 	BPF_FUNC_probe_read_str,
 
 	/**
-	 * u64 bpf_get_socket_cookie(skb)
-	 * Get the cookie for the socket stored inside sk_buff.
-	 * @skb: pointer to skb
-	 * Return: 8 Bytes non-decreasing number on success or 0 if
-	 * the socket
-	 * field is missing inside sk_buff
+	 * u64 bpf_get_socket_cookie(skb)
+	 *     Get the cookie for the socket stored inside sk_buff.
+	 *     @skb: pointer to skb
+	 *     Return: 8 Bytes non-decreasing number on success or 0 if the socket
+	 *     field is missing inside sk_buff
 	 */
 	BPF_FUNC_get_socket_cookie,
 
@@ -470,7 +511,8 @@
 	 * u32 bpf_get_socket_uid(skb)
 	 *     Get the owner uid of the socket stored inside sk_buff.
 	 *     @skb: pointer to skb
-	 *     Return: uid of the socket owner on success or overflowuid if failed.
+	 *     Return: uid of the socket owner on success or 0 if the socket pointer
+	 *     inside sk_buff is NULL
 	 */
 	BPF_FUNC_get_socket_uid,
 
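Editorial note: as an illustrative sketch only, not part of this patch, the BPF_PROG_ATTACH command and the attach_* fields added to union bpf_attr earlier in this header are typically driven from userspace roughly as below. The cgroup path and prog_fd are placeholders, and the snippet assumes installed uapi headers at least as new as this change.

#define _GNU_SOURCE
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Attach an already-loaded BPF_PROG_TYPE_CGROUP_SKB program to a cgroup. */
static int attach_egress_prog(int prog_fd, const char *cgroup_path)
{
	union bpf_attr attr;
	int cgroup_fd = open(cgroup_path, O_RDONLY | O_DIRECTORY);

	if (cgroup_fd < 0)
		return -1;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd     = cgroup_fd;			/* container object to attach to */
	attr.attach_bpf_fd = prog_fd;			/* eBPF program fd */
	attr.attach_type   = BPF_CGROUP_INET_EGRESS;
	attr.attach_flags  = BPF_F_ALLOW_OVERRIDE;	/* descendants may override */

	return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
}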
diff --git a/tools/include/uapi/linux/fcntl.h b/tools/include/uapi/linux/fcntl.h
new file mode 100644
index 0000000..beed138
--- /dev/null
+++ b/tools/include/uapi/linux/fcntl.h
@@ -0,0 +1,67 @@
+#ifndef _UAPI_LINUX_FCNTL_H
+#define _UAPI_LINUX_FCNTL_H
+
+#include <asm/fcntl.h>
+
+#define F_SETLEASE	(F_LINUX_SPECIFIC_BASE + 0)
+#define F_GETLEASE	(F_LINUX_SPECIFIC_BASE + 1)
+
+/*
+ * Cancel a blocking posix lock; internal use only until we expose an
+ * asynchronous lock api to userspace:
+ */
+#define F_CANCELLK	(F_LINUX_SPECIFIC_BASE + 5)
+
+/* Create a file descriptor with FD_CLOEXEC set. */
+#define F_DUPFD_CLOEXEC	(F_LINUX_SPECIFIC_BASE + 6)
+
+/*
+ * Request notifications on a directory.
+ * See below for events that may be notified.
+ */
+#define F_NOTIFY	(F_LINUX_SPECIFIC_BASE+2)
+
+/*
+ * Set and get of pipe page size array
+ */
+#define F_SETPIPE_SZ	(F_LINUX_SPECIFIC_BASE + 7)
+#define F_GETPIPE_SZ	(F_LINUX_SPECIFIC_BASE + 8)
+
+/*
+ * Set/Get seals
+ */
+#define F_ADD_SEALS	(F_LINUX_SPECIFIC_BASE + 9)
+#define F_GET_SEALS	(F_LINUX_SPECIFIC_BASE + 10)
+
+/*
+ * Types of seals
+ */
+#define F_SEAL_SEAL	0x0001	/* prevent further seals from being set */
+#define F_SEAL_SHRINK	0x0002	/* prevent file from shrinking */
+#define F_SEAL_GROW	0x0004	/* prevent file from growing */
+#define F_SEAL_WRITE	0x0008	/* prevent writes */
+/* (1U << 31) is reserved for signed error codes */
+
+/*
+ * Types of directory notifications that may be requested.
+ */
+#define DN_ACCESS	0x00000001	/* File accessed */
+#define DN_MODIFY	0x00000002	/* File modified */
+#define DN_CREATE	0x00000004	/* File created */
+#define DN_DELETE	0x00000008	/* File removed */
+#define DN_RENAME	0x00000010	/* File renamed */
+#define DN_ATTRIB	0x00000020	/* File changed attributes */
+#define DN_MULTISHOT	0x80000000	/* Don't remove notifier */
+
+#define AT_FDCWD		-100    /* Special value used to indicate
+                                           openat should use the current
+                                           working directory. */
+#define AT_SYMLINK_NOFOLLOW	0x100   /* Do not follow symbolic links.  */
+#define AT_REMOVEDIR		0x200   /* Remove directory instead of
+                                           unlinking file.  */
+#define AT_SYMLINK_FOLLOW	0x400   /* Follow symbolic links.  */
+#define AT_NO_AUTOMOUNT		0x800	/* Suppress terminal automount traversal */
+#define AT_EMPTY_PATH		0x1000	/* Allow empty relative pathname */
+
+
+#endif /* _UAPI_LINUX_FCNTL_H */
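Editorial note: the F_ADD_SEALS/F_GET_SEALS commands and F_SEAL_* bits in this imported header form the file-sealing API. A minimal usage sketch, not part of the patch, assuming a glibc that exposes memfd_create() (2.27 or later):

/* Sketch: create a sealable memfd, size it, then forbid further growth,
 * shrinking, or writes.  Hypothetical example for illustration only. */
#define _GNU_SOURCE
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	int fd = memfd_create("sealed-buf", MFD_ALLOW_SEALING);

	if (fd < 0 || ftruncate(fd, 4096) < 0) {
		perror("memfd setup");
		return 1;
	}

	/* After this, ftruncate() and write() on the file fail with EPERM. */
	if (fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_WRITE) < 0)
		perror("F_ADD_SEALS");

	printf("active seals: 0x%x\n", fcntl(fd, F_GET_SEALS));
	return 0;
}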
diff --git a/tools/include/uapi/linux/stat.h b/tools/include/uapi/linux/stat.h
new file mode 100644
index 0000000..7fec7e3
--- /dev/null
+++ b/tools/include/uapi/linux/stat.h
@@ -0,0 +1,45 @@
+#ifndef _UAPI_LINUX_STAT_H
+#define _UAPI_LINUX_STAT_H
+
+
+#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
+
+#define S_IFMT  00170000
+#define S_IFSOCK 0140000
+#define S_IFLNK	 0120000
+#define S_IFREG  0100000
+#define S_IFBLK  0060000
+#define S_IFDIR  0040000
+#define S_IFCHR  0020000
+#define S_IFIFO  0010000
+#define S_ISUID  0004000
+#define S_ISGID  0002000
+#define S_ISVTX  0001000
+
+#define S_ISLNK(m)	(((m) & S_IFMT) == S_IFLNK)
+#define S_ISREG(m)	(((m) & S_IFMT) == S_IFREG)
+#define S_ISDIR(m)	(((m) & S_IFMT) == S_IFDIR)
+#define S_ISCHR(m)	(((m) & S_IFMT) == S_IFCHR)
+#define S_ISBLK(m)	(((m) & S_IFMT) == S_IFBLK)
+#define S_ISFIFO(m)	(((m) & S_IFMT) == S_IFIFO)
+#define S_ISSOCK(m)	(((m) & S_IFMT) == S_IFSOCK)
+
+#define S_IRWXU 00700
+#define S_IRUSR 00400
+#define S_IWUSR 00200
+#define S_IXUSR 00100
+
+#define S_IRWXG 00070
+#define S_IRGRP 00040
+#define S_IWGRP 00020
+#define S_IXGRP 00010
+
+#define S_IRWXO 00007
+#define S_IROTH 00004
+#define S_IWOTH 00002
+#define S_IXOTH 00001
+
+#endif
+
+
+#endif /* _UAPI_LINUX_STAT_H */
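Editorial note: the S_IF*/S_IS* definitions above are the standard st_mode encoding. A quick standalone sketch, not from this patch, of how the type macros are applied:

/* Classify a path by testing the file-type bits of st_mode. */
#include <sys/stat.h>
#include <stdio.h>

int main(int argc, char **argv)
{
	struct stat st;

	if (argc < 2 || stat(argv[1], &st) < 0)
		return 1;

	if (S_ISDIR(st.st_mode))
		printf("%s is a directory\n", argv[1]);
	else if (S_ISREG(st.st_mode))
		printf("%s is a regular file (mode %04o)\n",
		       argv[1], st.st_mode & 07777);
	return 0;
}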
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index b699aea..7788cfb 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -590,6 +590,24 @@
 	return 0;
 }
 
+static bool section_have_execinstr(struct bpf_object *obj, int idx)
+{
+	Elf_Scn *scn;
+	GElf_Shdr sh;
+
+	scn = elf_getscn(obj->efile.elf, idx);
+	if (!scn)
+		return false;
+
+	if (gelf_getshdr(scn, &sh) != &sh)
+		return false;
+
+	if (sh.sh_flags & SHF_EXECINSTR)
+		return true;
+
+	return false;
+}
+
 static int bpf_object__elf_collect(struct bpf_object *obj)
 {
 	Elf *elf = obj->efile.elf;
@@ -673,6 +691,14 @@
 		} else if (sh.sh_type == SHT_REL) {
 			void *reloc = obj->efile.reloc;
 			int nr_reloc = obj->efile.nr_reloc + 1;
+			int sec = sh.sh_info; /* points to other section */
+
+			/* Only do relo for section with exec instructions */
+			if (!section_have_execinstr(obj, sec)) {
+				pr_debug("skip relo %s(%d) for section(%d)\n",
+					 name, idx, sec);
+				continue;
+			}
 
 			reloc = realloc(reloc,
 					sizeof(*obj->efile.reloc) * nr_reloc);
diff --git a/tools/lib/find_bit.c b/tools/lib/find_bit.c
index 9122a9e..6d8b8f2 100644
--- a/tools/lib/find_bit.c
+++ b/tools/lib/find_bit.c
@@ -82,3 +82,28 @@
 	return size;
 }
 #endif
+
+#ifndef find_first_zero_bit
+/*
+ * Find the first cleared bit in a memory region.
+ */
+unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
+{
+	unsigned long idx;
+
+	for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
+		if (addr[idx] != ~0UL)
+			return min(idx * BITS_PER_LONG + ffz(addr[idx]), size);
+	}
+
+	return size;
+}
+#endif
+
+#ifndef find_next_zero_bit
+unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
+				 unsigned long offset)
+{
+	return _find_next_bit(addr, size, offset, ~0UL);
+}
+#endif
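Editorial note: a standalone illustration, not part of the patch, of what the new find_first_zero_bit() computes. The helper below restates the same loop so the example compiles outside the tools tree, where the real function is linked from tools/lib/find_bit.c and declared via the tools bitops headers.

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Same semantics as find_first_zero_bit(): index of the first clear bit,
 * or 'size' if every bit in the region is set. */
static unsigned long first_zero_bit(const unsigned long *addr, unsigned long size)
{
	unsigned long idx, bit;

	for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
		if (addr[idx] != ~0UL) {
			for (bit = 0; bit < BITS_PER_LONG; bit++)
				if (!(addr[idx] & (1UL << bit)))
					break;
			bit += idx * BITS_PER_LONG;
			return bit < size ? bit : size;
		}
	}
	return size;
}

int main(void)
{
	unsigned long map[1] = { 0x0fUL };	/* bits 0-3 set */

	printf("%lu\n", first_zero_bit(map, 64));	/* prints 4 */
	return 0;
}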
diff --git a/tools/lib/str_error_r.c b/tools/lib/str_error_r.c
index 503ae07..9ab2d0a 100644
--- a/tools/lib/str_error_r.c
+++ b/tools/lib/str_error_r.c
@@ -21,6 +21,6 @@
 {
 	int err = strerror_r(errnum, buf, buflen);
 	if (err)
-		snprintf(buf, buflen, "INTERNAL ERROR: strerror_r(%d, %p, %zd)=%d", errnum, buf, buflen, err);
+		snprintf(buf, buflen, "INTERNAL ERROR: strerror_r(%d, [buf], %zd)=%d", errnum, buflen, err);
 	return buf;
 }
diff --git a/tools/lib/subcmd/pager.c b/tools/lib/subcmd/pager.c
index 6518bea..68af60f 100644
--- a/tools/lib/subcmd/pager.c
+++ b/tools/lib/subcmd/pager.c
@@ -29,10 +29,13 @@
 	 * have real input
 	 */
 	fd_set in;
+	fd_set exception;
 
 	FD_ZERO(&in);
+	FD_ZERO(&exception);
 	FD_SET(0, &in);
-	select(1, &in, NULL, &in, NULL);
+	FD_SET(0, &exception);
+	select(1, &in, NULL, &exception, NULL);
 
 	setenv("LESS", "FRSX", 0);
 }
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 664c90c..6694753 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -4927,21 +4927,22 @@
 				else
 					ls = 2;
 
-				if (*(ptr+1) == 'F' || *(ptr+1) == 'f' ||
-				    *(ptr+1) == 'S' || *(ptr+1) == 's') {
+				if (isalnum(ptr[1]))
 					ptr++;
+
+				if (*ptr == 'F' || *ptr == 'f' ||
+				    *ptr == 'S' || *ptr == 's') {
 					show_func = *ptr;
-				} else if (*(ptr+1) == 'M' || *(ptr+1) == 'm') {
-					print_mac_arg(s, *(ptr+1), data, size, event, arg);
-					ptr++;
+				} else if (*ptr == 'M' || *ptr == 'm') {
+					print_mac_arg(s, *ptr, data, size, event, arg);
 					arg = arg->next;
 					break;
-				} else if (*(ptr+1) == 'I' || *(ptr+1) == 'i') {
+				} else if (*ptr == 'I' || *ptr == 'i') {
 					int n;
 
-					n = print_ip_arg(s, ptr+1, data, size, event, arg);
+					n = print_ip_arg(s, ptr, data, size, event, arg);
 					if (n > 0) {
-						ptr += n;
+						ptr += n - 1;
 						arg = arg->next;
 						break;
 					}
diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
index 7c214ce..5e10ba7 100644
--- a/tools/lib/traceevent/parse-filter.c
+++ b/tools/lib/traceevent/parse-filter.c
@@ -1879,17 +1879,25 @@
 	struct pevent *pevent;
 	unsigned long long addr;
 	const char *val = NULL;
+	unsigned int size;
 	char hex[64];
 
 	/* If the field is not a string convert it */
 	if (arg->str.field->flags & FIELD_IS_STRING) {
 		val = record->data + arg->str.field->offset;
+		size = arg->str.field->size;
+
+		if (arg->str.field->flags & FIELD_IS_DYNAMIC) {
+			addr = *(unsigned int *)val;
+			val = record->data + (addr & 0xffff);
+			size = addr >> 16;
+		}
 
 		/*
 		 * We need to copy the data since we can't be sure the field
 		 * is null terminated.
 		 */
-		if (*(val + arg->str.field->size - 1)) {
+		if (*(val + size - 1)) {
 			/* copy it */
 			memcpy(arg->str.buffer, val, arg->str.field->size);
 			/* the buffer is already NULL terminated */
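Editorial note: for clarity, a standalone sketch (not from the patch) of the descriptor layout the new FIELD_IS_DYNAMIC branch unpacks: the 32-bit value stored in the record carries the data offset in its low 16 bits and the data length in its high 16 bits. The record contents here are invented for illustration.

#include <stdio.h>

int main(void)
{
	unsigned char record[64] = { 0 };
	unsigned int desc = (5u << 16) | 32u;	/* 5 bytes of data at offset 32 */

	/* what get_field_str() now does for dynamic string fields */
	unsigned int offset = desc & 0xffff;
	unsigned int size   = desc >> 16;

	snprintf((char *)record + offset, sizeof(record) - offset, "hello");
	printf("len=%u str=%.*s\n", size, (int)size, (char *)record + offset);
	return 0;
}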
diff --git a/tools/objtool/Build b/tools/objtool/Build
index d6cdece..749becd 100644
--- a/tools/objtool/Build
+++ b/tools/objtool/Build
@@ -1,5 +1,9 @@
 objtool-y += arch/$(SRCARCH)/
 objtool-y += builtin-check.o
+objtool-y += builtin-orc.o
+objtool-y += check.o
+objtool-y += orc_gen.o
+objtool-y += orc_dump.o
 objtool-y += elf.o
 objtool-y += special.o
 objtool-y += objtool.o
diff --git a/tools/objtool/Documentation/stack-validation.txt b/tools/objtool/Documentation/stack-validation.txt
index 05536d8..3995735 100644
--- a/tools/objtool/Documentation/stack-validation.txt
+++ b/tools/objtool/Documentation/stack-validation.txt
@@ -11,9 +11,6 @@
 It enforces a set of rules on asm code and C inline assembly code so
 that stack traces can be reliable.
 
-Currently it only checks frame pointer usage, but there are plans to add
-CFI validation for C files and CFI generation for asm files.
-
 For each function, it recursively follows all possible code paths and
 validates the correct frame pointer state at each instruction.
 
@@ -23,6 +20,10 @@
 instructions).  Similarly, it knows how to follow switch statements, for
 which gcc sometimes uses jump tables.
 
+(Objtool also has an 'orc generate' subcommand which generates debuginfo
+for the ORC unwinder.  See Documentation/x86/orc-unwinder.txt in the
+kernel tree for more details.)
+
 
 Why do we need stack metadata validation?
 -----------------------------------------
@@ -93,62 +94,24 @@
        or at the very end of the function after the stack frame has been
        destroyed.  This is an inherent limitation of frame pointers.
 
-b) 100% reliable stack traces for DWARF enabled kernels
+b) ORC (Oops Rewind Capability) unwind table generation
 
-   (NOTE: This is not yet implemented)
+   An alternative to frame pointers and DWARF, ORC unwind data can be
+   used to walk the stack.  Unlike frame pointers, ORC data is out of
+   band.  So it doesn't affect runtime performance and it can be
+   reliable even when interrupts or exceptions are involved.
 
-   As an alternative to frame pointers, DWARF Call Frame Information
-   (CFI) metadata can be used to walk the stack.  Unlike frame pointers,
-   CFI metadata is out of band.  So it doesn't affect runtime
-   performance and it can be reliable even when interrupts or exceptions
-   are involved.
-
-   For C code, gcc automatically generates DWARF CFI metadata.  But for
-   asm code, generating CFI is a tedious manual approach which requires
-   manually placed .cfi assembler macros to be scattered throughout the
-   code.  It's clumsy and very easy to get wrong, and it makes the real
-   code harder to read.
-
-   Stacktool will improve this situation in several ways.  For code
-   which already has CFI annotations, it will validate them.  For code
-   which doesn't have CFI annotations, it will generate them.  So an
-   architecture can opt to strip out all the manual .cfi annotations
-   from their asm code and have objtool generate them instead.
-
-   We might also add a runtime stack validation debug option where we
-   periodically walk the stack from schedule() and/or an NMI to ensure
-   that the stack metadata is sane and that we reach the bottom of the
-   stack.
-
-   So the benefit of objtool here will be that external tooling should
-   always show perfect stack traces.  And the same will be true for
-   kernel warning/oops traces if the architecture has a runtime DWARF
-   unwinder.
+   For more details, see Documentation/x86/orc-unwinder.txt.
 
 c) Higher live patching compatibility rate
 
-   (NOTE: This is not yet implemented)
+   Livepatch has an optional "consistency model", which is needed for
+   more complex patches.  In order for the consistency model to work,
+   stack traces need to be reliable (or an unreliable condition needs to
+   be detectable).  Objtool makes that possible.
 
-   Currently with CONFIG_LIVEPATCH there's a basic live patching
-   framework which is safe for roughly 85-90% of "security" fixes.  But
-   patches can't have complex features like function dependency or
-   prototype changes, or data structure changes.
-
-   There's a strong need to support patches which have the more complex
-   features so that the patch compatibility rate for security fixes can
-   eventually approach something resembling 100%.  To achieve that, a
-   "consistency model" is needed, which allows tasks to be safely
-   transitioned from an unpatched state to a patched state.
-
-   One of the key requirements of the currently proposed livepatch
-   consistency model [*] is that it needs to walk the stack of each
-   sleeping task to determine if it can be transitioned to the patched
-   state.  If objtool can ensure that stack traces are reliable, this
-   consistency model can be used and the live patching compatibility
-   rate can be improved significantly.
-
-   [*] https://lkml.kernel.org/r/cover.1423499826.git.jpoimboe@redhat.com
-
+   For more details, see the livepatch documentation in the Linux kernel
+   source tree at Documentation/livepatch/livepatch.txt.
 
 Rules
 -----
@@ -201,80 +164,84 @@
    return normally.
 
 
-Errors in .S files
-------------------
+Objtool warnings
+----------------
 
-If you're getting an error in a compiled .S file which you don't
-understand, first make sure that the affected code follows the above
-rules.
+For asm files, if you're getting an error which doesn't make sense,
+first make sure that the affected code follows the above rules.
+
+For C files, the common culprits are inline asm statements and calls to
+"noreturn" functions.  See below for more details.
+
+Another possible cause for errors in C code is if the Makefile removes
+-fno-omit-frame-pointer or adds -fomit-frame-pointer to the gcc options.
 
 Here are some examples of common warnings reported by objtool, what
 they mean, and suggestions for how to fix them.
 
 
-1. asm_file.o: warning: objtool: func()+0x128: call without frame pointer save/setup
+1. file.o: warning: objtool: func()+0x128: call without frame pointer save/setup
 
    The func() function made a function call without first saving and/or
-   updating the frame pointer.
+   updating the frame pointer, and CONFIG_FRAME_POINTER is enabled.
 
-   If func() is indeed a callable function, add proper frame pointer
-   logic using the FRAME_BEGIN and FRAME_END macros.  Otherwise, remove
-   its ELF function annotation by changing ENDPROC to END.
+   If the error is for an asm file, and func() is indeed a callable
+   function, add proper frame pointer logic using the FRAME_BEGIN and
+   FRAME_END macros.  Otherwise, if it's not a callable function, remove
+   its ELF function annotation by changing ENDPROC to END, and instead
+   use the manual unwind hint macros in asm/unwind_hints.h.
 
-   If you're getting this error in a .c file, see the "Errors in .c
-   files" section.
+   If it's a GCC-compiled .c file, the error may be because the function
+   uses an inline asm() statement which has a "call" instruction.  An
+   asm() statement with a call instruction must declare the use of the
+   stack pointer in its output operand.  On x86_64, this means adding
+   the ASM_CALL_CONSTRAINT as an output constraint:
+
+     asm volatile("call func" : ASM_CALL_CONSTRAINT);
+
+   Otherwise the stack frame may not get created before the call.
 
 
-2. asm_file.o: warning: objtool: .text+0x53: return instruction outside of a callable function
+2. file.o: warning: objtool: .text+0x53: unreachable instruction
 
-   A return instruction was detected, but objtool couldn't find a way
-   for a callable function to reach the instruction.
+   Objtool couldn't find a code path to reach the instruction.
 
-   If the return instruction is inside (or reachable from) a callable
-   function, the function needs to be annotated with the ENTRY/ENDPROC
-   macros.
+   If the error is for an asm file, and the instruction is inside (or
+   reachable from) a callable function, the function should be annotated
+   with the ENTRY/ENDPROC macros (ENDPROC is the important one).
+   Otherwise, the code should probably be annotated with the unwind hint
+   macros in asm/unwind_hints.h so objtool and the unwinder can know the
+   stack state associated with the code.
 
-   If you _really_ need a return instruction outside of a function, and
-   are 100% sure that it won't affect stack traces, you can tell
-   objtool to ignore it.  See the "Adding exceptions" section below.
-
-
-3. asm_file.o: warning: objtool: func()+0x9: function has unreachable instruction
-
-   The instruction lives inside of a callable function, but there's no
-   possible control flow path from the beginning of the function to the
-   instruction.
-
-   If the instruction is actually needed, and it's actually in a
-   callable function, ensure that its function is properly annotated
-   with ENTRY/ENDPROC.
+   If you're 100% sure the code won't affect stack traces, or if you're
+   just a bad person, you can tell objtool to ignore it.  See the
+   "Adding exceptions" section below.
 
    If it's not actually in a callable function (e.g. kernel entry code),
    change ENDPROC to END.
 
 
-4. asm_file.o: warning: objtool: func(): can't find starting instruction
+4. file.o: warning: objtool: func(): can't find starting instruction
    or
-   asm_file.o: warning: objtool: func()+0x11dd: can't decode instruction
+   file.o: warning: objtool: func()+0x11dd: can't decode instruction
 
-   Did you put data in a text section?  If so, that can confuse
+   Does the file have data in a text section?  If so, that can confuse
    objtool's instruction decoder.  Move the data to a more appropriate
    section like .data or .rodata.
 
 
-5. asm_file.o: warning: objtool: func()+0x6: kernel entry/exit from callable instruction
+5. file.o: warning: objtool: func()+0x6: unsupported instruction in callable function
 
-   This is a kernel entry/exit instruction like sysenter or sysret.
-   Such instructions aren't allowed in a callable function, and are most
-   likely part of the kernel entry code.
-
-   If the instruction isn't actually in a callable function, change
-   ENDPROC to END.
+   This is a kernel entry/exit instruction like sysenter or iret.  Such
+   instructions aren't allowed in a callable function, and are most
+   likely part of the kernel entry code.  They should usually not have
+   the callable function annotation (ENDPROC) and should always be
+   annotated with the unwind hint macros in asm/unwind_hints.h.
 
 
-6. asm_file.o: warning: objtool: func()+0x26: sibling call from callable instruction with changed frame pointer
+6. file.o: warning: objtool: func()+0x26: sibling call from callable instruction with modified stack frame
 
-   This is a dynamic jump or a jump to an undefined symbol.  Stacktool
+   This is a dynamic jump or a jump to an undefined symbol.  Objtool
    assumed it's a sibling call and detected that the frame pointer
    wasn't first restored to its original state.
 
@@ -282,24 +249,28 @@
    destination code to the local file.
 
    If the instruction is not actually in a callable function (e.g.
-   kernel entry code), change ENDPROC to END.
+   kernel entry code), change ENDPROC to END and annotate manually with
+   the unwind hint macros in asm/unwind_hints.h.
 
 
-7. asm_file: warning: objtool: func()+0x5c: frame pointer state mismatch
+7. file: warning: objtool: func()+0x5c: stack state mismatch
 
    The instruction's frame pointer state is inconsistent, depending on
    which execution path was taken to reach the instruction.
 
-   Make sure the function pushes and sets up the frame pointer (for
-   x86_64, this means rbp) at the beginning of the function and pops it
-   at the end of the function.  Also make sure that no other code in the
-   function touches the frame pointer.
+   Make sure that, when CONFIG_FRAME_POINTER is enabled, the function
+   pushes and sets up the frame pointer (for x86_64, this means rbp) at
+   the beginning of the function and pops it at the end of the function.
+   Also make sure that no other code in the function touches the frame
+   pointer.
+
+   Another possibility is that the code has some asm or inline asm which
+   does some unusual things to the stack or the frame pointer.  In such
+   cases it's probably appropriate to use the unwind hint macros in
+   asm/unwind_hints.h.
 
 
-Errors in .c files
-------------------
-
-1. c_file.o: warning: objtool: funcA() falls through to next function funcB()
+8. file.o: warning: objtool: funcA() falls through to next function funcB()
 
    This means that funcA() doesn't end with a return instruction or an
    unconditional jump, and that objtool has determined that the function
@@ -318,21 +289,6 @@
       might be corrupt due to a gcc bug.  For more details, see:
       https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70646
 
-2. If you're getting any other objtool error in a compiled .c file, it
-   may be because the file uses an asm() statement which has a "call"
-   instruction.  An asm() statement with a call instruction must declare
-   the use of the stack pointer in its output operand.  On x86_64, this
-   means adding the ASM_CALL_CONSTRAINT as an output constraint:
-
-     asm volatile("call func" : ASM_CALL_CONSTRAINT);
-
-   Otherwise the stack frame may not get created before the call.
-
-3. Another possible cause for errors in C code is if the Makefile removes
-   -fno-omit-frame-pointer or adds -fomit-frame-pointer to the gcc options.
-
-Also see the above section for .S file errors for more information what
-the individual error messages mean.
 
 If the error doesn't seem to make sense, it could be a bug in objtool.
 Feel free to ask the objtool maintainer for help.
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index 041b493..e6acc28 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 include ../scripts/Makefile.include
 include ../scripts/Makefile.arch
 
@@ -6,17 +7,19 @@
 endif
 
 # always use the host compiler
-CC = gcc
-LD = ld
-AR = ar
+HOSTCC	?= gcc
+HOSTLD	?= ld
+CC	 = $(HOSTCC)
+LD	 = $(HOSTLD)
+AR	 = ar
 
 ifeq ($(srctree),)
-srctree := $(patsubst %/,%,$(dir $(shell pwd)))
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
 srctree := $(patsubst %/,%,$(dir $(srctree)))
 endif
 
 SUBCMD_SRCDIR		= $(srctree)/tools/lib/subcmd/
-LIBSUBCMD_OUTPUT	= $(if $(OUTPUT),$(OUTPUT),$(PWD)/)
+LIBSUBCMD_OUTPUT	= $(if $(OUTPUT),$(OUTPUT),$(CURDIR)/)
 LIBSUBCMD		= $(LIBSUBCMD_OUTPUT)libsubcmd.a
 
 OBJTOOL    := $(OUTPUT)objtool
@@ -24,8 +27,11 @@
 
 all: $(OBJTOOL)
 
-INCLUDES := -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi
-CFLAGS   += -Wall -Werror $(EXTRA_WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES)
+INCLUDES := -I$(srctree)/tools/include \
+	    -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
+	    -I$(srctree)/tools/objtool/arch/$(ARCH)/include
+WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
+CFLAGS   += -Wall -Werror $(WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES)
 LDFLAGS  += -lelf $(LIBSUBCMD)
 
 # Allow old libelf to be used:
@@ -39,19 +45,8 @@
 $(OBJTOOL_IN): fixdep FORCE
 	@$(MAKE) $(build)=objtool
 
-# Busybox's diff doesn't have -I, avoid warning in that case
-#
 $(OBJTOOL): $(LIBSUBCMD) $(OBJTOOL_IN)
-	@(diff -I 2>&1 | grep -q 'option requires an argument' && \
-	test -d ../../kernel -a -d ../../tools -a -d ../objtool && (( \
-	diff -I'^#include' arch/x86/insn/insn.c ../../arch/x86/lib/insn.c >/dev/null && \
-	diff -I'^#include' arch/x86/insn/inat.c ../../arch/x86/lib/inat.c >/dev/null && \
-	diff arch/x86/insn/x86-opcode-map.txt ../../arch/x86/lib/x86-opcode-map.txt >/dev/null && \
-	diff arch/x86/insn/gen-insn-attr-x86.awk ../../arch/x86/tools/gen-insn-attr-x86.awk >/dev/null && \
-	diff -I'^#include' arch/x86/insn/insn.h ../../arch/x86/include/asm/insn.h >/dev/null && \
-	diff -I'^#include' arch/x86/insn/inat.h ../../arch/x86/include/asm/inat.h >/dev/null && \
-	diff -I'^#include' arch/x86/insn/inat_types.h ../../arch/x86/include/asm/inat_types.h >/dev/null) \
-	|| echo "warning: objtool: x86 instruction decoder differs from kernel" >&2 )) || true
+	@$(CONFIG_SHELL) ./sync-check.sh
 	$(QUIET_LINK)$(CC) $(OBJTOOL_IN) $(LDFLAGS) -o $@
 
 
@@ -61,7 +56,7 @@
 clean:
 	$(call QUIET_CLEAN, objtool) $(RM) $(OBJTOOL)
 	$(Q)find $(OUTPUT) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
-	$(Q)$(RM) $(OUTPUT)arch/x86/insn/inat-tables.c $(OUTPUT)fixdep
+	$(Q)$(RM) $(OUTPUT)arch/x86/lib/inat-tables.c $(OUTPUT)fixdep
 
 FORCE:
 
diff --git a/tools/objtool/arch.h b/tools/objtool/arch.h
index f7350fc..b0d7dc3 100644
--- a/tools/objtool/arch.h
+++ b/tools/objtool/arch.h
@@ -19,26 +19,64 @@
 #define _ARCH_H
 
 #include <stdbool.h>
+#include <linux/list.h>
 #include "elf.h"
+#include "cfi.h"
 
-#define INSN_FP_SAVE		1
-#define INSN_FP_SETUP		2
-#define INSN_FP_RESTORE		3
-#define INSN_JUMP_CONDITIONAL	4
-#define INSN_JUMP_UNCONDITIONAL	5
-#define INSN_JUMP_DYNAMIC	6
-#define INSN_CALL		7
-#define INSN_CALL_DYNAMIC	8
-#define INSN_RETURN		9
-#define INSN_CONTEXT_SWITCH	10
-#define INSN_BUG		11
-#define INSN_NOP		12
-#define INSN_OTHER		13
+#define INSN_JUMP_CONDITIONAL	1
+#define INSN_JUMP_UNCONDITIONAL	2
+#define INSN_JUMP_DYNAMIC	3
+#define INSN_CALL		4
+#define INSN_CALL_DYNAMIC	5
+#define INSN_RETURN		6
+#define INSN_CONTEXT_SWITCH	7
+#define INSN_STACK		8
+#define INSN_BUG		9
+#define INSN_NOP		10
+#define INSN_OTHER		11
 #define INSN_LAST		INSN_OTHER
 
+enum op_dest_type {
+	OP_DEST_REG,
+	OP_DEST_REG_INDIRECT,
+	OP_DEST_MEM,
+	OP_DEST_PUSH,
+	OP_DEST_LEAVE,
+};
+
+struct op_dest {
+	enum op_dest_type type;
+	unsigned char reg;
+	int offset;
+};
+
+enum op_src_type {
+	OP_SRC_REG,
+	OP_SRC_REG_INDIRECT,
+	OP_SRC_CONST,
+	OP_SRC_POP,
+	OP_SRC_ADD,
+	OP_SRC_AND,
+};
+
+struct op_src {
+	enum op_src_type type;
+	unsigned char reg;
+	int offset;
+};
+
+struct stack_op {
+	struct op_dest dest;
+	struct op_src src;
+};
+
+void arch_initial_func_cfi_state(struct cfi_state *state);
+
 int arch_decode_instruction(struct elf *elf, struct section *sec,
 			    unsigned long offset, unsigned int maxlen,
 			    unsigned int *len, unsigned char *type,
-			    unsigned long *displacement);
+			    unsigned long *immediate, struct stack_op *op);
+
+bool arch_callee_saved_reg(unsigned char reg);
 
 #endif /* _ARCH_H */
diff --git a/tools/objtool/arch/x86/Build b/tools/objtool/arch/x86/Build
index debbdb0..b998412 100644
--- a/tools/objtool/arch/x86/Build
+++ b/tools/objtool/arch/x86/Build
@@ -1,12 +1,12 @@
 objtool-y += decode.o
 
-inat_tables_script = arch/x86/insn/gen-insn-attr-x86.awk
-inat_tables_maps = arch/x86/insn/x86-opcode-map.txt
+inat_tables_script = arch/x86/tools/gen-insn-attr-x86.awk
+inat_tables_maps = arch/x86/lib/x86-opcode-map.txt
 
-$(OUTPUT)arch/x86/insn/inat-tables.c: $(inat_tables_script) $(inat_tables_maps)
+$(OUTPUT)arch/x86/lib/inat-tables.c: $(inat_tables_script) $(inat_tables_maps)
 	$(call rule_mkdir)
 	$(Q)$(call echo-cmd,gen)$(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@
 
-$(OUTPUT)arch/x86/decode.o: $(OUTPUT)arch/x86/insn/inat-tables.c
+$(OUTPUT)arch/x86/decode.o: $(OUTPUT)arch/x86/lib/inat-tables.c
 
-CFLAGS_decode.o += -I$(OUTPUT)arch/x86/insn
+CFLAGS_decode.o += -I$(OUTPUT)arch/x86/lib
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 039636f..540a209 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -19,14 +19,25 @@
 #include <stdlib.h>
 
 #define unlikely(cond) (cond)
-#include "insn/insn.h"
-#include "insn/inat.c"
-#include "insn/insn.c"
+#include <asm/insn.h>
+#include "lib/inat.c"
+#include "lib/insn.c"
 
 #include "../../elf.h"
 #include "../../arch.h"
 #include "../../warn.h"
 
+static unsigned char op_to_cfi_reg[][2] = {
+	{CFI_AX, CFI_R8},
+	{CFI_CX, CFI_R9},
+	{CFI_DX, CFI_R10},
+	{CFI_BX, CFI_R11},
+	{CFI_SP, CFI_R12},
+	{CFI_BP, CFI_R13},
+	{CFI_SI, CFI_R14},
+	{CFI_DI, CFI_R15},
+};
+
 static int is_x86_64(struct elf *elf)
 {
 	switch (elf->ehdr.e_machine) {
@@ -40,24 +51,50 @@
 	}
 }
 
+bool arch_callee_saved_reg(unsigned char reg)
+{
+	switch (reg) {
+	case CFI_BP:
+	case CFI_BX:
+	case CFI_R12:
+	case CFI_R13:
+	case CFI_R14:
+	case CFI_R15:
+		return true;
+
+	case CFI_AX:
+	case CFI_CX:
+	case CFI_DX:
+	case CFI_SI:
+	case CFI_DI:
+	case CFI_SP:
+	case CFI_R8:
+	case CFI_R9:
+	case CFI_R10:
+	case CFI_R11:
+	case CFI_RA:
+	default:
+		return false;
+	}
+}
+
 int arch_decode_instruction(struct elf *elf, struct section *sec,
 			    unsigned long offset, unsigned int maxlen,
 			    unsigned int *len, unsigned char *type,
-			    unsigned long *immediate)
+			    unsigned long *immediate, struct stack_op *op)
 {
 	struct insn insn;
-	int x86_64;
-	unsigned char op1, op2, ext;
+	int x86_64, sign;
+	unsigned char op1, op2, rex = 0, rex_b = 0, rex_r = 0, rex_w = 0,
+		      rex_x = 0, modrm = 0, modrm_mod = 0, modrm_rm = 0,
+		      modrm_reg = 0, sib = 0;
 
 	x86_64 = is_x86_64(elf);
 	if (x86_64 == -1)
 		return -1;
 
-	insn_init(&insn, (void *)(sec->data + offset), maxlen, x86_64);
+	insn_init(&insn, sec->data->d_buf + offset, maxlen, x86_64);
 	insn_get_length(&insn);
-	insn_get_opcode(&insn);
-	insn_get_modrm(&insn);
-	insn_get_immediate(&insn);
 
 	if (!insn_complete(&insn)) {
 		WARN_FUNC("can't decode instruction", sec, offset);
@@ -73,70 +110,317 @@
 	op1 = insn.opcode.bytes[0];
 	op2 = insn.opcode.bytes[1];
 
+	if (insn.rex_prefix.nbytes) {
+		rex = insn.rex_prefix.bytes[0];
+		rex_w = X86_REX_W(rex) >> 3;
+		rex_r = X86_REX_R(rex) >> 2;
+		rex_x = X86_REX_X(rex) >> 1;
+		rex_b = X86_REX_B(rex);
+	}
+
+	if (insn.modrm.nbytes) {
+		modrm = insn.modrm.bytes[0];
+		modrm_mod = X86_MODRM_MOD(modrm);
+		modrm_reg = X86_MODRM_REG(modrm);
+		modrm_rm = X86_MODRM_RM(modrm);
+	}
+
+	if (insn.sib.nbytes)
+		sib = insn.sib.bytes[0];
+
 	switch (op1) {
-	case 0x55:
-		if (!insn.rex_prefix.nbytes)
-			/* push rbp */
-			*type = INSN_FP_SAVE;
+
+	case 0x1:
+	case 0x29:
+		if (rex_w && !rex_b && modrm_mod == 3 && modrm_rm == 4) {
+
+			/* add/sub reg, %rsp */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_ADD;
+			op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
+			op->dest.type = OP_DEST_REG;
+			op->dest.reg = CFI_SP;
+		}
 		break;
 
-	case 0x5d:
-		if (!insn.rex_prefix.nbytes)
-			/* pop rbp */
-			*type = INSN_FP_RESTORE;
+	case 0x50 ... 0x57:
+
+		/* push reg */
+		*type = INSN_STACK;
+		op->src.type = OP_SRC_REG;
+		op->src.reg = op_to_cfi_reg[op1 & 0x7][rex_b];
+		op->dest.type = OP_DEST_PUSH;
+
+		break;
+
+	case 0x58 ... 0x5f:
+
+		/* pop reg */
+		*type = INSN_STACK;
+		op->src.type = OP_SRC_POP;
+		op->dest.type = OP_DEST_REG;
+		op->dest.reg = op_to_cfi_reg[op1 & 0x7][rex_b];
+
+		break;
+
+	case 0x68:
+	case 0x6a:
+		/* push immediate */
+		*type = INSN_STACK;
+		op->src.type = OP_SRC_CONST;
+		op->dest.type = OP_DEST_PUSH;
 		break;
 
 	case 0x70 ... 0x7f:
 		*type = INSN_JUMP_CONDITIONAL;
 		break;
 
+	case 0x81:
+	case 0x83:
+		if (rex != 0x48)
+			break;
+
+		if (modrm == 0xe4) {
+			/* and imm, %rsp */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_AND;
+			op->src.reg = CFI_SP;
+			op->src.offset = insn.immediate.value;
+			op->dest.type = OP_DEST_REG;
+			op->dest.reg = CFI_SP;
+			break;
+		}
+
+		if (modrm == 0xc4)
+			sign = 1;
+		else if (modrm == 0xec)
+			sign = -1;
+		else
+			break;
+
+		/* add/sub imm, %rsp */
+		*type = INSN_STACK;
+		op->src.type = OP_SRC_ADD;
+		op->src.reg = CFI_SP;
+		op->src.offset = insn.immediate.value * sign;
+		op->dest.type = OP_DEST_REG;
+		op->dest.reg = CFI_SP;
+		break;
+
 	case 0x89:
-		if (insn.rex_prefix.nbytes == 1 &&
-		    insn.rex_prefix.bytes[0] == 0x48 &&
-		    insn.modrm.nbytes && insn.modrm.bytes[0] == 0xe5)
-			/* mov rsp, rbp */
-			*type = INSN_FP_SETUP;
+		if (rex_w && !rex_r && modrm_mod == 3 && modrm_reg == 4) {
+
+			/* mov %rsp, reg */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_REG;
+			op->src.reg = CFI_SP;
+			op->dest.type = OP_DEST_REG;
+			op->dest.reg = op_to_cfi_reg[modrm_rm][rex_b];
+			break;
+		}
+
+		if (rex_w && !rex_b && modrm_mod == 3 && modrm_rm == 4) {
+
+			/* mov reg, %rsp */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_REG;
+			op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
+			op->dest.type = OP_DEST_REG;
+			op->dest.reg = CFI_SP;
+			break;
+		}
+
+		/* fallthrough */
+	case 0x88:
+		if (!rex_b &&
+		    (modrm_mod == 1 || modrm_mod == 2) && modrm_rm == 5) {
+
+			/* mov reg, disp(%rbp) */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_REG;
+			op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
+			op->dest.type = OP_DEST_REG_INDIRECT;
+			op->dest.reg = CFI_BP;
+			op->dest.offset = insn.displacement.value;
+
+		} else if (rex_w && !rex_b && modrm_rm == 4 && sib == 0x24) {
+
+			/* mov reg, disp(%rsp) */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_REG;
+			op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
+			op->dest.type = OP_DEST_REG_INDIRECT;
+			op->dest.reg = CFI_SP;
+			op->dest.offset = insn.displacement.value;
+		}
+
+		break;
+
+	case 0x8b:
+		if (rex_w && !rex_b && modrm_mod == 1 && modrm_rm == 5) {
+
+			/* mov disp(%rbp), reg */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_REG_INDIRECT;
+			op->src.reg = CFI_BP;
+			op->src.offset = insn.displacement.value;
+			op->dest.type = OP_DEST_REG;
+			op->dest.reg = op_to_cfi_reg[modrm_reg][rex_r];
+
+		} else if (rex_w && !rex_b && sib == 0x24 &&
+			   modrm_mod != 3 && modrm_rm == 4) {
+
+			/* mov disp(%rsp), reg */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_REG_INDIRECT;
+			op->src.reg = CFI_SP;
+			op->src.offset = insn.displacement.value;
+			op->dest.type = OP_DEST_REG;
+			op->dest.reg = op_to_cfi_reg[modrm_reg][rex_r];
+		}
+
 		break;
 
 	case 0x8d:
-		if (insn.rex_prefix.nbytes &&
-		    insn.rex_prefix.bytes[0] == 0x48 &&
-		    insn.modrm.nbytes && insn.modrm.bytes[0] == 0x2c &&
-		    insn.sib.nbytes && insn.sib.bytes[0] == 0x24)
-			/* lea %(rsp), %rbp */
-			*type = INSN_FP_SETUP;
+		if (sib == 0x24 && rex_w && !rex_b && !rex_x) {
+
+			*type = INSN_STACK;
+			if (!insn.displacement.value) {
+				/* lea (%rsp), reg */
+				op->src.type = OP_SRC_REG;
+			} else {
+				/* lea disp(%rsp), reg */
+				op->src.type = OP_SRC_ADD;
+				op->src.offset = insn.displacement.value;
+			}
+			op->src.reg = CFI_SP;
+			op->dest.type = OP_DEST_REG;
+			op->dest.reg = op_to_cfi_reg[modrm_reg][rex_r];
+
+		} else if (rex == 0x48 && modrm == 0x65) {
+
+			/* lea disp(%rbp), %rsp */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_ADD;
+			op->src.reg = CFI_BP;
+			op->src.offset = insn.displacement.value;
+			op->dest.type = OP_DEST_REG;
+			op->dest.reg = CFI_SP;
+
+		} else if (rex == 0x49 && modrm == 0x62 &&
+			   insn.displacement.value == -8) {
+
+			/*
+			 * lea -0x8(%r10), %rsp
+			 *
+			 * Restoring rsp back to its original value after a
+			 * stack realignment.
+			 */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_ADD;
+			op->src.reg = CFI_R10;
+			op->src.offset = -8;
+			op->dest.type = OP_DEST_REG;
+			op->dest.reg = CFI_SP;
+
+		} else if (rex == 0x49 && modrm == 0x65 &&
+			   insn.displacement.value == -16) {
+
+			/*
+			 * lea -0x10(%r13), %rsp
+			 *
+			 * Restoring rsp back to its original value after a
+			 * stack realignment.
+			 */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_ADD;
+			op->src.reg = CFI_R13;
+			op->src.offset = -16;
+			op->dest.type = OP_DEST_REG;
+			op->dest.reg = CFI_SP;
+		}
+
+		break;
+
+	case 0x8f:
+		/* pop to mem */
+		*type = INSN_STACK;
+		op->src.type = OP_SRC_POP;
+		op->dest.type = OP_DEST_MEM;
 		break;
 
 	case 0x90:
 		*type = INSN_NOP;
 		break;
 
+	case 0x9c:
+		/* pushf */
+		*type = INSN_STACK;
+		op->src.type = OP_SRC_CONST;
+		op->dest.type = OP_DEST_PUSH;
+		break;
+
+	case 0x9d:
+		/* popf */
+		*type = INSN_STACK;
+		op->src.type = OP_SRC_POP;
+		op->dest.type = OP_DEST_MEM;
+		break;
+
 	case 0x0f:
-		if (op2 >= 0x80 && op2 <= 0x8f)
+
+		if (op2 >= 0x80 && op2 <= 0x8f) {
+
 			*type = INSN_JUMP_CONDITIONAL;
-		else if (op2 == 0x05 || op2 == 0x07 || op2 == 0x34 ||
-			 op2 == 0x35)
+
+		} else if (op2 == 0x05 || op2 == 0x07 || op2 == 0x34 ||
+			   op2 == 0x35) {
+
 			/* sysenter, sysret */
 			*type = INSN_CONTEXT_SWITCH;
-		else if (op2 == 0x0b || op2 == 0xb9)
+
+		} else if (op2 == 0x0b || op2 == 0xb9) {
+
 			/* ud2 */
 			*type = INSN_BUG;
-		else if (op2 == 0x0d || op2 == 0x1f)
+
+		} else if (op2 == 0x0d || op2 == 0x1f) {
+
 			/* nopl/nopw */
 			*type = INSN_NOP;
-		else if (op2 == 0x01 && insn.modrm.nbytes &&
-			 (insn.modrm.bytes[0] == 0xc2 ||
-			  insn.modrm.bytes[0] == 0xd8))
-			/* vmlaunch, vmrun */
-			*type = INSN_CONTEXT_SWITCH;
+
+		} else if (op2 == 0xa0 || op2 == 0xa8) {
+
+			/* push fs/gs */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_CONST;
+			op->dest.type = OP_DEST_PUSH;
+
+		} else if (op2 == 0xa1 || op2 == 0xa9) {
+
+			/* pop fs/gs */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_POP;
+			op->dest.type = OP_DEST_MEM;
+		}
 
 		break;
 
-	case 0xc9: /* leave */
-		*type = INSN_FP_RESTORE;
+	case 0xc9:
+		/*
+		 * leave
+		 *
+		 * equivalent to:
+		 * mov bp, sp
+		 * pop bp
+		 */
+		*type = INSN_STACK;
+		op->dest.type = OP_DEST_LEAVE;
+
 		break;
 
-	case 0xe3: /* jecxz/jrcxz */
+	case 0xe3:
+		/* jecxz/jrcxz */
 		*type = INSN_JUMP_CONDITIONAL;
 		break;
 
@@ -161,14 +445,27 @@
 		break;
 
 	case 0xff:
-		ext = X86_MODRM_REG(insn.modrm.bytes[0]);
-		if (ext == 2 || ext == 3)
+		if (modrm_reg == 2 || modrm_reg == 3)
+
 			*type = INSN_CALL_DYNAMIC;
-		else if (ext == 4)
+
+		else if (modrm_reg == 4)
+
 			*type = INSN_JUMP_DYNAMIC;
-		else if (ext == 5) /*jmpf */
+
+		else if (modrm_reg == 5)
+
+			/* jmpf */
 			*type = INSN_CONTEXT_SWITCH;
 
+		else if (modrm_reg == 6) {
+
+			/* push from mem */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_CONST;
+			op->dest.type = OP_DEST_PUSH;
+		}
+
 		break;
 
 	default:
@@ -179,3 +476,21 @@
 
 	return 0;
 }
+
+void arch_initial_func_cfi_state(struct cfi_state *state)
+{
+	int i;
+
+	for (i = 0; i < CFI_NUM_REGS; i++) {
+		state->regs[i].base = CFI_UNDEFINED;
+		state->regs[i].offset = 0;
+	}
+
+	/* initial CFA (call frame address) */
+	state->cfa.base = CFI_SP;
+	state->cfa.offset = 8;
+
+	/* initial RA (return address) */
+	state->regs[16].base = CFI_CFA;
+	state->regs[16].offset = -8;
+}
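Editorial note: as a worked example, not part of the patch, of the field tests the new decoder cases rely on: the bytes 48 89 e5, i.e. "mov %rsp, %rbp", split into REX and ModRM fields as shown below. The helper macros here merely mirror the X86_REX_*/X86_MODRM_* definitions that decode.c gets from <asm/insn.h>.

#include <stdio.h>

#define MODRM_MOD(m)	(((m) & 0xc0) >> 6)
#define MODRM_REG(m)	(((m) & 0x38) >> 3)
#define MODRM_RM(m)	((m) & 0x07)
#define REX_W(r)	(((r) & 0x08) >> 3)

int main(void)
{
	unsigned char rex = 0x48, opcode = 0x89, modrm = 0xe5;

	/* opcode 0x89 with rex.w set, mod=3, reg=4 (rsp): the "mov %rsp, reg"
	 * case above, which records src = CFI_SP and
	 * dest = op_to_cfi_reg[5][0] = CFI_BP */
	printf("op=0x%02x rex.w=%u mod=%u reg=%u rm=%u\n",
	       opcode, REX_W(rex), MODRM_MOD(modrm),
	       MODRM_REG(modrm), MODRM_RM(modrm));
	return 0;
}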
diff --git a/tools/objtool/arch/x86/insn/inat.h b/tools/objtool/arch/x86/include/asm/inat.h
similarity index 99%
rename from tools/objtool/arch/x86/insn/inat.h
rename to tools/objtool/arch/x86/include/asm/inat.h
index 125ecd2..02aff08 100644
--- a/tools/objtool/arch/x86/insn/inat.h
+++ b/tools/objtool/arch/x86/include/asm/inat.h
@@ -20,7 +20,7 @@
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  *
  */
-#include "inat_types.h"
+#include <asm/inat_types.h>
 
 /*
  * Internal bits. Don't use bitmasks directly, because these bits are
diff --git a/tools/objtool/arch/x86/insn/inat_types.h b/tools/objtool/arch/x86/include/asm/inat_types.h
similarity index 100%
rename from tools/objtool/arch/x86/insn/inat_types.h
rename to tools/objtool/arch/x86/include/asm/inat_types.h
diff --git a/tools/objtool/arch/x86/insn/insn.h b/tools/objtool/arch/x86/include/asm/insn.h
similarity index 99%
rename from tools/objtool/arch/x86/insn/insn.h
rename to tools/objtool/arch/x86/include/asm/insn.h
index e23578c..b3e32b0 100644
--- a/tools/objtool/arch/x86/insn/insn.h
+++ b/tools/objtool/arch/x86/include/asm/insn.h
@@ -21,7 +21,7 @@
  */
 
 /* insn_attr_t is defined in inat.h */
-#include "inat.h"
+#include <asm/inat.h>
 
 struct insn_field {
 	union {
diff --git a/tools/objtool/arch/x86/include/asm/orc_types.h b/tools/objtool/arch/x86/include/asm/orc_types.h
new file mode 100644
index 0000000..7dc777a
--- /dev/null
+++ b/tools/objtool/arch/x86/include/asm/orc_types.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ORC_TYPES_H
+#define _ORC_TYPES_H
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+/*
+ * The ORC_REG_* registers are base registers which are used to find other
+ * registers on the stack.
+ *
+ * ORC_REG_PREV_SP, also known as DWARF Call Frame Address (CFA), is the
+ * address of the previous frame: the caller's SP before it called the current
+ * function.
+ *
+ * ORC_REG_UNDEFINED means the corresponding register's value didn't change in
+ * the current frame.
+ *
+ * The most commonly used base registers are SP and BP -- which the previous SP
+ * is usually based on -- and PREV_SP and UNDEFINED -- which the previous BP is
+ * usually based on.
+ *
+ * The rest of the base registers are needed for special cases like entry code
+ * and GCC realigned stacks.
+ */
+#define ORC_REG_UNDEFINED		0
+#define ORC_REG_PREV_SP			1
+#define ORC_REG_DX			2
+#define ORC_REG_DI			3
+#define ORC_REG_BP			4
+#define ORC_REG_SP			5
+#define ORC_REG_R10			6
+#define ORC_REG_R13			7
+#define ORC_REG_BP_INDIRECT		8
+#define ORC_REG_SP_INDIRECT		9
+#define ORC_REG_MAX			15
+
+/*
+ * ORC_TYPE_CALL: Indicates that sp_reg+sp_offset resolves to PREV_SP (the
+ * caller's SP right before it made the call).  Used for all callable
+ * functions, i.e. all C code and all callable asm functions.
+ *
+ * ORC_TYPE_REGS: Used in entry code to indicate that sp_reg+sp_offset points
+ * to a fully populated pt_regs from a syscall, interrupt, or exception.
+ *
+ * ORC_TYPE_REGS_IRET: Used in entry code to indicate that sp_reg+sp_offset
+ * points to the iret return frame.
+ *
+ * The UNWIND_HINT macros are used only for the unwind_hint struct.  They
+ * aren't used in struct orc_entry due to size and complexity constraints.
+ * Objtool converts them to real types when it converts the hints to orc
+ * entries.
+ */
+#define ORC_TYPE_CALL			0
+#define ORC_TYPE_REGS			1
+#define ORC_TYPE_REGS_IRET		2
+#define UNWIND_HINT_TYPE_SAVE		3
+#define UNWIND_HINT_TYPE_RESTORE	4
+
+#ifndef __ASSEMBLY__
+/*
+ * This struct is more or less a vastly simplified version of the DWARF Call
+ * Frame Information standard.  It contains only the necessary parts of DWARF
+ * CFI, simplified for ease of access by the in-kernel unwinder.  It tells the
+ * unwinder how to find the previous SP and BP (and sometimes entry regs) on
+ * the stack for a given code address.  Each instance of the struct corresponds
+ * to one or more code locations.
+ */
+struct orc_entry {
+	s16		sp_offset;
+	s16		bp_offset;
+	unsigned	sp_reg:4;
+	unsigned	bp_reg:4;
+	unsigned	type:2;
+};
+
+/*
+ * This struct is used by asm and inline asm code to manually annotate the
+ * location of registers on the stack for the ORC unwinder.
+ *
+ * Type can be either ORC_TYPE_* or UNWIND_HINT_TYPE_*.
+ */
+struct unwind_hint {
+	u32		ip;
+	s16		sp_offset;
+	u8		sp_reg;
+	u8		type;
+};
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ORC_TYPES_H */
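Editorial note: purely for illustration, since real entries are generated by objtool rather than written by hand, an orc_entry describing code that runs after a classic "push %rbp; mov %rsp, %rbp" prologue would look roughly like the sketch below. It assumes the header added above (and the tools/ include paths it relies on) is available to the compiler.

#include <stdio.h>
#include <asm/orc_types.h>

/* Hypothetical entry: previous SP (the CFA) is %rbp + 16, and the caller's
 * %rbp was pushed just below the CFA. */
static const struct orc_entry frame_pointer_orc = {
	.sp_reg		= ORC_REG_BP,		/* CFA = %rbp + 16 */
	.sp_offset	= 16,
	.bp_reg		= ORC_REG_PREV_SP,	/* prev %rbp saved at CFA - 16 */
	.bp_offset	= -16,
	.type		= ORC_TYPE_CALL,
};

int main(void)
{
	printf("CFA = reg %d + %d, prev bp at CFA%+d\n",
	       (int)frame_pointer_orc.sp_reg,
	       (int)frame_pointer_orc.sp_offset,
	       (int)frame_pointer_orc.bp_offset);
	return 0;
}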
diff --git a/tools/objtool/arch/x86/insn/inat.c b/tools/objtool/arch/x86/lib/inat.c
similarity index 98%
rename from tools/objtool/arch/x86/insn/inat.c
rename to tools/objtool/arch/x86/lib/inat.c
index e4bf28e..c1f01a8 100644
--- a/tools/objtool/arch/x86/insn/inat.c
+++ b/tools/objtool/arch/x86/lib/inat.c
@@ -18,7 +18,7 @@
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  *
  */
-#include "insn.h"
+#include <asm/insn.h>
 
 /* Attribute tables are generated from opcode map */
 #include "inat-tables.c"
diff --git a/tools/objtool/arch/x86/insn/insn.c b/tools/objtool/arch/x86/lib/insn.c
similarity index 99%
rename from tools/objtool/arch/x86/insn/insn.c
rename to tools/objtool/arch/x86/lib/insn.c
index ca983e2..1088eb8 100644
--- a/tools/objtool/arch/x86/insn/insn.c
+++ b/tools/objtool/arch/x86/lib/insn.c
@@ -23,8 +23,8 @@
 #else
 #include <string.h>
 #endif
-#include "inat.h"
-#include "insn.h"
+#include <asm/inat.h>
+#include <asm/insn.h>
 
 /* Verify next sizeof(t) bytes can be on the same instruction */
 #define validate_next(t, insn, n)	\
diff --git a/tools/objtool/arch/x86/insn/x86-opcode-map.txt b/tools/objtool/arch/x86/lib/x86-opcode-map.txt
similarity index 100%
rename from tools/objtool/arch/x86/insn/x86-opcode-map.txt
rename to tools/objtool/arch/x86/lib/x86-opcode-map.txt
diff --git a/tools/objtool/arch/x86/insn/gen-insn-attr-x86.awk b/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk
similarity index 100%
rename from tools/objtool/arch/x86/insn/gen-insn-attr-x86.awk
rename to tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index a688a85..694abc6 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
+ * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -25,1300 +25,35 @@
  * For more information, see tools/objtool/Documentation/stack-validation.txt.
  */
 
-#include <string.h>
-#include <stdlib.h>
 #include <subcmd/parse-options.h>
-
 #include "builtin.h"
-#include "elf.h"
-#include "special.h"
-#include "arch.h"
-#include "warn.h"
+#include "check.h"
 
-#include <linux/hashtable.h>
+bool no_fp, no_unreachable, retpoline, module;
 
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-
-#define STATE_FP_SAVED		0x1
-#define STATE_FP_SETUP		0x2
-#define STATE_FENTRY		0x4
-
-struct instruction {
-	struct list_head list;
-	struct hlist_node hash;
-	struct section *sec;
-	unsigned long offset;
-	unsigned int len, state;
-	unsigned char type;
-	unsigned long immediate;
-	bool alt_group, visited, ignore_alts;
-	struct symbol *call_dest;
-	struct instruction *jump_dest;
-	struct list_head alts;
-	struct symbol *func;
-};
-
-struct alternative {
-	struct list_head list;
-	struct instruction *insn;
-};
-
-struct objtool_file {
-	struct elf *elf;
-	struct list_head insn_list;
-	DECLARE_HASHTABLE(insn_hash, 16);
-	struct section *rodata, *whitelist;
-	bool ignore_unreachables, c_file;
-};
-
-const char *objname;
-static bool nofp;
-
-static struct instruction *find_insn(struct objtool_file *file,
-				     struct section *sec, unsigned long offset)
-{
-	struct instruction *insn;
-
-	hash_for_each_possible(file->insn_hash, insn, hash, offset)
-		if (insn->sec == sec && insn->offset == offset)
-			return insn;
-
-	return NULL;
-}
-
-static struct instruction *next_insn_same_sec(struct objtool_file *file,
-					      struct instruction *insn)
-{
-	struct instruction *next = list_next_entry(insn, list);
-
-	if (&next->list == &file->insn_list || next->sec != insn->sec)
-		return NULL;
-
-	return next;
-}
-
-static bool gcov_enabled(struct objtool_file *file)
-{
-	struct section *sec;
-	struct symbol *sym;
-
-	list_for_each_entry(sec, &file->elf->sections, list)
-		list_for_each_entry(sym, &sec->symbol_list, list)
-			if (!strncmp(sym->name, "__gcov_.", 8))
-				return true;
-
-	return false;
-}
-
-#define for_each_insn(file, insn)					\
-	list_for_each_entry(insn, &file->insn_list, list)
-
-#define func_for_each_insn(file, func, insn)				\
-	for (insn = find_insn(file, func->sec, func->offset);		\
-	     insn && &insn->list != &file->insn_list &&			\
-		insn->sec == func->sec &&				\
-		insn->offset < func->offset + func->len;		\
-	     insn = list_next_entry(insn, list))
-
-#define func_for_each_insn_continue_reverse(file, func, insn)		\
-	for (insn = list_prev_entry(insn, list);			\
-	     &insn->list != &file->insn_list &&				\
-		insn->sec == func->sec && insn->offset >= func->offset;	\
-	     insn = list_prev_entry(insn, list))
-
-#define sec_for_each_insn_from(file, insn)				\
-	for (; insn; insn = next_insn_same_sec(file, insn))
-
-
-/*
- * Check if the function has been manually whitelisted with the
- * STACK_FRAME_NON_STANDARD macro, or if it should be automatically whitelisted
- * due to its use of a context switching instruction.
- */
-static bool ignore_func(struct objtool_file *file, struct symbol *func)
-{
-	struct rela *rela;
-	struct instruction *insn;
-
-	/* check for STACK_FRAME_NON_STANDARD */
-	if (file->whitelist && file->whitelist->rela)
-		list_for_each_entry(rela, &file->whitelist->rela->rela_list, list) {
-			if (rela->sym->type == STT_SECTION &&
-			    rela->sym->sec == func->sec &&
-			    rela->addend == func->offset)
-				return true;
-			if (rela->sym->type == STT_FUNC && rela->sym == func)
-				return true;
-		}
-
-	/* check if it has a context switching instruction */
-	func_for_each_insn(file, func, insn)
-		if (insn->type == INSN_CONTEXT_SWITCH)
-			return true;
-
-	return false;
-}
-
-/*
- * This checks to see if the given function is a "noreturn" function.
- *
- * For global functions which are outside the scope of this object file, we
- * have to keep a manual list of them.
- *
- * For local functions, we have to detect them manually by simply looking for
- * the lack of a return instruction.
- *
- * Returns:
- *  -1: error
- *   0: no dead end
- *   1: dead end
- */
-static int __dead_end_function(struct objtool_file *file, struct symbol *func,
-			       int recursion)
-{
-	int i;
-	struct instruction *insn;
-	bool empty = true;
-
-	/*
-	 * Unfortunately these have to be hard coded because the noreturn
-	 * attribute isn't provided in ELF data.
-	 */
-	static const char * const global_noreturns[] = {
-		"__stack_chk_fail",
-		"panic",
-		"do_exit",
-		"do_task_dead",
-		"__module_put_and_exit",
-		"complete_and_exit",
-		"kvm_spurious_fault",
-		"__reiserfs_panic",
-		"lbug_with_loc"
-	};
-
-	if (func->bind == STB_WEAK)
-		return 0;
-
-	if (func->bind == STB_GLOBAL)
-		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
-			if (!strcmp(func->name, global_noreturns[i]))
-				return 1;
-
-	if (!func->sec)
-		return 0;
-
-	func_for_each_insn(file, func, insn) {
-		empty = false;
-
-		if (insn->type == INSN_RETURN)
-			return 0;
-	}
-
-	if (empty)
-		return 0;
-
-	/*
-	 * A function can have a sibling call instead of a return.  In that
-	 * case, the function's dead-end status depends on whether the target
-	 * of the sibling call returns.
-	 */
-	func_for_each_insn(file, func, insn) {
-		if (insn->sec != func->sec ||
-		    insn->offset >= func->offset + func->len)
-			break;
-
-		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
-			struct instruction *dest = insn->jump_dest;
-			struct symbol *dest_func;
-
-			if (!dest)
-				/* sibling call to another file */
-				return 0;
-
-			if (dest->sec != func->sec ||
-			    dest->offset < func->offset ||
-			    dest->offset >= func->offset + func->len) {
-				/* local sibling call */
-				dest_func = find_symbol_by_offset(dest->sec,
-								  dest->offset);
-				if (!dest_func)
-					continue;
-
-				if (recursion == 5) {
-					WARN_FUNC("infinite recursion (objtool bug!)",
-						  dest->sec, dest->offset);
-					return -1;
-				}
-
-				return __dead_end_function(file, dest_func,
-							   recursion + 1);
-			}
-		}
-
-		if (insn->type == INSN_JUMP_DYNAMIC && list_empty(&insn->alts))
-			/* sibling call */
-			return 0;
-	}
-
-	return 1;
-}
-
-static int dead_end_function(struct objtool_file *file, struct symbol *func)
-{
-	return __dead_end_function(file, func, 0);
-}
-
-/*
- * Call the arch-specific instruction decoder for all the instructions and add
- * them to the global instruction list.
- */
-static int decode_instructions(struct objtool_file *file)
-{
-	struct section *sec;
-	struct symbol *func;
-	unsigned long offset;
-	struct instruction *insn;
-	int ret;
-
-	list_for_each_entry(sec, &file->elf->sections, list) {
-
-		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
-			continue;
-
-		for (offset = 0; offset < sec->len; offset += insn->len) {
-			insn = malloc(sizeof(*insn));
-			memset(insn, 0, sizeof(*insn));
-
-			INIT_LIST_HEAD(&insn->alts);
-			insn->sec = sec;
-			insn->offset = offset;
-
-			ret = arch_decode_instruction(file->elf, sec, offset,
-						      sec->len - offset,
-						      &insn->len, &insn->type,
-						      &insn->immediate);
-			if (ret)
-				return ret;
-
-			if (!insn->type || insn->type > INSN_LAST) {
-				WARN_FUNC("invalid instruction type %d",
-					  insn->sec, insn->offset, insn->type);
-				return -1;
-			}
-
-			hash_add(file->insn_hash, &insn->hash, insn->offset);
-			list_add_tail(&insn->list, &file->insn_list);
-		}
-
-		list_for_each_entry(func, &sec->symbol_list, list) {
-			if (func->type != STT_FUNC)
-				continue;
-
-			if (!find_insn(file, sec, func->offset)) {
-				WARN("%s(): can't find starting instruction",
-				     func->name);
-				return -1;
-			}
-
-			func_for_each_insn(file, func, insn)
-				if (!insn->func)
-					insn->func = func;
-		}
-	}
-
-	return 0;
-}
-
-/*
- * Warnings shouldn't be reported for ignored functions.
- */
-static void add_ignores(struct objtool_file *file)
-{
-	struct instruction *insn;
-	struct section *sec;
-	struct symbol *func;
-
-	list_for_each_entry(sec, &file->elf->sections, list) {
-		list_for_each_entry(func, &sec->symbol_list, list) {
-			if (func->type != STT_FUNC)
-				continue;
-
-			if (!ignore_func(file, func))
-				continue;
-
-			func_for_each_insn(file, func, insn)
-				insn->visited = true;
-		}
-	}
-}
-
-/*
- * FIXME: For now, just ignore any alternatives which add retpolines.  This is
- * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
- * But it at least allows objtool to understand the control flow *around* the
- * retpoline.
- */
-static int add_nospec_ignores(struct objtool_file *file)
-{
-	struct section *sec;
-	struct rela *rela;
-	struct instruction *insn;
-
-	sec = find_section_by_name(file->elf, ".rela.discard.nospec");
-	if (!sec)
-		return 0;
-
-	list_for_each_entry(rela, &sec->rela_list, list) {
-		if (rela->sym->type != STT_SECTION) {
-			WARN("unexpected relocation symbol type in %s", sec->name);
-			return -1;
-		}
-
-		insn = find_insn(file, rela->sym->sec, rela->addend);
-		if (!insn) {
-			WARN("bad .discard.nospec entry");
-			return -1;
-		}
-
-		insn->ignore_alts = true;
-	}
-
-	return 0;
-}
-
-/*
- * Find the destination instructions for all jumps.
- */
-static int add_jump_destinations(struct objtool_file *file)
-{
-	struct instruction *insn;
-	struct rela *rela;
-	struct section *dest_sec;
-	unsigned long dest_off;
-
-	for_each_insn(file, insn) {
-		if (insn->type != INSN_JUMP_CONDITIONAL &&
-		    insn->type != INSN_JUMP_UNCONDITIONAL)
-			continue;
-
-		/* skip ignores */
-		if (insn->visited)
-			continue;
-
-		rela = find_rela_by_dest_range(insn->sec, insn->offset,
-					       insn->len);
-		if (!rela) {
-			dest_sec = insn->sec;
-			dest_off = insn->offset + insn->len + insn->immediate;
-		} else if (rela->sym->type == STT_SECTION) {
-			dest_sec = rela->sym->sec;
-			dest_off = rela->addend + 4;
-		} else if (rela->sym->sec->idx) {
-			dest_sec = rela->sym->sec;
-			dest_off = rela->sym->sym.st_value + rela->addend + 4;
-		} else if (strstr(rela->sym->name, "_indirect_thunk_")) {
-			/*
-			 * Retpoline jumps are really dynamic jumps in
-			 * disguise, so convert them accordingly.
-			 */
-			insn->type = INSN_JUMP_DYNAMIC;
-			continue;
-		} else {
-			/* sibling call */
-			insn->jump_dest = 0;
-			continue;
-		}
-
-		insn->jump_dest = find_insn(file, dest_sec, dest_off);
-		if (!insn->jump_dest) {
-
-			/*
-			 * This is a special case where an alt instruction
-			 * jumps past the end of the section.  These are
-			 * handled later in handle_group_alt().
-			 */
-			if (!strcmp(insn->sec->name, ".altinstr_replacement"))
-				continue;
-
-			WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
-				  insn->sec, insn->offset, dest_sec->name,
-				  dest_off);
-			return -1;
-		}
-	}
-
-	return 0;
-}
-
-/*
- * Find the destination instructions for all calls.
- */
-static int add_call_destinations(struct objtool_file *file)
-{
-	struct instruction *insn;
-	unsigned long dest_off;
-	struct rela *rela;
-
-	for_each_insn(file, insn) {
-		if (insn->type != INSN_CALL)
-			continue;
-
-		rela = find_rela_by_dest_range(insn->sec, insn->offset,
-					       insn->len);
-		if (!rela) {
-			dest_off = insn->offset + insn->len + insn->immediate;
-			insn->call_dest = find_symbol_by_offset(insn->sec,
-								dest_off);
-			/*
-			 * FIXME: Thanks to retpolines, it's now considered
-			 * normal for a function to call within itself.  So
-			 * disable this warning for now.
-			 */
-#if 0
-			if (!insn->call_dest) {
-				WARN_FUNC("can't find call dest symbol at offset 0x%lx",
-					  insn->sec, insn->offset, dest_off);
-				return -1;
-			}
-#endif
-		} else if (rela->sym->type == STT_SECTION) {
-			insn->call_dest = find_symbol_by_offset(rela->sym->sec,
-								rela->addend+4);
-			if (!insn->call_dest ||
-			    insn->call_dest->type != STT_FUNC) {
-				WARN_FUNC("can't find call dest symbol at %s+0x%x",
-					  insn->sec, insn->offset,
-					  rela->sym->sec->name,
-					  rela->addend + 4);
-				return -1;
-			}
-		} else
-			insn->call_dest = rela->sym;
-	}
-
-	return 0;
-}
-
-/*
- * The .alternatives section requires some extra special care, over and above
- * what other special sections require:
- *
- * 1. Because alternatives are patched in-place, we need to insert a fake jump
- *    instruction at the end so that validate_branch() skips all the original
- *    replaced instructions when validating the new instruction path.
- *
- * 2. An added wrinkle is that the new instruction length might be zero.  In
- *    that case the old instructions are replaced with noops.  We simulate that
- *    by creating a fake jump as the only new instruction.
- *
- * 3. In some cases, the alternative section includes an instruction which
- *    conditionally jumps to the _end_ of the entry.  We have to modify these
- *    jumps' destinations to point back to .text rather than the end of the
- *    entry in .altinstr_replacement.
- *
- * 4. It has been requested that we don't validate the !POPCNT feature path
- *    which is a "very very small percentage of machines".
- */
-static int handle_group_alt(struct objtool_file *file,
-			    struct special_alt *special_alt,
-			    struct instruction *orig_insn,
-			    struct instruction **new_insn)
-{
-	struct instruction *last_orig_insn, *last_new_insn, *insn, *fake_jump;
-	unsigned long dest_off;
-
-	last_orig_insn = NULL;
-	insn = orig_insn;
-	sec_for_each_insn_from(file, insn) {
-		if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
-			break;
-
-		if (special_alt->skip_orig)
-			insn->type = INSN_NOP;
-
-		insn->alt_group = true;
-		last_orig_insn = insn;
-	}
-
-	if (!next_insn_same_sec(file, last_orig_insn)) {
-		WARN("%s: don't know how to handle alternatives at end of section",
-		     special_alt->orig_sec->name);
-		return -1;
-	}
-
-	fake_jump = malloc(sizeof(*fake_jump));
-	if (!fake_jump) {
-		WARN("malloc failed");
-		return -1;
-	}
-	memset(fake_jump, 0, sizeof(*fake_jump));
-	INIT_LIST_HEAD(&fake_jump->alts);
-	fake_jump->sec = special_alt->new_sec;
-	fake_jump->offset = -1;
-	fake_jump->type = INSN_JUMP_UNCONDITIONAL;
-	fake_jump->jump_dest = list_next_entry(last_orig_insn, list);
-
-	if (!special_alt->new_len) {
-		*new_insn = fake_jump;
-		return 0;
-	}
-
-	last_new_insn = NULL;
-	insn = *new_insn;
-	sec_for_each_insn_from(file, insn) {
-		if (insn->offset >= special_alt->new_off + special_alt->new_len)
-			break;
-
-		last_new_insn = insn;
-
-		if (insn->type != INSN_JUMP_CONDITIONAL &&
-		    insn->type != INSN_JUMP_UNCONDITIONAL)
-			continue;
-
-		if (!insn->immediate)
-			continue;
-
-		dest_off = insn->offset + insn->len + insn->immediate;
-		if (dest_off == special_alt->new_off + special_alt->new_len)
-			insn->jump_dest = fake_jump;
-
-		if (!insn->jump_dest) {
-			WARN_FUNC("can't find alternative jump destination",
-				  insn->sec, insn->offset);
-			return -1;
-		}
-	}
-
-	if (!last_new_insn) {
-		WARN_FUNC("can't find last new alternative instruction",
-			  special_alt->new_sec, special_alt->new_off);
-		return -1;
-	}
-
-	list_add(&fake_jump->list, &last_new_insn->list);
-
-	return 0;
-}
-
-/*
- * A jump table entry can either convert a nop to a jump or a jump to a nop.
- * If the original instruction is a jump, make the alt entry an effective nop
- * by just skipping the original instruction.
- */
-static int handle_jump_alt(struct objtool_file *file,
-			   struct special_alt *special_alt,
-			   struct instruction *orig_insn,
-			   struct instruction **new_insn)
-{
-	if (orig_insn->type == INSN_NOP)
-		return 0;
-
-	if (orig_insn->type != INSN_JUMP_UNCONDITIONAL) {
-		WARN_FUNC("unsupported instruction at jump label",
-			  orig_insn->sec, orig_insn->offset);
-		return -1;
-	}
-
-	*new_insn = list_next_entry(orig_insn, list);
-	return 0;
-}
-
-/*
- * Read all the special sections which have alternate instructions which can be
- * patched in or redirected to at runtime.  Each instruction having alternate
- * instruction(s) has them added to its insn->alts list, which will be
- * traversed in validate_branch().
- */
-static int add_special_section_alts(struct objtool_file *file)
-{
-	struct list_head special_alts;
-	struct instruction *orig_insn, *new_insn;
-	struct special_alt *special_alt, *tmp;
-	struct alternative *alt;
-	int ret;
-
-	ret = special_get_alts(file->elf, &special_alts);
-	if (ret)
-		return ret;
-
-	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
-
-		orig_insn = find_insn(file, special_alt->orig_sec,
-				      special_alt->orig_off);
-		if (!orig_insn) {
-			WARN_FUNC("special: can't find orig instruction",
-				  special_alt->orig_sec, special_alt->orig_off);
-			ret = -1;
-			goto out;
-		}
-
-		/* Ignore retpoline alternatives. */
-		if (orig_insn->ignore_alts)
-			continue;
-
-		new_insn = NULL;
-		if (!special_alt->group || special_alt->new_len) {
-			new_insn = find_insn(file, special_alt->new_sec,
-					     special_alt->new_off);
-			if (!new_insn) {
-				WARN_FUNC("special: can't find new instruction",
-					  special_alt->new_sec,
-					  special_alt->new_off);
-				ret = -1;
-				goto out;
-			}
-		}
-
-		if (special_alt->group) {
-			ret = handle_group_alt(file, special_alt, orig_insn,
-					       &new_insn);
-			if (ret)
-				goto out;
-		} else if (special_alt->jump_or_nop) {
-			ret = handle_jump_alt(file, special_alt, orig_insn,
-					      &new_insn);
-			if (ret)
-				goto out;
-		}
-
-		alt = malloc(sizeof(*alt));
-		if (!alt) {
-			WARN("malloc failed");
-			ret = -1;
-			goto out;
-		}
-
-		alt->insn = new_insn;
-		list_add_tail(&alt->list, &orig_insn->alts);
-
-		list_del(&special_alt->list);
-		free(special_alt);
-	}
-
-out:
-	return ret;
-}
-
-static int add_switch_table(struct objtool_file *file, struct symbol *func,
-			    struct instruction *insn, struct rela *table,
-			    struct rela *next_table)
-{
-	struct rela *rela = table;
-	struct instruction *alt_insn;
-	struct alternative *alt;
-
-	list_for_each_entry_from(rela, &file->rodata->rela->rela_list, list) {
-		if (rela == next_table)
-			break;
-
-		if (rela->sym->sec != insn->sec ||
-		    rela->addend <= func->offset ||
-		    rela->addend >= func->offset + func->len)
-			break;
-
-		alt_insn = find_insn(file, insn->sec, rela->addend);
-		if (!alt_insn) {
-			WARN("%s: can't find instruction at %s+0x%x",
-			     file->rodata->rela->name, insn->sec->name,
-			     rela->addend);
-			return -1;
-		}
-
-		alt = malloc(sizeof(*alt));
-		if (!alt) {
-			WARN("malloc failed");
-			return -1;
-		}
-
-		alt->insn = alt_insn;
-		list_add_tail(&alt->list, &insn->alts);
-	}
-
-	return 0;
-}
-
-/*
- * find_switch_table() - Given a dynamic jump, find the switch jump table in
- * .rodata associated with it.
- *
- * There are 3 basic patterns:
- *
- * 1. jmpq *[rodata addr](,%reg,8)
- *
- *    This is the most common case by far.  It jumps to an address in a simple
- *    jump table which is stored in .rodata.
- *
- * 2. jmpq *[rodata addr](%rip)
- *
- *    This is caused by a rare GCC quirk, currently only seen in three driver
- *    functions in the kernel, only with certain obscure non-distro configs.
- *
- *    As part of an optimization, GCC makes a copy of an existing switch jump
- *    table, modifies it, and then hard-codes the jump (albeit with an indirect
- *    jump) to use a single entry in the table.  The rest of the jump table and
- *    some of its jump targets remain as dead code.
- *
- *    In such a case we can just crudely ignore all unreachable instruction
- *    warnings for the entire object file.  Ideally we would just ignore them
- *    for the function, but that would require redesigning the code quite a
- *    bit.  And honestly that's just not worth doing: unreachable instruction
- *    warnings are of questionable value anyway, and this is such a rare issue.
- *
- * 3. mov [rodata addr],%reg1
- *    ... some instructions ...
- *    jmpq *(%reg1,%reg2,8)
- *
- *    This is a fairly uncommon pattern which is new for GCC 6.  As of this
- *    writing, there are 11 occurrences of it in the allmodconfig kernel.
- *
- *    TODO: Once we have DWARF CFI and smarter instruction decoding logic,
- *    ensure the same register is used in the mov and jump instructions.
- */
-static struct rela *find_switch_table(struct objtool_file *file,
-				      struct symbol *func,
-				      struct instruction *insn)
-{
-	struct rela *text_rela, *rodata_rela;
-	struct instruction *orig_insn = insn;
-
-	text_rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len);
-	if (text_rela && text_rela->sym == file->rodata->sym) {
-		/* case 1 */
-		rodata_rela = find_rela_by_dest(file->rodata,
-						text_rela->addend);
-		if (rodata_rela)
-			return rodata_rela;
-
-		/* case 2 */
-		rodata_rela = find_rela_by_dest(file->rodata,
-						text_rela->addend + 4);
-		if (!rodata_rela)
-			return NULL;
-		file->ignore_unreachables = true;
-		return rodata_rela;
-	}
-
-	/* case 3 */
-	func_for_each_insn_continue_reverse(file, func, insn) {
-		if (insn->type == INSN_JUMP_DYNAMIC)
-			break;
-
-		/* allow small jumps within the range */
-		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
-		    insn->jump_dest &&
-		    (insn->jump_dest->offset <= insn->offset ||
-		     insn->jump_dest->offset > orig_insn->offset))
-		    break;
-
-		/* look for a relocation which references .rodata */
-		text_rela = find_rela_by_dest_range(insn->sec, insn->offset,
-						    insn->len);
-		if (!text_rela || text_rela->sym != file->rodata->sym)
-			continue;
-
-		/*
-		 * Make sure the .rodata address isn't associated with a
-		 * symbol.  gcc jump tables are anonymous data.
-		 */
-		if (find_symbol_containing(file->rodata, text_rela->addend))
-			continue;
-
-		return find_rela_by_dest(file->rodata, text_rela->addend);
-	}
-
-	return NULL;
-}
-
-static int add_func_switch_tables(struct objtool_file *file,
-				  struct symbol *func)
-{
-	struct instruction *insn, *prev_jump = NULL;
-	struct rela *rela, *prev_rela = NULL;
-	int ret;
-
-	func_for_each_insn(file, func, insn) {
-		if (insn->type != INSN_JUMP_DYNAMIC)
-			continue;
-
-		rela = find_switch_table(file, func, insn);
-		if (!rela)
-			continue;
-
-		/*
-		 * We found a switch table, but we don't know yet how big it
-		 * is.  Don't add it until we reach the end of the function or
-		 * the beginning of another switch table in the same function.
-		 */
-		if (prev_jump) {
-			ret = add_switch_table(file, func, prev_jump, prev_rela,
-					       rela);
-			if (ret)
-				return ret;
-		}
-
-		prev_jump = insn;
-		prev_rela = rela;
-	}
-
-	if (prev_jump) {
-		ret = add_switch_table(file, func, prev_jump, prev_rela, NULL);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-/*
- * For some switch statements, gcc generates a jump table in the .rodata
- * section which contains a list of addresses within the function to jump to.
- * This finds these jump tables and adds them to the insn->alts lists.
- */
-static int add_switch_table_alts(struct objtool_file *file)
-{
-	struct section *sec;
-	struct symbol *func;
-	int ret;
-
-	if (!file->rodata || !file->rodata->rela)
-		return 0;
-
-	list_for_each_entry(sec, &file->elf->sections, list) {
-		list_for_each_entry(func, &sec->symbol_list, list) {
-			if (func->type != STT_FUNC)
-				continue;
-
-			ret = add_func_switch_tables(file, func);
-			if (ret)
-				return ret;
-		}
-	}
-
-	return 0;
-}
-
-static int decode_sections(struct objtool_file *file)
-{
-	int ret;
-
-	ret = decode_instructions(file);
-	if (ret)
-		return ret;
-
-	add_ignores(file);
-
-	ret = add_nospec_ignores(file);
-	if (ret)
-		return ret;
-
-	ret = add_jump_destinations(file);
-	if (ret)
-		return ret;
-
-	ret = add_call_destinations(file);
-	if (ret)
-		return ret;
-
-	ret = add_special_section_alts(file);
-	if (ret)
-		return ret;
-
-	ret = add_switch_table_alts(file);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static bool is_fentry_call(struct instruction *insn)
-{
-	if (insn->type == INSN_CALL &&
-	    insn->call_dest->type == STT_NOTYPE &&
-	    !strcmp(insn->call_dest->name, "__fentry__"))
-		return true;
-
-	return false;
-}
-
-static bool has_modified_stack_frame(struct instruction *insn)
-{
-	return (insn->state & STATE_FP_SAVED) ||
-	       (insn->state & STATE_FP_SETUP);
-}
-
-static bool has_valid_stack_frame(struct instruction *insn)
-{
-	return (insn->state & STATE_FP_SAVED) &&
-	       (insn->state & STATE_FP_SETUP);
-}
-
-static unsigned int frame_state(unsigned long state)
-{
-	return (state & (STATE_FP_SAVED | STATE_FP_SETUP));
-}
-
-/*
- * Follow the branch starting at the given instruction, and recursively follow
- * any other branches (jumps).  Meanwhile, track the frame pointer state at
- * each instruction and validate all the rules described in
- * tools/objtool/Documentation/stack-validation.txt.
- */
-static int validate_branch(struct objtool_file *file,
-			   struct instruction *first, unsigned char first_state)
-{
-	struct alternative *alt;
-	struct instruction *insn;
-	struct section *sec;
-	struct symbol *func = NULL;
-	unsigned char state;
-	int ret;
-
-	insn = first;
-	sec = insn->sec;
-	state = first_state;
-
-	if (insn->alt_group && list_empty(&insn->alts)) {
-		WARN_FUNC("don't know how to handle branch to middle of alternative instruction group",
-			  sec, insn->offset);
-		return 1;
-	}
-
-	while (1) {
-		if (file->c_file && insn->func) {
-			if (func && func != insn->func) {
-				WARN("%s() falls through to next function %s()",
-				     func->name, insn->func->name);
-				return 1;
-			}
-
-			func = insn->func;
-		}
-
-		if (insn->visited) {
-			if (frame_state(insn->state) != frame_state(state)) {
-				WARN_FUNC("frame pointer state mismatch",
-					  sec, insn->offset);
-				return 1;
-			}
-
-			return 0;
-		}
-
-		insn->visited = true;
-		insn->state = state;
-
-		list_for_each_entry(alt, &insn->alts, list) {
-			ret = validate_branch(file, alt->insn, state);
-			if (ret)
-				return 1;
-		}
-
-		switch (insn->type) {
-
-		case INSN_FP_SAVE:
-			if (!nofp) {
-				if (state & STATE_FP_SAVED) {
-					WARN_FUNC("duplicate frame pointer save",
-						  sec, insn->offset);
-					return 1;
-				}
-				state |= STATE_FP_SAVED;
-			}
-			break;
-
-		case INSN_FP_SETUP:
-			if (!nofp) {
-				if (state & STATE_FP_SETUP) {
-					WARN_FUNC("duplicate frame pointer setup",
-						  sec, insn->offset);
-					return 1;
-				}
-				state |= STATE_FP_SETUP;
-			}
-			break;
-
-		case INSN_FP_RESTORE:
-			if (!nofp) {
-				if (has_valid_stack_frame(insn))
-					state &= ~STATE_FP_SETUP;
-
-				state &= ~STATE_FP_SAVED;
-			}
-			break;
-
-		case INSN_RETURN:
-			if (!nofp && has_modified_stack_frame(insn)) {
-				WARN_FUNC("return without frame pointer restore",
-					  sec, insn->offset);
-				return 1;
-			}
-			return 0;
-
-		case INSN_CALL:
-			if (is_fentry_call(insn)) {
-				state |= STATE_FENTRY;
-				break;
-			}
-
-			ret = dead_end_function(file, insn->call_dest);
-			if (ret == 1)
-				return 0;
-			if (ret == -1)
-				return 1;
-
-			/* fallthrough */
-		case INSN_CALL_DYNAMIC:
-			if (!nofp && !has_valid_stack_frame(insn)) {
-				WARN_FUNC("call without frame pointer save/setup",
-					  sec, insn->offset);
-				return 1;
-			}
-			break;
-
-		case INSN_JUMP_CONDITIONAL:
-		case INSN_JUMP_UNCONDITIONAL:
-			if (insn->jump_dest) {
-				ret = validate_branch(file, insn->jump_dest,
-						      state);
-				if (ret)
-					return 1;
-			} else if (has_modified_stack_frame(insn)) {
-				WARN_FUNC("sibling call from callable instruction with changed frame pointer",
-					  sec, insn->offset);
-				return 1;
-			} /* else it's a sibling call */
-
-			if (insn->type == INSN_JUMP_UNCONDITIONAL)
-				return 0;
-
-			break;
-
-		case INSN_JUMP_DYNAMIC:
-			if (list_empty(&insn->alts) &&
-			    has_modified_stack_frame(insn)) {
-				WARN_FUNC("sibling call from callable instruction with changed frame pointer",
-					  sec, insn->offset);
-				return 1;
-			}
-
-			return 0;
-
-		case INSN_BUG:
-			return 0;
-
-		default:
-			break;
-		}
-
-		insn = next_insn_same_sec(file, insn);
-		if (!insn) {
-			WARN("%s: unexpected end of section", sec->name);
-			return 1;
-		}
-	}
-
-	return 0;
-}
-
-static bool is_kasan_insn(struct instruction *insn)
-{
-	return (insn->type == INSN_CALL &&
-		!strcmp(insn->call_dest->name, "__asan_handle_no_return"));
-}
-
-static bool is_ubsan_insn(struct instruction *insn)
-{
-	return (insn->type == INSN_CALL &&
-		!strcmp(insn->call_dest->name,
-			"__ubsan_handle_builtin_unreachable"));
-}
-
-static bool ignore_unreachable_insn(struct symbol *func,
-				    struct instruction *insn)
-{
-	int i;
-
-	if (insn->type == INSN_NOP)
-		return true;
-
-	/*
-	 * Check if this (or a subsequent) instruction is related to
-	 * CONFIG_UBSAN or CONFIG_KASAN.
-	 *
-	 * End the search at 5 instructions to avoid going into the weeds.
-	 */
-	for (i = 0; i < 5; i++) {
-
-		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
-			return true;
-
-		if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest) {
-			insn = insn->jump_dest;
-			continue;
-		}
-
-		if (insn->offset + insn->len >= func->offset + func->len)
-			break;
-		insn = list_next_entry(insn, list);
-	}
-
-	return false;
-}
-
-static int validate_functions(struct objtool_file *file)
-{
-	struct section *sec;
-	struct symbol *func;
-	struct instruction *insn;
-	int ret, warnings = 0;
-
-	list_for_each_entry(sec, &file->elf->sections, list) {
-		list_for_each_entry(func, &sec->symbol_list, list) {
-			if (func->type != STT_FUNC)
-				continue;
-
-			insn = find_insn(file, sec, func->offset);
-			if (!insn)
-				continue;
-
-			ret = validate_branch(file, insn, 0);
-			warnings += ret;
-		}
-	}
-
-	list_for_each_entry(sec, &file->elf->sections, list) {
-		list_for_each_entry(func, &sec->symbol_list, list) {
-			if (func->type != STT_FUNC)
-				continue;
-
-			func_for_each_insn(file, func, insn) {
-				if (insn->visited)
-					continue;
-
-				insn->visited = true;
-
-				if (file->ignore_unreachables || warnings ||
-				    ignore_unreachable_insn(func, insn))
-					continue;
-
-				/*
-				 * gcov produces a lot of unreachable
-				 * instructions.  If we get an unreachable
-				 * warning and the file has gcov enabled, just
-				 * ignore it, and all other such warnings for
-				 * the file.
-				 */
-				if (!file->ignore_unreachables &&
-				    gcov_enabled(file)) {
-					file->ignore_unreachables = true;
-					continue;
-				}
-
-				WARN_FUNC("function has unreachable instruction", insn->sec, insn->offset);
-				warnings++;
-			}
-		}
-	}
-
-	return warnings;
-}
-
-static int validate_uncallable_instructions(struct objtool_file *file)
-{
-	struct instruction *insn;
-	int warnings = 0;
-
-	for_each_insn(file, insn) {
-		if (!insn->visited && insn->type == INSN_RETURN) {
-
-			/*
-			 * Don't warn about call instructions in unvisited
-			 * retpoline alternatives.
-			 */
-			if (!strcmp(insn->sec->name, ".altinstr_replacement"))
-				continue;
-
-			WARN_FUNC("return instruction outside of a callable function",
-				  insn->sec, insn->offset);
-			warnings++;
-		}
-	}
-
-	return warnings;
-}
-
-static void cleanup(struct objtool_file *file)
-{
-	struct instruction *insn, *tmpinsn;
-	struct alternative *alt, *tmpalt;
-
-	list_for_each_entry_safe(insn, tmpinsn, &file->insn_list, list) {
-		list_for_each_entry_safe(alt, tmpalt, &insn->alts, list) {
-			list_del(&alt->list);
-			free(alt);
-		}
-		list_del(&insn->list);
-		hash_del(&insn->hash);
-		free(insn);
-	}
-	elf_close(file->elf);
-}
-
-const char * const check_usage[] = {
+static const char * const check_usage[] = {
 	"objtool check [<options>] file.o",
 	NULL,
 };
 
+const struct option check_options[] = {
+	OPT_BOOLEAN('f', "no-fp", &no_fp, "Skip frame pointer validation"),
+	OPT_BOOLEAN('u', "no-unreachable", &no_unreachable, "Skip 'unreachable instruction' warnings"),
+	OPT_BOOLEAN('r', "retpoline", &retpoline, "Validate retpoline assumptions"),
+	OPT_BOOLEAN('m', "module", &module, "Indicates the object will be part of a kernel module"),
+	OPT_END(),
+};
+
 int cmd_check(int argc, const char **argv)
 {
-	struct objtool_file file;
-	int ret, warnings = 0;
+	const char *objname;
 
-	const struct option options[] = {
-		OPT_BOOLEAN('f', "no-fp", &nofp, "Skip frame pointer validation"),
-		OPT_END(),
-	};
-
-	argc = parse_options(argc, argv, options, check_usage, 0);
+	argc = parse_options(argc, argv, check_options, check_usage, 0);
 
 	if (argc != 1)
-		usage_with_options(check_usage, options);
+		usage_with_options(check_usage, check_options);
 
 	objname = argv[0];
 
-	file.elf = elf_open(objname);
-	if (!file.elf) {
-		fprintf(stderr, "error reading elf file %s\n", objname);
-		return 1;
-	}
-
-	INIT_LIST_HEAD(&file.insn_list);
-	hash_init(file.insn_hash);
-	file.whitelist = find_section_by_name(file.elf, ".discard.func_stack_frame_non_standard");
-	file.rodata = find_section_by_name(file.elf, ".rodata");
-	file.ignore_unreachables = false;
-	file.c_file = find_section_by_name(file.elf, ".comment");
-
-	ret = decode_sections(&file);
-	if (ret < 0)
-		goto out;
-	warnings += ret;
-
-	ret = validate_functions(&file);
-	if (ret < 0)
-		goto out;
-	warnings += ret;
-
-	ret = validate_uncallable_instructions(&file);
-	if (ret < 0)
-		goto out;
-	warnings += ret;
-
-out:
-	cleanup(&file);
-
-	/* ignore warnings for now until we get all the code cleaned up */
-	if (ret || warnings)
-		return 0;
-	return 0;
+	return check(objname, false);
 }
diff --git a/tools/objtool/builtin-orc.c b/tools/objtool/builtin-orc.c
new file mode 100644
index 0000000..77ea2b9
--- /dev/null
+++ b/tools/objtool/builtin-orc.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * objtool orc:
+ *
+ * This command analyzes a .o file and adds .orc_unwind and .orc_unwind_ip
+ * sections to it, which is used by the in-kernel ORC unwinder.
+ *
+ * This command is a superset of "objtool check".
+ */
+
+#include <string.h>
+#include "builtin.h"
+#include "check.h"
+
+
+static const char *orc_usage[] = {
+	"objtool orc generate [<options>] file.o",
+	"objtool orc dump file.o",
+	NULL,
+};
+
+int cmd_orc(int argc, const char **argv)
+{
+	const char *objname;
+
+	argc--; argv++;
+	if (argc <= 0)
+		usage_with_options(orc_usage, check_options);
+
+	if (!strncmp(argv[0], "gen", 3)) {
+		argc = parse_options(argc, argv, check_options, orc_usage, 0);
+		if (argc != 1)
+			usage_with_options(orc_usage, check_options);
+
+		objname = argv[0];
+
+		return check(objname, true);
+	}
+
+	if (!strcmp(argv[0], "dump")) {
+		if (argc != 2)
+			usage_with_options(orc_usage, check_options);
+
+		objname = argv[1];
+
+		return orc_dump(objname);
+	}
+
+	usage_with_options(orc_usage, check_options);
+
+	return 0;
+}
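Both cmd_check() and cmd_orc() funnel into the shared check() entry point exported by the new check.c (its body lies beyond this hunk). The sketch below is only an illustration of the expected flow; the elf_open() flags and the create_orc()/create_orc_sections()/elf_write() helpers are assumptions based on the ORC generation code, not something this hunk confirms:

	/* Rough sketch of the shared entry point -- not the actual body. */
	int check(const char *_objname, bool orc)
	{
		struct objtool_file file;
		int ret, warnings = 0;

		objname = _objname;

		file.elf = elf_open(objname, orc ? O_RDWR : O_RDONLY); /* assumed flags */
		if (!file.elf)
			return 1;

		INIT_LIST_HEAD(&file.insn_list);
		hash_init(file.insn_hash);

		ret = decode_sections(&file);		/* build the instruction list */
		if (ret < 0)
			goto out;
		warnings += ret;

		ret = validate_functions(&file);	/* stack/ORC validation passes */
		if (ret < 0)
			goto out;
		warnings += ret;

		if (orc) {
			/* assumed helpers from orc_gen.c and elf.c */
			ret = create_orc(&file);
			if (!ret)
				ret = create_orc_sections(&file);
			if (!ret)
				ret = elf_write(file.elf);
		}
	out:
		return ret < 0 ? ret : warnings;
	}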
diff --git a/tools/objtool/builtin.h b/tools/objtool/builtin.h
index 34d2ba7..28ff40e 100644
--- a/tools/objtool/builtin.h
+++ b/tools/objtool/builtin.h
@@ -17,6 +17,12 @@
 #ifndef _BUILTIN_H
 #define _BUILTIN_H
 
+#include <subcmd/parse-options.h>
+
+extern const struct option check_options[];
+extern bool no_fp, no_unreachable, retpoline, module;
+
 extern int cmd_check(int argc, const char **argv);
+extern int cmd_orc(int argc, const char **argv);
 
 #endif /* _BUILTIN_H */
diff --git a/tools/objtool/cfi.h b/tools/objtool/cfi.h
new file mode 100644
index 0000000..2fe883c
--- /dev/null
+++ b/tools/objtool/cfi.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _OBJTOOL_CFI_H
+#define _OBJTOOL_CFI_H
+
+#define CFI_UNDEFINED		-1
+#define CFI_CFA			-2
+#define CFI_SP_INDIRECT		-3
+#define CFI_BP_INDIRECT		-4
+
+#define CFI_AX			0
+#define CFI_DX			1
+#define CFI_CX			2
+#define CFI_BX			3
+#define CFI_SI			4
+#define CFI_DI			5
+#define CFI_BP			6
+#define CFI_SP			7
+#define CFI_R8			8
+#define CFI_R9			9
+#define CFI_R10			10
+#define CFI_R11			11
+#define CFI_R12			12
+#define CFI_R13			13
+#define CFI_R14			14
+#define CFI_R15			15
+#define CFI_RA			16
+#define CFI_NUM_REGS		17
+
+struct cfi_reg {
+	int base;
+	int offset;
+};
+
+struct cfi_state {
+	struct cfi_reg cfa;
+	struct cfi_reg regs[CFI_NUM_REGS];
+};
+
+#endif /* _OBJTOOL_CFI_H */
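To illustrate how these types encode unwind state: at the first instruction of an ordinary x86-64 function, the CFA (canonical frame address) is the stack pointer plus 8, and the return address sits at CFA - 8; initial_func_cfi in check.c is initialized along these lines by the arch-specific code. A minimal sketch using the definitions above:

	/* Minimal sketch: unwind state at a function's entry point. */
	static void init_entry_cfi(struct cfi_state *state)
	{
		int i;

		for (i = 0; i < CFI_NUM_REGS; i++) {
			state->regs[i].base = CFI_UNDEFINED;
			state->regs[i].offset = 0;
		}

		/* CFA = %rsp + 8: the CALL just pushed the return address. */
		state->cfa.base = CFI_SP;
		state->cfa.offset = 8;

		/* The return address lives immediately below the CFA. */
		state->regs[CFI_RA].base = CFI_CFA;
		state->regs[CFI_RA].offset = -8;
	}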
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
new file mode 100644
index 0000000..e128d1c
--- /dev/null
+++ b/tools/objtool/check.c
@@ -0,0 +1,2209 @@
+/*
+ * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <string.h>
+#include <stdlib.h>
+
+#include "builtin.h"
+#include "check.h"
+#include "elf.h"
+#include "special.h"
+#include "arch.h"
+#include "warn.h"
+
+#include <linux/hashtable.h>
+#include <linux/kernel.h>
+
+struct alternative {
+	struct list_head list;
+	struct instruction *insn;
+};
+
+const char *objname;
+struct cfi_state initial_func_cfi;
+
+struct instruction *find_insn(struct objtool_file *file,
+			      struct section *sec, unsigned long offset)
+{
+	struct instruction *insn;
+
+	hash_for_each_possible(file->insn_hash, insn, hash, offset)
+		if (insn->sec == sec && insn->offset == offset)
+			return insn;
+
+	return NULL;
+}
+
+static struct instruction *next_insn_same_sec(struct objtool_file *file,
+					      struct instruction *insn)
+{
+	struct instruction *next = list_next_entry(insn, list);
+
+	if (!next || &next->list == &file->insn_list || next->sec != insn->sec)
+		return NULL;
+
+	return next;
+}
+
+static struct instruction *next_insn_same_func(struct objtool_file *file,
+					       struct instruction *insn)
+{
+	struct instruction *next = list_next_entry(insn, list);
+	struct symbol *func = insn->func;
+
+	if (!func)
+		return NULL;
+
+	if (&next->list != &file->insn_list && next->func == func)
+		return next;
+
+	/* Check if we're already in the subfunction: */
+	if (func == func->cfunc)
+		return NULL;
+
+	/* Move to the subfunction: */
+	return find_insn(file, func->cfunc->sec, func->cfunc->offset);
+}
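The pfunc/cfunc pointers consulted here exist because newer GCC versions (-freorder-blocks-and-partition, and GCC 8's cold subfunctions) can split one source-level function into a hot parent symbol and an out-of-line cold child. A purely illustrative sketch of the naming and link direction (the ".cold.15" suffix is made up):

	/*
	 * Illustration only: GCC may emit two symbols for foo(),
	 *
	 *	foo           <- parent (child->pfunc points here)
	 *	foo.cold.15   <- child  (parent->cfunc points here)
	 *
	 * so next_insn_same_func() above steps from the parent's last
	 * instruction into the first instruction of the cold child.
	 */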
+
+#define func_for_each_insn_all(file, func, insn)			\
+	for (insn = find_insn(file, func->sec, func->offset);		\
+	     insn;							\
+	     insn = next_insn_same_func(file, insn))
+
+#define func_for_each_insn(file, func, insn)				\
+	for (insn = find_insn(file, func->sec, func->offset);		\
+	     insn && &insn->list != &file->insn_list &&			\
+		insn->sec == func->sec &&				\
+		insn->offset < func->offset + func->len;		\
+	     insn = list_next_entry(insn, list))
+
+#define func_for_each_insn_continue_reverse(file, func, insn)		\
+	for (insn = list_prev_entry(insn, list);			\
+	     &insn->list != &file->insn_list &&				\
+		insn->sec == func->sec && insn->offset >= func->offset;	\
+	     insn = list_prev_entry(insn, list))
+
+#define sec_for_each_insn_from(file, insn)				\
+	for (; insn; insn = next_insn_same_sec(file, insn))
+
+#define sec_for_each_insn_continue(file, insn)				\
+	for (insn = next_insn_same_sec(file, insn); insn;		\
+	     insn = next_insn_same_sec(file, insn))
+
+/*
+ * Check if the function has been manually whitelisted with the
+ * STACK_FRAME_NON_STANDARD macro, or if it should be automatically whitelisted
+ * due to its use of a context switching instruction.
+ */
+static bool ignore_func(struct objtool_file *file, struct symbol *func)
+{
+	struct rela *rela;
+
+	/* check for STACK_FRAME_NON_STANDARD */
+	if (file->whitelist && file->whitelist->rela)
+		list_for_each_entry(rela, &file->whitelist->rela->rela_list, list) {
+			if (rela->sym->type == STT_SECTION &&
+			    rela->sym->sec == func->sec &&
+			    rela->addend == func->offset)
+				return true;
+			if (rela->sym->type == STT_FUNC && rela->sym == func)
+				return true;
+		}
+
+	return false;
+}
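For reference, the whitelist section that ignore_func() walks is filled in by the STACK_FRAME_NON_STANDARD() macro from include/linux/frame.h, which places a pointer to the function into .discard.func_stack_frame_non_standard. A minimal, hypothetical use (the function name is invented for illustration):

	#include <linux/frame.h>

	/* Hypothetical function whose inline asm objtool cannot follow. */
	void my_weird_trampoline(void)
	{
		asm volatile("nop");
	}
	STACK_FRAME_NON_STANDARD(my_weird_trampoline);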
+
+/*
+ * This checks to see if the given function is a "noreturn" function.
+ *
+ * For global functions which are outside the scope of this object file, we
+ * have to keep a manual list of them.
+ *
+ * For local functions, we have to detect them manually by simply looking for
+ * the lack of a return instruction.
+ *
+ * Returns:
+ *  -1: error
+ *   0: no dead end
+ *   1: dead end
+ */
+static int __dead_end_function(struct objtool_file *file, struct symbol *func,
+			       int recursion)
+{
+	int i;
+	struct instruction *insn;
+	bool empty = true;
+
+	/*
+	 * Unfortunately these have to be hard coded because the noreturn
+	 * attribute isn't provided in ELF data.
+	 */
+	static const char * const global_noreturns[] = {
+		"__stack_chk_fail",
+		"panic",
+		"do_exit",
+		"do_task_dead",
+		"__module_put_and_exit",
+		"complete_and_exit",
+		"kvm_spurious_fault",
+		"__reiserfs_panic",
+		"lbug_with_loc",
+		"fortify_panic",
+	};
+
+	if (func->bind == STB_WEAK)
+		return 0;
+
+	if (func->bind == STB_GLOBAL)
+		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
+			if (!strcmp(func->name, global_noreturns[i]))
+				return 1;
+
+	if (!func->len)
+		return 0;
+
+	insn = find_insn(file, func->sec, func->offset);
+	if (!insn->func)
+		return 0;
+
+	func_for_each_insn_all(file, func, insn) {
+		empty = false;
+
+		if (insn->type == INSN_RETURN)
+			return 0;
+	}
+
+	if (empty)
+		return 0;
+
+	/*
+	 * A function can have a sibling call instead of a return.  In that
+	 * case, the function's dead-end status depends on whether the target
+	 * of the sibling call returns.
+	 */
+	func_for_each_insn_all(file, func, insn) {
+		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
+			struct instruction *dest = insn->jump_dest;
+
+			if (!dest)
+				/* sibling call to another file */
+				return 0;
+
+			if (dest->func && dest->func->pfunc != insn->func->pfunc) {
+
+				/* local sibling call */
+				if (recursion == 5) {
+					/*
+					 * Infinite recursion: two functions
+					 * have sibling calls to each other.
+					 * This is a very rare case.  It means
+					 * they aren't dead ends.
+					 */
+					return 0;
+				}
+
+				return __dead_end_function(file, dest->func,
+							   recursion + 1);
+			}
+		}
+
+		if (insn->type == INSN_JUMP_DYNAMIC && list_empty(&insn->alts))
+			/* sibling call */
+			return 0;
+	}
+
+	return 1;
+}
+
+static int dead_end_function(struct objtool_file *file, struct symbol *func)
+{
+	return __dead_end_function(file, func, 0);
+}
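To make the dead-end logic concrete: a call to a noreturn function such as panic() is a point past which execution never resumes, so the compiler emits no return path after it. A small illustrative caller (hypothetical, not from the kernel):

	#include <linux/kernel.h>

	/* Hypothetical helper that ends in a call to a known noreturn. */
	void fail_hard(const char *why)
	{
		pr_err("fatal: %s\n", why);
		panic("unrecoverable: %s", why);
		/*
		 * No return instruction is emitted here.  Unless objtool
		 * treats panic() as a dead end, it would warn that
		 * fail_hard() falls through into the next function.
		 */
	}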
+
+static void clear_insn_state(struct insn_state *state)
+{
+	int i;
+
+	memset(state, 0, sizeof(*state));
+	state->cfa.base = CFI_UNDEFINED;
+	for (i = 0; i < CFI_NUM_REGS; i++) {
+		state->regs[i].base = CFI_UNDEFINED;
+		state->vals[i].base = CFI_UNDEFINED;
+	}
+	state->drap_reg = CFI_UNDEFINED;
+	state->drap_offset = -1;
+}
+
+/*
+ * Call the arch-specific instruction decoder for all the instructions and add
+ * them to the global instruction list.
+ */
+static int decode_instructions(struct objtool_file *file)
+{
+	struct section *sec;
+	struct symbol *func;
+	unsigned long offset;
+	struct instruction *insn;
+	int ret;
+
+	for_each_sec(file, sec) {
+
+		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
+			continue;
+
+		if (strcmp(sec->name, ".altinstr_replacement") &&
+		    strcmp(sec->name, ".altinstr_aux") &&
+		    strncmp(sec->name, ".discard.", 9))
+			sec->text = true;
+
+		for (offset = 0; offset < sec->len; offset += insn->len) {
+			insn = malloc(sizeof(*insn));
+			if (!insn) {
+				WARN("malloc failed");
+				return -1;
+			}
+			memset(insn, 0, sizeof(*insn));
+			INIT_LIST_HEAD(&insn->alts);
+			clear_insn_state(&insn->state);
+
+			insn->sec = sec;
+			insn->offset = offset;
+
+			ret = arch_decode_instruction(file->elf, sec, offset,
+						      sec->len - offset,
+						      &insn->len, &insn->type,
+						      &insn->immediate,
+						      &insn->stack_op);
+			if (ret)
+				goto err;
+
+			if (!insn->type || insn->type > INSN_LAST) {
+				WARN_FUNC("invalid instruction type %d",
+					  insn->sec, insn->offset, insn->type);
+				ret = -1;
+				goto err;
+			}
+
+			hash_add(file->insn_hash, &insn->hash, insn->offset);
+			list_add_tail(&insn->list, &file->insn_list);
+		}
+
+		list_for_each_entry(func, &sec->symbol_list, list) {
+			if (func->type != STT_FUNC)
+				continue;
+
+			if (!find_insn(file, sec, func->offset)) {
+				WARN("%s(): can't find starting instruction",
+				     func->name);
+				return -1;
+			}
+
+			func_for_each_insn(file, func, insn)
+				if (!insn->func)
+					insn->func = func;
+		}
+	}
+
+	return 0;
+
+err:
+	free(insn);
+	return ret;
+}
+
+/*
+ * Mark "ud2" instructions and manually annotated dead ends.
+ */
+static int add_dead_ends(struct objtool_file *file)
+{
+	struct section *sec;
+	struct rela *rela;
+	struct instruction *insn;
+	bool found;
+
+	/*
+	 * By default, "ud2" is a dead end unless otherwise annotated, because
+	 * GCC 7 inserts it for certain divide-by-zero cases.
+	 */
+	for_each_insn(file, insn)
+		if (insn->type == INSN_BUG)
+			insn->dead_end = true;
+
+	/*
+	 * Check for manually annotated dead ends.
+	 */
+	sec = find_section_by_name(file->elf, ".rela.discard.unreachable");
+	if (!sec)
+		goto reachable;
+
+	list_for_each_entry(rela, &sec->rela_list, list) {
+		if (rela->sym->type != STT_SECTION) {
+			WARN("unexpected relocation symbol type in %s", sec->name);
+			return -1;
+		}
+		insn = find_insn(file, rela->sym->sec, rela->addend);
+		if (insn)
+			insn = list_prev_entry(insn, list);
+		else if (rela->addend == rela->sym->sec->len) {
+			found = false;
+			list_for_each_entry_reverse(insn, &file->insn_list, list) {
+				if (insn->sec == rela->sym->sec) {
+					found = true;
+					break;
+				}
+			}
+
+			if (!found) {
+				WARN("can't find unreachable insn at %s+0x%x",
+				     rela->sym->sec->name, rela->addend);
+				return -1;
+			}
+		} else {
+			WARN("can't find unreachable insn at %s+0x%x",
+			     rela->sym->sec->name, rela->addend);
+			return -1;
+		}
+
+		insn->dead_end = true;
+	}
+
+reachable:
+	/*
+	 * These manually annotated reachable checks are needed for GCC 4.4,
+	 * where the Linux unreachable() macro isn't supported.  In that case
+	 * GCC doesn't know the "ud2" is fatal, so it generates code as if it's
+	 * not a dead end.
+	 */
+	sec = find_section_by_name(file->elf, ".rela.discard.reachable");
+	if (!sec)
+		return 0;
+
+	list_for_each_entry(rela, &sec->rela_list, list) {
+		if (rela->sym->type != STT_SECTION) {
+			WARN("unexpected relocation symbol type in %s", sec->name);
+			return -1;
+		}
+		insn = find_insn(file, rela->sym->sec, rela->addend);
+		if (insn)
+			insn = list_prev_entry(insn, list);
+		else if (rela->addend == rela->sym->sec->len) {
+			found = false;
+			list_for_each_entry_reverse(insn, &file->insn_list, list) {
+				if (insn->sec == rela->sym->sec) {
+					found = true;
+					break;
+				}
+			}
+
+			if (!found) {
+				WARN("can't find reachable insn at %s+0x%x",
+				     rela->sym->sec->name, rela->addend);
+				return -1;
+			}
+		} else {
+			WARN("can't find reachable insn at %s+0x%x",
+			     rela->sym->sec->name, rela->addend);
+			return -1;
+		}
+
+		insn->dead_end = false;
+	}
+
+	return 0;
+}
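The .discard.unreachable entries consumed above come from the kernel's unreachable()/annotate_unreachable() machinery in include/linux/compiler.h. The exact macro varies between releases; the sketch below only illustrates the push-section trick and is not the authoritative definition:

	/* Approximate shape of the annotation (illustrative only). */
	#define annotate_unreachable() ({				\
		asm volatile("%c0:\n\t"					\
			     ".pushsection .discard.unreachable\n\t"	\
			     ".long %c0b - .\n\t"			\
			     ".popsection\n\t" : : "i" (__COUNTER__));	\
	})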
+
+/*
+ * Warnings shouldn't be reported for ignored functions.
+ */
+static void add_ignores(struct objtool_file *file)
+{
+	struct instruction *insn;
+	struct section *sec;
+	struct symbol *func;
+
+	for_each_sec(file, sec) {
+		list_for_each_entry(func, &sec->symbol_list, list) {
+			if (func->type != STT_FUNC)
+				continue;
+
+			if (!ignore_func(file, func))
+				continue;
+
+			func_for_each_insn_all(file, func, insn)
+				insn->ignore = true;
+		}
+	}
+}
+
+/*
+ * FIXME: For now, just ignore any alternatives which add retpolines.  This is
+ * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
+ * But it at least allows objtool to understand the control flow *around* the
+ * retpoline.
+ */
+static int add_nospec_ignores(struct objtool_file *file)
+{
+	struct section *sec;
+	struct rela *rela;
+	struct instruction *insn;
+
+	sec = find_section_by_name(file->elf, ".rela.discard.nospec");
+	if (!sec)
+		return 0;
+
+	list_for_each_entry(rela, &sec->rela_list, list) {
+		if (rela->sym->type != STT_SECTION) {
+			WARN("unexpected relocation symbol type in %s", sec->name);
+			return -1;
+		}
+
+		insn = find_insn(file, rela->sym->sec, rela->addend);
+		if (!insn) {
+			WARN("bad .discard.nospec entry");
+			return -1;
+		}
+
+		insn->ignore_alts = true;
+	}
+
+	return 0;
+}
+
+/*
+ * Find the destination instructions for all jumps.
+ */
+static int add_jump_destinations(struct objtool_file *file)
+{
+	struct instruction *insn;
+	struct rela *rela;
+	struct section *dest_sec;
+	unsigned long dest_off;
+
+	for_each_insn(file, insn) {
+		if (insn->type != INSN_JUMP_CONDITIONAL &&
+		    insn->type != INSN_JUMP_UNCONDITIONAL)
+			continue;
+
+		if (insn->ignore)
+			continue;
+
+		rela = find_rela_by_dest_range(insn->sec, insn->offset,
+					       insn->len);
+		if (!rela) {
+			dest_sec = insn->sec;
+			dest_off = insn->offset + insn->len + insn->immediate;
+		} else if (rela->sym->type == STT_SECTION) {
+			dest_sec = rela->sym->sec;
+			dest_off = rela->addend + 4;
+		} else if (rela->sym->sec->idx) {
+			dest_sec = rela->sym->sec;
+			dest_off = rela->sym->sym.st_value + rela->addend + 4;
+		} else if (strstr(rela->sym->name, "_indirect_thunk_")) {
+			/*
+			 * Retpoline jumps are really dynamic jumps in
+			 * disguise, so convert them accordingly.
+			 */
+			insn->type = INSN_JUMP_DYNAMIC;
+			insn->retpoline_safe = true;
+			continue;
+		} else {
+			/* sibling call */
+			insn->jump_dest = 0;
+			continue;
+		}
+
+		insn->jump_dest = find_insn(file, dest_sec, dest_off);
+		if (!insn->jump_dest) {
+
+			/*
+			 * This is a special case where an alt instruction
+			 * jumps past the end of the section.  These are
+			 * handled later in handle_group_alt().
+			 */
+			if (!strcmp(insn->sec->name, ".altinstr_replacement"))
+				continue;
+
+			WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
+				  insn->sec, insn->offset, dest_sec->name,
+				  dest_off);
+			return -1;
+		}
+	}
+
+	return 0;
+}
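A short worked example of why 4 is added to the addend above (the numbers are purely illustrative):

	/*
	 * A 5-byte "jmp foo" (e9 rel32) carries an R_X86_64_PC32 rela on its
	 * 4-byte immediate with addend -4, because the hardware resolves the
	 * displacement relative to the *end* of the instruction while the
	 * rela is applied at the start of the immediate.  Computing
	 * foo + (-4) + 4 = foo therefore recovers the real destination,
	 * which is exactly the "+ 4" applied to rela->addend above.
	 */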
+
+/*
+ * Find the destination instructions for all calls.
+ */
+static int add_call_destinations(struct objtool_file *file)
+{
+	struct instruction *insn;
+	unsigned long dest_off;
+	struct rela *rela;
+
+	for_each_insn(file, insn) {
+		if (insn->type != INSN_CALL)
+			continue;
+
+		rela = find_rela_by_dest_range(insn->sec, insn->offset,
+					       insn->len);
+		if (!rela) {
+			dest_off = insn->offset + insn->len + insn->immediate;
+			insn->call_dest = find_symbol_by_offset(insn->sec,
+								dest_off);
+
+			if (!insn->call_dest && !insn->ignore) {
+				WARN_FUNC("unsupported intra-function call",
+					  insn->sec, insn->offset);
+				if (retpoline)
+					WARN("If this is a retpoline, please patch it in with alternatives and annotate it with ANNOTATE_NOSPEC_ALTERNATIVE.");
+				return -1;
+			}
+
+		} else if (rela->sym->type == STT_SECTION) {
+			insn->call_dest = find_symbol_by_offset(rela->sym->sec,
+								rela->addend+4);
+			if (!insn->call_dest ||
+			    insn->call_dest->type != STT_FUNC) {
+				WARN_FUNC("can't find call dest symbol at %s+0x%x",
+					  insn->sec, insn->offset,
+					  rela->sym->sec->name,
+					  rela->addend + 4);
+				return -1;
+			}
+		} else
+			insn->call_dest = rela->sym;
+	}
+
+	return 0;
+}
+
+/*
+ * The .alternatives section requires some extra special care, over and above
+ * what other special sections require:
+ *
+ * 1. Because alternatives are patched in-place, we need to insert a fake jump
+ *    instruction at the end so that validate_branch() skips all the original
+ *    replaced instructions when validating the new instruction path.
+ *
+ * 2. An added wrinkle is that the new instruction length might be zero.  In
+ *    that case the old instructions are replaced with noops.  We simulate that
+ *    by creating a fake jump as the only new instruction.
+ *
+ * 3. In some cases, the alternative section includes an instruction which
+ *    conditionally jumps to the _end_ of the entry.  We have to modify these
+ *    jumps' destinations to point back to .text rather than the end of the
+ *    entry in .altinstr_replacement.
+ *
+ * 4. It has been requested that we don't validate the !POPCNT feature path
+ *    which is a "very very small percentage of machines".
+ */
+static int handle_group_alt(struct objtool_file *file,
+			    struct special_alt *special_alt,
+			    struct instruction *orig_insn,
+			    struct instruction **new_insn)
+{
+	struct instruction *last_orig_insn, *last_new_insn, *insn, *fake_jump = NULL;
+	unsigned long dest_off;
+
+	last_orig_insn = NULL;
+	insn = orig_insn;
+	sec_for_each_insn_from(file, insn) {
+		if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
+			break;
+
+		if (special_alt->skip_orig)
+			insn->type = INSN_NOP;
+
+		insn->alt_group = true;
+		last_orig_insn = insn;
+	}
+
+	if (next_insn_same_sec(file, last_orig_insn)) {
+		fake_jump = malloc(sizeof(*fake_jump));
+		if (!fake_jump) {
+			WARN("malloc failed");
+			return -1;
+		}
+		memset(fake_jump, 0, sizeof(*fake_jump));
+		INIT_LIST_HEAD(&fake_jump->alts);
+		clear_insn_state(&fake_jump->state);
+
+		fake_jump->sec = special_alt->new_sec;
+		fake_jump->offset = -1;
+		fake_jump->type = INSN_JUMP_UNCONDITIONAL;
+		fake_jump->jump_dest = list_next_entry(last_orig_insn, list);
+		fake_jump->ignore = true;
+	}
+
+	if (!special_alt->new_len) {
+		if (!fake_jump) {
+			WARN("%s: empty alternative at end of section",
+			     special_alt->orig_sec->name);
+			return -1;
+		}
+
+		*new_insn = fake_jump;
+		return 0;
+	}
+
+	last_new_insn = NULL;
+	insn = *new_insn;
+	sec_for_each_insn_from(file, insn) {
+		if (insn->offset >= special_alt->new_off + special_alt->new_len)
+			break;
+
+		last_new_insn = insn;
+
+		insn->ignore = orig_insn->ignore_alts;
+
+		if (insn->type != INSN_JUMP_CONDITIONAL &&
+		    insn->type != INSN_JUMP_UNCONDITIONAL)
+			continue;
+
+		if (!insn->immediate)
+			continue;
+
+		dest_off = insn->offset + insn->len + insn->immediate;
+		if (dest_off == special_alt->new_off + special_alt->new_len) {
+			if (!fake_jump) {
+				WARN("%s: alternative jump to end of section",
+				     special_alt->orig_sec->name);
+				return -1;
+			}
+			insn->jump_dest = fake_jump;
+		}
+
+		if (!insn->jump_dest) {
+			WARN_FUNC("can't find alternative jump destination",
+				  insn->sec, insn->offset);
+			return -1;
+		}
+	}
+
+	if (!last_new_insn) {
+		WARN_FUNC("can't find last new alternative instruction",
+			  special_alt->new_sec, special_alt->new_off);
+		return -1;
+	}
+
+	if (fake_jump)
+		list_add(&fake_jump->list, &last_new_insn->list);
+
+	return 0;
+}
+
+/*
+ * A jump table entry can either convert a nop to a jump or a jump to a nop.
+ * If the original instruction is a jump, make the alt entry an effective nop
+ * by just skipping the original instruction.
+ */
+static int handle_jump_alt(struct objtool_file *file,
+			   struct special_alt *special_alt,
+			   struct instruction *orig_insn,
+			   struct instruction **new_insn)
+{
+	if (orig_insn->type == INSN_NOP)
+		return 0;
+
+	if (orig_insn->type != INSN_JUMP_UNCONDITIONAL) {
+		WARN_FUNC("unsupported instruction at jump label",
+			  orig_insn->sec, orig_insn->offset);
+		return -1;
+	}
+
+	*new_insn = list_next_entry(orig_insn, list);
+	return 0;
+}
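The jump-label sites handled here are generated by the static-key infrastructure: each branch is compiled as a single nop (or unconditional jmp) that the kernel patches at runtime. A minimal, hypothetical user looks roughly like this:

	#include <linux/jump_label.h>
	#include <linux/printk.h>

	/* Hypothetical static key, disabled by default. */
	static DEFINE_STATIC_KEY_FALSE(my_debug_key);

	void maybe_trace(const char *msg)
	{
		/* Emitted as a nop; becomes a jmp once the key is enabled. */
		if (static_branch_unlikely(&my_debug_key))
			pr_info("trace: %s\n", msg);
	}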
+
+/*
+ * Read all the special sections which have alternate instructions which can be
+ * patched in or redirected to at runtime.  Each instruction having alternate
+ * instruction(s) has them added to its insn->alts list, which will be
+ * traversed in validate_branch().
+ */
+static int add_special_section_alts(struct objtool_file *file)
+{
+	struct list_head special_alts;
+	struct instruction *orig_insn, *new_insn;
+	struct special_alt *special_alt, *tmp;
+	struct alternative *alt;
+	int ret;
+
+	ret = special_get_alts(file->elf, &special_alts);
+	if (ret)
+		return ret;
+
+	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
+
+		orig_insn = find_insn(file, special_alt->orig_sec,
+				      special_alt->orig_off);
+		if (!orig_insn) {
+			WARN_FUNC("special: can't find orig instruction",
+				  special_alt->orig_sec, special_alt->orig_off);
+			ret = -1;
+			goto out;
+		}
+
+		new_insn = NULL;
+		if (!special_alt->group || special_alt->new_len) {
+			new_insn = find_insn(file, special_alt->new_sec,
+					     special_alt->new_off);
+			if (!new_insn) {
+				WARN_FUNC("special: can't find new instruction",
+					  special_alt->new_sec,
+					  special_alt->new_off);
+				ret = -1;
+				goto out;
+			}
+		}
+
+		if (special_alt->group) {
+			ret = handle_group_alt(file, special_alt, orig_insn,
+					       &new_insn);
+			if (ret)
+				goto out;
+		} else if (special_alt->jump_or_nop) {
+			ret = handle_jump_alt(file, special_alt, orig_insn,
+					      &new_insn);
+			if (ret)
+				goto out;
+		}
+
+		alt = malloc(sizeof(*alt));
+		if (!alt) {
+			WARN("malloc failed");
+			ret = -1;
+			goto out;
+		}
+
+		alt->insn = new_insn;
+		list_add_tail(&alt->list, &orig_insn->alts);
+
+		list_del(&special_alt->list);
+		free(special_alt);
+	}
+
+out:
+	return ret;
+}
+
+static int add_switch_table(struct objtool_file *file, struct instruction *insn,
+			    struct rela *table, struct rela *next_table)
+{
+	struct rela *rela = table;
+	struct instruction *alt_insn;
+	struct alternative *alt;
+	struct symbol *pfunc = insn->func->pfunc;
+	unsigned int prev_offset = 0;
+
+	list_for_each_entry_from(rela, &file->rodata->rela->rela_list, list) {
+		if (rela == next_table)
+			break;
+
+		/* Make sure the switch table entries are consecutive: */
+		if (prev_offset && rela->offset != prev_offset + 8)
+			break;
+
+		/* Detect function pointers from contiguous objects: */
+		if (rela->sym->sec == pfunc->sec &&
+		    rela->addend == pfunc->offset)
+			break;
+
+		alt_insn = find_insn(file, rela->sym->sec, rela->addend);
+		if (!alt_insn)
+			break;
+
+		/* Make sure the jmp dest is in the function or subfunction: */
+		if (alt_insn->func->pfunc != pfunc)
+			break;
+
+		alt = malloc(sizeof(*alt));
+		if (!alt) {
+			WARN("malloc failed");
+			return -1;
+		}
+
+		alt->insn = alt_insn;
+		list_add_tail(&alt->list, &insn->alts);
+		prev_offset = rela->offset;
+	}
+
+	if (!prev_offset) {
+		WARN_FUNC("can't find switch jump table",
+			  insn->sec, insn->offset);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * find_switch_table() - Given a dynamic jump, find the switch jump table in
+ * .rodata associated with it.
+ *
+ * There are 3 basic patterns:
+ *
+ * 1. jmpq *[rodata addr](,%reg,8)
+ *
+ *    This is the most common case by far.  It jumps to an address in a simple
+ *    jump table which is stored in .rodata.
+ *
+ * 2. jmpq *[rodata addr](%rip)
+ *
+ *    This is caused by a rare GCC quirk, currently only seen in three driver
+ *    functions in the kernel, only with certain obscure non-distro configs.
+ *
+ *    As part of an optimization, GCC makes a copy of an existing switch jump
+ *    table, modifies it, and then hard-codes the jump (albeit with an indirect
+ *    jump) to use a single entry in the table.  The rest of the jump table and
+ *    some of its jump targets remain as dead code.
+ *
+ *    In such a case we can just crudely ignore all unreachable instruction
+ *    warnings for the entire object file.  Ideally we would just ignore them
+ *    for the function, but that would require redesigning the code quite a
+ *    bit.  And honestly that's just not worth doing: unreachable instruction
+ *    warnings are of questionable value anyway, and this is such a rare issue.
+ *
+ * 3. mov [rodata addr],%reg1
+ *    ... some instructions ...
+ *    jmpq *(%reg1,%reg2,8)
+ *
+ *    This is a fairly uncommon pattern which is new for GCC 6.  As of this
+ *    writing, there are 11 occurrences of it in the allmodconfig kernel.
+ *
+ *    As of GCC 7 there are quite a few more of these and the 'in between' code
+ *    is significant. Esp. with KASAN enabled some of the code between the mov
+ *    and jmpq uses .rodata itself, which can confuse things.
+ *
+ *    TODO: Once we have DWARF CFI and smarter instruction decoding logic,
+ *    ensure the same register is used in the mov and jump instructions.
+ *
+ *    NOTE: RETPOLINE made it harder still to decode dynamic jumps.
+ */
+static struct rela *find_switch_table(struct objtool_file *file,
+				      struct symbol *func,
+				      struct instruction *insn)
+{
+	struct rela *text_rela, *rodata_rela;
+	struct instruction *orig_insn = insn;
+	unsigned long table_offset;
+
+	/*
+	 * Backward search using the @first_jump_src links; these help avoid
+	 * much of the 'in between' code, which in turn avoids us getting
+	 * confused by it.
+	 */
+	for (;
+	     &insn->list != &file->insn_list &&
+	     insn->sec == func->sec &&
+	     insn->offset >= func->offset;
+
+	     insn = insn->first_jump_src ?: list_prev_entry(insn, list)) {
+
+		if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
+			break;
+
+		/* allow small jumps within the range */
+		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
+		    insn->jump_dest &&
+		    (insn->jump_dest->offset <= insn->offset ||
+		     insn->jump_dest->offset > orig_insn->offset))
+			break;
+
+		/* look for a relocation which references .rodata */
+		text_rela = find_rela_by_dest_range(insn->sec, insn->offset,
+						    insn->len);
+		if (!text_rela || text_rela->sym != file->rodata->sym)
+			continue;
+
+		table_offset = text_rela->addend;
+		if (text_rela->type == R_X86_64_PC32)
+			table_offset += 4;
+
+		/*
+		 * Make sure the .rodata address isn't associated with a
+		 * symbol.  gcc jump tables are anonymous data.
+		 */
+		if (find_symbol_containing(file->rodata, table_offset))
+			continue;
+
+		rodata_rela = find_rela_by_dest(file->rodata, table_offset);
+		if (rodata_rela) {
+			/*
+			 * Use of RIP-relative switch jumps is quite rare, and
+			 * indicates a rare GCC quirk/bug which can leave dead
+			 * code behind.
+			 */
+			if (text_rela->type == R_X86_64_PC32)
+				file->ignore_unreachables = true;
+
+			return rodata_rela;
+		}
+	}
+
+	return NULL;
+}
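For pattern 1 above, the usual source is a dense switch over small integers. A hypothetical example (the handler names are invented) that GCC typically lowers to an anonymous .rodata table plus an indirect jump:

	/* Handlers are only declared so the example is self-contained. */
	extern int handle_read(void), handle_write(void), handle_open(void),
		   handle_close(void), handle_mmap(void), handle_ioctl(void);

	int dispatch(unsigned int cmd)
	{
		switch (cmd) {
		case 0: return handle_read();
		case 1: return handle_write();
		case 2: return handle_open();
		case 3: return handle_close();
		case 4: return handle_mmap();
		case 5: return handle_ioctl();
		default: return -1;
		}
	}

	/*
	 * With a table-based lowering this becomes roughly:
	 *	cmpl	$5, %edi
	 *	ja	.Ldefault
	 *	jmpq	*.Ltable(,%rdi,8)	# pattern 1: anonymous .rodata table
	 */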
+
+
+static int add_func_switch_tables(struct objtool_file *file,
+				  struct symbol *func)
+{
+	struct instruction *insn, *last = NULL, *prev_jump = NULL;
+	struct rela *rela, *prev_rela = NULL;
+	int ret;
+
+	func_for_each_insn_all(file, func, insn) {
+		if (!last)
+			last = insn;
+
+		/*
+		 * Store back-pointers for unconditional forward jumps such
+		 * that find_switch_table() can back-track using those and
+		 * avoid some potentially confusing code.
+		 */
+		if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
+		    insn->offset > last->offset &&
+		    insn->jump_dest->offset > insn->offset &&
+		    !insn->jump_dest->first_jump_src) {
+
+			insn->jump_dest->first_jump_src = insn;
+			last = insn->jump_dest;
+		}
+
+		if (insn->type != INSN_JUMP_DYNAMIC)
+			continue;
+
+		rela = find_switch_table(file, func, insn);
+		if (!rela)
+			continue;
+
+		/*
+		 * We found a switch table, but we don't know yet how big it
+		 * is.  Don't add it until we reach the end of the function or
+		 * the beginning of another switch table in the same function.
+		 */
+		if (prev_jump) {
+			ret = add_switch_table(file, prev_jump, prev_rela, rela);
+			if (ret)
+				return ret;
+		}
+
+		prev_jump = insn;
+		prev_rela = rela;
+	}
+
+	if (prev_jump) {
+		ret = add_switch_table(file, prev_jump, prev_rela, NULL);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * For some switch statements, gcc generates a jump table in the .rodata
+ * section which contains a list of addresses within the function to jump to.
+ * This finds these jump tables and adds them to the insn->alts lists.
+ */
+static int add_switch_table_alts(struct objtool_file *file)
+{
+	struct section *sec;
+	struct symbol *func;
+	int ret;
+
+	if (!file->rodata || !file->rodata->rela)
+		return 0;
+
+	for_each_sec(file, sec) {
+		list_for_each_entry(func, &sec->symbol_list, list) {
+			if (func->type != STT_FUNC)
+				continue;
+
+			ret = add_func_switch_tables(file, func);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
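+
+/*
+ * Example (simplified): a dense switch such as
+ *
+ *	switch (x) {
+ *	case 0: ... case 7: ...
+ *	}
+ *
+ * typically compiles to an anonymous array of code addresses in .rodata plus
+ * an indirect jump through it.  The helpers above add each table entry to the
+ * indirect jump's ->alts list, so validate_branch() can follow every case
+ * label.
+ */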
+
+static int read_unwind_hints(struct objtool_file *file)
+{
+	struct section *sec, *relasec;
+	struct rela *rela;
+	struct unwind_hint *hint;
+	struct instruction *insn;
+	struct cfi_reg *cfa;
+	int i;
+
+	sec = find_section_by_name(file->elf, ".discard.unwind_hints");
+	if (!sec)
+		return 0;
+
+	relasec = sec->rela;
+	if (!relasec) {
+		WARN("missing .rela.discard.unwind_hints section");
+		return -1;
+	}
+
+	if (sec->len % sizeof(struct unwind_hint)) {
+		WARN("struct unwind_hint size mismatch");
+		return -1;
+	}
+
+	file->hints = true;
+
+	for (i = 0; i < sec->len / sizeof(struct unwind_hint); i++) {
+		hint = (struct unwind_hint *)sec->data->d_buf + i;
+
+		rela = find_rela_by_dest(sec, i * sizeof(*hint));
+		if (!rela) {
+			WARN("can't find rela for unwind_hints[%d]", i);
+			return -1;
+		}
+
+		insn = find_insn(file, rela->sym->sec, rela->addend);
+		if (!insn) {
+			WARN("can't find insn for unwind_hints[%d]", i);
+			return -1;
+		}
+
+		cfa = &insn->state.cfa;
+
+		if (hint->type == UNWIND_HINT_TYPE_SAVE) {
+			insn->save = true;
+			continue;
+
+		} else if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
+			insn->restore = true;
+			insn->hint = true;
+			continue;
+		}
+
+		insn->hint = true;
+
+		switch (hint->sp_reg) {
+		case ORC_REG_UNDEFINED:
+			cfa->base = CFI_UNDEFINED;
+			break;
+		case ORC_REG_SP:
+			cfa->base = CFI_SP;
+			break;
+		case ORC_REG_BP:
+			cfa->base = CFI_BP;
+			break;
+		case ORC_REG_SP_INDIRECT:
+			cfa->base = CFI_SP_INDIRECT;
+			break;
+		case ORC_REG_R10:
+			cfa->base = CFI_R10;
+			break;
+		case ORC_REG_R13:
+			cfa->base = CFI_R13;
+			break;
+		case ORC_REG_DI:
+			cfa->base = CFI_DI;
+			break;
+		case ORC_REG_DX:
+			cfa->base = CFI_DX;
+			break;
+		default:
+			WARN_FUNC("unsupported unwind_hint sp base reg %d",
+				  insn->sec, insn->offset, hint->sp_reg);
+			return -1;
+		}
+
+		cfa->offset = hint->sp_offset;
+		insn->state.type = hint->type;
+	}
+
+	return 0;
+}
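+
+/*
+ * For example (values illustrative), a hint of { sp_reg = ORC_REG_SP,
+ * sp_offset = 8, type = ORC_TYPE_CALL } attached to an instruction simply
+ * pins that instruction's CFA to sp+8 with ordinary call-frame semantics,
+ * overriding whatever state validate_branch() would otherwise compute there.
+ */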
+
+static int read_retpoline_hints(struct objtool_file *file)
+{
+	struct section *sec;
+	struct instruction *insn;
+	struct rela *rela;
+
+	sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
+	if (!sec)
+		return 0;
+
+	list_for_each_entry(rela, &sec->rela_list, list) {
+		if (rela->sym->type != STT_SECTION) {
+			WARN("unexpected relocation symbol type in %s", sec->name);
+			return -1;
+		}
+
+		insn = find_insn(file, rela->sym->sec, rela->addend);
+		if (!insn) {
+			WARN("bad .discard.retpoline_safe entry");
+			return -1;
+		}
+
+		if (insn->type != INSN_JUMP_DYNAMIC &&
+		    insn->type != INSN_CALL_DYNAMIC) {
+			WARN_FUNC("retpoline_safe hint not an indirect jump/call",
+				  insn->sec, insn->offset);
+			return -1;
+		}
+
+		insn->retpoline_safe = true;
+	}
+
+	return 0;
+}
+
+static int decode_sections(struct objtool_file *file)
+{
+	int ret;
+
+	ret = decode_instructions(file);
+	if (ret)
+		return ret;
+
+	ret = add_dead_ends(file);
+	if (ret)
+		return ret;
+
+	add_ignores(file);
+
+	ret = add_nospec_ignores(file);
+	if (ret)
+		return ret;
+
+	ret = add_jump_destinations(file);
+	if (ret)
+		return ret;
+
+	ret = add_special_section_alts(file);
+	if (ret)
+		return ret;
+
+	ret = add_call_destinations(file);
+	if (ret)
+		return ret;
+
+	ret = add_switch_table_alts(file);
+	if (ret)
+		return ret;
+
+	ret = read_unwind_hints(file);
+	if (ret)
+		return ret;
+
+	ret = read_retpoline_hints(file);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static bool is_fentry_call(struct instruction *insn)
+{
+	if (insn->type == INSN_CALL &&
+	    insn->call_dest->type == STT_NOTYPE &&
+	    !strcmp(insn->call_dest->name, "__fentry__"))
+		return true;
+
+	return false;
+}
+
+static bool has_modified_stack_frame(struct insn_state *state)
+{
+	int i;
+
+	if (state->cfa.base != initial_func_cfi.cfa.base ||
+	    state->cfa.offset != initial_func_cfi.cfa.offset ||
+	    state->stack_size != initial_func_cfi.cfa.offset ||
+	    state->drap)
+		return true;
+
+	for (i = 0; i < CFI_NUM_REGS; i++)
+		if (state->regs[i].base != initial_func_cfi.regs[i].base ||
+		    state->regs[i].offset != initial_func_cfi.regs[i].offset)
+			return true;
+
+	return false;
+}
+
+static bool has_valid_stack_frame(struct insn_state *state)
+{
+	if (state->cfa.base == CFI_BP && state->regs[CFI_BP].base == CFI_CFA &&
+	    state->regs[CFI_BP].offset == -16)
+		return true;
+
+	if (state->drap && state->regs[CFI_BP].base == CFI_BP)
+		return true;
+
+	return false;
+}
+
+static int update_insn_state_regs(struct instruction *insn, struct insn_state *state)
+{
+	struct cfi_reg *cfa = &state->cfa;
+	struct stack_op *op = &insn->stack_op;
+
+	if (cfa->base != CFI_SP)
+		return 0;
+
+	/* push */
+	if (op->dest.type == OP_DEST_PUSH)
+		cfa->offset += 8;
+
+	/* pop */
+	if (op->src.type == OP_SRC_POP)
+		cfa->offset -= 8;
+
+	/* add immediate to sp */
+	if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
+	    op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
+		cfa->offset -= op->src.offset;
+
+	return 0;
+}
+
+static void save_reg(struct insn_state *state, unsigned char reg, int base,
+		     int offset)
+{
+	if (arch_callee_saved_reg(reg) &&
+	    state->regs[reg].base == CFI_UNDEFINED) {
+		state->regs[reg].base = base;
+		state->regs[reg].offset = offset;
+	}
+}
+
+static void restore_reg(struct insn_state *state, unsigned char reg)
+{
+	state->regs[reg].base = CFI_UNDEFINED;
+	state->regs[reg].offset = 0;
+}
+
+/*
+ * A note about DRAP stack alignment:
+ *
+ * GCC has the concept of a DRAP register, which is used to help keep track of
+ * the stack pointer when aligning the stack.  r10 or r13 is used as the DRAP
+ * register.  The typical DRAP pattern is:
+ *
+ *   4c 8d 54 24 08		lea    0x8(%rsp),%r10
+ *   48 83 e4 c0		and    $0xffffffffffffffc0,%rsp
+ *   41 ff 72 f8		pushq  -0x8(%r10)
+ *   55				push   %rbp
+ *   48 89 e5			mov    %rsp,%rbp
+ *				(more pushes)
+ *   41 52			push   %r10
+ *				...
+ *   41 5a			pop    %r10
+ *				(more pops)
+ *   5d				pop    %rbp
+ *   49 8d 62 f8		lea    -0x8(%r10),%rsp
+ *   c3				retq
+ *
+ * There are some variations in the epilogues, like:
+ *
+ *   5b				pop    %rbx
+ *   41 5a			pop    %r10
+ *   41 5c			pop    %r12
+ *   41 5d			pop    %r13
+ *   41 5e			pop    %r14
+ *   c9				leaveq
+ *   49 8d 62 f8		lea    -0x8(%r10),%rsp
+ *   c3				retq
+ *
+ * and:
+ *
+ *   4c 8b 55 e8		mov    -0x18(%rbp),%r10
+ *   48 8b 5d e0		mov    -0x20(%rbp),%rbx
+ *   4c 8b 65 f0		mov    -0x10(%rbp),%r12
+ *   4c 8b 6d f8		mov    -0x8(%rbp),%r13
+ *   c9				leaveq
+ *   49 8d 62 f8		lea    -0x8(%r10),%rsp
+ *   c3				retq
+ *
+ * Sometimes r13 is used as the DRAP register, in which case it's saved and
+ * restored beforehand:
+ *
+ *   41 55			push   %r13
+ *   4c 8d 6c 24 10		lea    0x10(%rsp),%r13
+ *   48 83 e4 f0		and    $0xfffffffffffffff0,%rsp
+ *				...
+ *   49 8d 65 f0		lea    -0x10(%r13),%rsp
+ *   41 5d			pop    %r13
+ *   c3				retq
+ */
+static int update_insn_state(struct instruction *insn, struct insn_state *state)
+{
+	struct stack_op *op = &insn->stack_op;
+	struct cfi_reg *cfa = &state->cfa;
+	struct cfi_reg *regs = state->regs;
+
+	/* stack operations don't make sense with an undefined CFA */
+	if (cfa->base == CFI_UNDEFINED) {
+		if (insn->func) {
+			WARN_FUNC("undefined stack state", insn->sec, insn->offset);
+			return -1;
+		}
+		return 0;
+	}
+
+	if (state->type == ORC_TYPE_REGS || state->type == ORC_TYPE_REGS_IRET)
+		return update_insn_state_regs(insn, state);
+
+	switch (op->dest.type) {
+
+	case OP_DEST_REG:
+		switch (op->src.type) {
+
+		case OP_SRC_REG:
+			if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
+			    cfa->base == CFI_SP &&
+			    regs[CFI_BP].base == CFI_CFA &&
+			    regs[CFI_BP].offset == -cfa->offset) {
+
+				/* mov %rsp, %rbp */
+				cfa->base = op->dest.reg;
+				state->bp_scratch = false;
+			}
+
+			else if (op->src.reg == CFI_SP &&
+				 op->dest.reg == CFI_BP && state->drap) {
+
+				/* drap: mov %rsp, %rbp */
+				regs[CFI_BP].base = CFI_BP;
+				regs[CFI_BP].offset = -state->stack_size;
+				state->bp_scratch = false;
+			}
+
+			else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
+
+				/*
+				 * mov %rsp, %reg
+				 *
+				 * This is needed for the rare case where GCC
+				 * does:
+				 *
+				 *   mov    %rsp, %rax
+				 *   ...
+				 *   mov    %rax, %rsp
+				 */
+				state->vals[op->dest.reg].base = CFI_CFA;
+				state->vals[op->dest.reg].offset = -state->stack_size;
+			}
+
+			else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
+				 cfa->base == CFI_BP) {
+
+				/*
+				 * mov %rbp, %rsp
+				 *
+				 * Restore the original stack pointer (Clang).
+				 */
+				state->stack_size = -state->regs[CFI_BP].offset;
+			}
+
+			else if (op->dest.reg == cfa->base) {
+
+				/* mov %reg, %rsp */
+				if (cfa->base == CFI_SP &&
+				    state->vals[op->src.reg].base == CFI_CFA) {
+
+					/*
+					 * This is needed for the rare case
+					 * where GCC does something dumb like:
+					 *
+					 *   lea    0x8(%rsp), %rcx
+					 *   ...
+					 *   mov    %rcx, %rsp
+					 */
+					cfa->offset = -state->vals[op->src.reg].offset;
+					state->stack_size = cfa->offset;
+
+				} else {
+					cfa->base = CFI_UNDEFINED;
+					cfa->offset = 0;
+				}
+			}
+
+			break;
+
+		case OP_SRC_ADD:
+			if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {
+
+				/* add imm, %rsp */
+				state->stack_size -= op->src.offset;
+				if (cfa->base == CFI_SP)
+					cfa->offset -= op->src.offset;
+				break;
+			}
+
+			if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
+
+				/* lea disp(%rbp), %rsp */
+				state->stack_size = -(op->src.offset + regs[CFI_BP].offset);
+				break;
+			}
+
+			if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
+
+				/* drap: lea disp(%rsp), %drap */
+				state->drap_reg = op->dest.reg;
+
+				/*
+				 * lea disp(%rsp), %reg
+				 *
+				 * This is needed for the rare case where GCC
+				 * does something dumb like:
+				 *
+				 *   lea    0x8(%rsp), %rcx
+				 *   ...
+				 *   mov    %rcx, %rsp
+				 */
+				state->vals[op->dest.reg].base = CFI_CFA;
+				state->vals[op->dest.reg].offset = \
+					-state->stack_size + op->src.offset;
+
+				break;
+			}
+
+			if (state->drap && op->dest.reg == CFI_SP &&
+			    op->src.reg == state->drap_reg) {
+
+				 /* drap: lea disp(%drap), %rsp */
+				cfa->base = CFI_SP;
+				cfa->offset = state->stack_size = -op->src.offset;
+				state->drap_reg = CFI_UNDEFINED;
+				state->drap = false;
+				break;
+			}
+
+			if (op->dest.reg == state->cfa.base) {
+				WARN_FUNC("unsupported stack register modification",
+					  insn->sec, insn->offset);
+				return -1;
+			}
+
+			break;
+
+		case OP_SRC_AND:
+			if (op->dest.reg != CFI_SP ||
+			    (state->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
+			    (state->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
+				WARN_FUNC("unsupported stack pointer realignment",
+					  insn->sec, insn->offset);
+				return -1;
+			}
+
+			if (state->drap_reg != CFI_UNDEFINED) {
+				/* drap: and imm, %rsp */
+				cfa->base = state->drap_reg;
+				cfa->offset = state->stack_size = 0;
+				state->drap = true;
+			}
+
+			/*
+			 * Older versions of GCC (4.8ish) realign the stack
+			 * without DRAP, with a frame pointer.
+			 */
+
+			break;
+
+		case OP_SRC_POP:
+			if (!state->drap && op->dest.type == OP_DEST_REG &&
+			    op->dest.reg == cfa->base) {
+
+				/* pop %rbp */
+				cfa->base = CFI_SP;
+			}
+
+			if (state->drap && cfa->base == CFI_BP_INDIRECT &&
+			    op->dest.type == OP_DEST_REG &&
+			    op->dest.reg == state->drap_reg &&
+			    state->drap_offset == -state->stack_size) {
+
+				/* drap: pop %drap */
+				cfa->base = state->drap_reg;
+				cfa->offset = 0;
+				state->drap_offset = -1;
+
+			} else if (regs[op->dest.reg].offset == -state->stack_size) {
+
+				/* pop %reg */
+				restore_reg(state, op->dest.reg);
+			}
+
+			state->stack_size -= 8;
+			if (cfa->base == CFI_SP)
+				cfa->offset -= 8;
+
+			break;
+
+		case OP_SRC_REG_INDIRECT:
+			if (state->drap && op->src.reg == CFI_BP &&
+			    op->src.offset == state->drap_offset) {
+
+				/* drap: mov disp(%rbp), %drap */
+				cfa->base = state->drap_reg;
+				cfa->offset = 0;
+				state->drap_offset = -1;
+			}
+
+			if (state->drap && op->src.reg == CFI_BP &&
+			    op->src.offset == regs[op->dest.reg].offset) {
+
+				/* drap: mov disp(%rbp), %reg */
+				restore_reg(state, op->dest.reg);
+
+			} else if (op->src.reg == cfa->base &&
+			    op->src.offset == regs[op->dest.reg].offset + cfa->offset) {
+
+				/* mov disp(%rbp), %reg */
+				/* mov disp(%rsp), %reg */
+				restore_reg(state, op->dest.reg);
+			}
+
+			break;
+
+		default:
+			WARN_FUNC("unknown stack-related instruction",
+				  insn->sec, insn->offset);
+			return -1;
+		}
+
+		break;
+
+	case OP_DEST_PUSH:
+		state->stack_size += 8;
+		if (cfa->base == CFI_SP)
+			cfa->offset += 8;
+
+		if (op->src.type != OP_SRC_REG)
+			break;
+
+		if (state->drap) {
+			if (op->src.reg == cfa->base && op->src.reg == state->drap_reg) {
+
+				/* drap: push %drap */
+				cfa->base = CFI_BP_INDIRECT;
+				cfa->offset = -state->stack_size;
+
+				/* save drap so we know when to restore it */
+				state->drap_offset = -state->stack_size;
+
+			} else if (op->src.reg == CFI_BP && cfa->base == state->drap_reg) {
+
+				/* drap: push %rbp */
+				state->stack_size = 0;
+
+			} else if (regs[op->src.reg].base == CFI_UNDEFINED) {
+
+				/* drap: push %reg */
+				save_reg(state, op->src.reg, CFI_BP, -state->stack_size);
+			}
+
+		} else {
+
+			/* push %reg */
+			save_reg(state, op->src.reg, CFI_CFA, -state->stack_size);
+		}
+
+		/* detect when asm code uses rbp as a scratch register */
+		if (!no_fp && insn->func && op->src.reg == CFI_BP &&
+		    cfa->base != CFI_BP)
+			state->bp_scratch = true;
+		break;
+
+	case OP_DEST_REG_INDIRECT:
+
+		if (state->drap) {
+			if (op->src.reg == cfa->base && op->src.reg == state->drap_reg) {
+
+				/* drap: mov %drap, disp(%rbp) */
+				cfa->base = CFI_BP_INDIRECT;
+				cfa->offset = op->dest.offset;
+
+				/* save drap offset so we know when to restore it */
+				state->drap_offset = op->dest.offset;
+			}
+
+			else if (regs[op->src.reg].base == CFI_UNDEFINED) {
+
+				/* drap: mov reg, disp(%rbp) */
+				save_reg(state, op->src.reg, CFI_BP, op->dest.offset);
+			}
+
+		} else if (op->dest.reg == cfa->base) {
+
+			/* mov reg, disp(%rbp) */
+			/* mov reg, disp(%rsp) */
+			save_reg(state, op->src.reg, CFI_CFA,
+				 op->dest.offset - state->cfa.offset);
+		}
+
+		break;
+
+	case OP_DEST_LEAVE:
+		if ((!state->drap && cfa->base != CFI_BP) ||
+		    (state->drap && cfa->base != state->drap_reg)) {
+			WARN_FUNC("leave instruction with modified stack frame",
+				  insn->sec, insn->offset);
+			return -1;
+		}
+
+		/* leave (mov %rbp, %rsp; pop %rbp) */
+
+		state->stack_size = -state->regs[CFI_BP].offset - 8;
+		restore_reg(state, CFI_BP);
+
+		if (!state->drap) {
+			cfa->base = CFI_SP;
+			cfa->offset -= 8;
+		}
+
+		break;
+
+	case OP_DEST_MEM:
+		if (op->src.type != OP_SRC_POP) {
+			WARN_FUNC("unknown stack-related memory operation",
+				  insn->sec, insn->offset);
+			return -1;
+		}
+
+		/* pop mem */
+		state->stack_size -= 8;
+		if (cfa->base == CFI_SP)
+			cfa->offset -= 8;
+
+		break;
+
+	default:
+		WARN_FUNC("unknown stack-related instruction",
+			  insn->sec, insn->offset);
+		return -1;
+	}
+
+	return 0;
+}
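+
+/*
+ * Worked example (simplified, starting from the initial per-function state of
+ * CFA = sp+8): a standard frame-pointer prologue is tracked as
+ *
+ *	push %rbp		-> stack_size 16, CFA sp+16, rbp saved at cfa-16
+ *	mov  %rsp, %rbp		-> CFA base switches from sp to bp
+ *	sub  $0x20, %rsp	-> stack_size 48, CFA offset unchanged (base is bp)
+ *
+ * which is exactly the shape has_valid_stack_frame() accepts at a call site.
+ */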
+
+static bool insn_state_match(struct instruction *insn, struct insn_state *state)
+{
+	struct insn_state *state1 = &insn->state, *state2 = state;
+	int i;
+
+	if (memcmp(&state1->cfa, &state2->cfa, sizeof(state1->cfa))) {
+		WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
+			  insn->sec, insn->offset,
+			  state1->cfa.base, state1->cfa.offset,
+			  state2->cfa.base, state2->cfa.offset);
+
+	} else if (memcmp(&state1->regs, &state2->regs, sizeof(state1->regs))) {
+		for (i = 0; i < CFI_NUM_REGS; i++) {
+			if (!memcmp(&state1->regs[i], &state2->regs[i],
+				    sizeof(struct cfi_reg)))
+				continue;
+
+			WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
+				  insn->sec, insn->offset,
+				  i, state1->regs[i].base, state1->regs[i].offset,
+				  i, state2->regs[i].base, state2->regs[i].offset);
+			break;
+		}
+
+	} else if (state1->type != state2->type) {
+		WARN_FUNC("stack state mismatch: type1=%d type2=%d",
+			  insn->sec, insn->offset, state1->type, state2->type);
+
+	} else if (state1->drap != state2->drap ||
+		 (state1->drap && state1->drap_reg != state2->drap_reg) ||
+		 (state1->drap && state1->drap_offset != state2->drap_offset)) {
+		WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
+			  insn->sec, insn->offset,
+			  state1->drap, state1->drap_reg, state1->drap_offset,
+			  state2->drap, state2->drap_reg, state2->drap_offset);
+
+	} else
+		return true;
+
+	return false;
+}
+
+/*
+ * Follow the branch starting at the given instruction, and recursively follow
+ * any other branches (jumps).  Meanwhile, track the frame pointer state at
+ * each instruction and validate all the rules described in
+ * tools/objtool/Documentation/stack-validation.txt.
+ */
+static int validate_branch(struct objtool_file *file, struct instruction *first,
+			   struct insn_state state)
+{
+	struct alternative *alt;
+	struct instruction *insn, *next_insn;
+	struct section *sec;
+	struct symbol *func = NULL;
+	int ret;
+
+	insn = first;
+	sec = insn->sec;
+
+	if (insn->alt_group && list_empty(&insn->alts)) {
+		WARN_FUNC("don't know how to handle branch to middle of alternative instruction group",
+			  sec, insn->offset);
+		return 1;
+	}
+
+	while (1) {
+		next_insn = next_insn_same_sec(file, insn);
+
+		if (file->c_file && func && insn->func && func != insn->func->pfunc) {
+			WARN("%s() falls through to next function %s()",
+			     func->name, insn->func->name);
+			return 1;
+		}
+
+		func = insn->func ? insn->func->pfunc : NULL;
+
+		if (func && insn->ignore) {
+			WARN_FUNC("BUG: why am I validating an ignored function?",
+				  sec, insn->offset);
+			return 1;
+		}
+
+		if (insn->visited) {
+			if (!insn->hint && !insn_state_match(insn, &state))
+				return 1;
+
+			return 0;
+		}
+
+		if (insn->hint) {
+			if (insn->restore) {
+				struct instruction *save_insn, *i;
+
+				i = insn;
+				save_insn = NULL;
+				func_for_each_insn_continue_reverse(file, insn->func, i) {
+					if (i->save) {
+						save_insn = i;
+						break;
+					}
+				}
+
+				if (!save_insn) {
+					WARN_FUNC("no corresponding CFI save for CFI restore",
+						  sec, insn->offset);
+					return 1;
+				}
+
+				if (!save_insn->visited) {
+					/*
+					 * Oops, no state to copy yet.
+					 * Hopefully we can reach this
+					 * instruction from another branch
+					 * after the save insn has been
+					 * visited.
+					 */
+					if (insn == first)
+						return 0;
+
+					WARN_FUNC("objtool isn't smart enough to handle this CFI save/restore combo",
+						  sec, insn->offset);
+					return 1;
+				}
+
+				insn->state = save_insn->state;
+			}
+
+			state = insn->state;
+
+		} else
+			insn->state = state;
+
+		insn->visited = true;
+
+		if (!insn->ignore_alts) {
+			list_for_each_entry(alt, &insn->alts, list) {
+				ret = validate_branch(file, alt->insn, state);
+				if (ret)
+					return 1;
+			}
+		}
+
+		switch (insn->type) {
+
+		case INSN_RETURN:
+			if (func && has_modified_stack_frame(&state)) {
+				WARN_FUNC("return with modified stack frame",
+					  sec, insn->offset);
+				return 1;
+			}
+
+			if (state.bp_scratch) {
+				WARN("%s uses BP as a scratch register",
+				     insn->func->name);
+				return 1;
+			}
+
+			return 0;
+
+		case INSN_CALL:
+			if (is_fentry_call(insn))
+				break;
+
+			ret = dead_end_function(file, insn->call_dest);
+			if (ret == 1)
+				return 0;
+			if (ret == -1)
+				return 1;
+
+			/* fallthrough */
+		case INSN_CALL_DYNAMIC:
+			if (!no_fp && func && !has_valid_stack_frame(&state)) {
+				WARN_FUNC("call without frame pointer save/setup",
+					  sec, insn->offset);
+				return 1;
+			}
+			break;
+
+		case INSN_JUMP_CONDITIONAL:
+		case INSN_JUMP_UNCONDITIONAL:
+			if (insn->jump_dest &&
+			    (!func || !insn->jump_dest->func ||
+			     insn->jump_dest->func->pfunc == func)) {
+				ret = validate_branch(file, insn->jump_dest,
+						      state);
+				if (ret)
+					return 1;
+
+			} else if (func && has_modified_stack_frame(&state)) {
+				WARN_FUNC("sibling call from callable instruction with modified stack frame",
+					  sec, insn->offset);
+				return 1;
+			}
+
+			if (insn->type == INSN_JUMP_UNCONDITIONAL)
+				return 0;
+
+			break;
+
+		case INSN_JUMP_DYNAMIC:
+			if (func && list_empty(&insn->alts) &&
+			    has_modified_stack_frame(&state)) {
+				WARN_FUNC("sibling call from callable instruction with modified stack frame",
+					  sec, insn->offset);
+				return 1;
+			}
+
+			return 0;
+
+		case INSN_CONTEXT_SWITCH:
+			if (func && (!next_insn || !next_insn->hint)) {
+				WARN_FUNC("unsupported instruction in callable function",
+					  sec, insn->offset);
+				return 1;
+			}
+			return 0;
+
+		case INSN_STACK:
+			if (update_insn_state(insn, &state))
+				return 1;
+
+			break;
+
+		default:
+			break;
+		}
+
+		if (insn->dead_end)
+			return 0;
+
+		if (!next_insn) {
+			if (state.cfa.base == CFI_UNDEFINED)
+				return 0;
+			WARN("%s: unexpected end of section", sec->name);
+			return 1;
+		}
+
+		insn = next_insn;
+	}
+
+	return 0;
+}
+
+static int validate_unwind_hints(struct objtool_file *file)
+{
+	struct instruction *insn;
+	int ret, warnings = 0;
+	struct insn_state state;
+
+	if (!file->hints)
+		return 0;
+
+	clear_insn_state(&state);
+
+	for_each_insn(file, insn) {
+		if (insn->hint && !insn->visited) {
+			ret = validate_branch(file, insn, state);
+			warnings += ret;
+		}
+	}
+
+	return warnings;
+}
+
+static int validate_retpoline(struct objtool_file *file)
+{
+	struct instruction *insn;
+	int warnings = 0;
+
+	for_each_insn(file, insn) {
+		if (insn->type != INSN_JUMP_DYNAMIC &&
+		    insn->type != INSN_CALL_DYNAMIC)
+			continue;
+
+		if (insn->retpoline_safe)
+			continue;
+
+		/*
+		 * .init.text code is run before userspace and thus doesn't
+		 * strictly need retpolines, except for modules, which are
+		 * loaded late and very much do need retpolines in their
+		 * .init.text.
+		 */
+		if (!strcmp(insn->sec->name, ".init.text") && !module)
+			continue;
+
+		WARN_FUNC("indirect %s found in RETPOLINE build",
+			  insn->sec, insn->offset,
+			  insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
+
+		warnings++;
+	}
+
+	return warnings;
+}
+
+static bool is_kasan_insn(struct instruction *insn)
+{
+	return (insn->type == INSN_CALL &&
+		!strcmp(insn->call_dest->name, "__asan_handle_no_return"));
+}
+
+static bool is_ubsan_insn(struct instruction *insn)
+{
+	return (insn->type == INSN_CALL &&
+		!strcmp(insn->call_dest->name,
+			"__ubsan_handle_builtin_unreachable"));
+}
+
+static bool ignore_unreachable_insn(struct instruction *insn)
+{
+	int i;
+
+	if (insn->ignore || insn->type == INSN_NOP)
+		return true;
+
+	/*
+	 * Ignore any unused exceptions.  This can happen when a whitelisted
+	 * function has an exception table entry.
+	 *
+	 * Also ignore alternative replacement instructions.  This can happen
+	 * when a whitelisted function uses one of the ALTERNATIVE macros.
+	 */
+	if (!strcmp(insn->sec->name, ".fixup") ||
+	    !strcmp(insn->sec->name, ".altinstr_replacement") ||
+	    !strcmp(insn->sec->name, ".altinstr_aux"))
+		return true;
+
+	/*
+	 * Check if this (or a subsequent) instruction is related to
+	 * CONFIG_UBSAN or CONFIG_KASAN.
+	 *
+	 * End the search at 5 instructions to avoid going into the weeds.
+	 */
+	if (!insn->func)
+		return false;
+	for (i = 0; i < 5; i++) {
+
+		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
+			return true;
+
+		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
+			if (insn->jump_dest &&
+			    insn->jump_dest->func == insn->func) {
+				insn = insn->jump_dest;
+				continue;
+			}
+
+			break;
+		}
+
+		if (insn->offset + insn->len >= insn->func->offset + insn->func->len)
+			break;
+
+		insn = list_next_entry(insn, list);
+	}
+
+	return false;
+}
+
+static int validate_functions(struct objtool_file *file)
+{
+	struct section *sec;
+	struct symbol *func;
+	struct instruction *insn;
+	struct insn_state state;
+	int ret, warnings = 0;
+
+	clear_insn_state(&state);
+
+	state.cfa = initial_func_cfi.cfa;
+	memcpy(&state.regs, &initial_func_cfi.regs,
+	       CFI_NUM_REGS * sizeof(struct cfi_reg));
+	state.stack_size = initial_func_cfi.cfa.offset;
+
+	for_each_sec(file, sec) {
+		list_for_each_entry(func, &sec->symbol_list, list) {
+			if (func->type != STT_FUNC || func->pfunc != func)
+				continue;
+
+			insn = find_insn(file, sec, func->offset);
+			if (!insn || insn->ignore)
+				continue;
+
+			ret = validate_branch(file, insn, state);
+			warnings += ret;
+		}
+	}
+
+	return warnings;
+}
+
+static int validate_reachable_instructions(struct objtool_file *file)
+{
+	struct instruction *insn;
+
+	if (file->ignore_unreachables)
+		return 0;
+
+	for_each_insn(file, insn) {
+		if (insn->visited || ignore_unreachable_insn(insn))
+			continue;
+
+		WARN_FUNC("unreachable instruction", insn->sec, insn->offset);
+		return 1;
+	}
+
+	return 0;
+}
+
+static void cleanup(struct objtool_file *file)
+{
+	struct instruction *insn, *tmpinsn;
+	struct alternative *alt, *tmpalt;
+
+	list_for_each_entry_safe(insn, tmpinsn, &file->insn_list, list) {
+		list_for_each_entry_safe(alt, tmpalt, &insn->alts, list) {
+			list_del(&alt->list);
+			free(alt);
+		}
+		list_del(&insn->list);
+		hash_del(&insn->hash);
+		free(insn);
+	}
+	elf_close(file->elf);
+}
+
+int check(const char *_objname, bool orc)
+{
+	struct objtool_file file;
+	int ret, warnings = 0;
+
+	objname = _objname;
+
+	file.elf = elf_open(objname, orc ? O_RDWR : O_RDONLY);
+	if (!file.elf)
+		return 1;
+
+	INIT_LIST_HEAD(&file.insn_list);
+	hash_init(file.insn_hash);
+	file.whitelist = find_section_by_name(file.elf, ".discard.func_stack_frame_non_standard");
+	file.rodata = find_section_by_name(file.elf, ".rodata");
+	file.c_file = find_section_by_name(file.elf, ".comment");
+	file.ignore_unreachables = no_unreachable;
+	file.hints = false;
+
+	arch_initial_func_cfi_state(&initial_func_cfi);
+
+	ret = decode_sections(&file);
+	if (ret < 0)
+		goto out;
+	warnings += ret;
+
+	if (list_empty(&file.insn_list))
+		goto out;
+
+	if (retpoline) {
+		ret = validate_retpoline(&file);
+		if (ret < 0)
+			return ret;
+		warnings += ret;
+	}
+
+	ret = validate_functions(&file);
+	if (ret < 0)
+		goto out;
+	warnings += ret;
+
+	ret = validate_unwind_hints(&file);
+	if (ret < 0)
+		goto out;
+	warnings += ret;
+
+	if (!warnings) {
+		ret = validate_reachable_instructions(&file);
+		if (ret < 0)
+			goto out;
+		warnings += ret;
+	}
+
+	if (orc) {
+		ret = create_orc(&file);
+		if (ret < 0)
+			goto out;
+
+		ret = create_orc_sections(&file);
+		if (ret < 0)
+			goto out;
+
+		ret = elf_write(file.elf);
+		if (ret < 0)
+			goto out;
+	}
+
+out:
+	cleanup(&file);
+
+	/* ignore warnings for now until we get all the code cleaned up */
+	if (ret || warnings)
+		return 0;
+	return 0;
+}
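+
+/*
+ * Usage sketch (hypothetical callers; the real entry points are cmd_check()
+ * and cmd_orc()): a plain validation pass and an ORC-generating pass only
+ * differ in the @orc flag:
+ *
+ *	check(objname, false);	-- validate only, object opened read-only
+ *	check(objname, true);	-- also build .orc_unwind[_ip] and rewrite the ELF
+ */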
diff --git a/tools/objtool/check.h b/tools/objtool/check.h
new file mode 100644
index 0000000..c6b68fc
--- /dev/null
+++ b/tools/objtool/check.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _CHECK_H
+#define _CHECK_H
+
+#include <stdbool.h>
+#include "elf.h"
+#include "cfi.h"
+#include "arch.h"
+#include "orc.h"
+#include <linux/hashtable.h>
+
+struct insn_state {
+	struct cfi_reg cfa;
+	struct cfi_reg regs[CFI_NUM_REGS];
+	int stack_size;
+	unsigned char type;
+	bool bp_scratch;
+	bool drap;
+	int drap_reg, drap_offset;
+	struct cfi_reg vals[CFI_NUM_REGS];
+};
+
+struct instruction {
+	struct list_head list;
+	struct hlist_node hash;
+	struct section *sec;
+	unsigned long offset;
+	unsigned int len;
+	unsigned char type;
+	unsigned long immediate;
+	bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts;
+	bool retpoline_safe;
+	struct symbol *call_dest;
+	struct instruction *jump_dest;
+	struct instruction *first_jump_src;
+	struct list_head alts;
+	struct symbol *func;
+	struct stack_op stack_op;
+	struct insn_state state;
+	struct orc_entry orc;
+};
+
+struct objtool_file {
+	struct elf *elf;
+	struct list_head insn_list;
+	DECLARE_HASHTABLE(insn_hash, 16);
+	struct section *rodata, *whitelist;
+	bool ignore_unreachables, c_file, hints;
+};
+
+int check(const char *objname, bool orc);
+
+struct instruction *find_insn(struct objtool_file *file,
+			      struct section *sec, unsigned long offset);
+
+#define for_each_insn(file, insn)					\
+	list_for_each_entry(insn, &file->insn_list, list)
+
+#define sec_for_each_insn(file, sec, insn)				\
+	for (insn = find_insn(file, sec, 0);				\
+	     insn && &insn->list != &file->insn_list &&			\
+			insn->sec == sec;				\
+	     insn = list_next_entry(insn, list))
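+
+/*
+ * Usage sketch (illustrative only): counting a section's decoded instructions
+ * with the iterator above.
+ *
+ *	struct instruction *insn;
+ *	int nr = 0;
+ *
+ *	sec_for_each_insn(file, sec, insn)
+ *		nr++;
+ */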
+
+
+#endif /* _CHECK_H */
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index faacf0c..4e60e10 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -31,13 +31,6 @@
 #include "elf.h"
 #include "warn.h"
 
-/*
- * Fallback for systems without this "read, mmaping if possible" cmd.
- */
-#ifndef ELF_C_READ_MMAP
-#define ELF_C_READ_MMAP ELF_C_READ
-#endif
-
 struct section *find_section_by_name(struct elf *elf, const char *name)
 {
 	struct section *sec;
@@ -86,6 +79,19 @@
 	return NULL;
 }
 
+struct symbol *find_symbol_by_name(struct elf *elf, const char *name)
+{
+	struct section *sec;
+	struct symbol *sym;
+
+	list_for_each_entry(sec, &elf->sections, list)
+		list_for_each_entry(sym, &sec->symbol_list, list)
+			if (!strcmp(sym->name, name))
+				return sym;
+
+	return NULL;
+}
+
 struct symbol *find_symbol_containing(struct section *sec, unsigned long offset)
 {
 	struct symbol *sym;
@@ -140,12 +146,12 @@
 	int i;
 
 	if (elf_getshdrnum(elf->elf, &sections_nr)) {
-		perror("elf_getshdrnum");
+		WARN_ELF("elf_getshdrnum");
 		return -1;
 	}
 
 	if (elf_getshdrstrndx(elf->elf, &shstrndx)) {
-		perror("elf_getshdrstrndx");
+		WARN_ELF("elf_getshdrstrndx");
 		return -1;
 	}
 
@@ -166,37 +172,37 @@
 
 		s = elf_getscn(elf->elf, i);
 		if (!s) {
-			perror("elf_getscn");
+			WARN_ELF("elf_getscn");
 			return -1;
 		}
 
 		sec->idx = elf_ndxscn(s);
 
 		if (!gelf_getshdr(s, &sec->sh)) {
-			perror("gelf_getshdr");
+			WARN_ELF("gelf_getshdr");
 			return -1;
 		}
 
 		sec->name = elf_strptr(elf->elf, shstrndx, sec->sh.sh_name);
 		if (!sec->name) {
-			perror("elf_strptr");
+			WARN_ELF("elf_strptr");
 			return -1;
 		}
 
-		sec->elf_data = elf_getdata(s, NULL);
-		if (!sec->elf_data) {
-			perror("elf_getdata");
-			return -1;
+		if (sec->sh.sh_size != 0) {
+			sec->data = elf_getdata(s, NULL);
+			if (!sec->data) {
+				WARN_ELF("elf_getdata");
+				return -1;
+			}
+			if (sec->data->d_off != 0 ||
+			    sec->data->d_size != sec->sh.sh_size) {
+				WARN("unexpected data attributes for %s",
+				     sec->name);
+				return -1;
+			}
 		}
-
-		if (sec->elf_data->d_off != 0 ||
-		    sec->elf_data->d_size != sec->sh.sh_size) {
-			WARN("unexpected data attributes for %s", sec->name);
-			return -1;
-		}
-
-		sec->data = (unsigned long)sec->elf_data->d_buf;
-		sec->len = sec->elf_data->d_size;
+		sec->len = sec->sh.sh_size;
 	}
 
 	/* sanity check, one more call to elf_nextscn() should return NULL */
@@ -210,10 +216,11 @@
 
 static int read_symbols(struct elf *elf)
 {
-	struct section *symtab;
-	struct symbol *sym;
+	struct section *symtab, *sec;
+	struct symbol *sym, *pfunc;
 	struct list_head *entry, *tmp;
 	int symbols_nr, i;
+	char *coldstr;
 
 	symtab = find_section_by_name(elf, ".symtab");
 	if (!symtab) {
@@ -233,15 +240,15 @@
 
 		sym->idx = i;
 
-		if (!gelf_getsym(symtab->elf_data, i, &sym->sym)) {
-			perror("gelf_getsym");
+		if (!gelf_getsym(symtab->data, i, &sym->sym)) {
+			WARN_ELF("gelf_getsym");
 			goto err;
 		}
 
 		sym->name = elf_strptr(elf->elf, symtab->sh.sh_link,
 				       sym->sym.st_name);
 		if (!sym->name) {
-			perror("elf_strptr");
+			WARN_ELF("elf_strptr");
 			goto err;
 		}
 
@@ -288,6 +295,30 @@
 		hash_add(sym->sec->symbol_hash, &sym->hash, sym->idx);
 	}
 
+	/* Create parent/child links for any cold subfunctions */
+	list_for_each_entry(sec, &elf->sections, list) {
+		list_for_each_entry(sym, &sec->symbol_list, list) {
+			if (sym->type != STT_FUNC)
+				continue;
+			sym->pfunc = sym->cfunc = sym;
+			coldstr = strstr(sym->name, ".cold.");
+			if (coldstr) {
+				coldstr[0] = '\0';
+				pfunc = find_symbol_by_name(elf, sym->name);
+				coldstr[0] = '.';
+
+				if (!pfunc) {
+					WARN("%s(): can't find parent function",
+					     sym->name);
+					goto err;
+				}
+
+				sym->pfunc = pfunc;
+				pfunc->cfunc = sym;
+			}
+		}
+	}
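+
+	/*
+	 * Example: GCC's hot/cold splitting can emit "foo" plus "foo.cold.0";
+	 * after the loop above, foo.cold.0->pfunc == foo and
+	 * foo->cfunc == foo.cold.0, while unsplit functions point to
+	 * themselves.
+	 */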
+
 	return 0;
 
 err:
@@ -323,8 +354,8 @@
 			}
 			memset(rela, 0, sizeof(*rela));
 
-			if (!gelf_getrela(sec->elf_data, i, &rela->rela)) {
-				perror("gelf_getrela");
+			if (!gelf_getrela(sec->data, i, &rela->rela)) {
+				WARN_ELF("gelf_getrela");
 				return -1;
 			}
 
@@ -348,9 +379,10 @@
 	return 0;
 }
 
-struct elf *elf_open(const char *name)
+struct elf *elf_open(const char *name, int flags)
 {
 	struct elf *elf;
+	Elf_Cmd cmd;
 
 	elf_version(EV_CURRENT);
 
@@ -363,27 +395,28 @@
 
 	INIT_LIST_HEAD(&elf->sections);
 
-	elf->name = strdup(name);
-	if (!elf->name) {
-		perror("strdup");
-		goto err;
-	}
-
-	elf->fd = open(name, O_RDONLY);
+	elf->fd = open(name, flags);
 	if (elf->fd == -1) {
 		fprintf(stderr, "objtool: Can't open '%s': %s\n",
 			name, strerror(errno));
 		goto err;
 	}
 
-	elf->elf = elf_begin(elf->fd, ELF_C_READ_MMAP, NULL);
+	if ((flags & O_ACCMODE) == O_RDONLY)
+		cmd = ELF_C_READ_MMAP;
+	else if ((flags & O_ACCMODE) == O_RDWR)
+		cmd = ELF_C_RDWR;
+	else /* O_WRONLY */
+		cmd = ELF_C_WRITE;
+
+	elf->elf = elf_begin(elf->fd, cmd, NULL);
 	if (!elf->elf) {
-		perror("elf_begin");
+		WARN_ELF("elf_begin");
 		goto err;
 	}
 
 	if (!gelf_getehdr(elf->elf, &elf->ehdr)) {
-		perror("gelf_getehdr");
+		WARN_ELF("gelf_getehdr");
 		goto err;
 	}
 
@@ -403,12 +436,212 @@
 	return NULL;
 }
 
+struct section *elf_create_section(struct elf *elf, const char *name,
+				   size_t entsize, int nr)
+{
+	struct section *sec, *shstrtab;
+	size_t size = entsize * nr;
+	struct Elf_Scn *s;
+	Elf_Data *data;
+
+	sec = malloc(sizeof(*sec));
+	if (!sec) {
+		perror("malloc");
+		return NULL;
+	}
+	memset(sec, 0, sizeof(*sec));
+
+	INIT_LIST_HEAD(&sec->symbol_list);
+	INIT_LIST_HEAD(&sec->rela_list);
+	hash_init(sec->rela_hash);
+	hash_init(sec->symbol_hash);
+
+	list_add_tail(&sec->list, &elf->sections);
+
+	s = elf_newscn(elf->elf);
+	if (!s) {
+		WARN_ELF("elf_newscn");
+		return NULL;
+	}
+
+	sec->name = strdup(name);
+	if (!sec->name) {
+		perror("strdup");
+		return NULL;
+	}
+
+	sec->idx = elf_ndxscn(s);
+	sec->len = size;
+	sec->changed = true;
+
+	sec->data = elf_newdata(s);
+	if (!sec->data) {
+		WARN_ELF("elf_newdata");
+		return NULL;
+	}
+
+	sec->data->d_size = size;
+	sec->data->d_align = 1;
+
+	if (size) {
+		sec->data->d_buf = malloc(size);
+		if (!sec->data->d_buf) {
+			perror("malloc");
+			return NULL;
+		}
+		memset(sec->data->d_buf, 0, size);
+	}
+
+	if (!gelf_getshdr(s, &sec->sh)) {
+		WARN_ELF("gelf_getshdr");
+		return NULL;
+	}
+
+	sec->sh.sh_size = size;
+	sec->sh.sh_entsize = entsize;
+	sec->sh.sh_type = SHT_PROGBITS;
+	sec->sh.sh_addralign = 1;
+	sec->sh.sh_flags = SHF_ALLOC;
+
+
+	/* Add section name to .shstrtab */
+	shstrtab = find_section_by_name(elf, ".shstrtab");
+	if (!shstrtab) {
+		WARN("can't find .shstrtab section");
+		return NULL;
+	}
+
+	s = elf_getscn(elf->elf, shstrtab->idx);
+	if (!s) {
+		WARN_ELF("elf_getscn");
+		return NULL;
+	}
+
+	data = elf_newdata(s);
+	if (!data) {
+		WARN_ELF("elf_newdata");
+		return NULL;
+	}
+
+	data->d_buf = sec->name;
+	data->d_size = strlen(name) + 1;
+	data->d_align = 1;
+
+	sec->sh.sh_name = shstrtab->len;
+
+	shstrtab->len += strlen(name) + 1;
+	shstrtab->changed = true;
+
+	return sec;
+}
+
+struct section *elf_create_rela_section(struct elf *elf, struct section *base)
+{
+	char *relaname;
+	struct section *sec;
+
+	relaname = malloc(strlen(base->name) + strlen(".rela") + 1);
+	if (!relaname) {
+		perror("malloc");
+		return NULL;
+	}
+	strcpy(relaname, ".rela");
+	strcat(relaname, base->name);
+
+	sec = elf_create_section(elf, relaname, sizeof(GElf_Rela), 0);
+	free(relaname);
+	if (!sec)
+		return NULL;
+
+	base->rela = sec;
+	sec->base = base;
+
+	sec->sh.sh_type = SHT_RELA;
+	sec->sh.sh_addralign = 8;
+	sec->sh.sh_link = find_section_by_name(elf, ".symtab")->idx;
+	sec->sh.sh_info = base->idx;
+	sec->sh.sh_flags = SHF_INFO_LINK;
+
+	return sec;
+}
+
+int elf_rebuild_rela_section(struct section *sec)
+{
+	struct rela *rela;
+	int nr, idx = 0, size;
+	GElf_Rela *relas;
+
+	nr = 0;
+	list_for_each_entry(rela, &sec->rela_list, list)
+		nr++;
+
+	size = nr * sizeof(*relas);
+	relas = malloc(size);
+	if (!relas) {
+		perror("malloc");
+		return -1;
+	}
+
+	sec->data->d_buf = relas;
+	sec->data->d_size = size;
+
+	sec->sh.sh_size = size;
+
+	idx = 0;
+	list_for_each_entry(rela, &sec->rela_list, list) {
+		relas[idx].r_offset = rela->offset;
+		relas[idx].r_addend = rela->addend;
+		relas[idx].r_info = GELF_R_INFO(rela->sym->idx, rela->type);
+		idx++;
+	}
+
+	return 0;
+}
+
+int elf_write(struct elf *elf)
+{
+	struct section *sec;
+	Elf_Scn *s;
+
+	/* Update section headers for changed sections: */
+	list_for_each_entry(sec, &elf->sections, list) {
+		if (sec->changed) {
+			s = elf_getscn(elf->elf, sec->idx);
+			if (!s) {
+				WARN_ELF("elf_getscn");
+				return -1;
+			}
+			if (!gelf_update_shdr(s, &sec->sh)) {
+				WARN_ELF("gelf_update_shdr");
+				return -1;
+			}
+		}
+	}
+
+	/* Make sure the new section header entries get updated properly. */
+	elf_flagelf(elf->elf, ELF_C_SET, ELF_F_DIRTY);
+
+	/* Write all changes to the file. */
+	if (elf_update(elf->elf, ELF_C_WRITE) < 0) {
+		WARN_ELF("elf_update");
+		return -1;
+	}
+
+	return 0;
+}
+
 void elf_close(struct elf *elf)
 {
 	struct section *sec, *tmpsec;
 	struct symbol *sym, *tmpsym;
 	struct rela *rela, *tmprela;
 
+	if (elf->elf)
+		elf_end(elf->elf);
+
+	if (elf->fd > 0)
+		close(elf->fd);
+
 	list_for_each_entry_safe(sec, tmpsec, &elf->sections, list) {
 		list_for_each_entry_safe(sym, tmpsym, &sec->symbol_list, list) {
 			list_del(&sym->list);
@@ -423,11 +656,6 @@
 		list_del(&sec->list);
 		free(sec);
 	}
-	if (elf->name)
-		free(elf->name);
-	if (elf->fd > 0)
-		close(elf->fd);
-	if (elf->elf)
-		elf_end(elf->elf);
+
 	free(elf);
 }
diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h
index 731973e..de5cd2d 100644
--- a/tools/objtool/elf.h
+++ b/tools/objtool/elf.h
@@ -28,6 +28,13 @@
 # define elf_getshdrstrndx elf_getshstrndx
 #endif
 
+/*
+ * Fallback for systems without this "read, mmaping if possible" cmd.
+ */
+#ifndef ELF_C_READ_MMAP
+#define ELF_C_READ_MMAP ELF_C_READ
+#endif
+
 struct section {
 	struct list_head list;
 	GElf_Shdr sh;
@@ -37,11 +44,11 @@
 	DECLARE_HASHTABLE(rela_hash, 16);
 	struct section *base, *rela;
 	struct symbol *sym;
-	Elf_Data *elf_data;
+	Elf_Data *data;
 	char *name;
 	int idx;
-	unsigned long data;
 	unsigned int len;
+	bool changed, text;
 };
 
 struct symbol {
@@ -54,6 +61,7 @@
 	unsigned char bind, type;
 	unsigned long offset;
 	unsigned int len;
+	struct symbol *pfunc, *cfunc;
 };
 
 struct rela {
@@ -76,16 +84,23 @@
 };
 
 
-struct elf *elf_open(const char *name);
+struct elf *elf_open(const char *name, int flags);
 struct section *find_section_by_name(struct elf *elf, const char *name);
 struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset);
+struct symbol *find_symbol_by_name(struct elf *elf, const char *name);
 struct symbol *find_symbol_containing(struct section *sec, unsigned long offset);
 struct rela *find_rela_by_dest(struct section *sec, unsigned long offset);
 struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset,
 				     unsigned int len);
 struct symbol *find_containing_func(struct section *sec, unsigned long offset);
+struct section *elf_create_section(struct elf *elf, const char *name, size_t
+				   entsize, int nr);
+struct section *elf_create_rela_section(struct elf *elf, struct section *base);
+int elf_rebuild_rela_section(struct section *sec);
+int elf_write(struct elf *elf);
 void elf_close(struct elf *elf);
 
-
+#define for_each_sec(file, sec)						\
+	list_for_each_entry(sec, &file->elf->sections, list)
 
 #endif /* _OBJTOOL_ELF_H */
diff --git a/tools/objtool/objtool.c b/tools/objtool/objtool.c
index 46c326d..07f3299 100644
--- a/tools/objtool/objtool.c
+++ b/tools/objtool/objtool.c
@@ -31,11 +31,10 @@
 #include <stdlib.h>
 #include <subcmd/exec-cmd.h>
 #include <subcmd/pager.h>
+#include <linux/kernel.h>
 
 #include "builtin.h"
 
-#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
-
 struct cmd_struct {
 	const char *name;
 	int (*fn)(int, const char **);
@@ -43,10 +42,11 @@
 };
 
 static const char objtool_usage_string[] =
-	"objtool [OPTIONS] COMMAND [ARGS]";
+	"objtool COMMAND [ARGS]";
 
 static struct cmd_struct objtool_cmds[] = {
 	{"check",	cmd_check,	"Perform stack metadata validation on an object file" },
+	{"orc",		cmd_orc,	"Generate in-place ORC unwind tables for an object file" },
 };
 
 bool help;
@@ -70,7 +70,7 @@
 
 	printf("\n");
 
-	exit(1);
+	exit(129);
 }
 
 static void handle_options(int *argc, const char ***argv)
@@ -86,9 +86,7 @@
 			break;
 		} else {
 			fprintf(stderr, "Unknown option: %s\n", cmd);
-			fprintf(stderr, "\n Usage: %s\n",
-				objtool_usage_string);
-			exit(1);
+			cmd_usage();
 		}
 
 		(*argv)++;
diff --git a/tools/objtool/orc.h b/tools/objtool/orc.h
new file mode 100644
index 0000000..b0e92a6
--- /dev/null
+++ b/tools/objtool/orc.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ORC_H
+#define _ORC_H
+
+#include <asm/orc_types.h>
+
+struct objtool_file;
+
+int create_orc(struct objtool_file *file);
+int create_orc_sections(struct objtool_file *file);
+
+int orc_dump(const char *objname);
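+
+/*
+ * Expected calling order for generation (sketch; mirrors check() with
+ * orc=true in check.c):
+ *
+ *	create_orc(file);		-- derive per-insn ORC entries from the CFI state
+ *	create_orc_sections(file);	-- emit .orc_unwind and .orc_unwind_ip
+ *	elf_write(file->elf);		-- write the new sections back to the object
+ */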
+
+#endif /* _ORC_H */
diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c
new file mode 100644
index 0000000..c334382
--- /dev/null
+++ b/tools/objtool/orc_dump.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <unistd.h>
+#include "orc.h"
+#include "warn.h"
+
+static const char *reg_name(unsigned int reg)
+{
+	switch (reg) {
+	case ORC_REG_PREV_SP:
+		return "prevsp";
+	case ORC_REG_DX:
+		return "dx";
+	case ORC_REG_DI:
+		return "di";
+	case ORC_REG_BP:
+		return "bp";
+	case ORC_REG_SP:
+		return "sp";
+	case ORC_REG_R10:
+		return "r10";
+	case ORC_REG_R13:
+		return "r13";
+	case ORC_REG_BP_INDIRECT:
+		return "bp(ind)";
+	case ORC_REG_SP_INDIRECT:
+		return "sp(ind)";
+	default:
+		return "?";
+	}
+}
+
+static const char *orc_type_name(unsigned int type)
+{
+	switch (type) {
+	case ORC_TYPE_CALL:
+		return "call";
+	case ORC_TYPE_REGS:
+		return "regs";
+	case ORC_TYPE_REGS_IRET:
+		return "iret";
+	default:
+		return "?";
+	}
+}
+
+static void print_reg(unsigned int reg, int offset)
+{
+	if (reg == ORC_REG_BP_INDIRECT)
+		printf("(bp%+d)", offset);
+	else if (reg == ORC_REG_SP_INDIRECT)
+		printf("(sp%+d)", offset);
+	else if (reg == ORC_REG_UNDEFINED)
+		printf("(und)");
+	else
+		printf("%s%+d", reg_name(reg), offset);
+}
+
+int orc_dump(const char *_objname)
+{
+	int fd, nr_entries, i, *orc_ip = NULL, orc_size = 0;
+	struct orc_entry *orc = NULL;
+	char *name;
+	size_t nr_sections;
+	Elf64_Addr orc_ip_addr = 0;
+	size_t shstrtab_idx;
+	Elf *elf;
+	Elf_Scn *scn;
+	GElf_Shdr sh;
+	GElf_Rela rela;
+	GElf_Sym sym;
+	Elf_Data *data, *symtab = NULL, *rela_orc_ip = NULL;
+
+
+	objname = _objname;
+
+	elf_version(EV_CURRENT);
+
+	fd = open(objname, O_RDONLY);
+	if (fd == -1) {
+		perror("open");
+		return -1;
+	}
+
+	elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
+	if (!elf) {
+		WARN_ELF("elf_begin");
+		return -1;
+	}
+
+	if (elf_getshdrnum(elf, &nr_sections)) {
+		WARN_ELF("elf_getshdrnum");
+		return -1;
+	}
+
+	if (elf_getshdrstrndx(elf, &shstrtab_idx)) {
+		WARN_ELF("elf_getshdrstrndx");
+		return -1;
+	}
+
+	for (i = 0; i < nr_sections; i++) {
+		scn = elf_getscn(elf, i);
+		if (!scn) {
+			WARN_ELF("elf_getscn");
+			return -1;
+		}
+
+		if (!gelf_getshdr(scn, &sh)) {
+			WARN_ELF("gelf_getshdr");
+			return -1;
+		}
+
+		name = elf_strptr(elf, shstrtab_idx, sh.sh_name);
+		if (!name) {
+			WARN_ELF("elf_strptr");
+			return -1;
+		}
+
+		data = elf_getdata(scn, NULL);
+		if (!data) {
+			WARN_ELF("elf_getdata");
+			return -1;
+		}
+
+		if (!strcmp(name, ".symtab")) {
+			symtab = data;
+		} else if (!strcmp(name, ".orc_unwind")) {
+			orc = data->d_buf;
+			orc_size = sh.sh_size;
+		} else if (!strcmp(name, ".orc_unwind_ip")) {
+			orc_ip = data->d_buf;
+			orc_ip_addr = sh.sh_addr;
+		} else if (!strcmp(name, ".rela.orc_unwind_ip")) {
+			rela_orc_ip = data;
+		}
+	}
+
+	if (!symtab || !orc || !orc_ip)
+		return 0;
+
+	if (orc_size % sizeof(*orc) != 0) {
+		WARN("bad .orc_unwind section size");
+		return -1;
+	}
+
+	nr_entries = orc_size / sizeof(*orc);
+	for (i = 0; i < nr_entries; i++) {
+		if (rela_orc_ip) {
+			if (!gelf_getrela(rela_orc_ip, i, &rela)) {
+				WARN_ELF("gelf_getrela");
+				return -1;
+			}
+
+			if (!gelf_getsym(symtab, GELF_R_SYM(rela.r_info), &sym)) {
+				WARN_ELF("gelf_getsym");
+				return -1;
+			}
+
+			scn = elf_getscn(elf, sym.st_shndx);
+			if (!scn) {
+				WARN_ELF("elf_getscn");
+				return -1;
+			}
+
+			if (!gelf_getshdr(scn, &sh)) {
+				WARN_ELF("gelf_getshdr");
+				return -1;
+			}
+
+			name = elf_strptr(elf, shstrtab_idx, sh.sh_name);
+			if (!name || !*name) {
+				WARN_ELF("elf_strptr");
+				return -1;
+			}
+
+			printf("%s+%llx:", name, (unsigned long long)rela.r_addend);
+
+		} else {
+			printf("%llx:", (unsigned long long)(orc_ip_addr + (i * sizeof(int)) + orc_ip[i]));
+		}
+
+
+		printf(" sp:");
+
+		print_reg(orc[i].sp_reg, orc[i].sp_offset);
+
+		printf(" bp:");
+
+		print_reg(orc[i].bp_reg, orc[i].bp_offset);
+
+		printf(" type:%s\n", orc_type_name(orc[i].type));
+	}
+
+	elf_end(elf);
+	close(fd);
+
+	return 0;
+}
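+
+/*
+ * Given the printf format above, a dumped line looks roughly like
+ * (values illustrative):
+ *
+ *	.text+10: sp:sp+8 bp:(und) type:call
+ */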
diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c
new file mode 100644
index 0000000..18384d9
--- /dev/null
+++ b/tools/objtool/orc_gen.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "orc.h"
+#include "check.h"
+#include "warn.h"
+
+int create_orc(struct objtool_file *file)
+{
+	struct instruction *insn;
+
+	for_each_insn(file, insn) {
+		struct orc_entry *orc = &insn->orc;
+		struct cfi_reg *cfa = &insn->state.cfa;
+		struct cfi_reg *bp = &insn->state.regs[CFI_BP];
+
+		if (cfa->base == CFI_UNDEFINED) {
+			orc->sp_reg = ORC_REG_UNDEFINED;
+			continue;
+		}
+
+		switch (cfa->base) {
+		case CFI_SP:
+			orc->sp_reg = ORC_REG_SP;
+			break;
+		case CFI_SP_INDIRECT:
+			orc->sp_reg = ORC_REG_SP_INDIRECT;
+			break;
+		case CFI_BP:
+			orc->sp_reg = ORC_REG_BP;
+			break;
+		case CFI_BP_INDIRECT:
+			orc->sp_reg = ORC_REG_BP_INDIRECT;
+			break;
+		case CFI_R10:
+			orc->sp_reg = ORC_REG_R10;
+			break;
+		case CFI_R13:
+			orc->sp_reg = ORC_REG_R13;
+			break;
+		case CFI_DI:
+			orc->sp_reg = ORC_REG_DI;
+			break;
+		case CFI_DX:
+			orc->sp_reg = ORC_REG_DX;
+			break;
+		default:
+			WARN_FUNC("unknown CFA base reg %d",
+				  insn->sec, insn->offset, cfa->base);
+			return -1;
+		}
+
+		switch(bp->base) {
+		case CFI_UNDEFINED:
+			orc->bp_reg = ORC_REG_UNDEFINED;
+			break;
+		case CFI_CFA:
+			orc->bp_reg = ORC_REG_PREV_SP;
+			break;
+		case CFI_BP:
+			orc->bp_reg = ORC_REG_BP;
+			break;
+		default:
+			WARN_FUNC("unknown BP base reg %d",
+				  insn->sec, insn->offset, bp->base);
+			return -1;
+		}
+
+		orc->sp_offset = cfa->offset;
+		orc->bp_offset = bp->offset;
+		orc->type = insn->state.type;
+	}
+
+	return 0;
+}
+
+static int create_orc_entry(struct section *u_sec, struct section *ip_relasec,
+				unsigned int idx, struct section *insn_sec,
+				unsigned long insn_off, struct orc_entry *o)
+{
+	struct orc_entry *orc;
+	struct rela *rela;
+
+	if (!insn_sec->sym) {
+		WARN("missing symbol for section %s", insn_sec->name);
+		return -1;
+	}
+
+	/* populate ORC data */
+	orc = (struct orc_entry *)u_sec->data->d_buf + idx;
+	memcpy(orc, o, sizeof(*orc));
+
+	/* populate rela for ip */
+	rela = malloc(sizeof(*rela));
+	if (!rela) {
+		perror("malloc");
+		return -1;
+	}
+	memset(rela, 0, sizeof(*rela));
+
+	rela->sym = insn_sec->sym;
+	rela->addend = insn_off;
+	rela->type = R_X86_64_PC32;
+	rela->offset = idx * sizeof(int);
+
+	list_add_tail(&rela->list, &ip_relasec->rela_list);
+	hash_add(ip_relasec->rela_hash, &rela->hash, rela->offset);
+
+	return 0;
+}
+
+int create_orc_sections(struct objtool_file *file)
+{
+	struct instruction *insn, *prev_insn;
+	struct section *sec, *u_sec, *ip_relasec;
+	unsigned int idx;
+
+	struct orc_entry empty = {
+		.sp_reg = ORC_REG_UNDEFINED,
+		.bp_reg  = ORC_REG_UNDEFINED,
+		.type    = ORC_TYPE_CALL,
+	};
+
+	sec = find_section_by_name(file->elf, ".orc_unwind");
+	if (sec) {
+		WARN("file already has .orc_unwind section, skipping");
+		return -1;
+	}
+
+	/* count the number of needed orcs */
+	idx = 0;
+	for_each_sec(file, sec) {
+		if (!sec->text)
+			continue;
+
+		prev_insn = NULL;
+		sec_for_each_insn(file, sec, insn) {
+			if (!prev_insn ||
+			    memcmp(&insn->orc, &prev_insn->orc,
+				   sizeof(struct orc_entry))) {
+				idx++;
+			}
+			prev_insn = insn;
+		}
+
+		/* section terminator */
+		if (prev_insn)
+			idx++;
+	}
+	if (!idx)
+		return -1;
+
+
+	/* create .orc_unwind_ip and .rela.orc_unwind_ip sections */
+	sec = elf_create_section(file->elf, ".orc_unwind_ip", sizeof(int), idx);
+	if (!sec)
+		return -1;
+
+	ip_relasec = elf_create_rela_section(file->elf, sec);
+	if (!ip_relasec)
+		return -1;
+
+	/* create .orc_unwind section */
+	u_sec = elf_create_section(file->elf, ".orc_unwind",
+				   sizeof(struct orc_entry), idx);
+
+	/* populate sections */
+	idx = 0;
+	for_each_sec(file, sec) {
+		if (!sec->text)
+			continue;
+
+		prev_insn = NULL;
+		sec_for_each_insn(file, sec, insn) {
+			if (!prev_insn || memcmp(&insn->orc, &prev_insn->orc,
+						 sizeof(struct orc_entry))) {
+
+				if (create_orc_entry(u_sec, ip_relasec, idx,
+						     insn->sec, insn->offset,
+						     &insn->orc))
+					return -1;
+
+				idx++;
+			}
+			prev_insn = insn;
+		}
+
+		/* section terminator */
+		if (prev_insn) {
+			if (create_orc_entry(u_sec, ip_relasec, idx,
+					     prev_insn->sec,
+					     prev_insn->offset + prev_insn->len,
+					     &empty))
+				return -1;
+
+			idx++;
+		}
+	}
+
+	if (elf_rebuild_rela_section(ip_relasec))
+		return -1;
+
+	return 0;
+}
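+
+/*
+ * Resulting layout (sketch; offsets and states illustrative): for a text
+ * section whose unwind state changes at 0x0 and 0x20 and which ends at 0x40,
+ * the loops above emit
+ *
+ *	.orc_unwind_ip		.orc_unwind
+ *	[0] -> .text+0x0	sp:sp+8  bp:(und)       type:call
+ *	[1] -> .text+0x20	sp:bp+16 bp:prevsp-16   type:call
+ *	[2] -> .text+0x40	sp:(und)                -- section terminator
+ *
+ * i.e. one entry per state change plus one empty terminator per text section.
+ */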
diff --git a/tools/objtool/special.c b/tools/objtool/special.c
index bff8abb..84f001d 100644
--- a/tools/objtool/special.c
+++ b/tools/objtool/special.c
@@ -91,16 +91,16 @@
 	alt->jump_or_nop = entry->jump_or_nop;
 
 	if (alt->group) {
-		alt->orig_len = *(unsigned char *)(sec->data + offset +
+		alt->orig_len = *(unsigned char *)(sec->data->d_buf + offset +
 						   entry->orig_len);
-		alt->new_len = *(unsigned char *)(sec->data + offset +
+		alt->new_len = *(unsigned char *)(sec->data->d_buf + offset +
 						  entry->new_len);
 	}
 
 	if (entry->feature) {
 		unsigned short feature;
 
-		feature = *(unsigned short *)(sec->data + offset +
+		feature = *(unsigned short *)(sec->data->d_buf + offset +
 					      entry->feature);
 
 		/*
diff --git a/tools/objtool/sync-check.sh b/tools/objtool/sync-check.sh
new file mode 100755
index 0000000..1470e74
--- /dev/null
+++ b/tools/objtool/sync-check.sh
@@ -0,0 +1,29 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+FILES='
+arch/x86/lib/insn.c
+arch/x86/lib/inat.c
+arch/x86/lib/x86-opcode-map.txt
+arch/x86/tools/gen-insn-attr-x86.awk
+arch/x86/include/asm/insn.h
+arch/x86/include/asm/inat.h
+arch/x86/include/asm/inat_types.h
+arch/x86/include/asm/orc_types.h
+'
+
+check()
+{
+	local file=$1
+
+	diff $file ../../$file > /dev/null ||
+		echo "Warning: synced file at 'tools/objtool/$file' differs from latest kernel version at '$file'"
+}
+
+if [ ! -d ../../kernel ] || [ ! -d ../../tools ] || [ ! -d ../objtool ]; then
+	exit 0
+fi
+
+for i in $FILES; do
+  check $i
+done
diff --git a/tools/objtool/warn.h b/tools/objtool/warn.h
index ac7e075..afd9f7a 100644
--- a/tools/objtool/warn.h
+++ b/tools/objtool/warn.h
@@ -18,6 +18,13 @@
 #ifndef _WARN_H
 #define _WARN_H
 
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include "elf.h"
+
 extern const char *objname;
 
 static inline char *offstr(struct section *sec, unsigned long offset)
@@ -57,4 +64,7 @@
 	free(_str);					\
 })
 
+#define WARN_ELF(format, ...)				\
+	WARN(format ": %s", ##__VA_ARGS__, elf_errmsg(-1))
+
 #endif /* _WARN_H */
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index 0bda2cc..a4f98e1 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -51,6 +51,7 @@
 tools/include/asm-generic/bitops/atomic.h
 tools/include/asm-generic/bitops/const_hweight.h
 tools/include/asm-generic/bitops/__ffs.h
+tools/include/asm-generic/bitops/__ffz.h
 tools/include/asm-generic/bitops/__fls.h
 tools/include/asm-generic/bitops/find.h
 tools/include/asm-generic/bitops/fls64.h
@@ -60,7 +61,9 @@
 tools/include/linux/atomic.h
 tools/include/linux/bitops.h
 tools/include/linux/compiler.h
+tools/include/linux/compiler-gcc.h
 tools/include/linux/coresight-pmu.h
+tools/include/linux/bug.h
 tools/include/linux/filter.h
 tools/include/linux/hash.h
 tools/include/linux/kernel.h
@@ -70,12 +73,15 @@
 tools/include/uapi/asm-generic/mman.h
 tools/include/uapi/linux/bpf.h
 tools/include/uapi/linux/bpf_common.h
+tools/include/uapi/linux/fcntl.h
 tools/include/uapi/linux/hw_breakpoint.h
 tools/include/uapi/linux/mman.h
 tools/include/uapi/linux/perf_event.h
+tools/include/uapi/linux/stat.h
 tools/include/linux/poison.h
 tools/include/linux/rbtree.h
 tools/include/linux/rbtree_augmented.h
+tools/include/linux/refcount.h
 tools/include/linux/string.h
 tools/include/linux/stringify.h
 tools/include/linux/types.h
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 2b92ffe..ad3726c 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -177,6 +177,36 @@
 endif
 endif
 
+# The fixdep build - we force the fixdep tool to be built as
+# the first target in a separate make session so that it is
+# not disturbed by any parallel make jobs. Once fixdep is done
+# we issue the requested build with the FIXDEP=1 variable.
+#
+# The fixdep build is disabled for $(NON_CONFIG_TARGETS)
+# targets, because it's not necessary.
+
+ifdef FIXDEP
+  force_fixdep := 0
+else
+  force_fixdep := $(config)
+endif
+
+export srctree OUTPUT RM CC CXX LD AR CFLAGS CXXFLAGS V BISON FLEX AWK
+export HOSTCC HOSTLD HOSTAR
+
+include $(srctree)/tools/build/Makefile.include
+
+ifeq ($(force_fixdep),1)
+goals := $(filter-out all sub-make, $(MAKECMDGOALS))
+
+$(goals) all: sub-make
+
+sub-make: fixdep
+	@./check-headers.sh
+	$(Q)$(MAKE) FIXDEP=1 -f Makefile.perf $(goals)
+
+else # force_fixdep
+
 # Set FEATURE_TESTS to 'all' so all possible feature checkers are executed.
 # Without this setting the output feature dump file misses some features, for
 # example, liberty. Select all checkers so we won't get an incomplete feature
@@ -348,10 +378,6 @@
 
 PERF_IN := $(OUTPUT)perf-in.o
 
-export srctree OUTPUT RM CC LD AR CFLAGS V BISON FLEX AWK
-export HOSTCC HOSTLD HOSTAR
-include $(srctree)/tools/build/Makefile.include
-
 JEVENTS       := $(OUTPUT)pmu-events/jevents
 JEVENTS_IN    := $(OUTPUT)pmu-events/jevents-in.o
 
@@ -362,99 +388,6 @@
 build := -f $(srctree)/tools/build/Makefile.build dir=. obj
 
 $(PERF_IN): prepare FORCE
-	@(test -f ../../include/uapi/linux/perf_event.h && ( \
-        (diff -B ../include/uapi/linux/perf_event.h ../../include/uapi/linux/perf_event.h >/dev/null) \
-        || echo "Warning: tools/include/uapi/linux/perf_event.h differs from kernel" >&2 )) || true
-	@(test -f ../../include/linux/hash.h && ( \
-        (diff -B ../include/linux/hash.h ../../include/linux/hash.h >/dev/null) \
-        || echo "Warning: tools/include/linux/hash.h differs from kernel" >&2 )) || true
-	@(test -f ../../include/uapi/linux/hw_breakpoint.h && ( \
-        (diff -B ../include/uapi/linux/hw_breakpoint.h ../../include/uapi/linux/hw_breakpoint.h >/dev/null) \
-        || echo "Warning: tools/include/uapi/linux/hw_breakpoint.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/x86/include/asm/disabled-features.h && ( \
-        (diff -B ../arch/x86/include/asm/disabled-features.h ../../arch/x86/include/asm/disabled-features.h >/dev/null) \
-        || echo "Warning: tools/arch/x86/include/asm/disabled-features.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/x86/include/asm/required-features.h && ( \
-        (diff -B ../arch/x86/include/asm/required-features.h ../../arch/x86/include/asm/required-features.h >/dev/null) \
-        || echo "Warning: tools/arch/x86/include/asm/required-features.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/x86/include/asm/cpufeatures.h && ( \
-        (diff -B ../arch/x86/include/asm/cpufeatures.h ../../arch/x86/include/asm/cpufeatures.h >/dev/null) \
-        || echo "Warning: tools/arch/x86/include/asm/cpufeatures.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/x86/lib/memcpy_64.S && ( \
-        (diff -B ../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memcpy_64.S >/dev/null) \
-        || echo "Warning: tools/arch/x86/lib/memcpy_64.S differs from kernel" >&2 )) || true
-	@(test -f ../../arch/x86/lib/memset_64.S && ( \
-        (diff -B ../arch/x86/lib/memset_64.S ../../arch/x86/lib/memset_64.S >/dev/null) \
-        || echo "Warning: tools/arch/x86/lib/memset_64.S differs from kernel" >&2 )) || true
-	@(test -f ../../arch/arm/include/uapi/asm/perf_regs.h && ( \
-        (diff -B ../arch/arm/include/uapi/asm/perf_regs.h ../../arch/arm/include/uapi/asm/perf_regs.h >/dev/null) \
-        || echo "Warning: tools/arch/arm/include/uapi/asm/perf_regs.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/arm64/include/uapi/asm/perf_regs.h && ( \
-        (diff -B ../arch/arm64/include/uapi/asm/perf_regs.h ../../arch/arm64/include/uapi/asm/perf_regs.h >/dev/null) \
-        || echo "Warning: tools/arch/arm64/include/uapi/asm/perf_regs.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/powerpc/include/uapi/asm/perf_regs.h && ( \
-        (diff -B ../arch/powerpc/include/uapi/asm/perf_regs.h ../../arch/powerpc/include/uapi/asm/perf_regs.h >/dev/null) \
-        || echo "Warning: tools/arch/powerpc/include/uapi/asm/perf_regs.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/x86/include/uapi/asm/perf_regs.h && ( \
-        (diff -B ../arch/x86/include/uapi/asm/perf_regs.h ../../arch/x86/include/uapi/asm/perf_regs.h >/dev/null) \
-        || echo "Warning: tools/arch/x86/include/uapi/asm/perf_regs.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/x86/include/uapi/asm/kvm.h && ( \
-        (diff -B ../arch/x86/include/uapi/asm/kvm.h ../../arch/x86/include/uapi/asm/kvm.h >/dev/null) \
-        || echo "Warning: tools/arch/x86/include/uapi/asm/kvm.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/x86/include/uapi/asm/kvm_perf.h && ( \
-        (diff -B ../arch/x86/include/uapi/asm/kvm_perf.h ../../arch/x86/include/uapi/asm/kvm_perf.h >/dev/null) \
-        || echo "Warning: tools/arch/x86/include/uapi/asm/kvm_perf.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/x86/include/uapi/asm/svm.h && ( \
-        (diff -B ../arch/x86/include/uapi/asm/svm.h ../../arch/x86/include/uapi/asm/svm.h >/dev/null) \
-        || echo "Warning: tools/arch/x86/include/uapi/asm/svm.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/x86/include/uapi/asm/vmx.h && ( \
-        (diff -B ../arch/x86/include/uapi/asm/vmx.h ../../arch/x86/include/uapi/asm/vmx.h >/dev/null) \
-        || echo "Warning: tools/arch/x86/include/uapi/asm/vmx.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/powerpc/include/uapi/asm/kvm.h && ( \
-        (diff -B ../arch/powerpc/include/uapi/asm/kvm.h ../../arch/powerpc/include/uapi/asm/kvm.h >/dev/null) \
-        || echo "Warning: tools/arch/powerpc/include/uapi/asm/kvm.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/s390/include/uapi/asm/kvm.h && ( \
-        (diff -B ../arch/s390/include/uapi/asm/kvm.h ../../arch/s390/include/uapi/asm/kvm.h >/dev/null) \
-        || echo "Warning: tools/arch/s390/include/uapi/asm/kvm.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/s390/include/uapi/asm/kvm_perf.h && ( \
-        (diff -B ../arch/s390/include/uapi/asm/kvm_perf.h ../../arch/s390/include/uapi/asm/kvm_perf.h >/dev/null) \
-        || echo "Warning: tools/arch/s390/include/uapi/asm/kvm_perf.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/s390/include/uapi/asm/sie.h && ( \
-        (diff -B ../arch/s390/include/uapi/asm/sie.h ../../arch/s390/include/uapi/asm/sie.h >/dev/null) \
-        || echo "Warning: tools/arch/s390/include/uapi/asm/sie.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/arm/include/uapi/asm/kvm.h && ( \
-        (diff -B ../arch/arm/include/uapi/asm/kvm.h ../../arch/arm/include/uapi/asm/kvm.h >/dev/null) \
-        || echo "Warning: tools/arch/arm/include/uapi/asm/kvm.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/arm64/include/uapi/asm/kvm.h && ( \
-        (diff -B ../arch/arm64/include/uapi/asm/kvm.h ../../arch/arm64/include/uapi/asm/kvm.h >/dev/null) \
-        || echo "Warning: tools/arch/arm64/include/uapi/asm/kvm.h differs from kernel" >&2 )) || true
-	@(test -f ../../include/asm-generic/bitops/arch_hweight.h && ( \
-        (diff -B ../include/asm-generic/bitops/arch_hweight.h ../../include/asm-generic/bitops/arch_hweight.h >/dev/null) \
-        || echo "Warning: tools/include/asm-generic/bitops/arch_hweight.h differs from kernel" >&2 )) || true
-	@(test -f ../../include/asm-generic/bitops/const_hweight.h && ( \
-        (diff -B ../include/asm-generic/bitops/const_hweight.h ../../include/asm-generic/bitops/const_hweight.h >/dev/null) \
-        || echo "Warning: tools/include/asm-generic/bitops/const_hweight.h differs from kernel" >&2 )) || true
-	@(test -f ../../include/asm-generic/bitops/__fls.h && ( \
-        (diff -B ../include/asm-generic/bitops/__fls.h ../../include/asm-generic/bitops/__fls.h >/dev/null) \
-        || echo "Warning: tools/include/asm-generic/bitops/__fls.h differs from kernel" >&2 )) || true
-	@(test -f ../../include/asm-generic/bitops/fls.h && ( \
-        (diff -B ../include/asm-generic/bitops/fls.h ../../include/asm-generic/bitops/fls.h >/dev/null) \
-        || echo "Warning: tools/include/asm-generic/bitops/fls.h differs from kernel" >&2 )) || true
-	@(test -f ../../include/asm-generic/bitops/fls64.h && ( \
-        (diff -B ../include/asm-generic/bitops/fls64.h ../../include/asm-generic/bitops/fls64.h >/dev/null) \
-        || echo "Warning: tools/include/asm-generic/bitops/fls64.h differs from kernel" >&2 )) || true
-	@(test -f ../../include/linux/coresight-pmu.h && ( \
-	(diff -B ../include/linux/coresight-pmu.h ../../include/linux/coresight-pmu.h >/dev/null) \
-	|| echo "Warning: tools/include/linux/coresight-pmu.h differs from kernel" >&2 )) || true
-	@(test -f ../../include/uapi/asm-generic/mman-common.h && ( \
-	(diff -B ../include/uapi/asm-generic/mman-common.h ../../include/uapi/asm-generic/mman-common.h >/dev/null) \
-	|| echo "Warning: tools/include/uapi/asm-generic/mman-common.h differs from kernel" >&2 )) || true
-	@(test -f ../../include/uapi/asm-generic/mman.h && ( \
-	(diff -B -I "^#include <\(uapi/\)*asm-generic/mman-common.h>$$" ../include/uapi/asm-generic/mman.h ../../include/uapi/asm-generic/mman.h >/dev/null) \
-	|| echo "Warning: tools/include/uapi/asm-generic/mman.h differs from kernel" >&2 )) || true
-	@(test -f ../../include/uapi/linux/mman.h && ( \
-	(diff -B -I "^#include <\(uapi/\)*asm/mman.h>$$" ../include/uapi/linux/mman.h ../../include/uapi/linux/mman.h >/dev/null) \
-	|| echo "Warning: tools/include/uapi/linux/mman.h differs from kernel" >&2 )) || true
 	$(Q)$(MAKE) $(build)=perf
 
 $(JEVENTS_IN): FORCE
@@ -470,7 +403,7 @@
 	$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS) \
 		$(PERF_IN) $(PMU_EVENTS_IN) $(LIBS) -o $@
 
-$(GTK_IN): fixdep FORCE
+$(GTK_IN): FORCE
 	$(Q)$(MAKE) $(build)=gtk
 
 $(OUTPUT)libperf-gtk.so: $(GTK_IN) $(PERFLIBS)
@@ -515,7 +448,7 @@
 __build-dir = $(subst $(OUTPUT),,$(dir $@))
 build-dir   = $(if $(__build-dir),$(__build-dir),.)
 
-prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h fixdep archheaders
+prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders
 
 $(OUTPUT)%.o: %.c prepare FORCE
 	$(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
@@ -555,7 +488,7 @@
 
 LIBPERF_IN := $(OUTPUT)libperf-in.o
 
-$(LIBPERF_IN): prepare fixdep FORCE
+$(LIBPERF_IN): prepare FORCE
 	$(Q)$(MAKE) $(build)=libperf
 
 $(LIB_FILE): $(LIBPERF_IN)
@@ -563,10 +496,10 @@
 
 LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ)
 
-$(LIBTRACEEVENT): fixdep FORCE
+$(LIBTRACEEVENT): FORCE
 	$(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) $(OUTPUT)libtraceevent.a
 
-libtraceevent_plugins: fixdep FORCE
+libtraceevent_plugins: FORCE
 	$(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) plugins
 
 $(LIBTRACEEVENT_DYNAMIC_LIST): libtraceevent_plugins
@@ -579,21 +512,21 @@
 install-traceevent-plugins: libtraceevent_plugins
 	$(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) install_plugins
 
-$(LIBAPI): fixdep FORCE
+$(LIBAPI): FORCE
 	$(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) $(OUTPUT)libapi.a
 
 $(LIBAPI)-clean:
 	$(call QUIET_CLEAN, libapi)
 	$(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) clean >/dev/null
 
-$(LIBBPF): fixdep FORCE
+$(LIBBPF): FORCE
 	$(Q)$(MAKE) -C $(BPF_DIR) O=$(OUTPUT) $(OUTPUT)libbpf.a FEATURES_DUMP=$(FEATURE_DUMP_EXPORT)
 
 $(LIBBPF)-clean:
 	$(call QUIET_CLEAN, libbpf)
 	$(Q)$(MAKE) -C $(BPF_DIR) O=$(OUTPUT) clean >/dev/null
 
-$(LIBSUBCMD): fixdep FORCE
+$(LIBSUBCMD): FORCE
 	$(Q)$(MAKE) -C $(SUBCMD_DIR) O=$(OUTPUT) $(OUTPUT)libsubcmd.a
 
 $(LIBSUBCMD)-clean:
@@ -790,3 +723,4 @@
 .PHONY: $(GIT-HEAD-PHONY) TAGS tags cscope FORCE prepare
 .PHONY: libtraceevent_plugins archheaders
 
+endif # force_fixdep
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index 555263e..e93ef0b 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -335,6 +335,9 @@
 326	common	copy_file_range		sys_copy_file_range
 327	64	preadv2			sys_preadv2
 328	64	pwritev2		sys_pwritev2
+329	common	pkey_mprotect		sys_pkey_mprotect
+330	common	pkey_alloc		sys_pkey_alloc
+331	common	pkey_free		sys_pkey_free
 
 #
 # x32-specific system call numbers start at 512 to avoid cache impact
@@ -374,5 +377,5 @@
 543	x32	io_setup		compat_sys_io_setup
 544	x32	io_submit		compat_sys_io_submit
 545	x32	execveat		compat_sys_execveat/ptregs
-534	x32	preadv2			compat_sys_preadv2
-535	x32	pwritev2		compat_sys_pwritev2
+546	x32	preadv2			compat_sys_preadv64v2
+547	x32	pwritev2		compat_sys_pwritev64v2
diff --git a/tools/perf/arch/x86/util/header.c b/tools/perf/arch/x86/util/header.c
index a74a48d..2eb1154 100644
--- a/tools/perf/arch/x86/util/header.c
+++ b/tools/perf/arch/x86/util/header.c
@@ -69,7 +69,7 @@
 {
 	char *buf = malloc(128);
 
-	if (__get_cpuid(buf, 128, "%s-%u-%X$") < 0) {
+	if (buf && __get_cpuid(buf, 128, "%s-%u-%X$") < 0) {
 		free(buf);
 		return NULL;
 	}
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 68861e8..43d5f35 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -2042,11 +2042,16 @@
 		return 0;
 
 	if (transaction_run) {
+		struct parse_events_error errinfo;
+
 		if (pmu_have_event("cpu", "cycles-ct") &&
 		    pmu_have_event("cpu", "el-start"))
-			err = parse_events(evsel_list, transaction_attrs, NULL);
+			err = parse_events(evsel_list, transaction_attrs,
+					   &errinfo);
 		else
-			err = parse_events(evsel_list, transaction_limited_attrs, NULL);
+			err = parse_events(evsel_list,
+					   transaction_limited_attrs,
+					   &errinfo);
 		if (err) {
 			fprintf(stderr, "Cannot set up transaction events\n");
 			return -1;
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index c61e012..e68c866 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1061,8 +1061,10 @@
 
 static int perf_top_config(const char *var, const char *value, void *cb __maybe_unused)
 {
-	if (!strcmp(var, "top.call-graph"))
-		var = "call-graph.record-mode"; /* fall-through */
+	if (!strcmp(var, "top.call-graph")) {
+		var = "call-graph.record-mode";
+		return perf_default_config(var, value, cb);
+	}
 	if (!strcmp(var, "top.children")) {
 		symbol_conf.cumulate_callchain = perf_config_bool(var, value);
 		return 0;
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 4c596ba..0fd8bfb 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -679,6 +679,10 @@
 	{ .name	    = "mlockall",   .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
 	{ .name	    = "mmap",	    .hexret = true,
+/* The standard mmap maps to old_mmap on s390x */
+#if defined(__s390x__)
+	.alias = "old_mmap",
+#endif
 	  .arg_scnprintf = { [0] = SCA_HEX,	  /* addr */
 			     [2] = SCA_MMAP_PROT, /* prot */
 			     [3] = SCA_MMAP_FLAGS, /* flags */ }, },
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
new file mode 100755
index 0000000..83fe220
--- /dev/null
+++ b/tools/perf/check-headers.sh
@@ -0,0 +1,61 @@
+#!/bin/sh
+
+HEADERS='
+include/uapi/linux/fcntl.h
+include/uapi/linux/perf_event.h
+include/uapi/linux/stat.h
+include/linux/hash.h
+include/uapi/linux/hw_breakpoint.h
+arch/x86/include/asm/disabled-features.h
+arch/x86/include/asm/required-features.h
+arch/x86/include/asm/cpufeatures.h
+arch/arm/include/uapi/asm/perf_regs.h
+arch/arm64/include/uapi/asm/perf_regs.h
+arch/powerpc/include/uapi/asm/perf_regs.h
+arch/x86/include/uapi/asm/perf_regs.h
+arch/x86/include/uapi/asm/kvm.h
+arch/x86/include/uapi/asm/kvm_perf.h
+arch/x86/include/uapi/asm/svm.h
+arch/x86/include/uapi/asm/vmx.h
+arch/powerpc/include/uapi/asm/kvm.h
+arch/s390/include/uapi/asm/kvm.h
+arch/s390/include/uapi/asm/kvm_perf.h
+arch/s390/include/uapi/asm/sie.h
+arch/arm/include/uapi/asm/kvm.h
+arch/arm64/include/uapi/asm/kvm.h
+include/asm-generic/bitops/arch_hweight.h
+include/asm-generic/bitops/const_hweight.h
+include/asm-generic/bitops/__fls.h
+include/asm-generic/bitops/fls.h
+include/asm-generic/bitops/fls64.h
+include/linux/coresight-pmu.h
+include/uapi/asm-generic/mman-common.h
+'
+
+check () {
+  file=$1
+  opts=
+
+  shift
+  while [ -n "$*" ]; do
+    opts="$opts \"$1\""
+    shift
+  done
+
+  cmd="diff $opts ../$file ../../$file > /dev/null"
+
+  test -f ../../$file &&
+  eval $cmd || echo "Warning: $file differs from kernel" >&2
+}
+
+
+# simple diff check
+for i in $HEADERS; do
+  check $i -B
+done
+
+# diff with extra ignore lines
+check arch/x86/lib/memcpy_64.S        -B -I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"
+check arch/x86/lib/memset_64.S        -B -I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"
+check include/uapi/asm-generic/mman.h -B -I "^#include <\(uapi/\)*asm-generic/mman-common.h>"
+check include/uapi/linux/mman.h       -B -I "^#include <\(uapi/\)*asm/mman.h>"
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index a508233..2aabf0a 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -123,7 +123,7 @@
 
 		if (pair && UM(pair->start) == mem_start) {
 next_pair:
-			if (strcmp(sym->name, pair->name) == 0) {
+			if (arch__compare_symbol_names(sym->name, pair->name) == 0) {
 				/*
 				 * kallsyms don't have the symbol end, so we
 				 * set that by using the next symbol start - 1,
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index bce80f8..f55d108 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -681,14 +681,14 @@
 	struct perf_evsel_config_term *term;
 	struct list_head *config_terms = &evsel->config_terms;
 	struct perf_event_attr *attr = &evsel->attr;
-	struct callchain_param param;
+	/* callgraph default */
+	struct callchain_param param = {
+		.record_mode = callchain_param.record_mode,
+	};
 	u32 dump_size = 0;
 	int max_stack = 0;
 	const char *callgraph_buf = NULL;
 
-	/* callgraph default */
-	param.record_mode = callchain_param.record_mode;
-
 	list_for_each_entry(term, config_terms, list) {
 		switch (term->type) {
 		case PERF_EVSEL__CONFIG_TERM_PERIOD:
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 28bdb48..ab36aa5 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1454,8 +1454,16 @@
 
 		dso__set_build_id(dso, &bev->build_id);
 
-		if (!is_kernel_module(filename, cpumode))
-			dso->kernel = dso_type;
+		if (dso_type != DSO_TYPE_USER) {
+			struct kmod_path m = { .name = NULL, };
+
+			if (!kmod_path__parse_name(&m, filename) && m.kmod)
+				dso__set_short_name(dso, strdup(m.name), true);
+			else
+				dso->kernel = dso_type;
+
+			free(m.name);
+		}
 
 		build_id__sprintf(dso->build_id, sizeof(dso->build_id),
 				  sbuild_id);
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 10849a0..ad613ea 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -865,7 +865,7 @@
 	 * cumulated only one time to prevent entries more than 100%
 	 * overhead.
 	 */
-	he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
+	he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
 	if (he_cache == NULL)
 		return -ENOMEM;
 
@@ -1030,8 +1030,6 @@
 	if (err)
 		return err;
 
-	iter->max_stack = max_stack_depth;
-
 	err = iter->ops->prepare_entry(iter, al);
 	if (err)
 		goto out;
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index a440a04..159d616 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -102,7 +102,6 @@
 	int curr;
 
 	bool hide_unresolved;
-	int max_stack;
 
 	struct perf_evsel *evsel;
 	struct perf_sample *sample;
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 7e27207..cac3953 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -1300,6 +1300,7 @@
 	intel_pt_clear_tx_flags(decoder);
 	decoder->have_tma = false;
 	decoder->cbr = 0;
+	decoder->timestamp_insn_cnt = 0;
 	decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
 	decoder->overflow = true;
 	return -EOVERFLOW;
@@ -1522,6 +1523,7 @@
 		case INTEL_PT_PSBEND:
 			intel_pt_log("ERROR: Missing TIP after FUP\n");
 			decoder->pkt_state = INTEL_PT_STATE_ERR3;
+			decoder->pkt_step = 0;
 			return -ENOENT;
 
 		case INTEL_PT_OVF:
@@ -2182,14 +2184,6 @@
 	return &decoder->state;
 }
 
-static bool intel_pt_at_psb(unsigned char *buf, size_t len)
-{
-	if (len < INTEL_PT_PSB_LEN)
-		return false;
-	return memmem(buf, INTEL_PT_PSB_LEN, INTEL_PT_PSB_STR,
-		      INTEL_PT_PSB_LEN);
-}
-
 /**
  * intel_pt_next_psb - move buffer pointer to the start of the next PSB packet.
  * @buf: pointer to buffer pointer
@@ -2278,6 +2272,7 @@
  * @buf: buffer
  * @len: size of buffer
  * @tsc: TSC value returned
+ * @rem: returns remaining size when TSC is found
  *
  * Find a TSC packet in @buf and return the TSC value.  This function assumes
  * that @buf starts at a PSB and that PSB+ will contain TSC and so stops if a
@@ -2285,7 +2280,8 @@
  *
  * Return: %true if TSC is found, false otherwise.
  */
-static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc)
+static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc,
+			      size_t *rem)
 {
 	struct intel_pt_pkt packet;
 	int ret;
@@ -2296,6 +2292,7 @@
 			return false;
 		if (packet.type == INTEL_PT_TSC) {
 			*tsc = packet.payload;
+			*rem = len;
 			return true;
 		}
 		if (packet.type == INTEL_PT_PSBEND)
@@ -2346,6 +2343,8 @@
  * @len_a: size of first buffer
  * @buf_b: second buffer
  * @len_b: size of second buffer
+ * @consecutive: returns true if there is data in buf_b that is consecutive
+ *               to buf_a
  *
  * If the trace contains TSC we can look at the last TSC of @buf_a and the
  * first TSC of @buf_b in order to determine if the buffers overlap, and then
@@ -2358,33 +2357,41 @@
 static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
 						size_t len_a,
 						unsigned char *buf_b,
-						size_t len_b)
+						size_t len_b, bool *consecutive)
 {
 	uint64_t tsc_a, tsc_b;
 	unsigned char *p;
-	size_t len;
+	size_t len, rem_a, rem_b;
 
 	p = intel_pt_last_psb(buf_a, len_a);
 	if (!p)
 		return buf_b; /* No PSB in buf_a => no overlap */
 
 	len = len_a - (p - buf_a);
-	if (!intel_pt_next_tsc(p, len, &tsc_a)) {
+	if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a)) {
 		/* The last PSB+ in buf_a is incomplete, so go back one more */
 		len_a -= len;
 		p = intel_pt_last_psb(buf_a, len_a);
 		if (!p)
 			return buf_b; /* No full PSB+ => assume no overlap */
 		len = len_a - (p - buf_a);
-		if (!intel_pt_next_tsc(p, len, &tsc_a))
+		if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a))
 			return buf_b; /* No TSC in buf_a => assume no overlap */
 	}
 
 	while (1) {
 		/* Ignore PSB+ with no TSC */
-		if (intel_pt_next_tsc(buf_b, len_b, &tsc_b) &&
-		    intel_pt_tsc_cmp(tsc_a, tsc_b) < 0)
-			return buf_b; /* tsc_a < tsc_b => no overlap */
+		if (intel_pt_next_tsc(buf_b, len_b, &tsc_b, &rem_b)) {
+			int cmp = intel_pt_tsc_cmp(tsc_a, tsc_b);
+
+			/* Same TSC, so buffers are consecutive */
+			if (!cmp && rem_b >= rem_a) {
+				*consecutive = true;
+				return buf_b + len_b - (rem_b - rem_a);
+			}
+			if (cmp < 0)
+				return buf_b; /* tsc_a < tsc_b => no overlap */
+		}
 
 		if (!intel_pt_step_psb(&buf_b, &len_b))
 			return buf_b + len_b; /* No PSB in buf_b => no data */
@@ -2398,6 +2405,8 @@
  * @buf_b: second buffer
  * @len_b: size of second buffer
  * @have_tsc: can use TSC packets to detect overlap
+ * @consecutive: returns true if there is data in buf_b that is consecutive
+ *               to buf_a
  *
  * When trace samples or snapshots are recorded there is the possibility that
  * the data overlaps.  Note that, for the purposes of decoding, data is only
@@ -2408,7 +2417,7 @@
  */
 unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
 				     unsigned char *buf_b, size_t len_b,
-				     bool have_tsc)
+				     bool have_tsc, bool *consecutive)
 {
 	unsigned char *found;
 
@@ -2420,7 +2429,8 @@
 		return buf_b; /* No overlap */
 
 	if (have_tsc) {
-		found = intel_pt_find_overlap_tsc(buf_a, len_a, buf_b, len_b);
+		found = intel_pt_find_overlap_tsc(buf_a, len_a, buf_b, len_b,
+						  consecutive);
 		if (found)
 			return found;
 	}
@@ -2435,28 +2445,16 @@
 	}
 
 	/* Now len_b >= len_a */
-	if (len_b > len_a) {
-		/* The leftover buffer 'b' must start at a PSB */
-		while (!intel_pt_at_psb(buf_b + len_a, len_b - len_a)) {
-			if (!intel_pt_step_psb(&buf_a, &len_a))
-				return buf_b; /* No overlap */
-		}
-	}
-
 	while (1) {
 		/* Potential overlap so check the bytes */
 		found = memmem(buf_a, len_a, buf_b, len_a);
-		if (found)
+		if (found) {
+			*consecutive = true;
 			return buf_b + len_a;
+		}
 
 		/* Try again at next PSB in buffer 'a' */
 		if (!intel_pt_step_psb(&buf_a, &len_a))
 			return buf_b; /* No overlap */
-
-		/* The leftover buffer 'b' must start at a PSB */
-		while (!intel_pt_at_psb(buf_b + len_a, len_b - len_a)) {
-			if (!intel_pt_step_psb(&buf_a, &len_a))
-				return buf_b; /* No overlap */
-		}
 	}
 }
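With this change intel_pt_find_overlap() reports not only where non-repeated data starts in the second buffer but also, via *consecutive, whether that data directly continues the first buffer. A much-simplified illustration of the idea, ignoring PSB/TSC packets and checking only for an exact repeat of the first buffer at the start of the second (hypothetical find_overlap() helper, not the decoder's algorithm):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static unsigned char *find_overlap(unsigned char *buf_a, size_t len_a,
				   unsigned char *buf_b, size_t len_b,
				   bool *consecutive)
{
	*consecutive = false;

	/* whole of buf_a repeated at the start of buf_b? */
	if (len_b >= len_a && !memcmp(buf_a, buf_b, len_a)) {
		*consecutive = true;		/* data continues seamlessly */
		return buf_b + len_a;		/* new data starts here */
	}

	return buf_b;				/* assume no overlap */
}

int main(void)
{
	unsigned char a[] = { 1, 2, 3, 4 };
	unsigned char b[] = { 1, 2, 3, 4, 5, 6 };
	bool consecutive;
	unsigned char *start = find_overlap(a, sizeof(a), b, sizeof(b),
					    &consecutive);

	printf("new data at offset %ld, consecutive=%d\n",
	       (long)(start - b), consecutive);
	return 0;
}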
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
index 8939998..9ae4df1 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
@@ -103,7 +103,7 @@
 
 unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
 				     unsigned char *buf_b, size_t len_b,
-				     bool have_tsc);
+				     bool have_tsc, bool *consecutive);
 
 int intel_pt__strerror(int code, char *buf, size_t buflen);
 
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index dc041d4..b1161d7 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -131,6 +131,7 @@
 	bool stop;
 	bool step_through_buffers;
 	bool use_buffer_pid_tid;
+	bool sync_switch;
 	pid_t pid, tid;
 	int cpu;
 	int switch_state;
@@ -194,14 +195,17 @@
 static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
 				   struct auxtrace_buffer *b)
 {
+	bool consecutive = false;
 	void *start;
 
 	start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
-				      pt->have_tsc);
+				      pt->have_tsc, &consecutive);
 	if (!start)
 		return -EINVAL;
 	b->use_size = b->data + b->size - start;
 	b->use_data = start;
+	if (b->use_size && consecutive)
+		b->consecutive = true;
 	return 0;
 }
 
@@ -928,10 +932,12 @@
 			if (pt->timeless_decoding || !pt->have_sched_switch)
 				ptq->use_buffer_pid_tid = true;
 		}
+
+		ptq->sync_switch = pt->sync_switch;
 	}
 
 	if (!ptq->on_heap &&
-	    (!pt->sync_switch ||
+	    (!ptq->sync_switch ||
 	     ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
 		const struct intel_pt_state *state;
 		int ret;
@@ -1333,7 +1339,7 @@
 	if (pt->synth_opts.last_branch)
 		intel_pt_update_last_branch_rb(ptq);
 
-	if (!pt->sync_switch)
+	if (!ptq->sync_switch)
 		return 0;
 
 	if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
@@ -1414,6 +1420,21 @@
 	return switch_ip;
 }
 
+static void intel_pt_enable_sync_switch(struct intel_pt *pt)
+{
+	unsigned int i;
+
+	pt->sync_switch = true;
+
+	for (i = 0; i < pt->queues.nr_queues; i++) {
+		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
+		struct intel_pt_queue *ptq = queue->priv;
+
+		if (ptq)
+			ptq->sync_switch = true;
+	}
+}
+
 static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
 {
 	const struct intel_pt_state *state = ptq->state;
@@ -1430,7 +1451,7 @@
 			if (pt->switch_ip) {
 				intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
 					     pt->switch_ip, pt->ptss_ip);
-				pt->sync_switch = true;
+				intel_pt_enable_sync_switch(pt);
 			}
 		}
 	}
@@ -1446,9 +1467,9 @@
 		if (state->err) {
 			if (state->err == INTEL_PT_ERR_NODATA)
 				return 1;
-			if (pt->sync_switch &&
+			if (ptq->sync_switch &&
 			    state->from_ip >= pt->kernel_start) {
-				pt->sync_switch = false;
+				ptq->sync_switch = false;
 				intel_pt_next_tid(pt, ptq);
 			}
 			if (pt->synth_opts.errors) {
@@ -1474,7 +1495,7 @@
 				     state->timestamp, state->est_timestamp);
 			ptq->timestamp = state->est_timestamp;
 		/* Use estimated TSC in unknown switch state */
-		} else if (pt->sync_switch &&
+		} else if (ptq->sync_switch &&
 			   ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
 			   intel_pt_is_switch_ip(ptq, state->to_ip) &&
 			   ptq->next_tid == -1) {
@@ -1621,7 +1642,7 @@
 		return 1;
 
 	ptq = intel_pt_cpu_to_ptq(pt, cpu);
-	if (!ptq)
+	if (!ptq || !ptq->sync_switch)
 		return 1;
 
 	switch (ptq->switch_state) {
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 4d2e22f..c93dacc 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -2609,6 +2609,14 @@
 
 out:
 	free(nbase);
+
+	/* Final validation */
+	if (ret >= 0 && !is_c_func_name(buf)) {
+		pr_warning("Internal error: \"%s\" is an invalid event name.\n",
+			   buf);
+		ret = -EINVAL;
+	}
+
 	return ret;
 }
 
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index 783a53f..b46e1cf 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -38,6 +38,14 @@
 		return 0;
 
 	mod = dwfl_addrmodule(ui->dwfl, ip);
+	if (mod) {
+		Dwarf_Addr s;
+
+		dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL);
+		if (s != al->map->start)
+			mod = 0;
+	}
+
 	if (!mod)
 		mod = dwfl_report_elf(ui->dwfl, dso->short_name,
 				      dso->long_name, -1, al->map->start,
@@ -167,12 +175,16 @@
 {
 	struct unwind_info *ui = arg;
 	Dwarf_Addr pc;
+	bool isactivation;
 
-	if (!dwfl_frame_pc(state, &pc, NULL)) {
+	if (!dwfl_frame_pc(state, &pc, &isactivation)) {
 		pr_err("%s", dwfl_errmsg(-1));
 		return DWARF_CB_ABORT;
 	}
 
+	if (!isactivation)
+		--pc;
+
 	return entry(pc, ui) || !(--ui->max_stack) ?
 	       DWARF_CB_ABORT : DWARF_CB_OK;
 }
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index 20c2e57..1203834 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -646,6 +646,17 @@
 
 		while (!ret && (unw_step(&c) > 0) && i < max_stack) {
 			unw_get_reg(&c, UNW_REG_IP, &ips[i]);
+
+			/*
+			 * Decrement the IP for any non-activation frames.
+			 * This is required to properly find the srcline
+			 * for caller frames.
+			 * See also the documentation for dwfl_frame_pc(),
+			 * which this code tries to replicate.
+			 */
+			if (unw_is_signal_frame(&c) <= 0)
+				--ips[i];
+
 			++i;
 		}
 
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 85c5680..dfb010b 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -207,7 +207,7 @@
 
 		size -= ret;
 		off_in += ret;
-		off_out -= ret;
+		off_out += ret;
 	}
 	munmap(ptr, off_in + size);
 
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 43899e0..e72d370 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -23,8 +23,6 @@
 #endif
 #endif
 
-#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
-
 #ifdef __GNUC__
 #define TYPEOF(x) (__typeof__(x))
 #else
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index a899ef81..76faf5b 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -94,6 +94,7 @@
 	for TARGET in $(TARGETS); do \
 		echo "echo ; echo Running tests in $$TARGET" >> $(ALL_SCRIPT); \
 		echo "echo ========================================" >> $(ALL_SCRIPT); \
+		echo "[ -w /dev/kmsg ] && echo \"kselftest: Running tests in $$TARGET\" >> /dev/kmsg" >> $(ALL_SCRIPT); \
 		echo "cd $$TARGET" >> $(ALL_SCRIPT); \
 		make -s --no-print-directory -C $$TARGET emit_tests >> $(ALL_SCRIPT); \
 		echo "cd \$$ROOT" >> $(ALL_SCRIPT); \
diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh
index 17e16fc..99d7f13 100755
--- a/tools/testing/selftests/firmware/fw_filesystem.sh
+++ b/tools/testing/selftests/firmware/fw_filesystem.sh
@@ -29,9 +29,11 @@
 		echo "$OLD_TIMEOUT" >/sys/class/firmware/timeout
 	fi
 	if [ "$OLD_FWPATH" = "" ]; then
-		OLD_FWPATH=" "
+		# A zero-length write won't work; write a null byte
+		printf '\000' >/sys/module/firmware_class/parameters/path
+	else
+		echo -n "$OLD_FWPATH" >/sys/module/firmware_class/parameters/path
 	fi
-	echo -n "$OLD_FWPATH" >/sys/module/firmware_class/parameters/path
 	rm -f "$FW"
 	rmdir "$FWPATH"
 }
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
new file mode 100644
index 0000000..5ba7303
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
@@ -0,0 +1,46 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Kprobe event string type argument
+
+[ -f kprobe_events ] || exit_unsupported # this is configurable
+
+echo 0 > events/enable
+echo > kprobe_events
+
+case `uname -m` in
+x86_64)
+  ARG2=%si
+  OFFS=8
+;;
+i[3456]86)
+  ARG2=%cx
+  OFFS=4
+;;
+aarch64)
+  ARG2=%x1
+  OFFS=8
+;;
+arm*)
+  ARG2=%r1
+  OFFS=4
+;;
+*)
+  echo "Please implement other architecture here"
+  exit_untested
+esac
+
+: "Test get argument (1)"
+echo "p:testprobe create_trace_kprobe arg1=+0(+0(${ARG2})):string" > kprobe_events
+echo 1 > events/kprobes/testprobe/enable
+! echo test >> kprobe_events
+tail -n 1 trace | grep -qe "testprobe.* arg1=\"test\""
+
+echo 0 > events/kprobes/testprobe/enable
+: "Test get argument (2)"
+echo "p:testprobe create_trace_kprobe arg1=+0(+0(${ARG2})):string arg2=+0(+${OFFS}(${ARG2})):string" > kprobe_events
+echo 1 > events/kprobes/testprobe/enable
+! echo test1 test2 >> kprobe_events
+tail -n 1 trace | grep -qe "testprobe.* arg1=\"test1\" arg2=\"test2\""
+
+echo 0 > events/enable
+echo > kprobe_events
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
new file mode 100644
index 0000000..231bcd2
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
@@ -0,0 +1,97 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Kprobe event argument syntax
+
+[ -f kprobe_events ] || exit_unsupported # this is configurable
+
+grep "x8/16/32/64" README > /dev/null || exit_unsupported # version issue
+
+echo 0 > events/enable
+echo > kprobe_events
+
+PROBEFUNC="vfs_read"
+GOODREG=
+BADREG=
+GOODSYM="_sdata"
+if ! grep -qw ${GOODSYM} /proc/kallsyms ; then
+  GOODSYM=$PROBEFUNC
+fi
+BADSYM="deaqswdefr"
+SYMADDR=0x`grep -w ${GOODSYM} /proc/kallsyms | cut -f 1 -d " "`
+GOODTYPE="x16"
+BADTYPE="y16"
+
+case `uname -m` in
+x86_64|i[3456]86)
+  GOODREG=%ax
+  BADREG=%ex
+;;
+aarch64)
+  GOODREG=%x0
+  BADREG=%ax
+;;
+arm*)
+  GOODREG=%r0
+  BADREG=%ax
+;;
+esac
+
+test_goodarg() # Good-args
+{
+  while [ "$1" ]; do
+    echo "p ${PROBEFUNC} $1" > kprobe_events
+    shift 1
+  done;
+}
+
+test_badarg() # Bad-args
+{
+  while [ "$1" ]; do
+    ! echo "p ${PROBEFUNC} $1" > kprobe_events
+    shift 1
+  done;
+}
+
+echo > kprobe_events
+
+: "Register access"
+test_goodarg ${GOODREG}
+test_badarg ${BADREG}
+
+: "Symbol access"
+test_goodarg "@${GOODSYM}" "@${SYMADDR}" "@${GOODSYM}+10" "@${GOODSYM}-10"
+test_badarg "@" "@${BADSYM}" "@${GOODSYM}*10" "@${GOODSYM}/10" \
+	    "@${GOODSYM}%10" "@${GOODSYM}&10" "@${GOODSYM}|10"
+
+: "Stack access"
+test_goodarg "\$stack" "\$stack0" "\$stack1"
+test_badarg "\$stackp" "\$stack0+10" "\$stack1-10"
+
+: "Retval access"
+echo "r ${PROBEFUNC} \$retval" > kprobe_events
+! echo "p ${PROBEFUNC} \$retval" > kprobe_events
+
+: "Comm access"
+test_goodarg "\$comm"
+
+: "Indirect memory access"
+test_goodarg "+0(${GOODREG})" "-0(${GOODREG})" "+10(\$stack)" \
+	"+0(\$stack1)" "+10(@${GOODSYM}-10)" "+0(+10(+20(\$stack)))"
+test_badarg "+(${GOODREG})" "(${GOODREG}+10)" "-(${GOODREG})" "(${GOODREG})" \
+	"+10(\$comm)" "+0(${GOODREG})+10"
+
+: "Name assignment"
+test_goodarg "varname=${GOODREG}"
+test_badarg "varname=varname2=${GOODREG}"
+
+: "Type syntax"
+test_goodarg "${GOODREG}:${GOODTYPE}"
+test_badarg "${GOODREG}::${GOODTYPE}" "${GOODREG}:${BADTYPE}" \
+	"${GOODTYPE}:${GOODREG}"
+
+: "Combination check"
+
+test_goodarg "\$comm:string" "+0(\$stack):string"
+test_badarg "\$comm:x64" "\$stack:string" "${GOODREG}:string"
+
+echo > kprobe_events
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc b/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc
new file mode 100644
index 0000000..4fda01a
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc
@@ -0,0 +1,43 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Kprobe events - probe points
+
+[ -f kprobe_events ] || exit_unsupported # this is configurable
+
+TARGET_FUNC=create_trace_kprobe
+
+dec_addr() { # hexaddr
+  printf "%d" "0x"`echo $1 | tail -c 8`
+}
+
+set_offs() { # prev target next
+  A1=`dec_addr $1`
+  A2=`dec_addr $2`
+  A3=`dec_addr $3`
+  TARGET="0x$2" # an address
+  PREV=`expr $A1 - $A2` # offset to previous symbol
+  NEXT=+`expr $A3 - $A2` # offset to next symbol
+  OVERFLOW=+`printf "0x%x" ${PREV}` # overflow offset to previous symbol
+}
+
+# We have to decode symbol addresses to get correct offsets.
+# If the offset is not an instruction boundary, it causes -EILSEQ.
+set_offs `grep -A1 -B1 ${TARGET_FUNC} /proc/kallsyms | cut -f 1 -d " " | xargs`
+
+UINT_TEST=no
+# printf "%x" -1 returns (unsigned long)-1.
+if [ `printf "%x" -1 | wc -c` != 9 ]; then
+  UINT_TEST=yes
+fi
+
+echo 0 > events/enable
+echo > kprobe_events
+echo "p:testprobe ${TARGET_FUNC}" > kprobe_events
+echo "p:testprobe ${TARGET}" > kprobe_events
+echo "p:testprobe ${TARGET_FUNC}${NEXT}" > kprobe_events
+! echo "p:testprobe ${TARGET_FUNC}${PREV}" > kprobe_events
+if [ "${UINT_TEST}" = yes ]; then
+! echo "p:testprobe ${TARGET_FUNC}${OVERFLOW}" > kprobe_events
+fi
+echo > kprobe_events
+clear_trace
diff --git a/tools/testing/selftests/memfd/config b/tools/testing/selftests/memfd/config
new file mode 100644
index 0000000..835c7f4
--- /dev/null
+++ b/tools/testing/selftests/memfd/config
@@ -0,0 +1 @@
+CONFIG_FUSE_FS=m
diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c
index 4124593..9b654a0 100644
--- a/tools/testing/selftests/net/psock_fanout.c
+++ b/tools/testing/selftests/net/psock_fanout.c
@@ -97,6 +97,8 @@
 
 static void sock_fanout_set_ebpf(int fd)
 {
+	static char log_buf[65536];
+
 	const int len_off = __builtin_offsetof(struct __sk_buff, len);
 	struct bpf_insn prog[] = {
 		{ BPF_ALU64 | BPF_MOV | BPF_X,   6, 1, 0, 0 },
@@ -109,7 +111,6 @@
 		{ BPF_ALU   | BPF_MOV | BPF_K,   0, 0, 0, 0 },
 		{ BPF_JMP   | BPF_EXIT,          0, 0, 0, 0 }
 	};
-	char log_buf[512];
 	union bpf_attr attr;
 	int pfd;
 
diff --git a/tools/testing/selftests/net/reuseport_bpf.c b/tools/testing/selftests/net/reuseport_bpf.c
index 4a82174..cad14cd 100644
--- a/tools/testing/selftests/net/reuseport_bpf.c
+++ b/tools/testing/selftests/net/reuseport_bpf.c
@@ -21,6 +21,7 @@
 #include <sys/epoll.h>
 #include <sys/types.h>
 #include <sys/socket.h>
+#include <sys/resource.h>
 #include <unistd.h>
 
 #ifndef ARRAY_SIZE
@@ -190,11 +191,14 @@
 	struct sockaddr * const saddr = new_any_sockaddr(p.send_family, sport);
 	struct sockaddr * const daddr =
 		new_loopback_sockaddr(p.send_family, p.recv_port);
-	const int fd = socket(p.send_family, p.protocol, 0);
+	const int fd = socket(p.send_family, p.protocol, 0), one = 1;
 
 	if (fd < 0)
 		error(1, errno, "failed to create send socket");
 
+	if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)))
+		error(1, errno, "failed to set reuseaddr");
+
 	if (bind(fd, saddr, sockaddr_size()))
 		error(1, errno, "failed to bind send socket");
 
@@ -433,6 +437,21 @@
 	}
 }
 
+static struct rlimit rlim_old, rlim_new;
+
+static __attribute__((constructor)) void main_ctor(void)
+{
+	getrlimit(RLIMIT_MEMLOCK, &rlim_old);
+	rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20);
+	rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20);
+	setrlimit(RLIMIT_MEMLOCK, &rlim_new);
+}
+
+static __attribute__((destructor)) void main_dtor(void)
+{
+	setrlimit(RLIMIT_MEMLOCK, &rlim_old);
+}
+
 int main(void)
 {
 	fprintf(stderr, "---- IPv4 UDP ----\n");
diff --git a/tools/testing/selftests/powerpc/mm/subpage_prot.c b/tools/testing/selftests/powerpc/mm/subpage_prot.c
index 35ade74..3ae77ba 100644
--- a/tools/testing/selftests/powerpc/mm/subpage_prot.c
+++ b/tools/testing/selftests/powerpc/mm/subpage_prot.c
@@ -135,6 +135,16 @@
 	return 0;
 }
 
+static int syscall_available(void)
+{
+	int rc;
+
+	errno = 0;
+	rc = syscall(__NR_subpage_prot, 0, 0, 0);
+
+	return rc == 0 || (errno != ENOENT && errno != ENOSYS);
+}
+
 int test_anon(void)
 {
 	unsigned long align;
@@ -145,6 +155,8 @@
 	void *mallocblock;
 	unsigned long mallocsize;
 
+	SKIP_IF(!syscall_available());
+
 	if (getpagesize() != 0x10000) {
 		fprintf(stderr, "Kernel page size must be 64K!\n");
 		return 1;
@@ -180,6 +192,8 @@
 	off_t filesize;
 	int fd;
 
+	SKIP_IF(!syscall_available());
+
 	fd = open(file_name, O_RDWR);
 	if (fd == -1) {
 		perror("failed to open file");
diff --git a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
index d9c49f4..e79ccd6 100644
--- a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
+++ b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
@@ -42,12 +42,12 @@
 	printf("Check DSCR TM context switch: ");
 	fflush(stdout);
 	for (;;) {
-		rv = 1;
 		asm __volatile__ (
 			/* set a known value into the DSCR */
 			"ld      3, %[dscr1];"
 			"mtspr   %[sprn_dscr], 3;"
 
+			"li      %[rv], 1;"
 			/* start and suspend a transaction */
 			"tbegin.;"
 			"beq     1f;"
diff --git a/tools/testing/selftests/pstore/config b/tools/testing/selftests/pstore/config
index 6a8e5a9..d148f9f 100644
--- a/tools/testing/selftests/pstore/config
+++ b/tools/testing/selftests/pstore/config
@@ -2,3 +2,4 @@
 CONFIG_PSTORE=y
 CONFIG_PSTORE_PMSG=y
 CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_RAM=m
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index cbb0564..d5be7b5 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -1318,7 +1318,7 @@
 	iov.iov_len = sizeof(regs);
 	ret = ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov);
 #endif
-	EXPECT_EQ(0, ret);
+	EXPECT_EQ(0, ret) {}
 
 #if defined(__x86_64__) || defined(__i386__) || defined(__powerpc__) || \
     defined(__s390__) || defined(__hppa__)
@@ -1692,7 +1692,11 @@
 #endif
 
 #ifndef SECCOMP_FILTER_FLAG_TSYNC
-#define SECCOMP_FILTER_FLAG_TSYNC 1
+#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
+#endif
+
+#ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW
+#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
 #endif
 
 #ifndef seccomp
@@ -1791,6 +1795,78 @@
 	}
 }
 
+/*
+ * Test detection of known and unknown filter flags. Userspace needs to be able
+ * to check if a filter flag is supported by the current kernel and a good way
+ * of doing that is by attempting to enter filter mode, with the flag bit in
+ * question set, and a NULL pointer for the _args_ parameter. EFAULT indicates
+ * that the flag is valid and EINVAL indicates that the flag is invalid.
+ */
+TEST(detect_seccomp_filter_flags)
+{
+	unsigned int flags[] = { SECCOMP_FILTER_FLAG_TSYNC,
+				 SECCOMP_FILTER_FLAG_SPEC_ALLOW };
+	unsigned int flag, all_flags;
+	int i;
+	long ret;
+
+	/* Test detection of known-good filter flags */
+	for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) {
+		int bits = 0;
+
+		flag = flags[i];
+		/* Make sure the flag is a single bit! */
+		while (flag) {
+			if (flag & 0x1)
+				bits++;
+			flag >>= 1;
+		}
+		ASSERT_EQ(1, bits);
+		flag = flags[i];
+
+		ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
+		ASSERT_NE(ENOSYS, errno) {
+			TH_LOG("Kernel does not support seccomp syscall!");
+		}
+		EXPECT_EQ(-1, ret);
+		EXPECT_EQ(EFAULT, errno) {
+			TH_LOG("Failed to detect that a known-good filter flag (0x%X) is supported!",
+			       flag);
+		}
+
+		all_flags |= flag;
+	}
+
+	/* Test detection of all known-good filter flags */
+	ret = seccomp(SECCOMP_SET_MODE_FILTER, all_flags, NULL);
+	EXPECT_EQ(-1, ret);
+	EXPECT_EQ(EFAULT, errno) {
+		TH_LOG("Failed to detect that all known-good filter flags (0x%X) are supported!",
+		       all_flags);
+	}
+
+	/* Test detection of an unknown filter flag */
+	flag = -1;
+	ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
+	EXPECT_EQ(-1, ret);
+	EXPECT_EQ(EINVAL, errno) {
+		TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported!",
+		       flag);
+	}
+
+	/*
+	 * Test detection of an unknown filter flag that may simply need to be
+	 * added to this test
+	 */
+	flag = flags[ARRAY_SIZE(flags) - 1] << 1;
+	ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
+	EXPECT_EQ(-1, ret);
+	EXPECT_EQ(EINVAL, errno) {
+		TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported! Does a new flag need to be added to this test?",
+		       flag);
+	}
+}
+
 TEST(TSYNC_first)
 {
 	struct sock_filter filter[] = {
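The comment above detect_seccomp_filter_flags() describes the probing trick this test exercises: issue SECCOMP_SET_MODE_FILTER with the candidate flag and a NULL filter pointer, then distinguish EFAULT (flag accepted, the kernel went on to read the arguments) from EINVAL (flag unknown). A standalone sketch of that probe outside the kselftest harness, assuming __NR_seccomp is provided by <sys/syscall.h>:

#include <errno.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef SECCOMP_SET_MODE_FILTER
#define SECCOMP_SET_MODE_FILTER 1
#endif
#ifndef SECCOMP_FILTER_FLAG_TSYNC
#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
#endif

static int filter_flag_supported(unsigned long flag)
{
	if (syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER, flag, NULL) == 0)
		return 0;	/* should never succeed with a NULL filter */

	if (errno == EFAULT)
		return 1;	/* flag known, kernel tried to read the args */
	return 0;		/* EINVAL (unknown flag) or ENOSYS */
}

int main(void)
{
	printf("TSYNC supported: %d\n",
	       filter_flag_supported(SECCOMP_FILTER_FLAG_TSYNC));
	return 0;
}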
diff --git a/tools/thermal/tmon/sysfs.c b/tools/thermal/tmon/sysfs.c
index 1c12536..18f5235 100644
--- a/tools/thermal/tmon/sysfs.c
+++ b/tools/thermal/tmon/sysfs.c
@@ -486,6 +486,7 @@
 int update_thermal_data()
 {
 	int i;
+	int next_thermal_record = cur_thermal_record + 1;
 	char tz_name[256];
 	static unsigned long samples;
 
@@ -495,9 +496,9 @@
 	}
 
 	/* circular buffer for keeping historic data */
-	if (cur_thermal_record >= NR_THERMAL_RECORDS)
-		cur_thermal_record = 0;
-	gettimeofday(&trec[cur_thermal_record].tv, NULL);
+	if (next_thermal_record >= NR_THERMAL_RECORDS)
+		next_thermal_record = 0;
+	gettimeofday(&trec[next_thermal_record].tv, NULL);
 	if (tmon_log) {
 		fprintf(tmon_log, "%lu ", ++samples);
 		fprintf(tmon_log, "%3.1f ", p_param.t_target);
@@ -507,11 +508,12 @@
 		snprintf(tz_name, 256, "%s/%s%d", THERMAL_SYSFS, TZONE,
 			ptdata.tzi[i].instance);
 		sysfs_get_ulong(tz_name, "temp",
-				&trec[cur_thermal_record].temp[i]);
+				&trec[next_thermal_record].temp[i]);
 		if (tmon_log)
 			fprintf(tmon_log, "%lu ",
-				trec[cur_thermal_record].temp[i]/1000);
+				trec[next_thermal_record].temp[i] / 1000);
 	}
+	cur_thermal_record = next_thermal_record;
 	for (i = 0; i < ptdata.nr_cooling_dev; i++) {
 		char cdev_name[256];
 		unsigned long val;
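The tmon change above switches to an update-then-publish scheme: the sampling code fills the next slot of the circular record buffer completely and only then advances cur_thermal_record, so the display path never reads a half-filled record. A small sketch of the pattern with hypothetical names:

#include <stdio.h>

#define NR_RECORDS 4

struct record {
	unsigned long temp;
};

static struct record trec[NR_RECORDS];
static int cur_record;			/* index consumed by readers */

static void update_record(unsigned long temp)
{
	int next = cur_record + 1;

	if (next >= NR_RECORDS)
		next = 0;

	trec[next].temp = temp;		/* fill the slot first ... */
	cur_record = next;		/* ... then publish it */
}

int main(void)
{
	update_record(42000);
	printf("latest temp: %lu\n", trec[cur_record].temp);
	return 0;
}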
diff --git a/tools/thermal/tmon/tmon.c b/tools/thermal/tmon/tmon.c
index 9aa1965..b43138f 100644
--- a/tools/thermal/tmon/tmon.c
+++ b/tools/thermal/tmon/tmon.c
@@ -336,7 +336,6 @@
 			show_data_w();
 			show_cooling_device();
 		}
-		cur_thermal_record++;
 		time_elapsed += ticktime;
 		controller_handler(trec[0].temp[target_tz_index] / 1000,
 				&yk);
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 31f5625..1ebbf23 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -208,8 +208,8 @@
 	u8 prop;
 	int ret;
 
-	ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
-			     &prop, 1);
+	ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
+				  &prop, 1);
 
 	if (ret)
 		return ret;
@@ -339,8 +339,9 @@
 		 * this very same byte in the last iteration. Reuse that.
 		 */
 		if (byte_offset != last_byte_offset) {
-			ret = kvm_read_guest(vcpu->kvm, pendbase + byte_offset,
-					     &pendmask, 1);
+			ret = kvm_read_guest_lock(vcpu->kvm,
+						  pendbase + byte_offset,
+						  &pendmask, 1);
 			if (ret) {
 				kfree(intids);
 				return ret;
@@ -628,7 +629,7 @@
 		return false;
 
 	/* Each 1st level entry is represented by a 64-bit value. */
-	if (kvm_read_guest(its->dev->kvm,
+	if (kvm_read_guest_lock(its->dev->kvm,
 			   BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
 			   &indirect_ptr, sizeof(indirect_ptr)))
 		return false;
@@ -1152,8 +1153,8 @@
 	cbaser = CBASER_ADDRESS(its->cbaser);
 
 	while (its->cwriter != its->creadr) {
-		int ret = kvm_read_guest(kvm, cbaser + its->creadr,
-					 cmd_buf, ITS_CMD_SIZE);
+		int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
+					      cmd_buf, ITS_CMD_SIZE);
 		/*
 		 * If kvm_read_guest() fails, this could be due to the guest
 		 * programming a bogus value in CBASER or something else going
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index eaae725..4f2a2df 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1466,7 +1466,8 @@
 
 static int hva_to_pfn_remapped(struct vm_area_struct *vma,
 			       unsigned long addr, bool *async,
-			       bool write_fault, kvm_pfn_t *p_pfn)
+			       bool write_fault, bool *writable,
+			       kvm_pfn_t *p_pfn)
 {
 	unsigned long pfn;
 	int r;
@@ -1492,6 +1493,8 @@
 
 	}
 
+	if (writable)
+		*writable = true;
 
 	/*
 	 * Get a reference here because callers of *hva_to_pfn* and
@@ -1557,7 +1560,7 @@
 	if (vma == NULL)
 		pfn = KVM_PFN_ERR_FAULT;
 	else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
-		r = hva_to_pfn_remapped(vma, addr, async, write_fault, &pfn);
+		r = hva_to_pfn_remapped(vma, addr, async, write_fault, writable, &pfn);
 		if (r == -EAGAIN)
 			goto retry;
 		if (r < 0)